Example #1
    def __init__(self,
                 observation_space,
                 action_space,
                 m_dir=None,
                 log_name=None,
                 start_step=0,
                 start_round=0,
                 load_buffer=False):
        """
        model : The actual training model
        t_model : Fixed target model
        """
        print('Model directory : {}'.format(m_dir))
        print('Log name : {}'.format(log_name))
        print('Starting from step {}'.format(start_step))
        print('Starting from round {}'.format(start_round))
        print('Load buffer? {}'.format(load_buffer))
        self.action_n = action_space.n
        self.observation_space = observation_space
        #Inputs
        if m_dir is None:
            left_input = keras.Input(observation_space['Left'].shape,
                                     name='Left')
            right_input = keras.Input(observation_space['Right'].shape,
                                      name='Right')
            # Spare eye model for later use
            left_input_shape = observation_space['Left'].shape
            right_input_shape = observation_space['Right'].shape
            left_eye_model = self.eye_model(left_input_shape, 'Left')
            right_eye_model = self.eye_model(right_input_shape, 'Right')
            # Get outputs of the model
            left_encoded = left_eye_model(left_input)
            right_encoded = right_eye_model(right_input)
            # Concatenate both eye's inputs
            concat = layers.Concatenate()([left_encoded, right_encoded])
            outputs = self.brain_layers(concat)
            # Build models
            self.model = keras.Model(inputs=[left_input, right_input],
                                     outputs=outputs)
            self.optimizer = keras.optimizers.Adam()
            self.optimizer = mixed_precision.LossScaleOptimizer(
                self.optimizer, loss_scale='dynamic')
        else:
            self.model = keras.models.load_model(m_dir)
            print('model loaded')
        self.t_model = keras.models.clone_model(self.model)
        self.t_model.set_weights(self.model.get_weights())
        self.t_model.compile()
        self.model.summary()

        # Buffers
        if load_buffer:
            print('loading buffers...')
            with open(path.join(m_dir, 'buffer.bin'), 'rb') as f:
                self.buffer = pickle.load(f)
            print('loaded : {} filled in buffer'.format(
                self.buffer.num_in_buffer))
            print('Current buffer index : {}'.format(self.buffer.next_idx))
        else:
            self.buffer = ReplayBuffer(hp.Buffer_size, self.observation_space)

        # File writer for tensorboard
        if log_name is None:
            self.log_name = datetime.now().strftime('%m_%d_%H_%M_%S')
        else:
            self.log_name = log_name
        self.file_writer = tf.summary.create_file_writer(
            path.join('log', self.log_name))
        self.file_writer.set_as_default()
        print('Writing logs at ' + self.log_name)

        # Scalars
        self.start_training = False
        self.total_steps = start_step
        self.current_steps = 1
        self.score = 0
        self.rounds = start_round
        self.cumreward = 0

        # Savefile folder directory
        if m_dir is None:
            self.save_dir = path.join(
                'savefiles',
                datetime.now().strftime('%m_%d_%H_%M_%S'))
            self.save_count = 0
        else:
            self.save_dir, self.save_count = path.split(m_dir)
            self.save_count = int(self.save_count)
Example #2
plt.legend()
plt.title("Loss")

plt.subplot(1, 2, 2)
plt.plot(records.history['categorical_accuracy'], color='red', label='Train')
plt.plot(records.history['val_categorical_accuracy'],
         color='blue',
         label='Valid')
plt.legend()
plt.title("Accuracy")

# plt.show(block=True)
# ===========================================================================
# Get intermediate representation and plot it
# ===========================================================================
intermediate_model = keras.Model(inputs=model.input,
                                 outputs=model.get_layer(name='latent').output)
intermediate_train = intermediate_model.predict(X_train, batch_size=BATCH_SIZE)
intermediate_score = intermediate_model.predict(X_score, batch_size=BATCH_SIZE)
# ====== extra fun, visualizing T-SNE clusters ====== #
show_tsne_clusters(X=X_score, y=y_score, title='Score - Acoustic Feat')
show_tsne_clusters(X=intermediate_score,
                   y=y_score,
                   title='Score - Latent Space')

# plt.show(block=True)
# ===========================================================================
# Evaluate the model
# ===========================================================================
# ====== evaluate the train data ====== #
y_pred_probas = model.predict(X_train, batch_size=BATCH_SIZE)
y_pred = np.argmax(y_pred_probas, axis=-1)
Example #3
def build_model(config: Mapping, cardinalities: Mapping[str,
                                                        int]) -> keras.Model:
    """Construct model specified in the configuration.

    Also create optimizer and set the loss function.

    Args:
        config:  Dictionary representing configuration file.
        cardinalities:  Cardinalities of categorical features (needed to
            construct their embeddings).

    Return:
        Compiled model.
    """

    model_config = config['model']
    if isinstance(model_config, str):
        model = keras.models.load_model(
            model_config,
            custom_objects={'loss_fn': _create_loss(config['loss'])})

        return model

    features = Features(config['features'])
    inputs_all = []

    # Constituents of different types
    constituent_types = [
        key for key in sorted(model_config.keys())  # Ensure order
        if key not in {'head', 'load_weights'}
    ]
    outputs_constituents = []
    for constituent_type in constituent_types:
        inputs_numerical = keras.Input(
            shape=(None, len(features.numerical(constituent_type))),
            ragged=True,
            name=f'{constituent_type}_numerical')
        inputs_categorical = OrderedDict()
        for feature in features.categorical(constituent_type):
            inputs_categorical[feature] = keras.Input(shape=(None, ),
                                                      ragged=True,
                                                      name=feature)
        inputs_all.append(inputs_numerical)
        inputs_all.extend(inputs_categorical.values())

        outputs = _apply_deep_set(inputs_numerical, inputs_categorical,
                                  model_config[constituent_type],
                                  cardinalities, constituent_type)
        outputs_constituents.append(outputs)

    # Head
    inputs_global_numerical = keras.Input(shape=(len(
        features.numerical('global')), ),
                                          name='global_numerical')
    inputs_global_categorical = OrderedDict()
    for feature in features.categorical('global'):
        inputs_global_categorical[feature] = keras.Input(shape=(None, ),
                                                         name=feature)
    embeddings_global = {
        feature: Embedding(cardinalities[feature],
                           model_config['head']['embeddings'][feature],
                           name=feature + '_embeddings')(inputs)
        for feature, inputs in inputs_global_categorical.items()
    }
    inputs_all.append(inputs_global_numerical)
    inputs_all.extend(inputs_global_categorical.values())
    inputs_head = Concatenate(
        name='head_concatenate')([inputs_global_numerical] + [
            embeddings_global[feature]
            for feature in inputs_global_categorical.values()
        ] + outputs_constituents)
    outputs = _apply_dense_from_config(inputs_head,
                                       model_config['head'],
                                       name_prefix='head_')

    outputs = Dense(1, name='head_dense_output')(outputs)  # Output unit
    model = keras.Model(inputs=inputs_all, outputs=outputs, name='full')

    model.compile(optimizer=_create_optimizer(config.get('optimizer', None)),
                  loss=_create_loss(config['loss']))
    if 'load_weights' in model_config:
        # Normally, a saved model would be loaded with
        # keras.models.load_model at the beginning of this function.
        # However, this is currently not supported for models that use
        # ragged tensors [1].  As a workaround, construct the model anew
        # and then load saved weights.  The path to weights would
        # usually be "{model_directory}/variables/variables", with the
        # ".index" file extension stripped off.  This doesn't restore
        # the state of the optimizer.
        # [1] https://github.com/tensorflow/tensorflow/issues/41034
        model.load_weights(model_config['load_weights'])
    return model
Example #4
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun  1 12:48:11 2021

@author: orram
"""

import tensorflow as tf
import tensorflow.keras as keras

HR_img = keras.Input(shape=(28, 28, 1), name='HR_img')
model = keras.Sequential([keras.layers.Conv2D(16, 3, activation='relu'),
                          keras.layers.MaxPool2D(),
                          keras.layers.Dropout(0.2),
                          keras.layers.Conv2D(32, 3, activation='relu'),
                          keras.layers.MaxPool2D(),
                          keras.layers.Dropout(0.2),
                          keras.layers.Conv2D(16, 3, activation='relu',
                                              name='teacher_features'),
                          keras.layers.Flatten(),
                          keras.layers.Dense(10, activation='softmax')],
                         name='teacher')
model(HR_img)  # build the Sequential so its layers' inputs/outputs are defined
tmp_model = keras.Model(model.layers[0].input, model.layers[3].output)
Example #5
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import CenterCrop
from tensorflow.keras.layers.experimental.preprocessing import Rescaling

# Get the data as Numpy arrays
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Build a simple model
inputs = keras.Input(shape=(28, 28))
x = Rescaling(1.0 / 255)(inputs)
x = layers.Flatten()(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dense(128, activation="relu")(x)
outputs = layers.Dense(10, activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.summary()

# Compile the model
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

# Train the model for 1 epoch from Numpy data
batch_size = 64
print("Fit on NumPy data")
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=1)
# The fit() call returns a "history" object which records what happened over the course of training.
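# For example, the recorded per-epoch loss can be read back from the history
# object (a quick illustration, not part of the original script):
print(history.history['loss'])  # one entry per epoch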

# Train the model for 1 epoch using a dataset
dataset = tf.data.Dataset.from_tensor_slices(
    (x_train, y_train)).batch(batch_size)
print("Fit on Dataset")
def get_model():
    inputs = keras.Input(shape=(128, ))
    outputs = layers.Dense(1, activation='sigmoid')(inputs)
    return keras.Model(inputs, outputs)
Example #7
    def model(self, shape):
        input = layers.Input(shape, batch_size=1, name='input')
        return keras.Model(inputs=[input], outputs=self.call(input, False))

    def generateGenerator(self):
        init = keras.initializers.RandomNormal(stddev=.02)

        in_label = keras.layers.Input(shape=(1, ))
        li = keras.layers.Embedding(self.n_classes, 30)(in_label)
        li = keras.layers.Dense(1 * 7)(li)
        li = keras.layers.Reshape((1, 1, 7))(li)

        # li = keras.layers.Dropout(.3) (li)

        in_lat = keras.layers.Input(shape=self.noiseShape)
        gen = keras.layers.Dense(1 * 1 * 128)(in_lat)
        gen = keras.layers.LeakyReLU(alpha=.2)(gen)
        gen = keras.layers.Reshape((1, 1, 128))(gen)

        # gen = keras.layers.Dropout(.3) (gen)

        merge = keras.layers.Concatenate()([gen, li])

        gen = keras.layers.Conv2DTranspose(filters=512,
                                           kernel_size=(4, 4),
                                           strides=(1, 1),
                                           padding="valid",
                                           data_format="channels_last",
                                           kernel_initializer=init)(merge)
        gen = keras.layers.BatchNormalization(momentum=.5)(gen)
        gen = keras.layers.LeakyReLU(.2)(gen)

        # gen = keras.layers.Dropout(.25) (gen)

        gen = keras.layers.Conv2DTranspose(filters=256,
                                           kernel_size=(4, 4),
                                           strides=(2, 2),
                                           padding="same",
                                           data_format="channels_last",
                                           kernel_initializer=init)(gen)
        gen = keras.layers.BatchNormalization(momentum=.5)(gen)
        gen = keras.layers.LeakyReLU(.2)(gen)

        # gen = keras.layers.Dropout(.25) (gen)

        gen = keras.layers.Conv2DTranspose(filters=128,
                                           kernel_size=(4, 4),
                                           strides=(2, 2),
                                           padding="same",
                                           data_format="channels_last",
                                           kernel_initializer=init)(gen)
        gen = keras.layers.BatchNormalization(momentum=.5)(gen)
        gen = keras.layers.LeakyReLU(.2)(gen)

        # gen = keras.layers.Dropout(.25) (gen)

        gen = keras.layers.Conv2DTranspose(filters=64,
                                           kernel_size=(4, 4),
                                           strides=(2, 2),
                                           padding="same",
                                           data_format="channels_last",
                                           kernel_initializer=init)(gen)
        gen = keras.layers.BatchNormalization(momentum=.5)(gen)
        gen = keras.layers.LeakyReLU(.2)(gen)

        gen = keras.layers.Conv2DTranspose(filters=64,
                                           kernel_size=(3, 3),
                                           strides=(1, 1),
                                           padding="same",
                                           data_format="channels_last",
                                           kernel_initializer=init)(gen)
        gen = keras.layers.BatchNormalization(momentum=.5)(gen)
        gen = keras.layers.LeakyReLU(.2)(gen)

        gen = keras.layers.Conv2DTranspose(filters=3,
                                           kernel_size=(4, 4),
                                           strides=(2, 2),
                                           padding="same",
                                           data_format="channels_last",
                                           kernel_initializer=init)(gen)
        out_layer = keras.layers.Activation("tanh")(gen)
        model = keras.Model([in_lat, in_label], out_layer)
        return model
Example #9
y1 = L.UpSampling2D(size=(SIZE_TOP, SIZE_TOP))(y2)
y1 = L.Concatenate()([x1, y1])
y1 = MXM_2D(y1, 128)(y1)

y0 = L.UpSampling2D(size=(SIZE_SUB, SIZE_SUB))(y1)
y0 = L.Concatenate()([x0, y0])
y0 = MXM_2D(y0, 64)(y0)

y = L.Dense(50, activation='softmax')(y0)

outputs = y
outputs = L.Multiply()([outputs, mask1])
not_mask = L.Lambda(lambda x: 1 - x)(mask)

outputs = L.Concatenate(name="segment_out")([outputs, not_mask])
model = keras.Model(inputs=inputs1, outputs=[outputs])

model.load_weights(model_path)

print(model.summary())

#%% get start and end label id for each object category
y_test_digits = np.argmax(y_test, -1)
class_label_region = np.zeros((16, 2), dtype=int)  # np.int was removed from NumPy
for i_class in range(16):
    idx_list = np.where(y_test_digits == i_class)[0]
    pos_list = p_test[idx_list]
    gt_list = l_test[idx_list]

    label_min = gt_list.min()
    label_max = gt_list.max()
    # record the start and end label ids for this category
    class_label_region[i_class, 0] = label_min
    class_label_region[i_class, 1] = label_max
Example #10
def generate_cnn(app="vgg"):
    inputs = keras.Input(shape=(224, 224, 3))

    if app == "vgg":
        net = keras.applications.VGG16(include_top=False,
                                       weights='imagenet',
                                       input_tensor=inputs)
        net.trainable = False
        # preprocessed = keras.applications.vgg16.preprocess_input(inputs)
        x = net(inputs)

    elif app == "efficientnet":
        net = keras.applications.EfficientNetB0(include_top=False,
                                                weights='imagenet')
        # net.trainable = False
        # preprocessed = keras.applications.efficientnet.preprocess_input(inputs)
        x = net(inputs)

    elif app == "mobilenet":
        net = keras.applications.MobileNet(
            include_top=False,
            weights='imagenet',
        )
        net.trainable = False
        # preprocessed = keras.applications.mobilenet.preprocess_input(inputs)
        x = net(inputs)

    elif app == "mobilenetv2":
        net = keras.applications.MobileNetV2(
            include_top=False,
            weights='imagenet',
        )
        net.trainable = False
        # preprocessed = keras.applications.mobilenet_v2.preprocess_input(inputs)
        x = net(inputs)

    elif app == "vggm":
        x = keras.layers.Conv2D(96,
                                kernel_size=7,
                                strides=2,
                                padding='same',
                                kernel_regularizer='l2')(inputs)
        x = layers.LeakyReLU()(x)
        x = keras.layers.BatchNormalization()(x)
        x = keras.layers.MaxPool2D(pool_size=3, strides=2)(x)
        x = keras.layers.Dropout(0.5)(x)
        x = keras.layers.Conv2D(256,
                                kernel_size=5,
                                strides=2,
                                padding='same',
                                kernel_regularizer='l2')(x)
        x = layers.LeakyReLU()(x)
        x = keras.layers.BatchNormalization()(x)
        x = keras.layers.MaxPool2D(pool_size=3, strides=2)(x)
        x = keras.layers.Dropout(0.5)(x)
        x = keras.layers.Conv2D(512,
                                kernel_size=3,
                                strides=1,
                                padding='same',
                                kernel_regularizer='l2')(x)
        x = layers.LeakyReLU()(x)
        x = keras.layers.Dropout(0.5)(x)
        x = keras.layers.Conv2D(512,
                                kernel_size=3,
                                strides=1,
                                padding='same',
                                kernel_regularizer='l2')(x)
        x = layers.LeakyReLU()(x)
        x = keras.layers.Dropout(0.5)(x)
        x = keras.layers.Conv2D(512,
                                kernel_size=3,
                                strides=1,
                                padding='same',
                                kernel_regularizer='l2')(x)
        x = layers.LeakyReLU()(x)
        x = keras.layers.MaxPool2D(pool_size=3, strides=2)(x)
        x = keras.layers.Dropout(0.5)(x)
        x = keras.layers.Flatten()(x)
        x = keras.layers.Dense(4096)(x)
        x = layers.LeakyReLU()(x)
        x = keras.layers.Dropout(0.5)(x)

    x = layers.Flatten()(x)

    # x_class = keras.layers.Dense(4096)(x)
    # x_class = layers.ReLU()(x_class)
    # x_class = keras.layers.Dropout(0.5)(x_class)
    # x_class = keras.layers.Dense(220)(x_class)
    # x_class = layers.ReLU()(x_class)
    # x_class = layers.Dropout(0.5)(x_class)
    #
    # x_view = keras.layers.Dense(4096)(x)
    # x_view = layers.ReLU()(x_view)
    # x_view = keras.layers.Dropout(0.5)(x_view)
    # x_view = keras.layers.Dense(220)(x_view)
    # x_view = layers.ReLU()(x_view)
    # x_view = keras.layers.Dropout(0.5)(x_view)

    out_class = layers.Dense(10, activation='softmax', name="class")(x)
    out_view = layers.Dense(60, activation='softmax', name="view")(x)
    model = keras.Model(inputs=inputs, outputs=[out_class, out_view])
    model.summary()
    losses = {
        "class": 'categorical_crossentropy',
        "view": 'categorical_crossentropy'
    }
    model.compile(keras.optimizers.Adam(learning_rate=float(args.lr)),
                  loss=losses,
                  metrics=METRICS)
    # keras.utils.plot_model(model, "net_structure.png", show_shapes=True, expand_nested=True)
    return model
    def generateCriticer(self):
        const = ClipConstraint(.01)
        init = keras.initializers.RandomNormal(stddev=.02)

        in_label = keras.layers.Input(shape=(1, ))
        # li = keras.layers.Embedding(self.n_classes, 30) (in_label)
        # li = keras.layers.Dense(64 * 64) (li)
        # li = keras.layers.Reshape((64, 64, 1)) (li)

        in_image = keras.layers.Input(shape=self.imageShape)

        # merge = keras.layers.Concatenate() ([in_image, li])

        fe = keras.layers.Conv2D(filters=64,
                                 kernel_size=(4, 4),
                                 strides=(2, 2),
                                 padding="same",
                                 data_format="channels_last",
                                 kernel_initializer=init,
                                 kernel_constraint=const)(in_image)  #(merge)
        fe = keras.layers.LeakyReLU(.2)(fe)

        fe = keras.layers.Conv2D(filters=128,
                                 kernel_size=(4, 4),
                                 strides=(2, 2),
                                 padding="same",
                                 data_format="channels_last",
                                 kernel_initializer=init,
                                 kernel_constraint=const)(fe)
        fe = keras.layers.BatchNormalization(momentum=.5)(fe)
        fe = keras.layers.LeakyReLU(.2)(fe)

        fe = keras.layers.Dropout(.4)(fe)

        # __change_begin__
        li = keras.layers.Embedding(self.n_classes, 30)(in_label)
        li = keras.layers.Dense(16 * 16)(li)
        li = keras.layers.Reshape((16, 16, 1))(li)

        merge = keras.layers.Concatenate()([fe, li])
        # __change_end__

        fe = keras.layers.Dropout(.4)(merge)  #(fe)

        fe = keras.layers.Conv2D(filters=256,
                                 kernel_size=(4, 4),
                                 strides=(2, 2),
                                 padding="same",
                                 data_format="channels_last",
                                 kernel_initializer=init,
                                 kernel_constraint=const)(fe)
        fe = keras.layers.BatchNormalization(momentum=.5)(fe)
        fe = keras.layers.LeakyReLU(.2)(fe)

        fe = keras.layers.Dropout(.4)(fe)

        fe = keras.layers.Conv2D(filters=512,
                                 kernel_size=(4, 4),
                                 strides=(2, 2),
                                 padding="same",
                                 data_format="channels_last",
                                 kernel_initializer=init,
                                 kernel_constraint=const)(fe)
        fe = keras.layers.BatchNormalization(momentum=.5)(fe)
        fe = keras.layers.LeakyReLU(.2)(fe)

        fe = keras.layers.Flatten()(fe)

        fe = keras.layers.Dropout(.2)(fe)

        out_layer = keras.layers.Dense(1, activation="linear")(fe)
        model = keras.Model([in_image, in_label], out_layer)
        model.compile(loss=wassersteinLoss,
                      optimizer=keras.optimizers.RMSprop(learning_rate=.00005 * 2),
                      metrics=["accuracy"])
        return model
Example #12
    def _create_test_mode_extra_layer(self):
        input = keras.Input(shape=TEST_DENSE_INPUT_DIMS, dtype='float32')
        x = keras.layers.Dense(TEST_NUM_CLASSES)(input)
        x = keras.layers.Softmax()(x)
        model = keras.Model(inputs=input, outputs=x)
        return model
Example #13
    def instanciate_model(self):
        return keras.Model(inputs=self.inputs, outputs=self.outputs,
                           name=self.model_name)
Example #14
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers

# Load the data (assumed to be MNIST, given the 28*28 reshape below)
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

x_train = x_train.reshape((-1, 28*28)) / 255.0
x_test = x_test.reshape((-1, 28*28)) / 255.0

print(x_train.shape, ' ', y_train.shape)
print(x_test.shape, ' ', y_test.shape)

code_dim = 32
inputs = layers.Input(shape=(x_train.shape[1],), name='inputs')
code = layers.Dense(code_dim, activation='relu', name='code')(inputs)
# sigmoid (not softmax): per-pixel reconstruction trained with binary_crossentropy
outputs = layers.Dense(x_train.shape[1], activation='sigmoid', name='outputs')(code)

auto_encoder = keras.Model(inputs, outputs)
auto_encoder.summary()
# keras.utils.plot_model(auto_encoder, show_shapes=True)
auto_encoder.compile(optimizer='adam', loss='binary_crossentropy')
history = auto_encoder.fit(x_train, x_train, batch_size=64, epochs=100, validation_split=0.1)

pred = auto_encoder.predict(x_test)

plt.figure(figsize=(10,4))
n = 5
for i in range(n):
    ax = plt.subplot(2, n, i+1)
    plt.imshow(x_test[i].reshape(28,28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
Example #15
    # round x to 2 digits behind comma
    x = round(x, 2)

    return x


num_inputs = 1  #no_bits
num_actions = 2  #3
num_hidden = 128

inputs = layers.Input(shape=(num_inputs, ))
common = layers.Dense(num_hidden, activation="relu")(inputs)
action = layers.Dense(num_actions, activation="softmax")(common)
critic = layers.Dense(1)(common)

model = keras.Model(inputs=inputs, outputs=[action, critic])
print(model.summary())
"""
## Train
"""

optimizer = keras.optimizers.Adam(learning_rate=0.1)
huber_loss = keras.losses.Huber()
action_probs_history = []
critic_value_history = []
rewards_history = []
running_reward = 0
episode_count = 0

while True:  # Run until solved
    state = preprocess_state(env.reset())
Example #16
- We can save the model + optimizer state (SavedModel or H5 format)
- We can save only the model weights (.ckpt)

Metrics and saving a model:
- Before saving the model, reset its metrics; this way the model has the same
  state before saving and after loading (metrics starting at zero).
- Metric states are not preserved by Model.save_weights.
- (A short sketch of both saving options follows the training code below.)
"""
import numpy as np
from tensorflow import keras

inputs = keras.Input(shape=(784,), name='digits')
x = keras.layers.Dense(64, activation='relu', name='dense1')(inputs)
x = keras.layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = keras.layers.Dense(10, name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs, name='3_layer_MLP_model')
print(model.summary())

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255.
x_test = x_test.reshape(10000, 784).astype('float32') / 255.

loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.RMSprop()

model.compile(loss=loss_fn, optimizer=optimizer)
history = model.fit(x_train, y_train, batch_size=64, epochs=1)

# Save model predictions for future checks:
predictions = model.predict(x_test)
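
# A minimal sketch of the two saving options from the notes above (the paths
# here are illustrative, not from the original script):
model.reset_metrics()  # as noted above, reset metrics before saving
model.save('3_layer_mlp_savedmodel')  # full model: architecture + weights + optimizer state
restored = keras.models.load_model('3_layer_mlp_savedmodel')
np.testing.assert_allclose(predictions, restored.predict(x_test), atol=1e-6)

model.save_weights('ckpt/my_weights')  # weights only; metric states are not preserved
model.load_weights('ckpt/my_weights')
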
from tensorflow.keras import layers

dense = layers.Dense(64, activation='relu')
x = dense(inputs)
"""The "layer call" action is like drawing an arrow from "inputs" to this layer we created.
We're "passing" the inputs to the `dense` layer, and out we get `x`.

Let's add a few more layers to our graph of layers:
"""

x = layers.Dense(64, activation='relu')(x)
outputs = layers.Dense(10, activation='softmax')(x)
"""At this point, we can create a `Model` by specifying its inputs and outputs in the graph of layers:"""

model = keras.Model(inputs=inputs, outputs=outputs)
"""To recap, here is our full model definition process:"""

inputs = keras.Input(shape=(784, ), name='img')
x = layers.Dense(64, activation='relu')(inputs)
x = layers.Dense(64, activation='relu')(x)
outputs = layers.Dense(10, activation='softmax')(x)

model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model')
"""Let's check out what the model summary looks like:"""

model.summary()
"""We can also plot the model as a graph:"""

keras.utils.plot_model(model, 'my_first_model.png')
"""And optionally display the input and output shapes of each layer in the plotted graph:"""
Example #18
def Decider():
    #trainingDataInputs = list
    #trainingLabelsOutputs = list
    #testDataInputs = list
    #testLabelsOutputs = list
    inputs = keras.Input(shape=(5, ))  # Returns a placeholder tensor

    # A layer instance is callable on a tensor, and returns a tensor.
    x = keras.layers.Dense(64, activation='sigmoid')(inputs)
    y = keras.layers.Dense(64, activation='relu')(x)
    z = keras.layers.Dense(64, activation='relu')(y)
    a = keras.layers.Dense(64, activation='relu')(z)
    # sigmoid, not softmax: softmax over a single unit always outputs 1
    predictions = keras.layers.Dense(1, activation='sigmoid')(a)

    # Instantiate the model given inputs and outputs.
    model = keras.Model(inputs=inputs, outputs=predictions)

    # The compile step specifies the training configuration.
    model.compile(optimizer=keras.optimizers.RMSprop(0.001),
                  loss='binary_crossentropy',  # matches the single sigmoid unit
                  metrics=['accuracy'])

    # Train the model for 500 epochs
    model.fit(trainingDataInputs,
              trainingLabelsOutputs,
              batch_size=5,
              epochs=500,
              validation_data=(testDataInputs, testLabelsOutputs))

    model.save('./FIND-model.h5')

    #given ratings from the classifier, use Machine Learning Magic to determine if fake news
    # model = keras.models.Sequential()
    # # Adds a densely-connected layer with 64 units to the model:
    # #model.add(keras.layers.Dense(64, kernel_regularizer=keras.regularizers.l1(0.01))
    # # Add another:
    # model.add(keras.layers.Dense(64, input_shape=(5,), activation='sigmoid'))
    # model.add(keras.layers.Dense(64, activation='sigmoid'))
    # model.add(keras.layers.Dense(64, activation='sigmoid'))
    # model.add(keras.layers.Dense(64, activation='relu'))
    # model.add(keras.layers.Dense(64, activation='relu'))
    # # Add a softmax layer with 1 output unit:
    # model.add(keras.layers.Dense(1, activation='softmax'))

    # model.compile(optimizer=tf.train.AdamOptimizer(0.001)), loss='categorical_crossentropy', metrics=['accuracy'])

    # data = np.random.random((1000, 32))
    # labels = np.random.random((1000, 1))

    # val_data = np.random.random((100, 32))
    # val_labels = np.random.random((100, 1))

    # callbacks = [
    #     # Interrupt training if `val_loss` stops improving for over 2 epochs
    #     keras.callbacks.EarlyStopping(patience=5, monitor='val_loss'),
    #     # Write TensorBoard logs to `./logs` directory
    #     keras.callbacks.TensorBoard(log_dir='./logs')
    # ]

    # model.fit(data, labels, epochs=10, callbacks=callbacks, batch_size=32, validation_data=(val_data, val_labels))
    # model.save('./FIND-model.h5')
    # model = keras.models.load_model('FIND-model.h5')
    # ^^^ to load a saved model

    return 0
Example #19
    x = layers.concatenate([i for i in seq_output_list])
    x = transformer_block(x)
else:
    x = transformer_block(seq_output_list[0])
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(64, activation="relu")(x)
# x = layers.Dropout(0.1)(x)
# x = layers.Dense(64, activation="relu")(x)
combined = layers.concatenate([x, statics_inputs])
#z = layers.Dropout(0.1)(z)

outputs = layers.Dense(10, activation="softmax")(combined)
model = keras.Model(inputs=seq_input_list + [statics_inputs], outputs=outputs)

model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
model.summary()

print(train_x.shape)
print(valid_x.shape)

mc = keras.callbacks.ModelCheckpoint('model/transform_age_checkpoint.h5',
                                     monitor="val_accuracy",
                                     verbose=0,
                                     save_best_only=True,
                                     save_weights_only=False,
                                     mode="auto",
Example #20
    def build(self, hp):
        version = hp.Choice('version', ['v1', 'v2', 'next'], default='v2')

        # Version-conditional hyperparameters.
        with hp.name_scope(version):
            conv3_depth = hp.Choice(
                'conv3_depth',
                [4] if version == 'next' else [4, 8],
                default=4)
            conv4_depth = hp.Choice(
                'conv4_depth',
                [6, 23] if version == 'next' else [6, 23, 36],
                default=6)

        # Version-conditional fixed parameters
        preact = version == 'v2'
        use_bias = version != 'next'

        # Model definition.
        bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

        if self.input_tensor is not None:
            inputs = tf.keras.utils.get_source_inputs(self.input_tensor)
            x = self.input_tensor
        else:
            inputs = layers.Input(shape=self.input_shape)
            x = inputs

        # Initial conv2d block.
        x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(x)
        x = layers.Conv2D(
            64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)
        if preact is False:
            x = layers.BatchNormalization(
                axis=bn_axis, epsilon=1.001e-5, name='conv1_bn')(x)
            x = layers.Activation('relu', name='conv1_relu')(x)
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
        x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)

        # Middle hypertunable stack.
        if version == 'v1':
            x = stack1(x, 64, 3, stride1=1, name='conv2')
            x = stack1(x, 128, conv3_depth, name='conv3')
            x = stack1(x, 256, conv4_depth, name='conv4')
            x = stack1(x, 512, 3, name='conv5')
        elif version == 'v2':
            x = stack2(x, 64, 3, name='conv2')
            x = stack2(x, 128, conv3_depth, name='conv3')
            x = stack2(x, 256, conv4_depth, name='conv4')
            x = stack2(x, 512, 3, stride1=1, name='conv5')
        elif version == 'next':
            x = stack3(x, 64, 3, name='conv2')
            x = stack3(x, 256, conv3_depth, name='conv3')
            x = stack3(x, 512, conv4_depth, name='conv4')
            x = stack3(x, 1024, 3, stride1=1, name='conv5')

        # Top of the model.
        if preact is True:
            x = layers.BatchNormalization(
                axis=bn_axis, epsilon=1.001e-5, name='post_bn')(x)
            x = layers.Activation('relu', name='post_relu')(x)

        pooling = hp.Choice('pooling', ['avg', 'max'], default='avg')
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D(name='max_pool')(x)

        if self.include_top:
            x = layers.Dense(
                self.classes, activation='softmax', name='probs')(x)
            model = keras.Model(inputs, x, name='ResNet')
            optimizer_name = hp.Choice(
                'optimizer', ['adam', 'rmsprop', 'sgd'], default='adam')
            optimizer = keras.optimizers.get(optimizer_name)
            optimizer.learning_rate = hp.Choice(
                'learning_rate', [0.1, 0.01, 0.001], default=0.01)
            model.compile(
                optimizer=optimizer,
                loss='categorical_crossentropy',
                metrics=['accuracy'])
            return model
        else:
            return keras.Model(inputs, x, name='ResNet')
Example #21
    def _build_userencoder(self, titleencoder, type="ini"):
        """The main function to create user encoder of LSTUR.

        Args:
            titleencoder(obj): the news encoder of LSTUR. 

        Return:
            obj: the user encoder of LSTUR.
        """
        hparams = self.hparams
        his_input_title = keras.Input(
            shape=(hparams.his_size, hparams.doc_size), dtype="int32"
        )
        user_indexes = keras.Input(shape=(1,), dtype="int32")

        user_embedding_layer = layers.Embedding(
            hparams.user_num,
            hparams.gru_unit,
            trainable=True,
            embeddings_initializer="zeros",
        )

        long_u_emb = layers.Reshape((hparams.gru_unit,))(
            user_embedding_layer(user_indexes)
        )
        click_title_presents = layers.TimeDistributed(titleencoder)(his_input_title)

        if type == "ini":
            user_present = layers.GRU(
                hparams.gru_unit,
                kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                recurrent_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                bias_initializer=keras.initializers.Zeros(),
            )(
                layers.Masking(mask_value=0.0)(click_title_presents),
                initial_state=[long_u_emb],
            )
        elif type == "con":
            short_uemb = layers.GRU(
                hparams.gru_unit,
                kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                recurrent_initializer=keras.initializers.glorot_uniform(seed=self.seed),
                bias_initializer=keras.initializers.Zeros(),
            )(layers.Masking(mask_value=0.0)(click_title_presents))
            
            user_present = layers.Concatenate()([short_uemb, long_u_emb])
            user_present = layers.Dense(
                hparams.gru_unit,
                bias_initializer=keras.initializers.Zeros(),
                kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed),
            )(user_present)

        model = keras.Model(
            [his_input_title, user_indexes], user_present, name="user_encoder"
        )
        return model
Example #22
    3) What will be the effect of canceling some of the kernels? Will
        canceling the ones with the strongest mean activity cause more effect?
    After we finish with the HR images we can ask the same with syclop and
    compare between the representations in syclop and in the HR cnn.
    Let's explore the ideas of -
    coherent features in cnn
    entropy in intermediate layers of cnn
    dynamical feature learning
'''
#for layer in model.layers:
layer_name = layer_name
exec(layer_name + ' = pd.DataFrame()')
intermediate_layer_model = keras.Model(
    inputs=model.input, outputs=model.get_layer(layer_name).output)
feature_data_path = '/home/labs/ahissarlab/orra/imagewalker/teacher_student/feature_data/'
if os.path.exists(feature_data_path + 'train_features_{}'.format(layer_name)):

    #train_data = np.array(pickle.load(open('/home/orram/Documents/GitHub/imagewalker/teacher_student/train_features_{}'.format(layer_name),'rb')))
    intermediate_output = np.array(
        pickle.load(
            open(feature_data_path + 'train_features_{}'.format(layer_name),
                 'rb')))[45000:]
else:
    intermediate_output = np.array(intermediate_layer_model(trainX[45000:]))

intermediate_corrects = intermediate_output[corrects, :, :, :]
intermediate_incorrects = intermediate_output[incorrects, :, :, :]
#%%
########################### Part I ###########################################
Example #23
    def call(self, inputs, training=None, mask=None):
        x = self.dense(inputs)
        x = self.bn(x)
        x = self.sigmoid(x)
        x = keras.layers.Reshape((-1, 20))(x)

        return x


if __name__ == '__main__':
    # test
    model1 = Prediction()
    img_inputs = keras.Input(shape=(224, 224, 3))
    y = model1(img_inputs)
    model2 = keras.Model(img_inputs, y)
    # print(model2.trainable_variables)
    # model2.summary()

    # test nesting one model inside another (model-in-model) in TF 2.0
    model3 = Prediction2()
    # model3.summary()
    img_inputs = keras.Input(shape=(224, 224, 3))
    y = model3(img_inputs)
    model4 = keras.Model(img_inputs, y)
    # print(model4.trainable_variables)
    # model4.summary()

    # we test back propagation with tf.GradientTape:
    # if layers that hold parameters are not created in the __init__ method,
    # we can't get their gradients.
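
    # A minimal sketch of that gradient check (illustrative data and loss;
    # assumes `import tensorflow as tf` at the top of the file):
    x = tf.random.normal((2, 224, 224, 3))
    with tf.GradientTape() as tape:
        out = model2(x, training=True)
        loss = tf.reduce_mean(tf.square(out))
    grads = tape.gradient(loss, model2.trainable_variables)
    # layers not created in __init__ would be missing from trainable_variables,
    # so their gradients could not be retrieved here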
Example #24
## Begin Training With The 

# Transfer Learning - Load lightweight pre-trained DL model
model = tf.keras.applications.mobilenet.MobileNet()
# model.summary()

# Obtain transfer model layer input and output
base_input = model.layers[0].input
base_output = model.layers[-4].output

# Modify transfer model's last layer
Flat_layer = layers.Flatten()(base_output)
final_output = layers.Dense(1)(Flat_layer)
# Add sigmoid since this is a binary classification
final_output = layers.Activation('sigmoid')(final_output)

# Create the revised model
new_model = keras.Model(inputs = base_input, outputs = final_output)
new_model.summary()

# Set up the model
new_model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])

# Fit the model to the data
epochs = 20
new_model.fit(X, Y, epochs = epochs, validation_split = 0.1)


# Save Our Model
new_model.save("Kaveh_With_Mask_Detection.h5")
Example #25
encoder_inputs = keras.Input(shape=(w, h, c))
condition_inputs = keras.Input(shape=(z_condition,))
x = layers.GaussianNoise(stddev=0.2)(encoder_inputs)
x = layers.Conv2D(64, 3, activation="relu")(x)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Conv2D(64, 3, activation="relu", strides=3, padding="same")(x)
x = layers.Conv2D(128, 3, activation="relu", strides=3, padding="same")(x)
x = layers.Flatten()(x)

if args.conditional:
    x = layers.Concatenate()([x, condition_inputs])
    x = layers.Dense(64, activation="relu")(x)
    z_mean = layers.Dense(latent_dim, name="z_mean")(x)
    z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
    z = Sampling()([z_mean, z_log_var])
    encoder = keras.Model([encoder_inputs, condition_inputs], [z_mean, z_log_var, z], name="encoder")
else:
    x = layers.Dense(64, activation="relu")(x)
    z_mean = layers.Dense(latent_dim, name="z_mean")(x)
    z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
    z = Sampling()([z_mean, z_log_var])
    encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()    

# build the decoder

latent_inputs = keras.Input(shape=(latent_dim,))
if args.conditional:
    x = layers.Concatenate()([latent_inputs, condition_inputs])
    x = layers.Dense(7 * 7 * 64, activation="relu")(x)
else:
    # mirrors the conditional branch above, without the condition inputs
    x = layers.Dense(7 * 7 * 64, activation="relu")(latent_inputs)
Example #26
    def __build_network__(self):
        """This function returns a keras model.
            
            Args:
                loadWeights (bool) : Load weights specified in the weightsFile param
                weightsFile (str) : Path to weights
            
            Returns:
                :class:`keras.model.Model` : Neural Network 

        """
        x = keras.Input(shape=(self.resize_dim[1], self.resize_dim[0], 3))

        conv1 = layers.Conv2D(
            128, (9, 9),
            padding='same',
            kernel_initializer=keras.initializers.lecun_uniform(seed=None))(x)
        batchnorm1 = layers.BatchNormalization()(conv1)
        act1 = layers.LeakyReLU()(batchnorm1)
        # maxpool1 = layers.MaxPooling2D((2,2))(act1)

        conv2 = layers.Conv2D(
            64, (9, 9),
            padding='same',
            kernel_initializer=keras.initializers.lecun_uniform(
                seed=None))(act1)
        batchnorm2 = layers.BatchNormalization()(conv2)
        act2 = layers.LeakyReLU()(batchnorm2)
        # maxpool2 = layers.MaxPooling2D((2,2))(act2)

        conv3 = layers.Conv2D(
            32, (9, 9),
            padding='same',
            kernel_initializer=keras.initializers.lecun_uniform(
                seed=None))(act2)
        batchnorm3 = layers.BatchNormalization()(conv3)
        act3 = layers.LeakyReLU()(batchnorm3)
        # maxpool3 = layers.MaxPooling2D((2,2))(act3)

        out = tf.keras.layers.Conv2DTranspose(self.num_class + 1,
                                              9,
                                              padding='same',
                                              activation='softmax')(act3)

        # conv4 = layers.Conv2D(6, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None))(act3)
        # batchnorm4 = layers.BatchNormalization()(conv4)
        # act4 = layers.Conv2DTranspose('softmax',name="output")(batchnorm4)
        # maxpool4 = layers.MaxPooling2D((2,2))(act4)

        # flat1 = layers.Flatten()(x)
        # D1 = layers.Dense(128,kernel_initializer=keras.initializers.lecun_uniform(seed=None))(flat1)
        # batchnorm5 = layers.BatchNormalization()(D1)
        # act5 = layers.LeakyReLU()(batchnorm5)

        # D2 = layers.Dense(64,kernel_initializer=keras.initializers.lecun_uniform(seed=None))(act5)
        # batchnorm6 = layers.BatchNormalization()(D2)
        # act6 = layers.LeakyReLU()(batchnorm6)

        # D_reg = layers.Dense(self.num_class*2,kernel_initializer=keras.initializers.lecun_uniform(seed=None))(act6)
        # batchnorm7 = layers.BatchNormalization()(D_reg)
        # out = layers.Activation('linear',name="output")(batchnorm7)
        # outReshape = layers.Reshape((self.num_class, 2))(out)
        model = keras.Model(inputs=x, outputs=[out])
        if (self.loadWeights):
            model.load_weights(self.weightsFile, by_name=True)
        return model
Example #27
def main(config = None):
    run = wandb.init(config = config)
    config = wandb.config

    run.name = (f"Embedding Size: {config.embedding_size} Cell Type: {config.cell_type} "
                f"Dropout: {config.dropout} Beam Size: {config.beam_size} "
                f"Encoder Layers: {config.encoder_layers} Decoder Layers: {config.decoder_layers} "
                f"Hidden Layer Size: {config.hidden_layer_size}")

    # Configuration
    batch_size = 128
    epochs = 25
    embedding_size = config.embedding_size
    enc_latent_dims = [config.hidden_layer_size] * config.encoder_layers
    dec_latent_dims  = [config.hidden_layer_size] * config.decoder_layers
    cell_type = config.cell_type
    dropout = config.dropout
    beam_size = config.beam_size

    # Encoder
    encoder_inputs = keras.Input(shape = (None, ))
    encoder_outputs = keras.layers.Embedding(input_dim = num_encoder_characters, output_dim = embedding_size, input_length = max_encoder_seq_length)(encoder_inputs)

    # Encoder LSTM layers
    encoder_states = list()
    for j in range(len(enc_latent_dims)):
        if cell_type == "rnn":
            encoder_outputs, state = keras.layers.SimpleRNN(enc_latent_dims[j], dropout = dropout, return_state = True, return_sequences = True)(encoder_outputs)
            encoder_states = [state]
        if cell_type == "lstm":
            encoder_outputs, state_h, state_c = keras.layers.LSTM(enc_latent_dims[j], dropout = dropout, return_state = True, return_sequences = True)(encoder_outputs)
            encoder_states = [state_h,state_c]
        if cell_type == "gru":
            encoder_outputs, state = keras.layers.GRU(enc_latent_dims[j], dropout = dropout, return_state = True, return_sequences = True)(encoder_outputs)
            encoder_states = [state]

    # Decoder
    decoder_inputs = keras.Input(shape=(None, ))
    decoder_outputs = keras.layers.Embedding(input_dim = num_decoder_characters, output_dim = embedding_size, input_length = max_decoder_seq_length)(decoder_inputs)

    # We set up our decoder to return full output sequences,
    # and to return internal states as well. We don't use the
    # return states in the training model, but we will use them in inference.
    decoder_states = encoder_states.copy()

    for j in range(len(dec_latent_dims)):
        if cell_type == "rnn":
            decoder = keras.layers.SimpleRNN(dec_latent_dims[j], dropout = dropout, return_sequences = True, return_state = True)
            decoder_outputs, state = decoder(decoder_outputs, initial_state = decoder_states)
            # decoder_states = [state]
        if cell_type == "lstm":
            decoder = keras.layers.LSTM(dec_latent_dims[j], dropout = dropout, return_sequences = True, return_state = True)
            decoder_outputs, state_h, state_c = decoder(decoder_outputs, initial_state = decoder_states)
            # decoder_states = [state_h, state_c]
        if cell_type == "gru":
            decoder = keras.layers.GRU(dec_latent_dims[j], dropout = dropout, return_sequences = True, return_state = True)
            decoder_outputs, state = decoder(decoder_outputs, initial_state = decoder_states)
            # decoder_states = [state]

    decoder_dense = keras.layers.Dense(num_decoder_characters, activation = "softmax")
    decoder_outputs = decoder_dense(decoder_outputs)

    # Define the model that will turn
    # encoder_input_data & decoder_input_data into decoder_output_data
    model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)

    model.compile(
        optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]
    )

    model.fit(
        [encoder_train_input_data, decoder_train_input_data],
        decoder_train_target_data,
        batch_size = batch_size,
        epochs = epochs,
        callbacks = [WandbCallback()]
    )

    # Save model
    model.save("seq2seq")

    # Inference Call for Validation Data
    val_accuracy = inference.infer(encoder_val_input_data, val_input_words, val_target_words, num_decoder_characters, max_decoder_seq_length, target_characters_index, inverse_target_characters_index, enc_latent_dims, dec_latent_dims, cell_type, beam_size)
    wandb.log( { "val_accuracy": val_accuracy})
Example #28
    def __build_network__mobileNet(self):
        """This function returns a keras model.
            
            Args:
                loadWeights (bool) : Load weights specified in the weightsFile param
                weightsFile (str) : Path to weights
            
            Returns:
                :class:`keras.model.Model` : Neural Network 

        """
        base_model = tf.keras.applications.MobileNetV2(
            input_shape=[self.resize_dim[1], self.resize_dim[0], 3],
            include_top=False)

        # Use the activations of these layers
        layer_names = [
            'block_1_expand_relu',  # 64x64
            'block_3_expand_relu',  # 32x32
            # 'block_6_expand_relu',   # 16x16
            # 'block_13_expand_relu',  # 8x8
            # 'block_16_project',      # 4x4
        ]
        Layers = [base_model.get_layer(name).output for name in layer_names]

        # Create the feature extraction model
        down_stack = tf.keras.Model(inputs=base_model.input, outputs=Layers)

        down_stack.trainable = True

        # print(down_stack.summary())
        inputs = tf.keras.layers.Input(shape=[128, 128, 3])
        featurization = down_stack(inputs)

        # conv_inc_channels = layers.Conv2D(576, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None))(featurization[-1])
        # batchnorm1 = layers.BatchNormalization()(conv_inc_channels)
        # act1 = layers.LeakyReLU()(batchnorm1)

        # upsample_layer_1 = layers.Conv2DTranspose(576, 2, padding='valid',strides=2,activation='relu',kernel_initializer=keras.initializers.lecun_uniform(seed=None))(act1)
        # concat_1 = layers.concatenate([featurization[-2],upsample_layer_1])

        # conv_dec_channels_2 = layers.Conv2D(192, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None))(concat_1)
        # batchnorm2 = layers.BatchNormalization()(conv_dec_channels_2)
        # act2 = layers.LeakyReLU()(batchnorm2)

        # upsample_layer_2 = layers.Conv2DTranspose(192, 2, padding='valid',strides=2,activation='relu',kernel_initializer=keras.initializers.lecun_uniform(seed=None))(act2)
        # concat_2 = layers.concatenate([featurization[-3],upsample_layer_2])

        # conv_dec_channels_3 = layers.Conv2D(144, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None))(concat_2)
        # batchnorm3 = layers.BatchNormalization()(conv_dec_channels_3)
        # act3 = layers.LeakyReLU()(batchnorm3)

        # upsample_layer_3 = layers.Conv2DTranspose(144, 2, padding='valid',strides=2,activation='relu',kernel_initializer=keras.initializers.lecun_uniform(seed=None))(act3)
        # concat_3 = layers.concatenate([featurization[-4],upsample_layer_3])

        conv_dec_channels_4 = layers.Conv2D(
            96, (3, 3),
            padding='same',
            kernel_initializer=keras.initializers.lecun_uniform(seed=None))(
                featurization[-1])
        batchnorm4 = layers.BatchNormalization()(conv_dec_channels_4)
        act4 = layers.LeakyReLU()(batchnorm4)

        upsample_layer_4 = layers.Conv2DTranspose(
            96,
            2,
            padding='valid',
            strides=2,
            activation='relu',
            kernel_initializer=keras.initializers.lecun_uniform(
                seed=None))(act4)
        concat_4 = layers.concatenate([featurization[-2], upsample_layer_4])

        conv_dec_channels_5 = layers.Conv2D(
            36, (3, 3),
            padding='same',
            kernel_initializer=keras.initializers.lecun_uniform(
                seed=None))(concat_4)
        batchnorm5 = layers.BatchNormalization()(conv_dec_channels_5)
        act5 = layers.LeakyReLU()(batchnorm5)

        upsample_layer_5 = layers.Conv2DTranspose(
            16,
            2,
            padding='valid',
            strides=2,
            activation='relu',
            kernel_initializer=keras.initializers.lecun_uniform(
                seed=None))(act5)

        conv_dec_channels_6 = layers.Conv2D(
            self.num_class + 1,
            1,
            padding='same',
            kernel_initializer=keras.initializers.lecun_uniform(
                seed=None))(upsample_layer_5)
        batchnorm6 = layers.BatchNormalization()(conv_dec_channels_6)
        act6 = layers.Softmax()(batchnorm6)

        print(upsample_layer_5.shape)

        model = keras.Model(inputs=inputs, outputs=[act6])
        if (self.loadWeights):
            model.load_weights(self.weightsFile, by_name=True)

        return model
Example #29
def constructModel(_vocabSize,
                   _embeddingMatrix,
                   _padLength,
                   _dimensions=50,
                   _cnnFilters=50,
                   _cnnKernel=5,
                   _convActivation='relu',
                   _cnnPool=5,
                   _cnnFlatten=True,
                   _cnnDense=50,
                   _denseActivation='relu',
                   _cnnDropout=0.0,
                   _outputActivation='softmax',
                   _lossFunction='categorical_crossentropy',
                   _summarize=True,
                   _optimizer='adam'):

    # Initialize the model by creating an input layer.  This layer matches the shape of the
    # article's list of word IDs.
    inputLayer = keras.layers.Input(shape=(_padLength, ), name='inputLayer')

    # Add our word embedding layer.  This layer converts the word indices sent to the model into
    # vectors of length N, where N is the length of the GloVe word vectors.  This conversion is
    # done for each word in an article, resulting in a matrix of shape [_padLength, N] per
    # article.
    wordEmbedding = keras.layers.Embedding(
        input_dim=_vocabSize,
        output_dim=_dimensions,
        embeddings_initializer=keras.initializers.Constant(_embeddingMatrix),
        trainable=False,
        name='embeddingLayer')(inputLayer)

    # Add a 1-dimensional convolution layer.  This layer moves a window of size _cnnKernel across
    # the input and creates an output of length _cnnFilters for each window.
    convLayer = keras.layers.Conv1D(filters=_cnnFilters,
                                    kernel_size=_cnnKernel,
                                    activation=_convActivation,
                                    name='convolutionLayer')(wordEmbedding)

    # Add a dropout layer.  This layer reduces overfitting by randomly "turning off" nodes
    # during each training epoch.  Doing this prevents a small set of nodes doing all the
    # work while a bunch of other nodes sit around playing poker.
    if _cnnDropout > 0.0:
        convLayer = keras.layers.Dropout(
            _cnnDropout, name='convolutionDropoutLayer')(convLayer)

    # Add a max pooling layer.  This layer slides a window of size _cnnPool along the sequence
    # and keeps the element-wise maximum within each window.
    maxPoolLayer = keras.layers.MaxPool1D(pool_size=_cnnPool,
                                          name='maxPoolingLayer')(convLayer)

    # Add a flatten layer.  This layer reduces the output to a one-dimensional vector
    if _cnnFlatten:
        maxPoolLayer = keras.layers.Flatten(name='flattenLayer')(maxPoolLayer)

    # Add a fully connected dense layer.  This layer adds a lot of nodes to the model to allow
    # for different features in the article to activate different groups of nodes.
    denseLayer = keras.layers.Dense(units=_cnnDense,
                                    activation=_denseActivation,
                                    name='denseLayer')(maxPoolLayer)

    # Add a dropout layer.  This layer reduces overfitting by randomly "turning off" nodes
    # during each training epoch.  Doing this prevents a small set of nodes doing all the
    # work while a bunch of other nodes sit around playing poker.
    if _cnnDropout > 0.0:
        denseLayer = keras.layers.Dropout(_cnnDropout,
                                          name='denseDropoutLayer')(denseLayer)

    # Add our output layer.  We have 5 classes of output "left", "left-center", "least",
    # "right-center", and "right".  This layer converts the inputs from the dense/dropout
    # layer into outputs for these 5 classes, essentially predicting the article leaning.
    outputLayer = keras.layers.Dense(5,
                                     activation=_outputActivation,
                                     name='outputLayer')(denseLayer)

    # Create the actual model from the layers described above.
    theModel = keras.Model(inputs=[inputLayer], outputs=[outputLayer])

    # Display a summary of our model
    if _summarize:
        theModel.summary()

    # Compile our model.  We use categorical crossentropy for the training loss function
    # since our predictions are multiclass.  We use categorical accuracy as our
    # performance metric, though we can report others as well.  The optimizer can
    # be any of the allowed optimizers (default is 'adam', a form of stochastic
    # gradient descent).
    theModel.compile(optimizer=_optimizer,
                     loss='categorical_crossentropy',
                     metrics=['categorical_accuracy'])
    return theModel
Example #30
encoded = layers.Dense(buzzDim,
                       activation='relu',
                       activity_regularizer=regularizers.l1(regRate)
                      )(encoded)

decoded = layers.Dense(int(nPoints * nChannels / 8), 
                       activity_regularizer=regularizers.l1(regRate),
                       activation='relu')(encoded)
decoded = layers.Dense(int(nPoints * nChannels / 4), 
                       activity_regularizer=regularizers.l1(regRate),
                       activation='relu')(decoded)
decoded = layers.Dense(nPoints * nChannels, 
                       activation='sigmoid')(decoded)


autoencoder = keras.Model(inputData, decoded)

encoder = keras.Model(inputData, encoded)
encoded_input = keras.Input(shape=(buzzDim,))


# create the decoder model for validating
decoder = autoencoder.layers[-3](encoded_input)
decoder = autoencoder.layers[-2](decoder)
decoder = autoencoder.layers[-1](decoder)


decoder = keras.Model(encoded_input, decoder)


autoencoder.compile(optimizer='adam', loss='mse')