Code example #1
File: train.py  Project: anilksistla/ml-class-1
# NOTE: the scraped snippet begins mid-call; the imports and the head of the
# logging callback below are a plausible reconstruction, not the original.
import numpy as np
import wandb
from wandb.keras import WandbCallback
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Reshape

run = wandb.init()   # config.encoding_dim is assumed to be provided
config = run.config  # (e.g. via a wandb config-defaults file)


def log_images(epoch, logs):
    test_data = x_test_noisy[:10]
    pred_data = model.predict(test_data)
    wandb.log(
        {
            "examples": [
                wandb.Image(np.hstack([data, pred_data[i]]), caption=str(i))
                for i, data in enumerate(test_data)
            ]
        },
        commit=False)


(x_train, _), (x_test, _) = fashion_mnist.load_data()
(x_train_noisy, x_test_noisy) = add_noise(x_train, x_test)
img_width = x_train.shape[1]
img_height = x_train.shape[2]

x_train = x_train / 255.
x_test = x_test / 255.

# create model
model = Sequential()
model.add(Flatten(input_shape=(img_width, img_height)))
model.add(Dense(config.encoding_dim, activation="relu"))
model.add(Dense(img_width * img_height, activation="sigmoid"))
model.add(Reshape((img_width, img_height)))
model.compile(loss='mse', optimizer='adam', metrics=['mse'])

# Fit the model
model.fit(x_train_noisy,
          x_train,
          epochs=30,
          validation_data=(x_test_noisy, x_test),
          callbacks=[WandbCallback(),
                     LambdaCallback(on_epoch_end=log_images)])
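
The snippet also depends on an add_noise helper that the excerpt does not show. A minimal sketch of what it might look like (the noise scale is an assumption, not from the source):

def add_noise(x_train, x_test, noise_std=64.0):
    # Additive Gaussian noise on the raw 0-255 images, clipped back into range.
    x_train_noisy = np.clip(
        x_train + np.random.normal(scale=noise_std, size=x_train.shape), 0., 255.)
    x_test_noisy = np.clip(
        x_test + np.random.normal(scale=noise_std, size=x_test.shape), 0., 255.)
    return x_train_noisy, x_test_noisy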
Code example #2
File: Untitled.ipynb.py  Project: Cxyeinx/Gans-Mnist
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
# Missing from the scraped snippet -- imports needed to make it runnable:
from tensorflow.keras.datasets.mnist import load_data
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, Reshape, BatchNormalization,
                                     Conv2DTranspose, Conv2D, Dropout)


(x_train, y_train), (x_test, y_test) = load_data()
x_train = x_train / 255
x_test = x_test / 255
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)


generator = Sequential()

generator.add(Dense(128 * 7 * 7, input_dim=100, activation="relu"))
generator.add(Reshape((7, 7, 128)))
generator.add(BatchNormalization())

generator.add(Conv2DTranspose(128, (3, 3), strides=(2, 2), padding="same", activation="relu"))
generator.add(Conv2DTranspose(128, (3, 3), strides=(2, 2), padding="same", activation="relu"))
generator.add(BatchNormalization())
generator.add(Dropout(0.3))

generator.add(Conv2D(64, (3, 3), padding="same", activation="relu"))
generator.add(Conv2D(1, (3, 3), padding="same", activation="tanh"))

generator.summary()


discriminator = Sequential()
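
The excerpt is cut off right after discriminator = Sequential(). A plausible continuation for a 28x28x1 MNIST discriminator (layer sizes here are assumptions, not from the source):

from tensorflow.keras.layers import Flatten, LeakyReLU

discriminator.add(Conv2D(64, (3, 3), strides=(2, 2), padding="same",
                         input_shape=(28, 28, 1)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
discriminator.add(Conv2D(128, (3, 3), strides=(2, 2), padding="same"))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
discriminator.add(Flatten())
discriminator.add(Dense(1, activation="sigmoid"))
discriminator.compile(loss="binary_crossentropy", optimizer="adam")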
Code example #3
File: text_cnn.py  Project: zzachw/deepchem
  def __init__(
      self,
      n_tasks,
      char_dict,
      seq_length,
      n_embedding=75,
      kernel_sizes=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20],
      num_filters=[100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160],
      dropout=0.25,
      mode="classification",
      **kwargs):
    """
    Parameters
    ----------
    n_tasks: int
      Number of tasks
    char_dict: dict
      Mapping from characters in smiles to integers
    seq_length: int
      Length of sequences (after padding)
    n_embedding: int, optional
      Length of embedding vector
    kernel_sizes: list of int, optional
      Kernel sizes of the convolutional filters
    num_filters: list of int, optional
      Number of filters per kernel size
    dropout: float, optional
      Dropout rate
    mode: str
      Either "classification" or "regression" for type of model.
    """
    self.n_tasks = n_tasks
    self.char_dict = char_dict
    self.seq_length = max(seq_length, max(kernel_sizes))
    self.n_embedding = n_embedding
    self.kernel_sizes = kernel_sizes
    self.num_filters = num_filters
    self.dropout = dropout
    self.mode = mode

    # Build the model.

    smiles_seqs = Input(shape=(self.seq_length,), dtype=tf.int32)
    # Character embedding
    embedding = layers.DTNNEmbedding(
        n_embedding=self.n_embedding,
        periodic_table_length=len(self.char_dict.keys()) + 1)(smiles_seqs)
    pooled_outputs = []
    conv_layers = []
    for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
      # Multiple convolutional layers with different filter widths
      conv_layers.append(
          Conv1D(kernel_size=filter_size, filters=num_filter,
                 padding='valid')(embedding))
      # Max-over-time pooling
      reduced = Lambda(lambda x: tf.reduce_max(x, axis=1))(conv_layers[-1])
      pooled_outputs.append(reduced)
    # Concat features from all filters (one feature per filter)
    concat_outputs = Concatenate(axis=1)(pooled_outputs)
    dropout = Dropout(rate=self.dropout)(concat_outputs)
    dense = Dense(200, activation=tf.nn.relu)(dropout)
    # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
    gather = layers.Highway()(dense)

    if self.mode == "classification":
      logits = Dense(self.n_tasks * 2)(gather)
      logits = Reshape((self.n_tasks, 2))(logits)
      output = Softmax()(logits)
      outputs = [output, logits]
      output_types = ['prediction', 'loss']
      loss = SoftmaxCrossEntropy()

    else:
      output = Dense(self.n_tasks * 1)(gather)
      output = Reshape((self.n_tasks, 1))(output)
      outputs = [output]
      output_types = ['prediction']
      loss = L2Loss()

    model = tf.keras.Model(inputs=[smiles_seqs], outputs=outputs)
    super(TextCNNModel, self).__init__(
        model, loss, output_types=output_types, **kwargs)
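
A hypothetical usage sketch for this constructor, based on DeepChem's public API (the dataset choice is an assumption):

import deepchem as dc

tasks, (train, valid, test), _ = dc.molnet.load_tox21(featurizer='Raw')
char_dict, seq_length = dc.models.TextCNNModel.build_char_dict(train)
model = dc.models.TextCNNModel(n_tasks=len(tasks),
                               char_dict=char_dict,
                               seq_length=seq_length,
                               mode='classification')
model.fit(train, nb_epoch=10)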
Code example #4
File: pytorch_tflite.py  Project: sg47/oppo
# Remove edge branch from output
edge_model = Model(inputs=k_model.input, outputs=k_model.layers[-2].output)
edge_model.summary()

# Add softmax on output
sm = Lambda(lambda x: tf.nn.softmax(x))(edge_model.output)
soft_model = Model(inputs=edge_model.input, outputs=sm)
soft_model.summary()

# Get foreground softmax slice
ip = soft_model.output
str_slice = Lambda(lambda x: tf.strided_slice(x, [0, 0, 0, 1], [1, 224, 224, 2], [1, 1, 1, 1]))(ip)
stride_model = Model(inputs=soft_model.input, outputs=str_slice)
stride_model.summary()

# Flatten output
output = stride_model.output
newout = Reshape((50176,))(output)
reshape_model = Model(stride_model.input, newout)
reshape_model.summary()

# Save keras model
reshape_model.save('/content/portrait_video.h5')

# Convert to tflite
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_keras_model(reshape_model)
tflite_model = converter.convert()
open("/content/portrait_video.tflite", "wb").write(tflite_model)
Code example #5
def MobileNetv2_tiny(input_shape, k, alpha=1.0):
    '''
    MobileNetV2 variant for CIFAR-10 (the stem uses stride 1 to keep the
    32x32 resolution, unlike the stride-2 stem of the full model).
    '''
    inputs = Input(shape=input_shape)

    first_filters = _make_divisible(32 * alpha, 8)
    x = _conv_block(inputs, first_filters, (3, 3), strides=(1, 1))

    x = _inverted_residual_block(x,
                                 16, (3, 3),
                                 t=1,
                                 alpha=alpha,
                                 strides=1,
                                 n=1)
    x = _inverted_residual_block(x,
                                 24, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=1,
                                 n=2)
    # stage 1 : 16 x 16
    x = _inverted_residual_block(x,
                                 32, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=2,
                                 n=2)
    x = _inverted_residual_block(x,
                                 64, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=1,
                                 n=2)
    # stage 2 : 8 x 8
    x = _inverted_residual_block(x,
                                 96, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=1,
                                 n=1)
    x = _inverted_residual_block(x,
                                 160, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=2,
                                 n=1)
    x = _inverted_residual_block(x,
                                 320, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=1,
                                 n=1)

    if alpha > 1.0:
        last_filters = _make_divisible(512 * alpha, 8)
    else:
        last_filters = 512

    x = _conv_block(x, last_filters, (1, 1), strides=(1, 1))
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, last_filters))(x)
    x = Dropout(0.3, name='Dropout')(x)
    x = Conv2D(k, (1, 1), padding='same')(x)

    x = Activation('softmax', name='softmax')(x)
    output = Reshape((k, ))(x)

    model = Model(inputs, output)
    # plot_model(model, to_file='images/MobileNetv2.png', show_shapes=True)

    return model
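
A minimal usage sketch for CIFAR-10, assuming the _make_divisible, _conv_block, and _inverted_residual_block helpers from the same file are in scope:

model = MobileNetv2_tiny(input_shape=(32, 32, 3), k=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])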
Code example #6
def generator(z_dim=100,
              architecture_size='large',
              use_batch_norm=False,
              n_classes=2):

    generator_filters = [1024, 512, 256, 128, 64]

    label_input = Input(shape=(1, ),
                        dtype='int32',
                        name='generator_label_input')
    label_em = Embedding(n_classes, n_classes * 20,
                         name='label_embedding')(label_input)
    label_em = Dense(16, name='label_dense')(label_em)
    label_em = Reshape((16, 1), name='label_reshape')(label_em)

    generator_input = Input(shape=(z_dim, ), name='generator_input')
    x = generator_input

    # if architecture_size == 'small':
    #     x = Dense(16384, name='generator_input_dense')(x)
    #     x = Reshape((16, 1024), name='generator_input_reshape')(x)
    #     if use_batch_norm == True:
    #             x = BatchNormalization()(x)

    if architecture_size == 'medium' or architecture_size == 'large':
        x = Dense(32768, name='generator_input_dense')(x)
        x = Reshape((16, 2048), name='generator_input_reshape')(x)
        if use_batch_norm:
            x = BatchNormalization()(x)

    x = ReLU()(x)

    x = Concatenate()([x, label_em])

    # if architecture_size == 'small':
    #     for i in range(4):
    #         x = Conv1DTranspose(
    #             input_tensor = x
    #             , filters = generator_filters[i+1]
    #             , kernel_size = 25
    #             , strides = 4
    #             , padding='same'
    #             , name = f'generator_Tconv_{i}'
    #             , activation = 'relu'
    #             )
    #         if use_batch_norm == True:
    #             x = BatchNormalization()(x)

    #     x = Conv1DTranspose(
    #         input_tensor = x
    #         , filters = 1
    #         , kernel_size = 25
    #         , strides = 4
    #         , padding='same'
    #         , name = 'generator_Tconv_4'
    #         , activation = 'tanh'
    #         )

    # if architecture_size == 'medium':
    #     #layer 0 to 4
    #     for i in range(5):
    #         x = Conv1DTranspose(
    #             input_tensor = x
    #             , filters = generator_filters[i]
    #             , kernel_size = 25
    #             , strides = 4
    #             , padding='same'
    #             , name = f'generator_Tconv_{i}'
    #             , activation = 'relu'
    #             )
    #         if use_batch_norm == True:
    #             x = BatchNormalization()(x)
    #     #layer 5
    #     x = Conv1DTranspose(
    #         input_tensor = x
    #         , filters = 1
    #         , kernel_size = 25
    #         , strides = 2
    #         , padding='same'
    #         , name = 'generator_Tconv_5'
    #         , activation = 'tanh'
    #         )

    if architecture_size == 'large':
        # layers 0 to 4
        for i in range(5):
            x = Conv1DTranspose(input_tensor=x,
                                filters=generator_filters[i],
                                kernel_size=25,
                                strides=4,
                                padding='same',
                                name=f'generator_Tconv_{i}',
                                activation='relu')
            if use_batch_norm:
                x = BatchNormalization()(x)

        # layer 5
        x = Conv1DTranspose(input_tensor=x,
                            filters=1,
                            kernel_size=25,
                            strides=7,
                            padding='same',
                            name='generator_Tconv_5',
                            activation='tanh')

    generator_output = x
    generator = Model([generator_input, label_input],
                      generator_output,
                      name='Generator')
    return generator
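
Conv1DTranspose here is not the built-in Keras layer (note the input_tensor argument; tf.keras only gained a Conv1DTranspose layer in TF 2.3, with a different calling convention). A plausible definition of the helper, wrapping Conv2DTranspose as WaveGAN ports commonly do:

import tensorflow.keras.backend as K
from tensorflow.keras.layers import Lambda, Conv2DTranspose

def Conv1DTranspose(input_tensor, filters, kernel_size, strides=4,
                    padding='same', activation=None, name=None):
    # Insert a dummy width axis so the 2D transposed convolution can be used.
    x = Lambda(lambda t: K.expand_dims(t, axis=2))(input_tensor)
    x = Conv2DTranspose(filters, (kernel_size, 1), strides=(strides, 1),
                        padding=padding, activation=activation, name=name)(x)
    return Lambda(lambda t: K.squeeze(t, axis=2))(x)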
Code example #7
inputs = Input(shape=(48, 7))
dense1 = Conv1D(512, 2, padding='same', activation='relu')(inputs)
dense1 = LeakyReLU(alpha=0.5)(dense1)
dense1 = Conv1D(216, 2, padding='same', activation='relu')(dense1)
dense1 = LeakyReLU(alpha=0.5)(dense1)
# dense1 = LeakyReLU(alpha=0.5) (dense1)
dense1 = Conv1D(128, 2, padding='same', activation='relu')(dense1)
dense1 = LeakyReLU(alpha=0.5)(dense1)
# dense1 = Conv1D(64, 2,activation='relu')(dense1)
dense1 = Flatten()(dense1)
dense1 = Dense(256, activation='relu')(dense1)
dense1 = LeakyReLU(alpha=0.5)(dense1)
# dense1 = Dense(312,activation='relu')(dense1)
# dense1 = LeakyReLU(alpha=0.5) (dense1)
dense1 = Dense(48 * 2)(dense1)
outputs = Reshape((48, 2))(dense1)

model = Model(inputs=inputs, outputs=outputs)

model.summary()

# 3. compile, fit
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
modelpath = '../data/modelcheckpoint/태양광_{epoch:02d}-{val_loss:.4f}.hdf5'
es = EarlyStopping(monitor='val_loss', patience=20)
cp = ModelCheckpoint(filepath=modelpath,
                     monitor='val_loss',
                     save_best_only=True,
                     mode='auto')
lr = ReduceLROnPlateau(monitor='val_loss', patience=10, factor=0.5, verbose=1)
# If there is still no improvement after `patience` epochs, cut the learning rate by 50 percent.
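
The excerpt stops after defining the callbacks. A plausible compile/fit call to go with them, assuming x_train of shape (n, 48, 7) and y_train of shape (n, 48, 2); the hyperparameters are guesses:

model.compile(loss='mse', optimizer='adam')
hist = model.fit(x_train, y_train,
                 epochs=1000,
                 batch_size=32,
                 validation_split=0.2,
                 callbacks=[es, cp, lr])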
Code example #8
def create_deeplob(T, NF, number_of_lstm):
    input_lmd = Input(shape=(T, NF, 1))

    # build the convolutional block
    conv_first1 = Conv2D(32, (1, 2), strides=(1, 2))(input_lmd)
    conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)
    conv_first1 = Conv2D(32, (4, 1), padding='same')(conv_first1)
    conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)
    conv_first1 = Conv2D(32, (4, 1), padding='same')(conv_first1)
    conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)

    conv_first1 = Conv2D(32, (1, 2), strides=(1, 2))(conv_first1)
    conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)
    conv_first1 = Conv2D(32, (4, 1), padding='same')(conv_first1)
    conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)
    conv_first1 = Conv2D(32, (4, 1), padding='same')(conv_first1)
    conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)

    conv_first1 = Conv2D(32, (1, 10))(conv_first1)
    conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)
    conv_first1 = Conv2D(32, (4, 1), padding='same')(conv_first1)
    conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)
    conv_first1 = Conv2D(32, (4, 1), padding='same')(conv_first1)
    conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)

    # build the inception module
    convsecond_1 = Conv2D(64, (1, 1), padding='same')(conv_first1)
    convsecond_1 = keras.layers.LeakyReLU(alpha=0.01)(convsecond_1)
    convsecond_1 = Conv2D(64, (3, 1), padding='same')(convsecond_1)
    convsecond_1 = keras.layers.LeakyReLU(alpha=0.01)(convsecond_1)

    convsecond_2 = Conv2D(64, (1, 1), padding='same')(conv_first1)
    convsecond_2 = keras.layers.LeakyReLU(alpha=0.01)(convsecond_2)
    convsecond_2 = Conv2D(64, (5, 1), padding='same')(convsecond_2)
    convsecond_2 = keras.layers.LeakyReLU(alpha=0.01)(convsecond_2)

    convsecond_3 = MaxPooling2D((3, 1), strides=(1, 1),
                                padding='same')(conv_first1)
    convsecond_3 = Conv2D(64, (1, 1), padding='same')(convsecond_3)
    convsecond_3 = keras.layers.LeakyReLU(alpha=0.01)(convsecond_3)

    convsecond_output = keras.layers.concatenate(
        [convsecond_1, convsecond_2, convsecond_3], axis=3)

    # use the MC dropout here
    conv_reshape = Reshape(
        (int(convsecond_output.shape[1]),
         int(convsecond_output.shape[3])))(convsecond_output)

    # build the last LSTM layer
    conv_lstm = LSTM(number_of_lstm)(conv_reshape)

    # build the output layer
    out = Dense(3, activation='softmax')(conv_lstm)
    model = Model(inputs=input_lmd, outputs=out)
    adam = keras.optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
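
A usage sketch with the dimensions used in the DeepLOB paper (100 timesteps of 40 limit-order-book features, 64 LSTM units):

deeplob = create_deeplob(T=100, NF=40, number_of_lstm=64)
deeplob.summary()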
Code example #9
def get_resnet_model(save_path,
                     model_res=1024,
                     image_size=256,
                     depth=2,
                     size=0,
                     activation='elu',
                     loss='logcosh',
                     optimizer='adam'):
    # Build model
    if os.path.exists(save_path):
        print('Loading model')
        return load_model(save_path)

    print('Building model')
    model_scale = int(2 *
                      (math.log(model_res, 2) - 1))  # For example, 1024 -> 18

    if size <= 0:
        from keras.applications.resnet50 import ResNet50
        resnet = ResNet50(include_top=False,
                          pooling=None,
                          weights='imagenet',
                          input_shape=(image_size, image_size, 3))
    else:
        from keras_applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
    if size == 1:
        resnet = ResNet50V2(include_top=False,
                            pooling=None,
                            weights='imagenet',
                            input_shape=(image_size, image_size, 3),
                            backend=keras.backend,
                            layers=keras.layers,
                            models=keras.models,
                            utils=keras.utils)
    if size == 2:
        resnet = ResNet101V2(include_top=False,
                             pooling=None,
                             weights='imagenet',
                             input_shape=(image_size, image_size, 3),
                             backend=keras.backend,
                             layers=keras.layers,
                             models=keras.models,
                             utils=keras.utils)
    if size >= 3:
        resnet = ResNet152V2(include_top=False,
                             pooling=None,
                             weights='imagenet',
                             input_shape=(image_size, image_size, 3),
                             backend=keras.backend,
                             layers=keras.layers,
                             models=keras.models,
                             utils=keras.utils)

    layer_size = model_scale * 8 * 8 * 8
    if is_square(layer_size):  # work out layer dimensions
        layer_l = int(math.sqrt(layer_size) + 0.5)
        layer_r = layer_l
    else:
        layer_m = math.log(math.sqrt(layer_size), 2)
        layer_l = 2**math.ceil(layer_m)
        layer_r = layer_size // layer_l
    layer_l = int(layer_l)
    layer_r = int(layer_r)

    x_init = None
    inp = Input(shape=(image_size, image_size, 3))
    x = resnet(inp)

    if (depth < 0):
        depth = 1

    if (size <= 1):
        if (size <= 0):
            x = Conv2D(model_scale * 8, 1,
                       activation=activation)(x)  # scale down
            x = Reshape((layer_r, layer_l))(x)
        else:
            x = Conv2D(model_scale * 8 * 4, 1,
                       activation=activation)(x)  # scale down a little
            x = Reshape((layer_r * 2, layer_l * 2))(x)
    else:
        if (size == 2):
            x = Conv2D(1024, 1, activation=activation)(x)  # scale down a bit
            x = Reshape((256, 256))(x)
        else:
            x = Reshape((256, 512))(x)  # all weights used

    # TreeConnect-inspired layers instead of dense layers; see
    # https://github.com/OliverRichter/TreeConnect/blob/master/cifar.py
    while depth > 0:
        x = LocallyConnected1D(layer_r, 1, activation=activation)(x)
        x = Permute((2, 1))(x)
        x = LocallyConnected1D(layer_l, 1, activation=activation)(x)
        x = Permute((2, 1))(x)
        if x_init is not None:
            x = Add()([x, x_init])  # add skip connection
        x_init = x
        depth -= 1

    x = Reshape((model_scale, 512))(x)  # train against all dlatent values

    mirrored_strategy = tf.distribute.MirroredStrategy()
    with mirrored_strategy.scope():
        model = Model(inputs=inp, outputs=x)
        # By default: adam optimizer, logcosh used for loss.
        model.compile(loss=loss, metrics=[], optimizer=optimizer)

    return model
Code example #10
train_images = train_images.astype('float32')
test_images = test_images.astype('float32')

#Vanilla AUTOENCODER
lat_dim = 20
# Encoder
inputs = Input(shape=(28, 28))
x = Flatten()(inputs)
x = Dense(units=64, activation=tf.nn.relu)(x)
latents = Dense(units=lat_dim, activation=tf.nn.relu)(x)
encoder = tf.keras.Model(inputs=inputs, outputs=latents, name='encoder')

# Decoder
lats = Input(shape=(lat_dim, ))
y = Dense(units=28 * 28)(lats)
y = Reshape((28, 28))(y)
decoder = tf.keras.Model(lats, y, name='decoder')

outputs = decoder([encoder(inputs)])
VanillaAE = tf.keras.Model(inputs, outputs)

#COMPILE
VanillaAE.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
#TRAIN
epoch_no, batch_size = 5, 60
train_model = VanillaAE.fit(train_images,
                            train_images,
                            epochs=epoch_no,
                            batch_size=batch_size)

test_loss, test_acc = VanillaAE.evaluate(test_images, test_images, verbose=2)
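
The excerpt assumes train_images/test_images are already in scope; they could be prepared like this (the choice of MNIST is an assumption):

import tensorflow as tf

(train_images, _), (test_images, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0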
Code example #11
    def deserialize_model(model_bytes, load_model_fn):
        """Deserialize model from byte array."""
        bio = io.BytesIO(model_bytes)
        with h5py.File(bio) as f:
            return load_model_fn(f, custom_objects=CUSTOM_OBJECTS)


    # Do not use GPU for the session creation.
    config = tf.ConfigProto(device_count={'GPU': 0})
    K.set_session(tf.Session(config=config))

    # Build the model.
    inputs = {col: Input(shape=(1,), name=col) for col in all_cols}
    embeddings = [Embedding(len(vocab[col]), 10, input_length=1, name='emb_' + col)(inputs[col])
                  for col in categorical_cols]
    continuous_bn = Concatenate()([Reshape((1, 1), name='reshape_' + col)(inputs[col])
                                   for col in continuous_cols])
    continuous_bn = BatchNormalization()(continuous_bn)
    x = Concatenate()(embeddings + [continuous_bn])
    x = Flatten()(x)
    x = Dense(1000, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
    x = Dense(1000, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
    x = Dense(1000, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
    x = Dense(500, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
    x = Dropout(0.5)(x)
    output = Dense(1, activation=act_sigmoid_scaled)(x)
    model = tf.keras.Model([inputs[f] for f in all_cols], output)
    model.summary()

    # Horovod: add Distributed Optimizer.
    opt = tf.keras.optimizers.Adam(lr=args.learning_rate, epsilon=1e-3)
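
This block references act_sigmoid_scaled, all_cols, vocab, and args defined elsewhere in the file (it matches Horovod's Keras-on-Spark examples). The custom activation is presumably along these lines, with max_sales assumed to be the largest label value:

def act_sigmoid_scaled(x):
    # Sigmoid scaled to the log of the maximum observed label.
    return tf.nn.sigmoid(x) * tf.math.log(max_sales) / tf.math.log(2.0)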
Code example #12
model = Sequential()
# model.add(Conv2D(filters=100, kernel_size=(4, 4), padding='same',
#                  strides=1, input_shape=(28, 28, 1)))
# model.add(MaxPooling2D(pool_size=6))
# model.add(Dropout(0.5))
# model.add(Conv2D(1, 2))
# model.add(Conv2D(1, 2))
# model.add(Flatten())
model.add(Dense(1000, input_shape=(28, 28, 1), activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(250, activation='relu'))
model.add(Dense(120, activation='relu'))
model.add(Flatten())
model.add(Dense(60, activation='relu'))
model.add(Dense(784, activation='relu'))
model.add(Reshape((28, 28, 1)))  # must match the 784 units of the layer above
model.add(Dense(1))

model.summary()
# NOTE: in the source file the rest of this snippet was disabled by an
# unterminated ''' block; the stray quote is dropped here so the code parses.
# 3. compile, train

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
early_stopping = EarlyStopping(monitor='val_loss', patience=6, mode='auto')
modelpath= '../data/modelcheckpoint/k57_mnist_{epoch:02d}-{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(modelpath, monitor='val_loss', save_best_only=True, mode='auto')
reduce_lr =ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.5, verbose=1)


model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
hist = model.fit(x_train, y_train, batch_size=64, epochs=5, validation_split=0.2, callbacks=[early_stopping, cp, reduce_lr])
Code example #13
File: OptunaOptCondAE.py  Project: AlexGKim/SDSS_PAE
def objective(trial):
    input = Input(shape=(dim, 1))
    input_mask = Input(shape=(dim, 1))
    input_noise = Input(shape=(dim, 1))
    input_type = Input(shape=(1, 1))
    input_params = Input(shape=(1, 1))

    if cond_on == 'type':
        z = input_type
    if cond_on == 'redshift':
        z = input_params

    n_layers = trial.suggest_int('n_layers', 2, 5)
    latent_dim = trial.suggest_int('latent_dim', 8, 14)

    x = input
    out_features = []
    for ii in range(n_layers - 1):
        if ii > 0:
            out_features.append(
                trial.suggest_int('n_units_l{}'.format(ii), latent_dim,
                                  min(dim, 2 * out_features[-1])))
            p = trial.suggest_float("dropout_encoder_l{}".format(ii),
                                    1e-5,
                                    0.3,
                                    log=True)
            x = Dropout(p)(x)
        else:
            out_features.append(
                trial.suggest_int('n_units_l{}'.format(ii), latent_dim, dim))
        x = dense_block(x, out_features[ii], spec_norm=True)
    x = dense_block(x, latent_dim, non_lin=False, spec_norm=True)
    x = Reshape((latent_dim, 1))(x)
    for ii in range(n_layers - 1):
        x = dense_cond_block(x, z, out_features[-1 - ii])
        if ii > 0:
            p = trial.suggest_float("dropout_decoder_l{}".format(ii),
                                    1e-5,
                                    0.3,
                                    log=True)
            x = Dropout(p)(x)
    x = dense_cond_block(x, z, dim, non_lin=False)

    lr_initial = trial.suggest_float("lr_init", 5e-4, 2e-3, log=False)
    lr_end = trial.suggest_float("lr_final", 5e-6, lr_initial, log=True)
    batchsize = trial.suggest_int("batchsize", 16, 256)
    decay_steps = trial.suggest_int("decay_steps",
                                    2000,
                                    40000 // batchsize * 20,
                                    log=True)
    if batchsize in param_history["batchsize"]:
        if lr_initial in param_history['lr_init']:
            raise optuna.exceptions.TrialPruned()

    param_history['batchsize'].append(batchsize)
    param_history['lr_init'].append(lr_initial)

    optimizer_name = trial.suggest_categorical("optimizer",
                                               ["Adam", "RMSprop", "SGD"])

    learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
        lr_initial, decay_steps, lr_end, power=0.5, cycle=True)

    optim = optimizers[optimizer_name]

    lstm_ae = CustomModel(
        inputs=[input, input_mask, input_noise, input_type, input_params],
        outputs=x)
    lstm_ae.compile(optimizer=optim(learning_rate=learning_rate_fn),
                    my_loss=lossFunction,
                    metrics=[],
                    run_eagerly=False)

    lstm_ae.fit(x=(train['spec'], train['mask'], train['noise'],
                   np.expand_dims(train['subclass'],
                                  -1), np.expand_dims(train['z'], -1)),
                batch_size=batchsize,
                epochs=EPOCHS,
                verbose=0)

    res_valid = lstm_ae.predict((valid['spec'], valid['mask'], valid['noise'],
                                 valid['subclass'], valid['z']))
    recon_error = custom_metric((valid['spec'], valid['mask'], valid['noise'],
                                 valid['subclass'], valid['z']), res_valid)

    return recon_error
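
The objective would then be driven by an Optuna study; a minimal sketch (the trial count is an assumption, and param_history, optimizers, train, and valid must be defined as the snippet expects):

import optuna

study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=100)
print(study.best_params)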
Code example #14
def line_lstm_ctc(input_shape,
                  output_shape,
                  window_width=28,
                  window_stride=14,
                  lstm_dim=128,
                  bidirectional=True,
                  conv_layers=1):
    image_height, image_width = input_shape
    output_length, num_classes = output_shape

    num_windows = int((image_width - window_width) / window_stride) + 1
    if num_windows < output_length:
        raise ValueError(
            f'Window width/stride need to generate at least {output_length} windows (currently {num_windows})'
        )

    image_input = Input(shape=input_shape, name='image')
    y_true = Input(shape=(output_length, ), name='y_true')
    input_length = Input(shape=(1, ), name='input_length')
    label_length = Input(shape=(1, ), name='label_length')

    gpu_present = len(device_lib.list_local_devices()) > 1
    lstm_fn = CuDNNLSTM if gpu_present else LSTM

    # Your code should use slide_window and extract image patches from image_input.
    # Pass a convolutional model over each image patch to generate a feature vector per window.
    # Pass these features through one or more LSTM layers.
    # Convert the lstm outputs to softmax outputs.
    # Note that LSTMs expect an input of shape (batch_size, num_timesteps, feature_length).

    ##### Your code below (Lab 3)
    image_reshaped = Reshape((image_height, image_width, 1))(image_input)
    # (image_height, image_width, 1)

    image_patches = Lambda(slide_window,
                           arguments={
                               'window_width': window_width,
                               'window_stride': window_stride
                           })(image_reshaped)
    # (num_windows, image_height, window_width, 1)

    # Make a LeNet and get rid of the last two layers (softmax and dropout)
    convnet = lenet((image_height, window_width, 1), (num_classes, ))
    convnet = KerasModel(inputs=convnet.inputs,
                         outputs=convnet.layers[-2].output)
    convnet_outputs = TimeDistributed(convnet)(image_patches)
    # (num_windows, 128)

    if bidirectional:
        lstm_output = Bidirectional(lstm_fn(
            lstm_dim, return_sequences=True))(convnet_outputs)
    else:
        lstm_output = lstm_fn(lstm_dim, return_sequences=True)(convnet_outputs)
    # (num_windows, lstm_dim)

    lstm_output = lstm_fn(lstm_dim, return_sequences=True)(lstm_output)
    # (num_windows, lstm_dim)

    softmax_output = Dense(num_classes,
                           activation='softmax',
                           name='softmax_output')(lstm_output)
    # (num_windows, num_classes)
    ##### Your code above (Lab 3)

    input_length_processed = Lambda(
        lambda x, num_windows=None: x * num_windows,
        arguments={'num_windows': num_windows})(input_length)

    ctc_loss_output = Lambda(
        lambda x: K.ctc_batch_cost(x[0], x[1], x[2], x[3]), name='ctc_loss')(
            [y_true, softmax_output, input_length_processed, label_length])

    ctc_decoded_output = Lambda(
        lambda x: ctc_decode(x[0], x[1], output_length),
        name='ctc_decoded')([softmax_output, input_length_processed])

    model = KerasModel(
        inputs=[image_input, y_true, input_length, label_length],
        outputs=[ctc_loss_output, ctc_decoded_output])
    return model
Code example #15
initial_state = Concatenate()(
    [input_noise, auxiliary_inputs, charge_input, H_theta, H_pt_theta])

H = Dense(int(G_architecture[0]))(initial_state)
H = LeakyReLU(alpha=0.2)(H)
H = BatchNormalization(momentum=0.8)(H)

for layer in G_architecture[1:]:

    H = Dense(int(layer))(H)
    H = LeakyReLU(alpha=0.2)(H)
    H = BatchNormalization(momentum=0.8)(H)

H = Dense(4, activation='tanh')(H)

H = Reshape((1, 4))(H)

H_r = split_tensor(0, H)
# H_r = Activation('sigmoid')(H_r)
H_r = Reshape((1, 1))(H_r)

H_z = split_tensor(1, H)
# H_z = Activation('tanh')(H_z)
# H_z = ReLU()(H_z)
H_z = Reshape((1, 1))(H_z)

H_pt = split_tensor(2, H)
# H_pt = Activation('tanh')(H_pt)
# H_pt = ReLU()(H_pt)
H_pt = Reshape((1, 1))(H_pt)
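
split_tensor is assumed to slice one component out of the last axis of H; a plausible definition:

from keras.layers import Lambda

def split_tensor(index, x):
    # Select a single feature along the last axis, keeping the batch axis.
    return Lambda(lambda t: t[:, :, index])(x)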
Code example #16
userModel = Concatenate(axis=1)(
    [usersIdDropOut, usersGenderDropout, usersAgeDropout, usersJobIdDropout])
userDense1 = Dense(64, activation='relu')(userModel)
# userDense1 = Reshape((-1, 64))(userDense1)

# ------------ movie part
moviesIdInput = Input(shape=(1, ), dtype="int32", name='movieId')
moviesIdEmbedding = Embedding(moviesIdInputDim + 1, 32,
                              input_length=1)(moviesIdInput)
moviesIdDense1 = Dense(16, activation='relu')(moviesIdEmbedding)
moviesIdDropout = Dropout(rate=0.4)(moviesIdDense1)

moviesGenresInput = Input(shape=(moviesGenresInputDim, ),
                          dtype="float32",
                          name='movieGenres')
moviesGenresReshape = Reshape((1, moviesGenresInputDim))(moviesGenresInput)
# moviesGenresEmbedding = Embedding(moviesGenresInputDim+1, 16, input_length=moviesGenresInputDim)(moviesGenresInput)
moviesGenresDense1 = Dense(16, activation='relu')(moviesGenresReshape)
moviesGenresDropout = Dropout(rate=0.4)(moviesGenresDense1)

moviesTitleInput = Input(shape=(15, ), dtype="int32", name='movieTitle')
moviesTitleEmbedding = Embedding(moviesTitleInputDim + 1, 4,
                                 input_length=15)(moviesTitleInput)
moviesTitleDense1 = Dense(16, activation='relu')(moviesTitleEmbedding)
moviesTitleDropout = Dropout(rate=0.4)(moviesTitleDense1)

movieModel = Concatenate(axis=1)(
    [moviesIdDropout, moviesGenresDropout, moviesTitleDropout])
movieDense1 = Dense(64, activation='relu')(movieModel)

# -----------combine
Code example #17
def discriminator(architecture_size='large',
                  phaseshuffle_samples=0,
                  n_classes=2):

    discriminator_filters = [64, 128, 256, 512, 1024, 2048]

    if architecture_size == 'large':
        # audio_input_dim = 65536
        audio_input_dim = 114688
    # elif architecture_size == 'medium':
    #     audio_input_dim = 32786
    # elif architecture_size == 'small':
    #     audio_input_dim = 16384
    else:
        # Guard added: without it, audio_input_dim would be undefined below.
        raise ValueError("only architecture_size='large' is supported here")

    label_input = Input(shape=(1, ),
                        dtype='int32',
                        name='discriminator_label_input')
    label_em = Embedding(n_classes, n_classes * 20)(label_input)
    label_em = Dense(audio_input_dim)(label_em)
    label_em = Reshape((audio_input_dim, 1))(label_em)

    discriminator_input = Input(shape=(audio_input_dim, 1),
                                name='discriminator_input')
    x = Concatenate()([discriminator_input, label_em])

    # if architecture_size == 'small':
    #     # layers 0 to 3
    #     for i in range(4):
    #         x = Conv1D(
    #             filters = discriminator_filters[i]
    #             , kernel_size = 25
    #             , strides = 4
    #             , padding = 'same'
    #             , name = f'discriminator_conv_{i}'
    #             )(x)

    #         x = LeakyReLU(alpha = 0.2)(x)
    #         if phaseshuffle_samples > 0:
    #             x = Lambda(apply_phaseshuffle)([x, phaseshuffle_samples])

    #     #layer 4, no phase shuffle
    #     x = Conv1D(
    #         filters = discriminator_filters[4]
    #         , kernel_size = 25
    #         , strides = 4
    #         , padding = 'same'
    #         , name = f'discriminator_conv_4'
    #         )(x)

    #     x = Flatten()(x)

    # # if architecture_size == 'medium':

    # #     # layers
    # #     for i in range(4):
    # #         x = Conv1D(
    # #             filters = discriminator_filters[i]
    # #             , kernel_size = 25
    # #             , strides = 4
    # #             , padding = 'same'
    # #             , name = f'discriminator_conv_{i}'
    # #             )(x)

    # #         x = LeakyReLU(alpha = 0.2)(x)
    # #         if phaseshuffle_samples > 0:
    # #             x = Lambda(apply_phaseshuffle)([x, phaseshuffle_samples])

    # #     x = Conv1D(
    # #         filters = discriminator_filters[4]
    # #         , kernel_size = 25
    # #         , strides = 4
    # #         , padding = 'same'
    # #         , name = 'discriminator_conv_4'
    # #         )(x)

    # #     x = LeakyReLU(alpha = 0.2)(x)

    # #     x = Conv1D(
    # #         filters = discriminator_filters[5]
    # #         , kernel_size = 25
    # #         , strides = 2
    # #         , padding = 'same'
    # #         , name = 'discriminator_conv_5'
    # #         )(x)

    # #     x = LeakyReLU(alpha = 0.2)(x)
    # #     x = Flatten()(x)

    if architecture_size == 'large':

        # layers
        for i in range(4):
            x = Conv1D(filters=discriminator_filters[i],
                       kernel_size=25,
                       strides=4,
                       padding='same',
                       name=f'discriminator_conv_{i}')(x)
            x = LeakyReLU(alpha=0.2)(x)
            if phaseshuffle_samples > 0:
                x = Lambda(apply_phaseshuffle)([x, phaseshuffle_samples])

        # last 2 layers without phase shuffle
        x = Conv1D(filters=discriminator_filters[4],
                   kernel_size=25,
                   strides=4,
                   padding='same',
                   name='discriminator_conv_4')(x)
        x = LeakyReLU(alpha=0.2)(x)

        x = Conv1D(filters=discriminator_filters[5],
                   kernel_size=25,
                   strides=4,
                   padding='same',
                   name='discriminator_conv_5')(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Flatten()(x)

    discriminator_output = Dense(1)(x)
    discriminator = Model([discriminator_input, label_input],
                          discriminator_output,
                          name='Discriminator')
    return discriminator
Code example #18
def multibox_head(source_layers, num_priors, normalizations=None, softmax=True):
    
    class_activation = 'softmax' if softmax else 'sigmoid'
    
    mbox_conf = []
    mbox_loc = []
    link_interlayer_conf = []
    link_crosslayer_conf = []
    for i in range(len(source_layers)):
        x = source_layers[i]
        name = x.name.split('/')[0]
        
        # normalize
        if normalizations is not None and normalizations[i] > 0:
            name = name + '_norm'
            x = Normalize(normalizations[i], name=name)(x)
            
        # confidence
        name1 = name + '_mbox_conf'
        x1 = Conv2D(num_priors[i] * 2, 3, padding='same', name=name1)(x)
        x1 = Flatten(name=name1+'_flat')(x1)
        mbox_conf.append(x1)

        # location
        name2 = name + '_mbox_loc'
        x2 = Conv2D(num_priors[i] * 5, 3, padding='same', name=name2)(x)
        x2 = Flatten(name=name2+'_flat')(x2)
        mbox_loc.append(x2)
        
        # link interlayer confidence
        name3 = name + '_link_interlayer_conf'
        x3 = Conv2D(num_priors[i] * 2 * 8, 3, padding='same', name=name3)(x)
        x3 = Flatten(name=name3+'_flat')(x3)
        link_interlayer_conf.append(x3)
        
        # link crosslayer confidence
        name4 = name + '_link_crosslayer_conf'
        x4 = Conv2D(num_priors[i] * 2 * 4, 3, padding='same', name=name4)(x)
        x4 = Flatten(name=name4+'_flat')(x4)
        link_crosslayer_conf.append(x4)

    mbox_conf = concatenate(mbox_conf, axis=1, name='mbox_conf')
    mbox_conf = Reshape((-1, 2), name='mbox_conf_logits')(mbox_conf)
    mbox_conf = Activation(class_activation, name='mbox_conf_final')(mbox_conf)
    
    mbox_loc = concatenate(mbox_loc, axis=1, name='mbox_loc')
    mbox_loc = Reshape((-1, 5), name='mbox_loc_final')(mbox_loc)
    
    link_interlayer_conf = concatenate(link_interlayer_conf, axis=1, name='link_interlayer_conf')
    link_interlayer_conf = Reshape((-1, 2), name='link_interlayer_conf_logits')(link_interlayer_conf)
    link_interlayer_conf = Activation(class_activation, name='link_interlayer_conf_softmax')(link_interlayer_conf)
    link_interlayer_conf = Reshape((-1, 2 * 8), name='link_interlayer_conf_final')(link_interlayer_conf)
    
    link_crosslayer_conf = concatenate(link_crosslayer_conf, axis=1, name='link_crosslayer_conf')
    link_crosslayer_conf = Reshape((-1, 2), name='link_crosslayer_conf_logits')(link_crosslayer_conf)
    link_crosslayer_conf = Activation(class_activation, name='link_crosslayer_conf_softmax')(link_crosslayer_conf)
    link_crosslayer_conf = Reshape((-1, 2 * 4), name='link_crosslayer_conf_final')(link_crosslayer_conf)
    
    predictions = concatenate([
            mbox_conf, 
            mbox_loc,
            link_interlayer_conf, 
            link_crosslayer_conf
            ], axis=2, name='predictions')
    
    return predictions
Code example #19
def CRNN_STN(cfg):

    inputs = Input((cfg.width, cfg.height, cfg.nb_channels))
    c_1 = Conv2D(cfg.conv_filter_size[0], (3, 3),
                 activation='relu',
                 padding='same',
                 name='conv_1')(inputs)
    c_2 = Conv2D(cfg.conv_filter_size[1], (3, 3),
                 activation='relu',
                 padding='same',
                 name='conv_2')(c_1)
    c_3 = Conv2D(cfg.conv_filter_size[2], (3, 3),
                 activation='relu',
                 padding='same',
                 name='conv_3')(c_2)
    bn_3 = BatchNormalization(name='bn_3')(c_3)
    p_3 = MaxPooling2D(pool_size=(2, 2), name='maxpool_3')(bn_3)

    c_4 = Conv2D(cfg.conv_filter_size[3], (3, 3),
                 activation='relu',
                 padding='same',
                 name='conv_4')(p_3)
    c_5 = Conv2D(cfg.conv_filter_size[4], (3, 3),
                 activation='relu',
                 padding='same',
                 name='conv_5')(c_4)
    bn_5 = BatchNormalization(name='bn_5')(c_5)
    p_5 = MaxPooling2D(pool_size=(2, 2), name='maxpool_5')(bn_5)

    c_6 = Conv2D(cfg.conv_filter_size[5], (3, 3),
                 activation='relu',
                 padding='same',
                 name='conv_6')(p_5)
    c_7 = Conv2D(cfg.conv_filter_size[6], (3, 3),
                 activation='relu',
                 padding='same',
                 name='conv_7')(c_6)
    bn_7 = BatchNormalization(name='bn_7')(c_7)

    bn_7_shape = bn_7.get_shape()

    loc_input_shape = (bn_7_shape[1], bn_7_shape[2], bn_7_shape[3])
    stn = SpatialTransformer(localization_net=loc_net(loc_input_shape),
                             output_size=(loc_input_shape[0],
                                          loc_input_shape[1]))(bn_7)

    reshape = Reshape(target_shape=(int(bn_7_shape[1]),
                                    int(bn_7_shape[2] * bn_7_shape[3])),
                      name='reshape')(stn)

    fc_9 = Dense(cfg.lstm_nb_units[0], activation='relu', name='fc_9')(reshape)

    lstm_10 = LSTM(cfg.lstm_nb_units[0],
                   kernel_initializer="he_normal",
                   return_sequences=True,
                   name='lstm_10')(fc_9)
    lstm_10_back = LSTM(cfg.lstm_nb_units[0],
                        kernel_initializer="he_normal",
                        go_backwards=True,
                        return_sequences=True,
                        name='lstm_10_back')(fc_9)
    lstm_10_add = add([lstm_10, lstm_10_back])

    lstm_11 = LSTM(cfg.lstm_nb_units[1],
                   kernel_initializer="he_normal",
                   return_sequences=True,
                   name='lstm_11')(lstm_10_add)
    lstm_11_back = LSTM(cfg.lstm_nb_units[1],
                        kernel_initializer="he_normal",
                        go_backwards=True,
                        return_sequences=True,
                        name='lstm_11_back')(lstm_10_add)
    lstm_11_concat = concatenate([lstm_11, lstm_11_back])
    do_11 = Dropout(cfg.dropout_rate, name='dropout')(lstm_11_concat)

    fc_12 = Dense(len(cfg.characters),
                  kernel_initializer='he_normal',
                  activation='softmax',
                  name='fc_12')(do_11)

    prediction_model = Model(inputs=inputs, outputs=fc_12)

    labels = Input(name='labels', shape=[cfg.label_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')

    ctc_loss = Lambda(ctc_lambda_func, output_shape=(1, ),
                      name='ctc')([fc_12, labels, input_length, label_length])

    training_model = Model(inputs=[inputs, labels, input_length, label_length],
                           outputs=[ctc_loss])

    return training_model, prediction_model
Code example #20
File: core.py  Project: 0b01/AutoBassTab
def build_and_load_model(model_capacity):
    """
    Build the CNN model and load the weights

    Parameters
    ----------
    model_capacity : 'tiny', 'small', 'medium', 'large', or 'full'
        String specifying the model capacity; it sets the capacity
        multiplier to 4 (tiny), 8 (small), 16 (medium), 24 (large),
        or 32 (full). 'full' uses the model size specified in the paper,
        and the others use a reduced number of filters in each convolutional
        layer, resulting in a smaller model that is faster to evaluate at the
        cost of slightly reduced pitch estimation accuracy.

    Returns
    -------
    model : tensorflow.keras.models.Model
        The pre-trained keras model loaded in memory
    """
    from tensorflow.keras.layers import Input, Reshape, Conv2D, BatchNormalization
    from tensorflow.keras.layers import MaxPool2D, Dropout, Permute, Flatten, Dense
    from tensorflow.keras.models import Model

    if models[model_capacity] is None:
        capacity_multiplier = {
            'tiny': 4,
            'small': 8,
            'medium': 16,
            'large': 24,
            'full': 32
        }[model_capacity]

        layers = [1, 2, 3, 4, 5, 6]
        filters = [n * capacity_multiplier for n in [32, 4, 4, 4, 8, 16]]
        widths = [512, 64, 64, 64, 64, 64]
        strides = [(4, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]

        x = Input(shape=(1024, ), name='input', dtype='float32')
        y = Reshape(target_shape=(1024, 1, 1), name='input-reshape')(x)

        for l, f, w, s in zip(layers, filters, widths, strides):
            y = Conv2D(f, (w, 1),
                       strides=s,
                       padding='same',
                       activation='relu',
                       name="conv%d" % l)(y)
            y = BatchNormalization(name="conv%d-BN" % l)(y)
            y = MaxPool2D(pool_size=(2, 1),
                          strides=None,
                          padding='valid',
                          name="conv%d-maxpool" % l)(y)
            y = Dropout(0.25, name="conv%d-dropout" % l)(y)

        y = Permute((2, 1, 3), name="transpose")(y)
        y = Flatten(name="flatten")(y)
        y = Dense(360, activation='sigmoid', name="classifier")(y)

        model = Model(inputs=x, outputs=y)

        package_dir = os.path.dirname(os.path.realpath(__file__))
        filename = "model-{}.h5".format(model_capacity)
        model.load_weights(os.path.join(package_dir, filename))
        model.compile('adam', 'binary_crossentropy')

        models[model_capacity] = model

    return models[model_capacity]
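
A usage sketch (the function presupposes a module-level models dict keyed by capacity and a model-<capacity>.h5 weights file next to the module, as in the CREPE pitch tracker this file is based on):

import numpy as np

model = build_and_load_model('full')
frame = np.zeros((1, 1024), dtype=np.float32)  # one dummy audio frame
print(model.predict(frame).shape)  # (1, 360) pitch-bin activations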
Code example #21
# The scraped snippet begins mid-statement; a plausible head (plus the
# functools import needed by `reduce` below) is reconstructed here.
from functools import reduce

model = Sequential()
model.add(
    TimeDistributed(Conv2D(32, (10, 10),
                           strides=(3, 3),
                           padding='same',
                           activation='relu'),
                    input_shape=(timesteps, rows, cols, colour_channels)))
model.add(
    TimeDistributed(
        Conv2D(32, (8, 8), strides=(3, 3), padding='same', activation='relu')))
r_layer = TimeDistributed(
    Conv2D(32, (5, 5), strides=(3, 3), padding='same', activation='relu'))
model.add(r_layer)
# model.add(MaxPooling2D)
# first number is output size
# model.add(LSTM(32, return_sequences=True, input_shape=(timesteps, data_dim)))
shape = reduce(lambda x, y: x * y, r_layer.output_shape[-3:])
model.add(Reshape((20, shape)))
model.add(LSTM(200, return_sequences=True))
model.add(LSTM(50, return_sequences=True))
model.add(Dense(num_classes, activation='sigmoid'))

rms = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-6)
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# model.load_weights("weights.hdf5", by_name=False)
model.summary()

util = Utility.Utility()
train_c, train_e, _, _ = dataImport.readData("data", 0, numOfExamples=10)
print("read training set")
val_c, val_e, _, _ = dataImport.readData("data", 1, numOfExamples=1)
Code example #22
def augmented_conv1d(ip,
                     shape,
                     filters,
                     kernel_size=3,
                     strides=1,
                     padding='same',
                     depth_k=0.2,
                     depth_v=0.2,
                     num_heads=2,
                     relative_encodings=True):
    """
    Builds an Attention Augmented Convolution block.
    Args:
        ip: keras tensor.
        shape: tuple (timesteps, features) of the input.
        filters: number of output filters.
        kernel_size: convolution kernel size.
        strides: strides of the convolution.
        padding: padding mode of the convolution.
        depth_k: float or int. Number of filters for k.
            If passed as float, computed as `filters * depth_k`.
        depth_v: float or int. Number of filters for v.
            If passed as float, computed as `filters * depth_v`.
        num_heads: int. Number of attention heads.
            Must be set such that `depth_k // num_heads` is > 0.
        relative_encodings: bool. Whether to use relative
            encodings or not.
    Returns:
        a keras tensor.
    """

    if not isinstance(kernel_size, int):
        kernel_size = kernel_size[0]

    if not isinstance(strides, int):
        strides = strides[0]

    t_n = shape[0]
    f_n = shape[1]

    # input_shape = K.int_shape(ip)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    depth_k, depth_v = _normalize_depth_vars(depth_k, depth_v, filters)

    # print(kernel_size)
    # print(strides)

    conv_out = _conv_layer1d(ip,
                             t_n,
                             f_n,
                             filters - depth_v,
                             kernel_size,
                             strides,
                             padding='same')

    # Augmented Attention Block
    qkv_conv = _conv_layer1d(ip,
                             t_n,
                             f_n,
                             2 * depth_k + depth_v,
                             1,
                             strides,
                             padding='same')
    attn_out = AttentionAugmentation2D(depth_k, depth_v, num_heads,
                                       relative_encodings)(qkv_conv)
    attn_out = _conv_layer1r(attn_out,
                             t_n,
                             depth_v,
                             depth_v,
                             1,
                             strides,
                             padding='same')

    output = Concatenate(axis=channel_axis)([conv_out, attn_out])

    reshape = Reshape((t_n, filters))(output)

    return reshape
Code example #23
def MobileNetv2(input_shape, k, alpha=1.0):
    """MobileNetv2
    This function defines a MobileNetv2 architectures.

    # Arguments
        input_shape: An integer or tuple/list of 3 integers, shape
            of input tensor.
        k: Integer, number of classes.
        alpha: Float, width multiplier, typically one of [0.35, 0.50, 0.75, 1.0, 1.3, 1.4].

    # Returns
        MobileNetv2 model.
    """
    inputs = Input(shape=input_shape)

    first_filters = _make_divisible(32 * alpha, 8)
    x = _conv_block(inputs, first_filters, (3, 3), strides=(2, 2))

    x = _inverted_residual_block(x,
                                 16, (3, 3),
                                 t=1,
                                 alpha=alpha,
                                 strides=1,
                                 n=1)
    x = _inverted_residual_block(x,
                                 24, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=2,
                                 n=2)
    x = _inverted_residual_block(x,
                                 32, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=2,
                                 n=3)
    x = _inverted_residual_block(x,
                                 64, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=2,
                                 n=4)
    x = _inverted_residual_block(x,
                                 96, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=1,
                                 n=3)
    x = _inverted_residual_block(x,
                                 160, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=2,
                                 n=3)
    x = _inverted_residual_block(x,
                                 320, (3, 3),
                                 t=6,
                                 alpha=alpha,
                                 strides=1,
                                 n=1)

    if alpha > 1.0:
        last_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_filters = 1280

    x = _conv_block(x, last_filters, (1, 1), strides=(1, 1))
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, last_filters))(x)
    x = Dropout(0.3, name='Dropout')(x)
    x = Conv2D(k, (1, 1), padding='same')(x)

    x = Activation('softmax', name='softmax')(x)
    output = Reshape((k, ))(x)

    model = Model(inputs, output)
    # plot_model(model, to_file='images/MobileNetv2.png', show_shapes=True)

    return model
Code example #24
    def __init__(self,
                 input_dim=1,
                 exo_dim=0,
                 backcast_length=480,
                 forecast_length=1,
                 stack_types=(TREND_BLOCK, SEASONALITY_BLOCK),
                 nb_blocks_per_stack=5,
                 thetas_dim=(4, 8),
                 share_weights_in_stack=False,
                 hidden_layer_units=256,
                 nb_harmonics=None):

        self.stack_types = stack_types
        self.nb_blocks_per_stack = nb_blocks_per_stack
        self.thetas_dim = thetas_dim
        self.units = hidden_layer_units
        self.share_weights_in_stack = share_weights_in_stack
        self.backcast_length = backcast_length
        self.forecast_length = forecast_length
        self.input_dim = input_dim
        self.exo_dim = exo_dim
        self.input_shape = (self.backcast_length, self.input_dim)
        self.exo_shape = (self.backcast_length, self.exo_dim)
        self.output_shape = (self.forecast_length, self.input_dim)
        self.weights = {}
        self.nb_harmonics = nb_harmonics
        assert len(self.stack_types) == len(self.thetas_dim)

        x = Input(shape=self.input_shape, name='input_variable')
        x_ = {}
        for k in range(self.input_dim):
            x_[k] = Lambda(lambda z, k=k: z[..., k])(x)  # bind k by value (late-binding pitfall)
        e_ = {}
        if self.has_exog():
            e = Input(shape=self.exo_shape, name='exos_variables')
            for k in range(self.exo_dim):
                e_[k] = Lambda(lambda z, k=k: z[..., k])(e)  # bind k by value
        else:
            e = None
        y_ = {}

        for stack_id in range(len(self.stack_types)):
            stack_type = self.stack_types[stack_id]
            nb_poly = self.thetas_dim[stack_id]
            for block_id in range(self.nb_blocks_per_stack):
                backcast, forecast = self.create_block(x_, e_, stack_id,
                                                       block_id, stack_type,
                                                       nb_poly)
                for k in range(self.input_dim):
                    x_[k] = Subtract()([x_[k], backcast[k]])
                    if stack_id == 0 and block_id == 0:
                        y_[k] = forecast[k]
                    else:
                        y_[k] = Add()([y_[k], forecast[k]])

        for k in range(self.input_dim):
            y_[k] = Reshape(target_shape=(self.forecast_length, 1))(y_[k])
        if self.input_dim > 1:
            y_ = Concatenate(axis=-1)([y_[ll] for ll in range(self.input_dim)])
        else:
            y_ = y_[0]

        if self.has_exog():
            model = Model([x, e], y_)
        else:
            model = Model(x, y_)

        model.summary()

        self.n_beats = model
Code example #25
File: gan.py  Project: Kyanji/MAGNETO
def generator(inputs,
              image_size,
              activation='sigmoid',
              labels=None,
              codes=None):
    """Build a Generator Model

    Stack of BN-ReLU-Conv2DTranspose blocks to generate fake images.
    The output activation is sigmoid instead of the tanh used in [1];
    sigmoid tends to converge more easily.

    Arguments:
        inputs (Layer): Input layer of the generator (the z-vector)
        image_size (int): Target size of one side
            (assuming square image)
        activation (string): Name of output activation layer
        labels (tensor): Input labels
        codes (list): 2-dim disentangled codes for InfoGAN

    Returns:
        Model: Generator Model
    """
    image_resize = image_size // 2
    # network parameters
    kernel_size = 5
    layer_filters = [256, 128, 64, 32, 1]

    if labels is not None:
        if codes is None:
            # ACGAN labels
            # concatenate z noise vector and one-hot labels
            inputs = [inputs, labels]
        else:
            # infoGAN codes
            # concatenate z noise vector,
            # one-hot labels and codes 1 & 2
            inputs = [inputs, labels] + codes
        x = concatenate(inputs, axis=1)
    elif codes is not None:
        # generator 0 of StackedGAN
        inputs = [inputs, codes]
        x = concatenate(inputs, axis=1)
    else:
        # default input is just 100-dim noise (z-code)
        x = inputs

    x = Dense(image_resize * image_resize * layer_filters[0])(x)
    x = Reshape((image_resize, image_resize, layer_filters[0]))(x)

    for filters in layer_filters:
        # only the first transposed-conv layer (filters > 128) uses
        # strides = 2, doubling image_resize back to image_size;
        # the remaining layers use strides = 1
        if filters > layer_filters[-4]:
            strides = 2
        else:
            strides = 1
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            strides=strides,
                            padding='same')(x)

    if activation is not None:
        x = Activation(activation)(x)

    # generator output is the synthesized image x
    return Model(inputs, x, name='generator')
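A minimal usage sketch for the plain path (no labels or codes); the Input import path and the 28x28 target size are assumptions, as the excerpt does not show this file's imports:

# Hypothetical usage: 100-dim z-vector in, 28x28x1 synthetic image out.
from tensorflow.keras.layers import Input  # import path assumed

z = Input(shape=(100,), name='z_input')
gen = generator(z, image_size=28)
gen.summary()  # the single strided Conv2DTranspose doubles 14 -> 28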
Code example #26
def line_lstm_ctc(input_shape,
                  output_shape,
                  window_width=28,
                  window_stride=14):  # pylint: disable=too-many-locals
    image_height, image_width = input_shape
    output_length, num_classes = output_shape

    num_windows = int((image_width - window_width) / window_stride) + 1
    if num_windows < output_length:
        raise ValueError(
            f"Window width/stride need to generate >= {output_length} windows (currently {num_windows})"
        )

    image_input = Input(shape=input_shape, name="image")
    y_true = Input(shape=(output_length, ), name="y_true")
    input_length = Input(shape=(1, ),
                         name="input_length")  # length of image strip in px
    label_length = Input(
        shape=(1, ),
        name="label_length")  # length of target sentence. it varies.

    # Your code should use slide_window and extract image patches from image_input.
    # Pass a convolutional model over each image patch to generate a feature vector per window.
    # Pass these features through one or more LSTM layers.
    # Convert the lstm outputs to softmax outputs.
    # Note that LSTMs expect an input of shape (num_batch_size, num_timesteps, feature_length).

    # Your code below (Lab 3)
    image_reshaped = Reshape((image_height, image_width, 1))(image_input)
    # (image_height, image_width, 1)

    image_patches = Lambda(slide_window,
                           arguments={
                               "window_width": window_width,
                               "window_stride": window_stride
                           })(image_reshaped)
    # (num_windows, image_height, window_width, 1)

    # Make a LeNet and get rid of the last two layers (softmax and dropout)
    convnet = lenet((image_height, window_width, 1), (num_classes, ))
    convnet = KerasModel(inputs=convnet.inputs,
                         outputs=convnet.layers[-2].output)
    convnet_outputs = TimeDistributed(convnet)(image_patches)
    # (num_windows, 128)

    lstm_output = LSTM(128, return_sequences=True)(convnet_outputs)
    # (num_windows, 128)

    softmax_output = Dense(num_classes,
                           activation="softmax",
                           name="softmax_output")(lstm_output)
    # (num_windows, num_classes)
    # Your code above (Lab 3)

    input_length_processed = Lambda(
        lambda x, num_windows=None: x * num_windows,
        arguments={"num_windows": num_windows})(input_length)

    ctc_loss_output = Lambda(
        lambda x: K.ctc_batch_cost(x[0], x[1], x[2], x[3]), name="ctc_loss")(
            [y_true, softmax_output, input_length_processed, label_length])

    ctc_decoded_output = Lambda(
        lambda x: ctc_decode(x[0], x[1], output_length),
        name="ctc_decoded")([softmax_output, input_length_processed])

    model = KerasModel(
        inputs=[image_input, y_true, input_length, label_length],
        outputs=[ctc_loss_output, ctc_decoded_output],
    )
    return model
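Because the graph already computes the CTC loss (the ctc_loss output is the per-example loss itself), compiling only needs a pass-through loss. A minimal sketch, with purely illustrative shapes:

# Hedged compile sketch; input/output shapes are illustrative only.
model = line_lstm_ctc(input_shape=(28, 952), output_shape=(32, 80))
model.compile(loss={'ctc_loss': lambda y_true, y_pred: y_pred},
              optimizer='adam')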
Code example #27
        bio = io.BytesIO(model_bytes)
        with h5py.File(bio, 'r') as f:
            return load_model_fn(f, custom_objects=CUSTOM_OBJECTS)

    # Do not use GPU for the session creation.
    config = tf.ConfigProto(device_count={'GPU': 0})
    K.set_session(tf.Session(config=config))

    # Build the model.
    inputs = {col: Input(shape=(1, ), name=col) for col in all_cols}
    embeddings = [
        Embedding(len(vocab[col]), 10, input_length=1,
                  name='emb_' + col)(inputs[col]) for col in categorical_cols
    ]
    continuous_bn = Concatenate()([
        Reshape((1, 1), name='reshape_' + col)(inputs[col])
        for col in continuous_cols
    ])
    continuous_bn = BatchNormalization()(continuous_bn)
    x = Concatenate()(embeddings + [continuous_bn])
    x = Flatten()(x)
    x = Dense(1000,
              activation='relu',
              kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
    x = Dense(1000,
              activation='relu',
              kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
    x = Dense(1000,
              activation='relu',
              kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
    x = Dense(500,
              activation='relu',
              kernel_regularizer=tf.keras.regularizers.l2(0.00005))(x)
    # NOTE: the source listing is truncated at this Dense layer; the
    # arguments above are assumed from the preceding layers' pattern.
Code example #28
def define_generator(input, depth=256, dim=4):
    #Initialisation
    model = Sequential(name="Generator")
    init = RandomNormal(stddev=0.02)
    nodes = 64 * dim**2
    model.add(Dense(nodes, input_dim=100, kernel_initializer=init))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Reshape((dim, dim, 64)))
    #upsample to 8x8
    model.add(
        Conv2DTranspose(128, (dim, dim),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    #upsample to 16x16
    model.add(
        Conv2DTranspose(256, (dim, dim),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    #upsample to 32x32
    model.add(
        Conv2DTranspose(256, (dim, dim),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    #upsample to 64x64
    model.add(
        Conv2DTranspose(256, (dim, dim),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    #Upsample to 128x128
    model.add(
        Conv2DTranspose(256, (dim, dim),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    #final output to 128x128x3
    model.add(
        Conv2D(3, (3, 3),
               padding='same',
               activation='tanh',
               kernel_initializer=init))
    return model
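A minimal usage sketch; the numpy import and the batch size are assumptions, and note that the input argument is never used inside the function:

# Hypothetical usage: sample latent vectors and synthesize images.
import numpy as np

g = define_generator(None)  # `input` is unused by define_generator
z = np.random.normal(size=(16, 100))
fake = g.predict(z)  # -> (16, 128, 128, 3), values in [-1, 1] from tanh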
Code example #29
def build_encoder_decoder():
    # Encoder
    input_tensor = Input(shape=(320, 320, 4))
    x = Conv2D(64, (3, 3), padding='same', activation='relu',
               name='conv1_1')(input_tensor)
    x = Conv2D(64, (3, 3), padding='same', activation='relu',
               name='conv1_2')(x)
    orig_1 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Conv2D(128, (3, 3), padding='same', activation='relu',
               name='conv2_1')(x)
    x = Conv2D(128, (3, 3), padding='same', activation='relu',
               name='conv2_2')(x)
    orig_2 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Conv2D(256, (3, 3), padding='same', activation='relu',
               name='conv3_1')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu',
               name='conv3_2')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu',
               name='conv3_3')(x)
    orig_3 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    inputs_size = x.get_shape()[1:3]

    conv_4_1x1 = SeparableConv2D(512, (1, 1),
                                 activation='relu',
                                 padding='same',
                                 name='conv4_1x1')(x)
    conv_4_3x3_1 = SeparableConv2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   dilation_rate=ATROUS_RATES[0],
                                   name='conv4_3x3_1')(x)
    conv_4_3x3_2 = SeparableConv2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   dilation_rate=ATROUS_RATES[1],
                                   name='conv4_3x3_2')(x)
    conv_4_3x3_3 = SeparableConv2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   dilation_rate=ATROUS_RATES[2],
                                   name='conv4_3x3_3')(x)
    # Image average pooling
    image_level_features = Lambda(
        lambda x: tf.reduce_mean(x, [1, 2], keepdims=True),
        name='global_average_pooling')(x)
    image_level_features = Conv2D(
        512, (1, 1),
        activation='relu',
        padding='same',
        name='image_level_features_conv_1x1')(image_level_features)
    image_level_features = Lambda(lambda x: tf.image.resize(x, inputs_size),
                                  name='upsample_1')(image_level_features)
    # Concat
    x = Concatenate(axis=3)([
        conv_4_1x1, conv_4_3x3_1, conv_4_3x3_2, conv_4_3x3_3,
        image_level_features
    ])
    x = Conv2D(512, (1, 1),
               activation='relu',
               padding='same',
               name='conv_1x1_1_concat')(x)
    x = Conv2D(512, (1, 1),
               activation='relu',
               padding='same',
               name='conv_1x1_2_concat')(x)
    orig_4 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_3')(x)
    orig_5 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Decoder
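    # Each stage below follows the same pattern: upsample x, reshape both
    # the matching encoder feature map and x to (1, H, W, C), concatenate
    # them along the new axis into (2, H, W, C) per sample, and merge the
    # pair with the custom Unpooling layer (whose definition is not shown
    # in this excerpt) before the deconv blocks.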
    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(orig_5)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    origReshaped = Reshape(shape)(orig_5)
    xReshaped = Reshape(shape)(x)
    together = Concatenate(axis=1)([origReshaped, xReshaped])
    x = Unpooling()(together)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='deconv5_1',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='deconv5_2',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='deconv5_3',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)

    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(orig_4)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    origReshaped = Reshape(shape)(orig_4)
    xReshaped = Reshape(shape)(x)
    together = Concatenate(axis=1)([origReshaped, xReshaped])
    x = Unpooling()(together)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='deconv4_1',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='deconv4_2',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='deconv4_3',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)

    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(orig_3)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    origReshaped = Reshape(shape)(orig_3)
    xReshaped = Reshape(shape)(x)
    together = Concatenate(axis=1)([origReshaped, xReshaped])
    x = Unpooling()(together)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='deconv3_1',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='deconv3_2',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='deconv3_3',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)

    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(orig_2)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    origReshaped = Reshape(shape)(orig_2)
    xReshaped = Reshape(shape)(x)
    together = Concatenate(axis=1)([origReshaped, xReshaped])
    x = Unpooling()(together)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='deconv2_1',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='deconv2_2',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)

    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(orig_1)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    origReshaped = Reshape(shape)(orig_1)
    xReshaped = Reshape(shape)(x)
    together = Concatenate(axis=1)([origReshaped, xReshaped])
    x = Unpooling()(together)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='deconv1_1',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='deconv1_2',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)

    x = Conv2D(1, (3, 3),
               activation='sigmoid',
               padding='same',
               name='pred',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)

    model = Model(inputs=input_tensor, outputs=x)
    return model
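A minimal usage sketch; the 4-channel input presumably stacks RGB with a trimap, as in deep-image-matting pipelines, and ATROUS_RATES plus Unpooling must already be defined in scope (both assumptions):

# Hypothetical usage (names and the trimap interpretation are assumed).
matting_net = build_encoder_decoder()
matting_net.summary()  # (None, 320, 320, 4) -> (None, 320, 320, 1) matte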
Code example #30
def line_lstm_ctc(input_shape, output_shape, window_width=28, window_stride=7):
    image_height, image_width = input_shape
    output_length, num_classes = output_shape

    num_windows = int((image_width - window_width) / window_stride) + 1
    if num_windows < output_length:
        raise ValueError(
            f'Window width/stride need to generate at least {output_length} windows (currently {num_windows})'
        )

    image_input = Input(shape=input_shape, name='image')
    y_true = Input(shape=(output_length, ), name='y_true')
    input_length = Input(shape=(1, ), name='input_length')
    label_length = Input(shape=(1, ), name='label_length')

    gpu_present = len(device_lib.list_local_devices()) > 1
    lstm_fn = CuDNNLSTM if gpu_present else LSTM

    # Your code should use slide_window and extract image patches from image_input.
    # Pass a convolutional model over each image patch to generate a feature vector per window.
    # Pass these features through one or more LSTM layers.
    # Convert the lstm outputs to softmax outputs.
    # Note that LSTMs expect an input of shape (num_batch_size, num_timesteps, feature_length).

    ##### Your code below (Lab 3)

    image_reshaped = Reshape((image_height, image_width, 1))(image_input)

    # lenet option:
    image_patches = Lambda(slide_window,
                           arguments={
                               'window_width': window_width,
                               'window_stride': window_stride
                           })(image_reshaped)

    convnet = lenet((image_height, window_width, 1), (num_classes, ))
    convnet = KerasModel(inputs=convnet.inputs,
                         outputs=convnet.layers[-2].output)
    convnet_outputs = TimeDistributed(convnet)(image_patches)

    # straight conv to lstm w relu option:
    '''
    # conv = BatchNormalization()(image_reshaped)
    conv = Conv2D(128, (image_height, window_width), (1, window_stride), kernel_initializer = 'lecun_normal', activation = 'selu')(image_reshaped)
    conv = BatchNormalization()(conv)
    conv = AlphaDropout(0.07)(conv)
    
    # conv = MaxPooling2D(pool_size = (2, 2))(conv)
    
    # conv = Conv2D(128, (image_height, window_width), (1, window_stride), activation = 'relu')(image_reshaped)
    
    # conv = Conv2D(256, (1, window_stride), activation = 'relu')(conv)
    
    convnet_outputs = Lambda(lambda x: K.squeeze(x, 1))(conv)
    '''

    # convnet_do = AlphaDropout(0.05)(convnet_outputs)

    # lstm_output = Bidirectional(lstm_fn(128, return_sequences = True))(convnet_do)

    lstm1_output = Bidirectional(lstm_fn(
        128, return_sequences=True))(convnet_outputs)

    lstm1_do = AlphaDropout(0.04)(lstm1_output)

    lstm2_output = Bidirectional(lstm_fn(128, return_sequences=True))(lstm1_do)

    lstm2_do = AlphaDropout(0.04)(lstm2_output)
    lstm3_output = Bidirectional(lstm_fn(128, return_sequences=True))(lstm2_do)
    # softmax_output = Dense(num_classes, activation = 'softmax', name = 'softmax_output')(lstm3_output)

    lstm3_do = AlphaDropout(0.05)(lstm3_output)

    softmax_output = Dense(num_classes,
                           activation='softmax',
                           name='softmax_output')(lstm3_do)

    # First test evaluation: 0.612532686461871

    # Test evaluation: 0.6330963581464973
    '''
    image_reshaped = Reshape((image_height, image_width, 1))(image_input)
    # (image_height, image_width, 1)

    image_patches = Lambda(
        slide_window,
        arguments={'window_width': window_width, 'window_stride': window_stride}
    )(image_reshaped)
    # (num_windows, image_height, window_width, 1)

    # Make a LeNet and get rid of the last two layers (softmax and dropout)
    convnet = lenet((image_height, window_width, 1), (num_classes,))
    convnet = KerasModel(inputs=convnet.inputs, outputs=convnet.layers[-2].output)
    convnet_outputs = TimeDistributed(convnet)(image_patches)
    # (num_windows, 128)

    lstm_output = lstm_fn(128, return_sequences=True)(convnet_outputs)
    # (num_windows, 128)

    softmax_output = Dense(num_classes, activation='softmax', name='softmax_output')(lstm_output)
    # (num_windows, num_classes)
    '''
    ##### Your code above (Lab 3)

    input_length_processed = Lambda(
        lambda x, num_windows=None: x * num_windows,
        arguments={'num_windows': num_windows})(input_length)

    ctc_loss_output = Lambda(
        lambda x: K.ctc_batch_cost(x[0], x[1], x[2], x[3]), name='ctc_loss')(
            [y_true, softmax_output, input_length_processed, label_length])

    ctc_decoded_output = Lambda(
        lambda x: ctc_decode(x[0], x[1], output_length),
        name='ctc_decoded')([softmax_output, input_length_processed])

    model = KerasModel(
        inputs=[image_input, y_true, input_length, label_length],
        outputs=[ctc_loss_output, ctc_decoded_output])
    return model