Example #1
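The snippet below uses LB, to_categorical, train_test_split, VGG16 and friends without ever importing them; a minimal sketch of the setup it assumes:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import (Input, AveragePooling2D, Flatten, Dense,
                                     Dropout, Conv2D)
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical

LB = LabelBinarizer()  # 'LB' is used below but never created in the original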
labels = LB.fit_transform(labels)
labels = to_categorical(labels)
print(labels)
(X_train, X_test, Y_train, Y_test) = train_test_split(data,
                                                      labels,
                                                      test_size=0.20,
                                                      stratify=labels,
                                                      random_state=42)
trainAug = ImageDataGenerator(rotation_range=15, fill_mode="nearest")

bModel = VGG16(weights="imagenet",
               include_top=False,
               input_tensor=Input(shape=(224, 224, 3)))  #base_Model
hModel = bModel.output  #head_Model
hModel = AveragePooling2D(pool_size=(4, 4))(hModel)
hModel = Flatten(name="flatten")(hModel)
hModel = Dense(64, activation="relu")(hModel)
hModel = Dropout(0.5)(hModel)
hModel = Dense(2, activation="softmax")(hModel)
model = Model(inputs=bModel.input, outputs=hModel)
for layer in bModel.layers:
    layer.trainable = False
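
# The snippet never compiles or trains the model, and trainAug goes unused;
# a plausible continuation (epochs and batch size are placeholders):
model.compile(loss="categorical_crossentropy", optimizer="adam",
              metrics=["accuracy"])
model.fit(trainAug.flow(X_train, Y_train, batch_size=32),
          validation_data=(X_test, Y_test),
          epochs=10)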

X_train.shape, X_test.shape, Y_train.shape, Y_test.shape

W_grid = 4  # width
L_grid = 4  # length
fig, axes = plt.subplots(L_grid, W_grid, figsize=(25, 25))  # subplots
axes = axes.ravel()
n_training = len(X_train)
# Plot a grid of random training images (assumed loop body; the original
# mistakenly wrapped the unrelated model code below inside this loop)
for i in np.arange(0, L_grid * W_grid):
    index = np.random.randint(0, n_training)  # pick a random sample
    axes[i].imshow(X_train[index])
    axes[i].axis('off')

## Specify model (img_rows, img_cols, num_classes, x and y come from earlier
## in the original notebook)
model = Sequential()
# Input layer
model.add(
    Conv2D(20,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(img_rows, img_cols, 1)))
#   (number of convolutions/filters, size of the conv kernel, activation function)
# Hidden layers
model.add(Dropout(0.3))
model.add(Conv2D(20, kernel_size=(3, 3), activation='relu'))
model.add(Dropout(0.3))
#model.add(Conv2D(20, kernel_size=(3, 3), activation='relu'))
model.add(Flatten())  # Flatten layer
# Convert the output of the previous layers into a 1D representation
model.add(Dense(128, activation='relu'))  # Dense layer with 128 nodes
# Models usually perform better with a dense layer in between
# the flatten layer and the final layer
# Output layer
model.add(Dense(num_classes, activation='softmax'))

## Compile model
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])

## Fit model
model.fit(x, y, batch_size=128, epochs=2, validation_split=0.2)
# Starting to construct the model
print('Starting to construct model')
# Load the DenseNet model pre-trained on ImageNet, cutting off the original FC layer
base_model = DenseNet201(weights='imagenet',
                         include_top=False,
                         input_tensor=Input(shape=(224, 224, 3)))

# Freeze every layer of the base model so its weights
# are not updated during training.
for layer in base_model.layers:
    layer.trainable = False

# Construct our own fully connected head, which will be placed on top of the base model
head_model = base_model.output
head_model = MaxPooling2D(pool_size=(7, 7))(head_model)
head_model = Flatten(name='flatten')(head_model)
head_model = Dense(64, activation='relu')(head_model)
head_model = Dense(128, activation='relu')(head_model)
head_model = Dropout(0.5)(head_model)
head_model = Dense(3, activation='softmax')(head_model)

# Place the head_model on the top of the base model
model = Model(inputs=base_model.input,
              outputs=head_model)

# Compile model
print("compiling model...")
adam_optimize = Adam(learning_rate=learning_rate)  # learning_rate is defined earlier in the original script
model.compile(loss='categorical_crossentropy',
              optimizer=adam_optimize,
              metrics=['accuracy'])
def build_generator_cifar(architecture='Dense3',
                          input_shape=[32, 32, 3],
                          num_classes=10):
    if architecture == 'ResNet12v1':
        model = G_resnet(n=1, num_filters=16)
    elif architecture == 'ResNet22v2':
        model = G_resnet(n=2, num_filters=16, inner_loop_concat=True)
    elif architecture == 'ResNet12v2':
        model = G_resnet(n=1, num_filters=16, inner_loop_concat=True)
    elif architecture == 'ResNet20Xiao':
        model = G_resnet(n=1,
                         num_filters=8,
                         block_strides=[1, 2, 2, 1, 1, 1, 1, 1 / 2, 1 / 2])
    elif architecture == 'Dense2':
        # 2 Dense Layers
        image = Input(shape=input_shape)
        target = Input(shape=(num_classes, ))
        #target_int = Lambda(lambda x:K.argmax(x,axis=-1))(target)
        x1 = Flatten()(image)
        #x2 = Embedding(10,20,input_length=1)(target_int)
        #x2 = Lambda(lambda x: K.squeeze(x, -2))(x2)
        x = Concatenate(axis=-1)([x1, target])
        x = Dense(2048,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(np.prod(input_shape),
                  activation='sigmoid',
                  bias_initializer='zeros')(x)
        x = Reshape(input_shape)(x)
        model = Model(inputs=[image, target], outputs=x, name='model_G')
    elif architecture == 'Dense3':
        # 3 Dense Layers
        image = Input(shape=input_shape)
        target = Input(shape=(num_classes, ))
        x1 = Flatten()(image)
        x = Concatenate(axis=-1)([x1, target])
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(np.prod(input_shape),
                  activation='sigmoid',
                  bias_initializer='zeros')(x)
        x = Reshape(input_shape)(x)
        model = Model(inputs=[image, target], outputs=x, name='model_G')
    elif architecture == 'Dense4':
        image = Input(shape=input_shape)
        target = Input(shape=(num_classes, ))
        x1 = Flatten()(image)
        x = Concatenate(axis=-1)([x1, target])
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(np.prod(input_shape),
                  activation='sigmoid',
                  bias_initializer='zeros')(x)
        x = Reshape(input_shape)(x)
        model = Model(inputs=[image, target], outputs=x, name='model_G')

    print('Model Generator Name', model.name)
    if is_keras_loadable(model, architecture):
        return model
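
A hypothetical smoke test of the 'Dense3' branch (assumes the source's is_keras_loadable helper is available; the ResNet branches additionally need G_resnet):

import numpy as np

G = build_generator_cifar(architecture='Dense3')
fake = G.predict([np.random.rand(4, 32, 32, 3),   # a batch of 4 random images
                  np.eye(10)[[0, 1, 2, 3]]])      # one-hot target classes
print(fake.shape)  # expected: (4, 32, 32, 3)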
Example #5
def build_cnn(num_filter, len_filter, num_layer, num_channels, len_input,
              num_classes):
    """
    Function returning a keras model.
    Parameters
    ----------
    num_filter: int
        Number of filters / kernels in the conv layer
    len_filter: float
        Length of the filters / kernels in the conv layer as a fraction of the input length
    num_layer: int
        Number of convolutional layers in the model
    num_channels: int
        Number of channels of the input
    len_input: int
        Length of the input (number of samples per channel)
    num_classes: int
        Number of classes in the dataset = Number of outputs
    Returns
    -------
    model: sequential keras model
        Keras CNN model ready to be trained
    """
    model = Sequential()
    # First Conv Layer
    model.add(
        Conv1D(filters=num_filter,
               kernel_size=int(len_filter * len_input),
               strides=1,
               padding="same",
               activation='relu',
               input_shape=(len_input, num_channels),
               name='block1_conv1'))
    model.add(
        MaxPooling1D(pool_size=2,
                     strides=2,
                     padding="same",
                     name='block1_pool'))
    # Other Conv Layers
    for l in range(2, num_layer + 1):
        model.add(
            Conv1D(filters=num_filter * l,
                   kernel_size=int(len_filter * len_input),
                   strides=1,
                   padding="same",
                   activation='relu',
                   name='block' + str(l) + '_conv1'))
        model.add(
            MaxPooling1D(pool_size=2,
                         strides=2,
                         padding="same",
                         name='block' + str(l) + '_pool'))

    model.add(Flatten(name='flatten'))
    model.add(Dense(100, activation='relu', name='fc1'))
    model.add(Dense(num_classes, activation='softmax', name='predictions'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    plot_model(model, dpi=300, show_shapes=True, to_file='models\\cnn.png')
    return model
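
A hypothetical call to make the fractional len_filter parameter concrete (values are illustrative; plot_model additionally needs pydot/graphviz and an existing models directory):

model = build_cnn(num_filter=64, len_filter=0.05, num_layer=3,
                  num_channels=3, len_input=400, num_classes=6)
model.summary()  # each kernel spans int(0.05 * 400) = 20 time steps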
                         height_shift_range=0.2,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

# load the MobileNetV2 network, ensuring the head FC layer sets are
# left off
baseModel = MobileNetV2(weights="imagenet",
                        include_top=False,
                        input_tensor=Input(shape=(224, 224, 3)))

# construct the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)

# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)

# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False

# compile our model
print("[INFO] compiling model...")
Example #7
                activation='relu',
                strides=(2, 2),
                name='en_conv_2')(conv_1)
conv_3 = Conv2D(filters,
                kernel_size=num_conv,
                padding='same',
                activation='relu',
                strides=1,
                name='en_conv_3')(conv_2)
conv_4 = Conv2D(filters,
                kernel_size=num_conv,
                padding='same',
                activation='relu',
                strides=1,
                name='en_conv_4')(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu', name='en_dense_5')(flat)

Z_mu = Dense(K, name='en_mu')(hidden)
Z_lsgms = Dense(K, name='en_var')(hidden)


def sampling(args):
    # Reparameterization trick: draw epsilon ~ N(0, 1), then shift and scale
    # it using the encoder outputs
    Z_mu, Z_lsgms = args
    epsilon = backend.random_normal(shape=(backend.shape(Z_mu)[0], K),
                                    mean=0.,
                                    stddev=1.0)

    return Z_mu + backend.exp(Z_lsgms) * epsilon
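
# A sketch of how the sampler is typically wired into the encoder with a
# Lambda layer (this wiring is assumed, not shown in the original):
from tensorflow.keras.layers import Lambda

Z = Lambda(sampling, name='en_sample')([Z_mu, Z_lsgms])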
Y_piece = Y_piece.astype(int)

print(timer() - start)
'''We have one model to determine whether a square is empty and another model to determine the piece type on non-empty squares.
We achieved a precision of 100% on the train set and 99.99% on the test set.'''

import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.models import Sequential, Model

model_empty = Sequential()
model_empty.add(Input(shape=(32, 32, 1)))
model_empty.add(Conv2D(filters=5, kernel_size=(3, 3), activation='relu'))
model_empty.add(Conv2D(filters=5, kernel_size=(3, 3), activation='relu'))
model_empty.add(Conv2D(filters=5, kernel_size=(3, 3), activation='relu'))
model_empty.add(MaxPooling2D((2, 2)))
model_empty.add(Flatten())
model_empty.add(Dense(128, activation='relu'))
model_empty.add(Dense(2, activation='softmax'))

adam = tf.keras.optimizers.Adam(learning_rate=0.001)
model_empty.compile(optimizer=adam,
                    loss='sparse_categorical_crossentropy',
                    metrics=['accuracy'])

model_piece = Sequential()
model_piece.add(Input(shape=(32, 32, 1)))
model_piece.add(Conv2D(filters=5, kernel_size=(3, 3), activation='relu'))
model_piece.add(Conv2D(filters=5, kernel_size=(3, 3), activation='relu'))
model_piece.add(Conv2D(filters=5, kernel_size=(3, 3), activation='relu'))
model_piece.add(MaxPooling2D((2, 2)))
model_piece.add(Flatten())
H5_Zero2 = ZeroPadding2D(padding=(1, 1))(H5_Conv1)
H5_Conv2 = Conv2D(512, kernel_size=3, strides=1, activation='relu')(H5_Zero2)
H5_Zero3 = ZeroPadding2D(padding=(1, 1))(H5_Conv2)
H5_Conv3 = Conv2D(512, kernel_size=3, strides=1, activation='relu')(H5_Zero3)
H5_Pool = MaxPool2D(pool_size=(2, 2), strides=2)(H5_Conv3)

# Layer_6
H6_Zero1 = ZeroPadding2D(padding=(1, 1))(H5_Pool)
H6_Conv1 = Conv2D(512, kernel_size=3, strides=1, activation='relu')(H6_Zero1)
H6_Zero2 = ZeroPadding2D(padding=(1, 1))(H6_Conv1)
H6_Conv2 = Conv2D(512, kernel_size=3, strides=1, activation='relu')(H6_Zero2)
H6_Zero3 = ZeroPadding2D(padding=(1, 1))(H6_Conv2)
H6_Conv3 = Conv2D(512, kernel_size=3, strides=1, activation='relu')(H6_Zero3)

# Layer_7
H7_Flat = Flatten()(H6_Conv3)

# Layer_8
H8_Full = Dense(4096, activation='relu')(H7_Flat)
H8_Drop = Dropout(0.5)(H8_Full)

# Layer_9
H9_Full = Dense(1000, activation='relu')(H8_Drop)
H9_Drop = Dropout(0.5)(H9_Full)

# Layer_10
outputs = Dense(10, activation='softmax')(H9_Drop)

# model_make
model = Model(inputs, outputs)
model.summary()
Example #10
# Here we will be using imagenet weights

resnet = ResNet50(input_shape=IMAGE_SIZE + [3],
                  weights='imagenet',
                  include_top=False)

# don't train existing weights
for layer in resnet.layers:
    layer.trainable = False

# useful for getting number of output classes
folders = glob('/content/drive/My Drive/Car Model Classifier/Datasets/Train/*')
folders

# our layers - you can add more if you want
x = Flatten()(resnet.output)

prediction = Dense(len(folders), activation='softmax')(x)

# create a model object
model = Model(inputs=resnet.input, outputs=prediction)

# view the structure of the model
model.summary()

# tell the model what cost and optimization method to use
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Use the Image Data Generator to import the images from the dataset
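# A sketch of that step (augmentation values and target size are placeholders;
# the Train directory comes from the glob call above):
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
training_set = train_datagen.flow_from_directory(
    '/content/drive/My Drive/Car Model Classifier/Datasets/Train',
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')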
    def __init__(self, args):
        # TODO: Define a suitable model, by calling `super().__init__`
        # with appropriate inputs and outputs.
        #
        # Alternatively, if you prefer to use a `tf.keras.Sequential`,
        # replace the `Network` parent, call `super().__init__` at the beginning
        # of this constructor and add layers using `self.add`.

        # TODO: After creating the model, call `self.compile` with appropriate arguments.
        super(Network, self).__init__()
        num_classes = 10

        weight_decay = 1e-4

        self.add(
            Conv2D(32, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay),
                   input_shape=(32, 32, 3)))
        self.add(Activation('relu'))
        self.add(BatchNormalization())
        self.add(
            Conv2D(32, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        self.add(Activation('relu'))
        self.add(BatchNormalization())
        self.add(MaxPooling2D(pool_size=(2, 2)))
        self.add(Dropout(0.2))

        self.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        self.add(Activation('relu'))
        self.add(BatchNormalization())
        self.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        self.add(Activation('relu'))
        self.add(BatchNormalization())
        self.add(MaxPooling2D(pool_size=(2, 2)))
        self.add(Dropout(0.3))

        self.add(
            Conv2D(128, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        self.add(Activation('relu'))
        self.add(BatchNormalization())
        self.add(
            Conv2D(128, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        self.add(Activation('relu'))
        self.add(BatchNormalization())
        self.add(MaxPooling2D(pool_size=(2, 2)))
        self.add(Dropout(0.4))

        self.add(Flatten())
        self.add(Dense(num_classes, activation='softmax'))

        schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            initial_learning_rate=0.01,
            decay_steps=args.epochs * 45000 / 500,
            decay_rate=0.0001 / 0.01)
        self.compile(
            optimizer=tf.keras.optimizers.Adam(clipnorm=1.0,
                                               clipvalue=0.5,
                                               learning_rate=schedule),
            loss=tf.keras.losses.SparseCategoricalCrossentropy(),
            metrics=[
                tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")
            ])

        self.tb_callback = tf.keras.callbacks.TensorBoard(args.logdir,
                                                          update_freq=1000,
                                                          profile_batch=1)
        self.tb_callback.on_train_end = lambda *_: None
GAN_noise_size = 128
GAN_output_size = 7
G_input = Input(shape=(GAN_noise_size,))

G = Dense(128, kernel_initializer='glorot_uniform')(G_input)
G = LeakyReLU(alpha=0.2)(G)
G = BatchNormalization()(G)
G = Reshape([8, 8, 2])(G)  # default: channel last
G = Conv2DTranspose(32, kernel_size=2, strides=1, padding="same")(G)
G = LeakyReLU(alpha=0.2)(G)
G = BatchNormalization()(G)
G = Conv2DTranspose(16, kernel_size=3, strides=1, padding="same")(G)
G = LeakyReLU(alpha=0.2)(G)
G = BatchNormalization()(G)
G = Flatten()(G)
G_output = Dense(GAN_output_size)(G)
G_output = Activation("tanh")(G_output)
generator = Model(G_input, G_output)
generator.summary()


for batch_size_i in batch_size_i_array:

	for i in range(0, 1):


		batch_size = int(batch_size_i)
		batchsize = int(batch_size_i)

		print(' ')
Example #13
def LSTM_Network(data, string):

    tscv = TimeSeriesSplit(max_train_size=None, n_splits=5)
    a = []

    for train_index, test_index in tscv.split(data.scaled_dataset):

        print("TRAIN:", train_index, "TEST:", test_index)
        X_train, y_train = data.Nueral_Network(data.scaled_dataset,
                                               data.scaled_dataset[:, -1], 0,
                                               data.train_set, data.timesteps)
        X_val, y_val = data.Nueral_Network(data.scaled_dataset,
                                           data.scaled_dataset[:, -1],
                                           data.train_set, data.validation_set,
                                           data.timesteps)
        X_test, y_test = data.Nueral_Network(data.scaled_dataset,
                                             data.scaled_dataset[:, -1],
                                             data.validation_set,
                                             data.test_set, data.timesteps)

        # Defines the model's input shape, the loss function, and the metric used for the error function.
        # The 'data' object passed into the module as an argument calls each lot's pre-processing module to
        # obtain the training, testing, and validation data sets

        input_shape = X_train.shape[-2:]
        loss = tf.keras.losses.MeanAbsoluteError()
        metric = tf.keras.metrics.MeanAbsolutePercentageError()

        # Reshapes the y_test numpy array so it can be passed into the mean_absolute_percentage_error function
        # Reverses the scaler to re-obtain the actual values of the data

        y_test_reshaped = y_test.reshape(-1, 1)
        y_test_inv = data.scaler.inverse_transform(y_test_reshaped)

        # Sets the number of samples to use in each iteration and shuffles the data to prevent over-fitting

        batch_size = 64
        shuffle_size = 64

        val = tf.data.Dataset.from_tensor_slices((X_val, y_val))
        val = val.cache().shuffle(shuffle_size).batch(batch_size).prefetch(1)

        train = tf.data.Dataset.from_tensor_slices((X_train, y_train))
        train = train.cache().shuffle(shuffle_size).batch(batch_size).prefetch(1)

        # Builds the model. LSTM defines the number of LSTM cells in the network
        # return_sequences defines whether the model returns only the final h value or the full array of results from each cell
        # Flatten() reduces the data's dimensions before it is passed to a Dense layer of 160 neurons
        # Dropout randomly drops 10% of neurons to prevent over-fitting

        LSTM_Model = tf.keras.models.Sequential([
            LSTM(80, input_shape=input_shape, return_sequences=True),
            Flatten(),
            Dense(160, activation='tanh'),
            Dropout(0.1),
            Dense(1)
        ])

        optimizer = tf.keras.optimizers.Adam(learning_rate=0.0009, amsgrad=True)
        LSTM_Model.compile(loss=loss, optimizer=optimizer, metrics=[metric])
        tf.keras.backend.set_epsilon(1)

        history = LSTM_Model.fit(train, epochs=100, validation_data=val)

        # predict is a built-in Keras method that applies the trained network to new data
        # The forecast's scaled values are then transformed back to real values and passed to the MAPE function

        forecast = LSTM_Model.predict(X_test)
        LSTM_forecast = data.scaler.inverse_transform(forecast)
        MAPE = mean_absolute_percentage_error(y_test_inv, LSTM_forecast)

        a.append(np.array(MAPE))

        # MAPE and Loss are plotted

        plot_model_mape(history, string)
        plot_model_loss(history, string)

        # The model and the weights are saved as JSON and h5 files

    LSTM_JSON = LSTM_Model.to_json()
    with open(
            "Project/Saved_Models/Buildings/" + string + "/LSTM/" + string +
            "_LSTM.json", "w") as json_file:
        json_file.write(LSTM_JSON)

    LSTM_Model.save_weights('Project/Saved_Models/Buildings/' + string +
                            '/LSTM/' + string + '_LSTM.h5')

    print('LSTM forecast MAPE of hour-ahead electricity demand: {}'.format(a))
    return LSTM_Model
learning_rate = 0.0025
t_batch_size = 1500

batch_size = 512
no_epochs = 100 # 200
verbosity = 1

lstm_model = Sequential()
classes = 90

lstm_model.add(LSTM(128, input_shape=(n_steps, n_features), return_sequences=True))
lstm_model.add(LSTM(64))


lstm_model.add(Dropout(0.50))
lstm_model.add(Flatten())
lstm_model.add(Dense(classes, activation='softmax'))


# Compile the model
lstm_model.compile(loss='categorical_crossentropy',
                   optimizer='rmsprop',
                   metrics=['accuracy'])

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import plot_model
plot_model(lstm_model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)

"""# Model Summary"""
Example #15
def VGG16_SEGNET(n_classes,
                 input_height=224,
                 input_width=224,
                 input_depth=3,
                 vgg_level=-1):
    img_input = Input(shape=(input_height, input_width, input_depth))

    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1',
               data_format='channels_last')(img_input)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2',
               data_format='channels_last')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block1_pool',
                     data_format='channels_last')(x)
    f1 = x
    # Block 2
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1',
               data_format='channels_last')(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2',
               data_format='channels_last')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block2_pool',
                     data_format='channels_last')(x)
    f2 = x

    # Block 3
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1',
               data_format='channels_last')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2',
               data_format='channels_last')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv3',
               data_format='channels_last')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block3_pool',
                     data_format='channels_last')(x)
    f3 = x

    # Block 4
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1',
               data_format='channels_last')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2',
               data_format='channels_last')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3',
               data_format='channels_last')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block4_pool',
                     data_format='channels_last')(x)
    f4 = x

    # Block 5
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1',
               data_format='channels_last')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2',
               data_format='channels_last')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3',
               data_format='channels_last')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block5_pool',
                     data_format='channels_last')(x)
    f5 = x

    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(1000, activation='softmax', name='predictions')(x)

    vgg = Model(img_input, x)
    vgg.load_weights(VGG_Weights_path)
    levels = [f1, f2, f3, f4, f5]

    o = levels[vgg_level]

    #o = ( UpSampling2D( (2,2), data_format='channels_last'))(o)
    o = (ZeroPadding2D((1, 1), data_format='channels_last'))(o)
    o = (Conv2D(512, (3, 3),
                activation='relu',
                padding='valid',
                data_format='channels_last'))(o)
    o = (BatchNormalization())(o)

    o = (UpSampling2D((2, 2), data_format='channels_last'))(o)
    o = (ZeroPadding2D((1, 1), data_format='channels_last'))(o)
    o = (Conv2D(512, (3, 3),
                activation='relu',
                padding='valid',
                data_format='channels_last'))(o)
    o = (BatchNormalization())(o)

    o = (UpSampling2D((2, 2), data_format='channels_last'))(o)
    o = (ZeroPadding2D((1, 1), data_format='channels_last'))(o)
    o = (Conv2D(256, (3, 3),
                activation='relu',
                padding='valid',
                data_format='channels_last'))(o)
    o = (BatchNormalization())(o)

    o = (UpSampling2D((2, 2), data_format='channels_last'))(o)
    o = (ZeroPadding2D((1, 1), data_format='channels_last'))(o)
    o = (Conv2D(128, (3, 3),
                activation='relu',
                padding='valid',
                data_format='channels_last'))(o)
    o = (BatchNormalization())(o)

    o = (UpSampling2D((2, 2), data_format='channels_last'))(o)
    o = (ZeroPadding2D((1, 1), data_format='channels_last'))(o)
    o = (Conv2D(64, (3, 3),
                activation='relu',
                padding='valid',
                data_format='channels_last'))(o)
    o = (BatchNormalization())(o)

    o = Conv2D(n_classes, (3, 3), padding='same',
               data_format='channels_last')(o)
    #o_shape = Model(img_input , o ).output_shape
    #outputHeight = o_shape[2]
    #outputWidth = o_shape[3]

    #o = (Reshape((  -1  , outputHeight*outputWidth   )))(o)
    #o = (Permute((2, 1)))(o)
    o = (Activation('softmax'))(o)
    model = Model(img_input, o)
    #model.outputWidth = outputWidth
    #model.outputHeight = outputHeight

    return model
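
A hypothetical call (n_classes is a placeholder, and VGG_Weights_path must point to a local VGG16 weights file for load_weights to succeed):

seg_model = VGG16_SEGNET(n_classes=21)
seg_model.summary()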
Example #16
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
x_train[0]
vocab_size = len(word_vocab) + 1
EMBEDDING_DIM = 128
HIDDEN_DIM = 100
conv_feature_maps = []
x_input = Input(batch_shape=(None, x_train.shape[1]))
e_layer = Embedding(input_dim=vocab_size, output_dim=EMBEDDING_DIM)(x_input)
e_layer = Dropout(rate=0.5)(e_layer)
for ks in [3, 4, 5]:
    r_layer = Conv1D(filters=HIDDEN_DIM,
                     kernel_size=ks,
                     padding='valid',
                     activation='relu')(e_layer)
    max_pool = GlobalMaxPooling1D()(r_layer)
    flatten = Flatten()(max_pool)
    conv_feature_maps.append(flatten)
r_layer = Concatenate()(conv_feature_maps)
r_layer = Dropout(rate=0.5)(r_layer)
y_output = Dense(250, activation='relu')(r_layer)
y_output = Dense(1, activation='sigmoid')(y_output)

model = Model(x_input, y_output)
model.compile(loss='binary_crossentropy',
              optimizer=Adam(learning_rate=0.0005),
              metrics=['accuracy'])
model.summary()

# Training
es = EarlyStopping(monitor='val_accuracy', min_delta=0.0001, patience=2)
cp = ModelCheckpoint(filepath='./',
Example #17
def hifis_rnn_mlp(cfg,
                  input_dim,
                  metrics,
                  metadata,
                  output_bias=None,
                  hparams=None):
    '''
    Defines a Keras model for HIFIS hybrid recurrent neural network and multilayer perceptron model (i.e. HIFIS-v3)
    :param cfg: A dictionary of parameters associated with the model architecture
    :param input_dim: The shape of the model input
    :param metrics: Metrics to track model's performance
    :param metadata: Dict containing prediction horizon, time series feature info
    :param output_bias: initial bias applied to output layer
    :param hparams: dict of hyperparameters
    :return: a Keras model object with the architecture defined in this method
    '''

    # Set hyperparameters
    if hparams is None:
        nodes_dense0 = cfg['DENSE']['DENSE0']
        nodes_dense1 = cfg['DENSE']['DENSE1']
        layers = cfg['LAYERS']
        dropout = cfg['DROPOUT']
        l2_lambda = cfg['L2_LAMBDA']
        lr = cfg['LR']
        optimizer = Adam(learning_rate=lr)
        lstm_units = cfg['LSTM']['UNITS']
    else:
        nodes_dense0 = hparams['NODES0']
        nodes_dense1 = hparams['NODES1']
        layers = hparams['LAYERS']
        dropout = hparams['DROPOUT']
        lr = 10**hparams['LR']  # Random sampling on logarithmic scale
        beta_1 = 1 - 10**hparams['BETA_1']
        beta_2 = 1 - 10**hparams['BETA_2']
        l2_lambda = 10**hparams['L2_LAMBDA']
        lstm_units = hparams['LSTM_UNITS']

        if hparams['OPTIMIZER'] == 'adam':
            optimizer = Adam(learning_rate=lr, beta_1=beta_1, beta_2=beta_2)
        elif hparams['OPTIMIZER'] == 'sgd':
            optimizer = SGD(learning_rate=lr)

    if output_bias is not None:
        output_bias = Constant(output_bias)

    # Receive input to model and split into 2 tensors containing dynamic and static features respectively
    X_input = Input(shape=input_dim)
    split_idx = metadata['NUM_TS_FEATS'] * metadata['T_X']
    X_dynamic, X_static = split(X_input,
                                [split_idx, X_input.shape[1] - split_idx],
                                axis=1)

    # Define RNN component of model using LSTM cells. LSTM input shape is [batch_size, timesteps, features]
    lstm_input_shape = (metadata['T_X'], metadata['NUM_TS_FEATS'])
    X_dynamic = Reshape(lstm_input_shape)(X_dynamic)
    X_dynamic = LSTM(lstm_units, activation='tanh',
                     return_sequences=True)(X_dynamic)
    X_dynamic = Flatten()(X_dynamic)

    # Define MLP component of model
    X = concatenate([X_dynamic,
                     X_static])  # Combine output of LSTM with static features
    X = Dense(nodes_dense0,
              input_shape=input_dim,
              activation='relu',
              kernel_regularizer=l2(l2_lambda),
              bias_regularizer=l2(l2_lambda),
              name="dense0")(X)
    X = Dropout(dropout, name='dropout0')(X)
    for i in range(1, layers):
        X = Dense(nodes_dense1,
                  activation='relu',
                  kernel_regularizer=l2(l2_lambda),
                  bias_regularizer=l2(l2_lambda),
                  name='dense%d' % i)(X)
        X = Dropout(dropout, name='dropout%d' % i)(X)
    Y = Dense(1,
              activation='sigmoid',
              name="output",
              bias_initializer=output_bias)(X)

    # Define model with inputs and outputs
    model = Model(inputs=X_input,
                  outputs=Y,
                  name='HIFIS-rnn-mlp_' + str(metadata['N_WEEKS']) + '-weeks')

    # Set model loss function, optimizer, metrics.
    model.compile(loss=f1_loss(4.5), optimizer=optimizer, metrics=metrics)

    # Print summary of model and return model object
    if hparams is None:
        model.summary()
    return model
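
The structure cfg must have can be read off the branch above; a hypothetical example with placeholder values:

cfg = {
    'DENSE': {'DENSE0': 64, 'DENSE1': 32},
    'LAYERS': 2,
    'DROPOUT': 0.3,
    'L2_LAMBDA': 1e-3,
    'LR': 1e-4,
    'LSTM': {'UNITS': 16},
}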
print("Shape of Training dataset: ", train_images.shape)
print("Length of Training dataset: ", len(train_labels))
print("Training labels:", train_labels)
print("Shape of Test dataset: ", test_images.shape)
print("Length of Test dataset: ", len(test_labels))
os.system("pause")


# Building the model
print("Setting up the layers: ")
model = Sequential()
model.add(Convolution2D(32, (3, 3), activation='relu', input_shape=(1, 28, 28), data_format='channels_first'))
model.add(Convolution2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

# Model reconstruction from JSON file
with open('trained models/model_architecture.json', 'r') as f:
    model = model_from_json(f.read())

# Load weights into the new model
model.load_weights('trained models/model_weights.h5')

print("Compiling the model: ")
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
            X, y = get_batch(train.iloc[batch_start:batch_end], start=seq_start)
            yield X, y


from tensorflow.keras.models import Sequential
from tensorflow.keras.losses import mean_absolute_percentage_error
from tensorflow.keras.layers import Conv1D, MaxPool1D, Dense, Activation, GlobalMaxPool1D, Flatten

number_of_features = 29
max_length = 100

model = Sequential([Conv1D(filters=16, kernel_size=5, input_shape=(max_length, number_of_features), activation='relu', padding='causal'),
                    MaxPool1D(pool_size=5),
                    Conv1D(filters=16, kernel_size=5, activation='relu', padding='causal'),
                    MaxPool1D(5),
                    Flatten(),
                    Dense(units=1)])

model.compile(optimizer='adam', loss=mean_absolute_percentage_error)

from sklearn.model_selection import train_test_split

batch_size = 128
train_df, val_df = train_test_split(train, test_size=0.1)
train_gen = generate_batches(train_df, batch_size=batch_size)
val_gen = generate_batches(val_df, batch_size=batch_size)

n_train_samples = train_df.shape[0]
n_val_samples = val_df.shape[0]

a, b = next(train_gen)
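
# n_train_samples and n_val_samples are computed above but never used; a
# sketch of the generator-based training they suggest (epoch count is a
# placeholder):
model.fit(train_gen,
          steps_per_epoch=n_train_samples // batch_size,
          validation_data=val_gen,
          validation_steps=n_val_samples // batch_size,
          epochs=10)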
Example #20
def get_adm_model(learning_rate, l2_reg, price_min, price_max, price_interval,
                  embedd_size):
    price_step = int(
        math.floor((price_max - price_min + K.epsilon()) / price_interval))

    total_len = 0
    input_tensors = []

    # Input Embedding Layers
    embedding_tensors = []
    # composing embedding layers
    for column_name, count_value, dimension_length in embedding_paras:
        total_len += embedd_size  # or dimension_length
        input_tensor = Input(name='{}_index'.format(column_name),
                             shape=(1, ),
                             dtype='int64')
        embedding_tensor = Embedding(
            count_value,
            embedd_size,  # or dimension_length
            input_length=1,
            embeddings_initializer='glorot_normal',
            name='{}_embedding'.format(column_name))(input_tensor)
        embedding_tensor = Flatten()(embedding_tensor)
        embedding_tensors.append(embedding_tensor)
        input_tensors.append(input_tensor)

    # Input Numerical Layers
    numerical_tensors = []
    numerical_embedd_tensors = []
    for column_name in numerical_paras:
        total_len += 1
        input_tensor = Input(name='{}'.format(column_name),
                             shape=(1, ),
                             dtype='float32')
        numerical_tensors.append(input_tensor)
        input_tensors.append(input_tensor)
        input_embedd_tensor = Lambda(lambda x: tf.tile(x, [1, embedd_size]))(
            input_tensor)
        numerical_embedd_tensors.append(input_embedd_tensor)

    feature_num = len(input_tensors)  # 44

    # features (embedding + numeric)
    ## 1-order
    x_o1_tensor = Concatenate()(embedding_tensors + numerical_tensors)
    ## 2-order
    x_o2_tensor = Lambda(lambda x: tf.stack(x, axis=1))(
        embedding_tensors +
        numerical_embedd_tensors)  # (None, feature_num, dim)
    x_o2_tensor = InteractingLayer(att_embedding_size=8,
                                   head_num=2)(x_o2_tensor)
    x_o2_tensor = Flatten()(x_o2_tensor)
    ## high-order
    x_oh_tensor = Dense(
        total_len // 2,
        activation='relu',
        kernel_regularizer=regularizers.l2(l2_reg))(x_o1_tensor)
    x_oh_tensor = Dense(
        total_len // 4,
        activation='relu',
        kernel_regularizer=regularizers.l2(l2_reg))(x_oh_tensor)

    # output layer
    output_tensor = Concatenate(axis=1)(
        [x_o1_tensor, x_o2_tensor, x_oh_tensor])
    output_tensor = Dense(
        price_step, kernel_regularizer=regularizers.l2(l2_reg))(output_tensor)
    output_tensor = Softmax(name='mixture_price_3')(output_tensor)

    model = Model(inputs=input_tensors, outputs=[output_tensor])
    adam = optimizers.Adam(learning_rate=learning_rate)
    optimizer = hvd.DistributedOptimizer(adam)
    model.compile(loss=neighborhood_likelihood_loss,
                  optimizer=optimizer,
                  experimental_run_tf_function=False)
    return model
Example #21
target_size = (256, 256)
batch_size = 32
epochs = 90
input_shape = target_size + (3, )

classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape=input_shape,
                      activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(64, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(128, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(256, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dropout(0.5))
classifier.add(Dense(units=512, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(units=512, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(units=6, activation='softmax'))
classifier.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

# model summary
classifier.summary()

# image preprocessing and fitting
train_datagen = ImageDataGenerator(rescale=1. / 255,
    #  The reason is a bug in TF2 where the learning_phase_tensor is not
    #  extractable to be fed as an input to keras models
    #  https://stackoverflow.com/questions/58987264/how-to-get-learning-phase-in-tensorflow-2-eager
    #  https://stackoverflow.com/questions/58279628/what-is-the-difference-between-tf-keras-and-tf-python-keras?noredirect=1&lq=1
    #  https://github.com/tensorflow/tensorflow/issues/34508

    # Define how many past observations we want the control agent to process each step
    # for this case, we assume to pass only the single most recent observation
    window_length = 1

    # Define an artificial neural network to be used within the agent as actor
    # (using keras sequential)
    actor = Sequential()
    # The network's input fits the observation space of the env
    actor.add(
        Flatten(input_shape=(window_length, ) + env.observation_space.shape))
    actor.add(Dense(16, activation='relu'))
    actor.add(Dense(16, activation='relu'))
    # The network output fits the action space of the env
    actor.add(Dense(nb_actions, activation='sigmoid'))
    print(actor.summary())

    # Define another artificial neural network to be used within the agent as critic
    # note that this network has two inputs
    action_input = Input(shape=(nb_actions, ), name='action_input')
    observation_input = Input(shape=(window_length, ) +
                              env.observation_space.shape,
                              name='observation_input')
    # (using keras functional API)
    flattened_observation = Flatten()(observation_input)
    x = Concatenate()([action_input, flattened_observation])
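    # The original cuts off here; a typical keras-rl critic head continues
    # along these lines (a sketch, not the source code):
    x = Dense(32, activation='relu')(x)
    x = Dense(32, activation='relu')(x)
    x = Dense(1, activation='linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)
    print(critic.summary())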
def build_ATN(architecture=1, input_shape=[28, 28, 1], num_classes=10):
    if architecture == 0:
        image = Input(shape=input_shape)
        target = Input(shape=(num_classes, ))
        #target_int = Lambda(lambda x:K.argmax(x,axis=-1))(target)
        x1 = Flatten()(image)
        #x2 = Embedding(10,20,input_length=1)(target_int)
        #x2 = Lambda(lambda x: K.squeeze(x, -2))(x2)
        x = Concatenate(axis=-1)([x1, target])
        x = Dense(2048,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(np.prod(input_shape),
                  activation='sigmoid',
                  bias_initializer='zeros')(x)
        x = Reshape(input_shape)(x)
        cnn = Model(inputs=[image, target], outputs=x)
    elif architecture == 1:
        image = Input(shape=input_shape)
        target = Input(shape=(num_classes, ))
        x1 = Flatten()(image)
        x = Concatenate(axis=-1)([x1, target])
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(np.prod(input_shape),
                  activation='sigmoid',
                  bias_initializer='zeros')(x)
        x = Reshape(input_shape)(x)
        cnn = Model(inputs=[image, target], outputs=x)
    elif architecture == -1:
        cnn = Sequential()
        cnn.add(Flatten(input_shape=input_shape))
        cnn.add(
            Dense(2048,
                  activation='relu',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros'))
        cnn.add(Dropout(0.25))
        cnn.add(
            Dense(np.prod(input_shape),
                  activation='sigmoid',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros'))
        cnn.add(Reshape(input_shape))
    elif architecture == -2:
        cnn = Sequential()
        cnn.add(
            Conv2D(
                64,
                kernel_size=(3, 3),
                activation='relu',
                kernel_initializer='glorot_normal',
                bias_initializer='zeros',  #Constant(-0.5),
                kernel_regularizer=l2(0.005),
                input_shape=input_shape,
                padding='same'))
        cnn.add(
            Conv2D(128,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros',
                   kernel_regularizer=l2(0.005),
                   padding='same'))
        cnn.add(MaxPooling2D(pool_size=(2, 2)))

        cnn.add(
            Conv2D(256,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros',
                   kernel_regularizer=l2(0.005),
                   padding='same'))
        cnn.add(MaxPooling2D(pool_size=(2, 2)))
        cnn.add(Flatten())
        cnn.add(
            Dense(2048,
                  activation='relu',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros',
                  kernel_regularizer=l2(0.05)))
        cnn.add(Dropout(0.25))
        cnn.add(
            Dense(np.prod(input_shape),
                  activation='sigmoid',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros'))
        cnn.add(Reshape(input_shape))
    elif architecture == 2:
        cnn = Sequential()
        cnn.add(
            Conv2D(256,
                   kernel_size=(3, 3),
                   activation='relu',
                   input_shape=input_shape,
                   padding='same',
                   use_bias=True,
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros'))
        cnn.add(MaxPooling2D(pool_size=(2, 2)))
        cnn.add(Dropout(0.5))
        cnn.add(
            Conv2D(512,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=True,
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros'))
        #cnn.add(MaxPooling2D(pool_size=(2, 2)))
        #cnn.add(Dropout(0.5))
        #cnn.add(Conv2D(512, kernel_size=(3, 3),activation='relu',padding='same',
        #          use_bias=True, kernel_initializer='glorot_normal', bias_initializer='zeros'))
        #cnn.add(UpSampling2D(data_format='channels_last'))
        #cnn.add(Dropout(0.5))
        #cnn.add(Conv2DTranspose(256, kernel_size=(3,3), padding='same', activation='relu',
        #          use_bias=True, kernel_initializer='glorot_normal', bias_initializer='zeros'))
        cnn.add(UpSampling2D(data_format='channels_last'))
        cnn.add(Dropout(0.5))
        cnn.add(
            Conv2DTranspose(256,
                            kernel_size=(3, 3),
                            padding='same',
                            activation='relu',
                            use_bias=True,
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros'))
        cnn.add(Dropout(0.5))
        cnn.add(
            Conv2DTranspose(1,
                            kernel_size=(3, 3),
                            padding='same',
                            activation='sigmoid',
                            use_bias=True,
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros'))
    return cnn
Example #24
import tensorflow as tf
import numpy as np

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.optimizers import SGD

x_data = np.array([1, 2, 3, 4, 5, 6])
t_data = np.array([3, 4, 5, 6, 7, 8])
# x_data + 2 = t_data (the correct answer)

model = Sequential()
model.add(Flatten(input_shape=(1, )))  # input layer
model.add(Dense(1, activation='linear'))  # output layer
# model.add(Dense(1, input_shape=(1,), activation='linear'))

model.compile(optimizer=SGD(learning_rate=1e-2),
              loss='mse')  # Stochastic Gradient Descent; mse = mean squared error
model.summary()

hist = model.fit(x_data, t_data, epochs=1000)
result = model.predict(np.array([-3.1, 3.0, 3.5, 15.0, 20.1]))

print(result)