    sess = tf.Session()
    K.set_session(sess)

    elmo_model = hub.Module("https://tfhub.dev/google/elmo/2", trainable=True)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.tables_initializer())

    def ElmoEmbedding(x):
        return elmo_model(inputs={
                                "tokens": tf.squeeze(tf.cast(x, tf.string)),
                                "sequence_len": tf.constant(batch_size*[max_len])
                        },
                        signature="tokens",
                        as_dict=True)["elmo"]

    input_text = Input(shape=(max_len,), dtype=tf.string)
    embedding = Lambda(ElmoEmbedding, output_shape=(max_len, 1024))(input_text)
    x = Bidirectional(LSTM(units=512, return_sequences=True,
                        recurrent_dropout=0.2, dropout=0.2))(embedding)
    x_rnn = Bidirectional(LSTM(units=512, return_sequences=True,
                            recurrent_dropout=0.2, dropout=0.2))(x)
    x = add([x, x_rnn])  # residual connection to the first biLSTM
    out = TimeDistributed(Dense(n_tags, activation="softmax"))(x)

    '''
    input = Input(shape=(140,))
    model = Embedding(input_dim=n_words, output_dim=140, input_length=140)(input)
    model = Dropout(0.1)(model)
    model = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(model)
    out = TimeDistributed(Dense(n_tags, activation="softmax"))(model)  # softmax output layer
    '''
    def buildModel(self, inputShape, numberOfOutputs):
        def shuntingInhibition(inputs):
            inhibitionDecay = 0.5

            v_c, v_c_inhibit = inputs

            output = (v_c / (inhibitionDecay + v_c_inhibit))

            return output

        print "Input shape:", inputShape

        if self.logManager is not None:
            self.logManager.newLogSession("Implementing Model: " +
                                          str(self.modelName))

        nch = 256

        inputLayer = Input(shape=inputShape, name="Vision_Network_Input")

        #Conv1 and 2
        conv1 = Conv2D(nch // 4, (3, 3),
                       padding="same",
                       kernel_initializer="glorot_uniform",
                       name="Vision_conv1")(inputLayer)
        bn1 = BatchNormalization(axis=1)(conv1)
        actv1 = Activation("relu")(bn1)

        conv2 = Conv2D(nch // 4, (3, 3),
                       padding="same",
                       kernel_initializer="glorot_uniform",
                       name="Vision_conv2")(actv1)
        bn2 = BatchNormalization(axis=1)(conv2)
        actv2 = Activation("relu")(bn2)

        mp1 = MaxPooling2D(pool_size=(2, 2))(actv2)
        drop1 = Dropout(0.25)(mp1)

        #Conv 3 and 4
        conv3 = Conv2D(nch // 2, (3, 3),
                       padding="same",
                       kernel_initializer="glorot_uniform",
                       name="Vision_conv3")(drop1)
        bn3 = BatchNormalization(axis=1)(conv3)
        actv3 = Activation("relu")(bn3)

        conv4 = Conv2D(nch // 2, (3, 3),
                       padding="same",
                       kernel_initializer="glorot_uniform",
                       name="Vision_conv4")(actv3)
        bn4 = BatchNormalization(axis=1)(conv4)
        actv4 = Activation("relu")(bn4)

        mp2 = MaxPooling2D(pool_size=(2, 2))(actv4)
        drop2 = Dropout(0.25)(mp2)

        #Conv 5 and 6 and 7
        conv5 = Conv2D(nch // 2, (3, 3),
                       padding="same",
                       kernel_initializer="glorot_uniform",
                       name="Vision_conv5")(drop2)
        bn5 = BatchNormalization(axis=1)(conv5)
        actv5 = Activation("relu")(bn5)

        conv6 = Conv2D(nch // 2, (3, 3),
                       padding="same",
                       kernel_initializer="glorot_uniform",
                       name="Vision_conv6")(actv5)
        bn6 = BatchNormalization(axis=1)(conv6)
        actv6 = Activation("relu")(bn6)

        conv7 = Conv2D(nch // 2, (3, 3),
                       padding="same",
                       kernel_initializer="glorot_uniform",
                       name="Vision_conv7")(actv6)
        bn7 = BatchNormalization(axis=1)(conv7)
        actv7 = Activation("relu")(bn7)

        mp3 = MaxPooling2D(pool_size=(2, 2))(actv7)
        drop3 = Dropout(0.25)(mp3)

        #Conv 8 and 9 and 10

        conv8 = Conv2D(nch, (3, 3),
                       padding="same",
                       kernel_initializer="glorot_uniform",
                       name="Vision_conv8")(drop3)
        bn8 = BatchNormalization(axis=1)(conv8)
        actv8 = Activation("relu")(bn8)

        conv9 = Conv2D(nch, (3, 3),
                       padding="same",
                       kernel_initializer="glorot_uniform",
                       name="conv9")(actv8)
        bn9 = BatchNormalization(axis=1)(conv9)
        actv9 = Activation("relu")(bn9)

        conv10 = Conv2D(nch, (3, 3),
                        padding="same",
                        kernel_initializer="glorot_uniform",
                        activation="relu",
                        name="conv10")(actv9)

        conv10_inhibition = Conv2D(nch, (3, 3),
                                   padding="same",
                                   kernel_initializer="glorot_uniform",
                                   activation="relu",
                                   name="conv10_inhibition")(actv9)

        v_conv_inhibitted = Lambda(function=shuntingInhibition)(
            [conv10, conv10_inhibition])

        mp4 = MaxPooling2D(pool_size=(2, 2))(v_conv_inhibitted)
        drop4 = Dropout(0.25)(mp4)

        flatten = Flatten()(drop4)

        dense = Dense(200, activation="relu")(flatten)
        drop5 = Dropout(0.25)(dense)

        output = Dense(numberOfOutputs, activation="softmax")(drop5)

        model = Model(inputs=inputLayer, outputs=output)

        self._model = model

        self.model.summary()

        if self.logManager is not None:
            self.logManager.endLogSession()
# Example 3
    x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(192, (1, 1), activation='relu')(x)
    x = Conv2D(20, (1, 1))(x)
    x = GlobalAveragePooling2D()(x)
    x = Activation(activation='softmax')(x)

    model = Model(model_input, x, name='conv_pool_cnn')

    return model


model = conv_pool_cnn(Input(shape=(28, 28, 1)))

data_generator = ImageDataGenerator(width_shift_range=0.1,
                                    height_shift_range=0.1,
                                    horizontal_flip=True)

data_generator.fit(train_x)

opt = optimizers.Adam(lr=0.001)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.summary()

model.fit_generator(data_generator.flow(train_x, train_y, batch_size=100),
                    steps_per_epoch=1000,
# Example 4
    def build_model(self, epoch=50):
        '''
        Build the neural network model and train it
        :param epoch: number of epoch
        '''
        start_time = time.time()
        index_training = (self.df['split'] == 'training').values
        index_validation = (self.df['split'] == 'validation').values

        activities_left = Input(
            shape=(self.activities_left[index_training].shape[1],
                   self.activities_left[index_training].shape[2]))
        activities_right = Input(
            shape=(self.activities_right[index_training].shape[1],
                   self.activities_right[index_training].shape[2]))
        times_left = Input(shape=(self.times_left[index_training].shape[1],
                                  self.times_left[index_training].shape[2]))
        times_right = Input(shape=(self.times_right[index_training].shape[1],
                                   self.times_right[index_training].shape[2]))

        # Neural Network Architecture
        left = concatenate([activities_left, times_left])
        i3 = LSTM(self.factor, return_sequences=False,
                  dropout=self.noise)(left)
        right = concatenate([activities_right, times_right])
        i4 = LSTM(self.factor, return_sequences=False,
                  dropout=self.noise)(right)
        input_current_activity = Input(shape=(self.activities.shape[1], ))
        f1 = concatenate([i3, i4, input_current_activity])
        f1 = Dense(self.factor)(f1)
        output = Dense(1, activation="relu")(f1)

        model = Model([
            activities_left, activities_right, times_left, times_right,
            input_current_activity
        ], output)
        model.compile(optimizer='nadam',
                      loss='mean_squared_error',
                      metrics=['mean_squared_error'])

        plot_model(model, show_shapes=True, to_file='{}.png'.format(self.name))

        es = EarlyStopping(monitor='val_loss',
                           mode='min',
                           verbose=1,
                           patience=20)
        training = model.fit(
            [
                self.activities_left[index_training],
                self.activities_right[index_training],
                self.times_left[index_training],
                self.times_right[index_training],
                self.activities[index_training],
            ],
            y=self.y[index_training],
            shuffle=True,
            epochs=epoch,  #
            verbose=1,
            batch_size=128,
            validation_data=([
                self.activities_left[index_validation],
                self.activities_right[index_validation],
                self.times_left[index_validation],
                self.times_right[index_validation],
                self.activities[index_validation],
            ], self.y[index_validation]),
            callbacks=[es])
        self.model = model
        model.save('{}.h5'.format(self.name))

        # Plot the learning process
        plt.plot(training.history['loss'])
        plt.plot(training.history['val_loss'])
        plt.title('Model loss')
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.legend(['loss', 'val_loss'], loc='upper left')
        plt.savefig('{}_accuracy.eps'.format(self.name), format='eps')
        plt.close()

        return time.time() - start_time
# with open('wiki-news-300d-1M.vec') as f:
for i in range(2, len(idx2word) - 2):
    embedding_matrix[i] = words_fast[idx2word[i]]
#         ordered_words_ft.append(s[0])
print('Found %s word vectors.' % len(embedding_matrix))

# for word, i in word2idx.items():
#     embedding_vector = embeddings_index.get(word)
#     if embedding_vector is not None:
#         # words not found in embedding index will be all-zeros.
#         embedding_matrix[i] = embedding_vector

# Model definition
# input and embedding for words

word_in = Input(shape=(MAX_LEN, ))

word_embedding = Embedding(input_dim=len(word2idx),
                           output_dim=100,
                           weights=[embedding_matrix],
                           input_length=MAX_LEN,
                           trainable=True)(word_in)

# input and embeddings for characters
char_in = Input(shape=(
    MAX_LEN,
    max_len_char,
))
emb_char = TimeDistributed(
    Embedding(input_dim=n_chars + 2,
              output_dim=10,
def segnet_vgg16(input_size=(256, 256, 3)):

    inputs = Input(input_size)

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Up Block 1
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)

    # Up Block 2
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)

    # Up Block 3
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)

    # Up Block 4
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)

    # Up Block 5
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)

    x = Conv2D(1, (1, 1), activation='sigmoid', padding='same')(x)

    model = Model(inputs=inputs, outputs=x)

    model.compile(optimizer=Adam(lr=2e-4), loss=final_loss, metrics=[IoU])

    return model
def unet2(pretrained_weights=None, input_size=(256, 256, 3)):

    inputs = Input(input_size)

    conv1 = Conv2D(64, 3, activation='relu', padding='same')(inputs)
    conv1 = Conv2D(64, 3, activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128, 3, activation='relu', padding='same')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256, 3, activation='relu', padding='same')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(512, 3, activation='relu', padding='same')(pool3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same')(conv4)
    #drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2D(512, 3, activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, 3, activation='relu', padding='same')(conv5)
    #drop5 = Dropout(0.5)(conv5)

    up6 = Conv2D(512, 2, activation='relu',
                 padding='same')(UpSampling2D(size=(2, 2))(conv5))
    merge6 = concatenate([conv4, up6], axis=3)
    conv6 = Conv2D(512, 3, activation='relu', padding='same')(merge6)
    conv6 = Conv2D(512, 3, activation='relu', padding='same')(conv6)

    up7 = Conv2D(256, 2, activation='relu',
                 padding='same')(UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256, 3, activation='relu', padding='same')(merge7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same')(conv7)

    up8 = Conv2D(128, 2, activation='relu',
                 padding='same')(UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128, 3, activation='relu', padding='same')(merge8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same')(conv8)

    up9 = Conv2D(64, 2, activation='relu',
                 padding='same')(UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64, 3, activation='relu', padding='same')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same')(conv9)
    conv9 = Conv2D(2, 3, activation='relu', padding='same')(conv9)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    model.compile(optimizer=Adam(lr=2e-4), loss=final_loss, metrics=[IoU])
    #model.compile(optimizer = SGD(lr=1e-3, decay=0.0, momentum=0.9, nesterov=True), loss = final_loss, metrics = [IoU])
    #model.summary()

    if (pretrained_weights):
        model.load_weights(pretrained_weights)

    return model
# Example 8
model_k5.add(Conv1D(conv_depth_1, 5, padding='same', activation='relu'))
model_k5.add(MaxPooling1D(pool_size=pool_size, padding='same'))
model_k5.add(Dropout(drop_prob_1))  # some Dropout regularization (if necessary)

# this convolutional layer processes the sentence with a word window size of 3
model_k3.add(Conv1D(conv_depth_1, 3, padding='same', activation='relu'))
model_k3.add(MaxPooling1D(pool_size=pool_size, padding='same'))
model_k3.add(Dropout(drop_prob_1))  # some Dropout regularization (if necessary)

# this convolutional layer processes the sentence with a word window size of 7
model_k7.add(Conv1D(conv_depth_1, 7, padding='same', activation='relu'))
model_k7.add(MaxPooling1D(pool_size=pool_size, padding='same'))
model_k7.add(Dropout(drop_prob_1))  # some Dropout regularization (if necessary)


model_in = Input(shape=(200,320))
merged = concatenate([model_k3(model_in),model_k5(model_in),model_k7(model_in)],axis=1)
model_final = Sequential()

# CLASSIFICATION PART: FULLY-CONNECTED LAYER + OUTPUT LAYER
#   Now flatten to 1D, apply FC -> ReLU (with dropout) -> softmax
model_final.add( Flatten() )
model_final.add( Dense(hidden_size, activation='relu', kernel_regularizer=regularizers.l2(weight_penalty)) )
model_final.add( Dropout(drop_prob_2) ) # Some Dropout regularization (if necessary)
model_final.add( Dense(num_classes, activation='softmax') )
model = Model(model_in,model_final(merged))

# DEFINE THE LOSS FUNCTION AND OPTIMIZER
model.compile(loss='categorical_crossentropy',  # using the cross-entropy loss function
              optimizer='adadelta',  # using the Adadelta optimiser
              metrics=['accuracy'])  # reporting the accuracy
# Example 9
from keras.models import Model, Input
from keras.layers import Dense
from keras.optimizers import Adam
from keras import backend as K

path = '../data/iBeacon_RSSI_Unlabeled.csv'
x_un = read_csv(path, index_col=None)
x_un.drop(["location", "date"], axis=1, inplace=True)
x_un = (x_un + 200) / 200


def rmse(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))


input_layer = Input(shape=(x_un.shape[1], ))
enc = Dense(10, activation='relu')(input_layer)
enc = Dense(5, activation='relu')(enc)
dec = Dense(5, activation='relu')(enc)
dec = Dense(10, activation='relu')(dec)
output_layer = Dense(x_un.shape[1], activation='relu')(dec)

model = Model(input_layer, output_layer)
model.compile(optimizer=Adam(.001), loss=rmse, metrics=['mse'])
hist = model.fit(x_un, x_un, epochs=20, batch_size=10, verbose=2)

path = '../data/iBeacon_RSSI_Labeled.csv'
a = read_csv(path, index_col=None)
#a.drop(["date"], axis = 1, inplace = True)
x_aug = a.groupby('location').filter(lambda b: (len(b) < 10) & (len(b) > 1))
x_aug = x_aug.reset_index(drop=True)
# Example 10
cols = ['bidopen', 'bidclose', 'bidhigh', 'bidlow', 'tickqty', "bid_hl", "bid_cl", "bid_ho", "bid_co"]

from FP_datamanager import getDataDFX
from CosHotRestarts import CosHotRestart
from sgdr import SGDRScheduler

back = 20 # 40
fore = 4
x,y,t_x,t_y,v_x,v_y = getDataDFX('1e2data.csv',back,fore,1)

nodes = 30
nodes2 = 30
merg=2

LSTMin = Input(shape=(t_x.shape[1],t_x.shape[2]))

LSTM_1 = LSTM(nodes,return_sequences=True,activation='relu')(LSTMin)

# LSTM_merge = Concatenate()([LSTM_1])
LSTM_merge = LSTM(nodes,return_sequences=True,activation='relu')(LSTM_1)
for i in range(merg):
    LSTM_merge = LSTM(nodes, return_sequences=True, activation='relu')(LSTM_merge)
LSTM_merge = LSTM(nodes,activation='relu')(LSTM_merge)

# amplifier layer (e.g. tf6)
# uses multiple separate Dropout-Dense subnets so that back-propagation is amplified;
# each subnet acts as a concurrent micro-batch (principle proposed in [paper?]).
# Demonstrable increase from 6 to 8 maximum trainable LSTM_merge layers when
# merg (extra LSTM layers) grows from 0 (no subnets) to 1 (2 subnets)
# to 2 (8 subnets, unreliable without boosting via callbacks).
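# --- Hedged sketch (the original amplifier code is truncated above) ---
# One plausible reading of the comments: several parallel Dropout -> Dense
# subnets fed from the same LSTM output and merged back together, so that each
# subnet provides its own gradient path during back-propagation. The subnet
# count, layer widths and the additive merge are assumptions, not the original.
from keras.layers import Dense, Dropout, add

def amplifier_block(tensor, n_subnets=2, units=30, drop=0.2):
    branches = []
    for _ in range(n_subnets):
        b = Dropout(drop)(tensor)          # independent dropout mask per subnet
        b = Dense(units, activation='relu')(b)
        branches.append(b)
    # a single subnet needs no merge; otherwise sum the parallel branches
    return branches[0] if n_subnets == 1 else add(branches)

# e.g. applied to the last LSTM output defined above (assumed usage):
# amplified = amplifier_block(LSTM_merge, n_subnets=2, units=nodes2)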
# Example 11
    model= Sequential()
    model.add(Dense(256, input_shape=(68,2)))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
#     model.add(Dropout(0.5))
    model.add(Dense(256))
    model.add(Activation('relu'))
#     model.add(Dropout(rate=0.1))

    model.add(Dense(outpu))
    model.add(Activation("relu"))
    return model

with tf.device('/device:GPU:1'):
    anchor_in,pos_in,neg_in = Input(shape=(68,2)),Input(shape=(68,2)),Input(shape=(68,2))

    mod=create_mod(224,68)

    anchor_out=mod(anchor_in)
    pos_out=mod(pos_in)
    neg_out=mod(neg_in)

    merged= concatenate([anchor_out,pos_out,neg_out], axis=-1)

    model=Model(inputs=[anchor_in,pos_in,neg_in],outputs=merged)

    model.compile(loss=triplet_loss,optimizer=Adam())

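# note: triplet_loss presumably only uses the concatenated anchor/positive/negative
# embeddings (y_pred), so dummy zero labels are passed below, one per training triplet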
model.fit(train,np.zeros(train[0].shape[0]),batch_size=100,epochs=10)
# Example 12
                                                    test_size=0.1,
                                                    random_state=22)

# generate images on the fly, adding small distortions and rotations
datagen = image.ImageDataGenerator(width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   rotation_range=1,
                                   zoom_range=0.01,
                                   shear_range=0.01,
                                   horizontal_flip=False,
                                   vertical_flip=False)

DEBUG = False
if DEBUG:
    # new model definition
    input_tensor = Input((height, width, 3))
    x = input_tensor
    for i in range(4):
        x = Conv2D(32 * 2**i, (3, 3), padding='same', activation='relu')(x)
        x = Conv2D(32 * 2**i, (3, 3), padding='same', activation='relu')(x)
        x = MaxPooling2D((2, 2))(x)
        x = BatchNormalization()(x)

    x = Flatten()(x)
    x = Dropout(0.25)(x)
    x = [
        Dense(n_class, activation='softmax', name='c%d' % (i + 1))(x)
        for i in range(n_len)
    ]
    merged = concatenate(x, axis=-1)
    model_cnn = Model(inputs=input_tensor, outputs=merged)
# Example 13
        if epoch in [2, 4, 6, 8, 10, 12, 14, 16, 18]:
            K.set_value(self.lambda_epi, K.get_value(self.lambda_epi) - 0.05)


def lr_schedule(epoch):
    lr = 0.0002
    if epoch > 60:
        lr = 0.0001
    return lr


if __name__ == "__main__":

    # Pre-process input data
    input_tensor = Input(
        shape = input_shape,
        name='direct_epipolar')
    input_of = Lambda(
        lambda x: x[:,:,:,:2],
        output_shape = (input_height, input_width, 2))(input_tensor)
    frame_t0 = Lambda(
        lambda x: x[:,:,:,2:5],
        output_shape = (input_height, input_width, 3))(input_tensor)
    frame_t2 = Lambda(
        lambda x: x[:,:,:,5:8],
        output_shape = (input_height, input_width, 3))(input_tensor)

    # Stack up network blocks
    convraw1_1 = Conv2D(
        filters = 16,
        kernel_size = 7,
# Example 14
                                                seed=None,
                                                clip=True)
        noisy_image = skimage.util.random_noise(noisy_image,
                                                mode='pepper',
                                                seed=None,
                                                clip=True)
        noisy_X_train.append(noisy_image)
    except:
        pass

noisy_X_train = np.array(noisy_X_train, dtype=np.float32)
noisy_X_train = noisy_X_train.reshape([60000, 784])

# Building Autoencoder architecture
input_img = Input(shape=[
    784,
])
hidden_1 = Dense(128, activation='relu')(input_img)
code = Dense(32, activation='relu')(hidden_1)
hidden_2 = Dense(128, activation='relu')(code)
output_img = Dense(784, activation='sigmoid')(hidden_2)

autoencoder = Model(input_img, output_img)
autoencoder.compile(loss='binary_crossentropy', optimizer='adam')
autoencoder.summary()
autoencoder.fit(noisy_X_train, X_train, epochs=10)

# Saving model
model_json = autoencoder.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# Example 15
to_png_array('/home/samir/Desktop/blender/pycode/160planes/render', 'im_wrap1' , output_images, IMAGECOUNT)


# Expand the image dimension to conform with the shape required by keras and tensorflow, inputshape=(..., h, w, nchannels).
input_images = np.expand_dims(input_images, -1)
output_images = np.expand_dims(output_images, -1)


print("input shape: {}".format(input_images.shape))
print("output shape: {}".format(output_images.shape))
print(len(input_images))

input_height = 160
input_width = 160

input_image = Input(shape=(input_height, input_width, 1))


# =============================================================================

x1 = Conv2D(50, (3, 3), padding='same')(input_image)
# x2 = BatchNormalization(axis=-1)(x1)
x3 = Activation('relu')(x1)

x4 = Conv2D(50, (3, 3), padding='same')(x3)
# x5 = BatchNormalization(axis=-1)(x4)
x6 = Activation('relu')(x4)
x7 = Conv2D(50, (3, 3), padding='same')(x6)
# x8 = BatchNormalization(axis=-1)(x7)
x9 = layers.add([x3, x7])
x10 = Activation('relu')(x9)
# Example 16
def get_deep_convnet(window_size=4096, channels=2, output_size=84):
    inputs = Input(shape=(window_size, channels))
    outs = inputs

    outs = (ComplexConv1D(16,
                          6,
                          strides=2,
                          padding='same',
                          activation='linear',
                          kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(32,
                          3,
                          strides=2,
                          padding='same',
                          activation='linear',
                          kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(64,
                          3,
                          strides=1,
                          padding='same',
                          activation='linear',
                          kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(64,
                          3,
                          strides=1,
                          padding='same',
                          activation='linear',
                          kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(128,
                          3,
                          strides=1,
                          padding='same',
                          activation='relu',
                          kernel_initializer='complex_independent'))(outs)
    outs = (ComplexConv1D(128,
                          3,
                          strides=1,
                          padding='same',
                          activation='linear',
                          kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    #outs = (keras.layers.MaxPooling1D(pool_size=2))
    #outs = (Permute([2, 1]))
    outs = (keras.layers.Flatten())(outs)
    outs = (keras.layers.Dense(2048,
                               activation='relu',
                               kernel_initializer='glorot_normal'))(outs)
    predictions = (keras.layers.Dense(
        output_size,
        activation='sigmoid',
        bias_initializer=keras.initializers.Constant(value=-5)))(outs)

    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer=keras.optimizers.Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
def fcn_2s(input_size=(256, 256, 3)):

    inputs = Input(input_size)
    x = BatchNormalization()(inputs)

    # Block 1
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1')(x)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    block_1 = Conv2D(1, (1, 1), activation='relu', padding='same')(x)
    # Block 2
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1')(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    block_2 = Conv2D(1, (1, 1), activation='relu', padding='same')(x)
    # Block 3
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    block_3 = Conv2D(1, (1, 1), activation='relu', padding='same')(x)

    # Block 4
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    block_4 = Conv2D(1, (1, 1), activation='relu', padding='same')(x)

    # Block 5
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    x = Conv2D(512, (3, 3), activation='relu', padding="same")(x)

    block_5 = Conv2DTranspose(1,
                              kernel_size=(4, 4),
                              strides=(2, 2),
                              activation='relu',
                              padding='same')(x)

    sum_1 = add([block_4, block_5])
    sum_1 = Conv2DTranspose(1,
                            kernel_size=(4, 4),
                            strides=(2, 2),
                            activation='relu',
                            padding='same')(sum_1)

    sum_2 = add([block_3, sum_1])
    sum_2 = Conv2DTranspose(1,
                            kernel_size=(4, 4),
                            strides=(2, 2),
                            activation='relu',
                            padding='same')(sum_2)

    sum_3 = add([block_2, sum_2])
    sum_3 = Conv2DTranspose(1,
                            kernel_size=(4, 4),
                            strides=(2, 2),
                            activation='relu',
                            padding='same')(sum_3)

    sum_4 = add([block_1, sum_3])
    x = Conv2DTranspose(1,
                        kernel_size=(4, 4),
                        strides=(2, 2),
                        activation='sigmoid',
                        padding='same')(sum_4)

    model = Model(inputs=inputs, outputs=x)

    model.compile(optimizer=Adam(lr=2e-4), loss=final_loss, metrics=[IoU])

    return model
for i in range(FEED_LEN + 1, features.shape[0] - PREDICT_LEN):
    temp = features[i - FEED_LEN:i, :]
    temp = temp.reshape(1, FEED_LEN, features.shape[1])
    feature_set = np.concatenate((feature_set, temp), axis=0)

label_set = np.array(cpu_values[FEED_LEN:FEED_LEN + PREDICT_LEN])
label_set = label_set.reshape(1, PREDICT_LEN)

for i in range(FEED_LEN + PREDICT_LEN + 1, features.shape[0]):
    temp = cpu_values[i - PREDICT_LEN:i]
    temp = temp.reshape(1, PREDICT_LEN)
    label_set = np.concatenate((label_set, temp), axis=0)
label_set.shape

i = Input(batch_shape=(None, None, 1))
o = TCN(return_sequences=False)(i)
o = Dense(PREDICT_LEN)(o)
m = Model(inputs=[i], outputs=[o])
m.compile(optimizer='adam', loss='mae', metrics=['mape', 'mae', 'mse'])
m.summary()

X_train, x_test, Y_train, y_test = train_test_split(feature_set,
                                                    label_set,
                                                    train_size=0.8,
                                                    shuffle=False)
print(X_train.shape)
print(Y_train.shape)
print(x_test.shape)
print(y_test.shape)
def VGGUnet2(input_size=(256, 256, 3)):

    inputs = Input(input_size)

    conv1 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv3)
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv4)
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv5)
    conv5 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)
    pool5 = MaxPooling2D(pool_size=(2, 2))(drop5)

    conv6 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool5)
    conv6 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv6)
    drop6 = Dropout(0.5)(conv6)

    up5 = Conv2D(512,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(drop6))
    merge5 = concatenate([drop5, up5], axis=3)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge5)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv6)

    up6 = Conv2D(512,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv6)

    up7 = Conv2D(256,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv7)

    up8 = Conv2D(128,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv8)

    up9 = Conv2D(64,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)
    #conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    model.compile(optimizer=Adam(lr=2e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
res = len(re.findall(r'\w+', text_str))
# printing result
print("The number of words in string are : " + str(res))

# integer encode the document
result = one_hot(text_str, round(vocab_size * 1.3))
#print(result)

tokenizer = Tokenizer(num_words=3000)

src_txt_length = len(text_str)
sum_txt_length = math.ceil(src_txt_length * 0.3)

print("sum_txt_length = ", sum_txt_length)
print("src_txt_length = ", src_txt_length)
# source text input model
inputs1 = Input(shape=(src_txt_length, ))
am1 = Embedding(vocab_size, 128)(inputs1)
am2 = LSTM(128)(am1)
# summary input model
inputs2 = Input(shape=(sum_txt_length, ))
sm1 = Embedding(vocab_size, 128)(inputs2)
sm2 = LSTM(128)(sm1)
# decoder output model
decoder1 = concatenate([am2, sm2])
outputs = Dense(vocab_size, activation='softmax')(decoder1)
# tie it together [article, summary] [word]
model = Model(inputs=[inputs1, inputs2], outputs=outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam')
#model.compile(loss='mean_squared_error', optimizer='adam')
print(model.summary())
def D_resunet(input_size=(256, 256, 3)):  # D_ResUnet with four downsampling residual units

    # https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5
    inputs = Input(input_size)

    main_path = Conv2D(64, (3, 3), padding='same')(inputs)
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)

    main_path = Conv2D(64, (3, 3), padding='same')(main_path)

    shortcut = Conv2D(64, (1, 1))(inputs)
    shortcut = BatchNormalization()(shortcut)

    main_path = add([shortcut, main_path])

    f0 = main_path

    #encoder res_block1
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(128, (3, 3), padding='same', strides=(2, 2))(main_path)
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(128, (3, 3), padding='same', strides=(1, 1))(main_path)

    shortcut = Conv2D(128, (1, 1), strides=(2, 2))(f0)
    shortcut = BatchNormalization()(shortcut)

    main_path = add([shortcut, main_path])

    f1 = main_path

    #encoder res_block2
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(256, (3, 3), padding='same', strides=(2, 2))(main_path)
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(256, (3, 3), padding='same', strides=(1, 1))(main_path)

    shortcut = Conv2D(256, (1, 1), strides=(2, 2))(f1)
    shortcut = BatchNormalization()(shortcut)

    main_path = add([shortcut, main_path])

    f2 = main_path

    #encoder res_block3
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(512, (3, 3), padding='same', strides=(2, 2))(main_path)
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(512, (3, 3), padding='same', strides=(1, 1))(main_path)

    shortcut = Conv2D(512, (1, 1), strides=(2, 2))(f2)
    shortcut = BatchNormalization()(shortcut)

    main_path = add([shortcut, main_path])
    f3 = main_path

    #encoder res_block4
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(512, (3, 3), padding='same', strides=(2, 2))(main_path)
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(512, (3, 3), padding='same', strides=(1, 1))(main_path)

    shortcut = Conv2D(512, (1, 1), strides=(2, 2))(f3)
    shortcut = BatchNormalization()(shortcut)

    main_path = add([shortcut, main_path])
    f4 = main_path
    '''
    #dilated_block
	dilate1 = AtrousConvolution2D(512, 3, 3, atrous_rate=(1, 1), activation='relu', border_mode='same')(main_path)
	#d1 = dilate1
	sum1 = add([f3, dilate1])
    
	dilate2 = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', border_mode='same')(dilate1)
	#d2 = dilate2
	sum2 = add([sum1, dilate2])
    
	dilate3 = AtrousConvolution2D(512, 3, 3, atrous_rate=(4, 4), activation='relu', border_mode='same')(dilate2)
	#d3 = dilate3
	sum3 = add([sum2, dilate3])
    
	dilate4 = AtrousConvolution2D(512, 3, 3, atrous_rate=(8, 8), activation='relu', border_mode='same')(dilate3)
	sum_dilate = add([sum3, dilate4])
    '''

    #dilated_block
    dilate1 = Conv2D(512, (3, 3),
                     dilation_rate=(1, 1),
                     activation='relu',
                     padding='same')(main_path)
    #d1 = dilate1
    sum1 = add([f4, dilate1])

    dilate2 = Conv2D(512, (3, 3),
                     dilation_rate=(2, 2),
                     activation='relu',
                     padding='same')(dilate1)
    #d2 = dilate2
    sum2 = add([sum1, dilate2])

    dilate3 = Conv2D(512, (3, 3),
                     dilation_rate=(4, 4),
                     activation='relu',
                     padding='same')(dilate2)
    #d3 = dilate3
    sum3 = add([sum2, dilate3])

    #	dilate4 = Conv2D(512, (3, 3), dilation_rate=(8, 8), activation='relu', padding='same')(dilate3)
    #	sum_dilate = add([sum3, dilate4])

    #decoder part1
    main_path = UpSampling2D(size=(2, 2))(sum3)
    main_path = concatenate([main_path, f3], axis=3)
    o0 = main_path

    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(512, (3, 3), padding='same', strides=(1, 1))(main_path)
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(512, (3, 3), padding='same', strides=(1, 1))(main_path)

    shortcut = Conv2D(512, (1, 1), strides=(1, 1))(o0)
    shortcut = BatchNormalization()(shortcut)

    main_path = add([shortcut, main_path])

    #decoder part2
    main_path = UpSampling2D(size=(2, 2))(main_path)
    main_path = concatenate([main_path, f2], axis=3)
    o1 = main_path

    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(256, (3, 3), padding='same', strides=(1, 1))(main_path)
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(256, (3, 3), padding='same', strides=(1, 1))(main_path)

    shortcut = Conv2D(256, (1, 1), strides=(1, 1))(o1)
    shortcut = BatchNormalization()(shortcut)

    main_path = add([shortcut, main_path])

    #decoder part3
    main_path = UpSampling2D(size=(2, 2))(main_path)
    main_path = concatenate([main_path, f1], axis=3)
    o2 = main_path

    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(128, (3, 3), padding='same', strides=(1, 1))(main_path)
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(128, (3, 3), padding='same', strides=(1, 1))(main_path)

    shortcut = Conv2D(128, (1, 1), strides=(1, 1))(o2)
    shortcut = BatchNormalization()(shortcut)

    main_path = add([shortcut, main_path])

    #decoder part4
    main_path = UpSampling2D(size=(2, 2))(main_path)
    main_path = concatenate([main_path, f0], axis=3)
    o3 = main_path

    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(64, (3, 3), padding='same', strides=(1, 1))(main_path)
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(64, (3, 3), padding='same', strides=(1, 1))(main_path)

    shortcut = Conv2D(64, (1, 1), strides=(1, 1))(o3)
    shortcut = BatchNormalization()(shortcut)

    main_path = add([shortcut, main_path])

    main_path = Conv2D(1, (1, 1), activation='sigmoid')(main_path)

    model = Model(inputs=inputs, outputs=main_path)
    model.compile(optimizer=Adam(lr=2e-4), loss=final_loss, metrics=[IoU])

    return model
# Example 22
    def build_generator(self, n_resnet=9):
        'Define the Generator model'
        # initialization weight
        init = RandomNormal(stddev=0.02)
        # input_image
        in_image = Input(shape=self.img_shape)

        def resnet_block(n_filters, input_layer, initializer=init):
            'Residual Connection block for building generator'

            # first layer
            rb = Conv2D(filters=n_filters,
                        kernel_size=3,
                        padding='same',
                        kernel_initializer=initializer)(input_layer)
            rb = InstanceNormalization(axis=-1)(rb)
            rb = Activation('relu')(rb)

            # second layer
            rb = Conv2D(filters=n_filters,
                        kernel_size=3,
                        padding='same',
                        kernel_initializer=initializer)(rb)
            rb = InstanceNormalization(axis=-1)(rb)

            # residual connection
            rb = Concatenate()([rb, input_layer])
            return rb

        def main_block(input_layer,
                       in_features=64,
                       downsampling=True,
                       initializer=init):
            'Downsampling or Upsampling block'
            if downsampling:
                out_features = in_features * 2
                g = Conv2D(out_features,
                           kernel_size=3,
                           strides=(2, 2),
                           padding='same',
                           kernel_initializer=initializer)(input_layer)
            else:
                out_features = in_features // 2
                #g = Conv2DTranspose(out_features, kernel_size=3, strides=(2,2), padding='same', kernel_initializer=initializer)(input_layer)
                g = UpSampling2D(size=2, interpolation='bilinear')(input_layer)
                g = ReflectionPadding2D()(g)
                g = Conv2D(out_features,
                           kernel_size=3,
                           strides=1,
                           padding='valid',
                           kernel_initializer=initializer)(g)

            g = InstanceNormalization(axis=-1)(g)
            g = Activation('relu')(g)
            return g

        # c7s1-64
        g = Conv2D(64, (7, 7), padding='same',
                   kernel_initializer=init)(in_image)
        g = InstanceNormalization(axis=-1)(g)
        g = Activation('relu')(g)

        # d128
        g = main_block(input_layer=g, in_features=64, downsampling=True)
        # d256
        g = main_block(input_layer=g, in_features=128, downsampling=True)

        # R256
        for _ in range(n_resnet):
            g = resnet_block(256, g)

        # u128
        g = main_block(input_layer=g, in_features=256, downsampling=False)
        # u64
        g = main_block(input_layer=g, in_features=128, downsampling=False)

        # c7s1-3
        g = Conv2D(3, (7, 7), padding='same', kernel_initializer=init)(g)
        g = InstanceNormalization(axis=-1)(g)
        out_image = Activation('tanh')(g)

        model = Model(in_image, out_image)
        return model
# Example 23
from keras.models import Input
from utils import shared_network
from keras.layers import Lambda
from utils import euclidean_distance
from keras.models import Model
from utils import load_train_test, create_pairs, contrastive_loss
import numpy as np

input_shape = (112, 92, 1)

input_top = Input(shape=input_shape)
input_bottom = Input(shape=input_shape)

sh_network = shared_network(input_shape)
output_top = sh_network(input_top)
output_bottom = sh_network(input_bottom)

distance = Lambda(euclidean_distance,
                  output_shape=(1, ))([output_top, output_bottom])

model = Model(inputs=[input_top, input_bottom], outputs=distance)

X_train, Y_train, X_test, Y_test = load_train_test("faces")

num_classes = len(np.unique(Y_train))
training_pairs, training_labels = create_pairs(X_train, Y_train,
                                               len(np.unique(Y_train)))
test_pairs, test_labels = create_pairs(X_test, Y_test, len(np.unique(Y_test)))

model.compile(loss=contrastive_loss, optimizer='adam')
model.fit([training_pairs[:, 0], training_pairs[:, 1]],
# Example 24
    def __init__(self):
        self.img_rows = 128
        self.img_cols = 128
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        optimizer = Adam(lr=0.0002, beta_1=0.5, beta_2=0.999)

        self.dataset_name = 'apple2orange'
        self.data_loader = DataLoader(dataset=self.dataset_name,
                                      image_shape=(self.img_rows,
                                                   self.img_cols))

        # Calculate output shape of Discriminator (PatchGAN)
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        # build and compile discriminator: A -> [real/fake]
        self.d_model_A = self.build_discriminator()
        self.d_model_A.compile(loss='mse',
                               optimizer=optimizer,
                               metrics=['accuracy'])
        # build and compile discriminator: B -> [real/fake]
        self.d_model_B = self.build_discriminator()
        self.d_model_B.compile(loss='mse',
                               optimizer=optimizer,
                               metrics=['accuracy'])

        # build generator: A -> B
        self.g_AtoB = self.build_generator()
        # build generator: B -> A
        self.g_BtoA = self.build_generator()

        # define input images for both domains
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # identity element
        fake_B = self.g_AtoB(img_A)
        # forward cycle
        fake_A = self.g_BtoA(img_B)
        # backward cycle
        back_to_A = self.g_BtoA(fake_B)
        back_to_B = self.g_AtoB(fake_A)
        # Identity mapping of images
        img_A_id = self.g_BtoA(img_A)
        img_B_id = self.g_AtoB(img_B)

        # For the combined model we will only train the generators
        self.d_model_A.trainable = False
        self.d_model_B.trainable = False

        # Discriminators determines validity of translated images
        valid_A = self.d_model_A(fake_A)
        valid_B = self.d_model_B(fake_B)

        # define a composite model for updating generators by adversarial and cycle loss
        self.composite_model = Model(inputs=[img_A, img_B],
                                     outputs=[
                                         valid_A, valid_B, back_to_A,
                                         back_to_B, img_A_id, img_B_id
                                     ])

        # compile model with weighting of least squares loss and L1 loss
        self.composite_model.compile(
            loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],
            loss_weights=[1, 1, 10, 10, 1, 1],
            optimizer=optimizer)
from keras.layers import Dense, Dropout, Flatten, BatchNormalization
from keras.layers import Input
import pickle
from keras.models import load_model

train = pd.read_csv('train.txt', sep=' ', encoding='gb2312')
train.columns = ['path', 'label']
val = pd.read_csv('val.txt', sep=' ', encoding='gb2312')
val.columns = ['path', 'label']

train_number = len(train)
val_number = len(val)
class_number = len(np.unique(train.label))

# create the base pre-trained model
input_tensor = Input(shape=(224, 224, 3))
base_model = InceptionV3(
    input_tensor=input_tensor,
    weights='imagenet',
    include_top=False,
)

# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a softmax classification layer for the 5 classes
predictions = Dense(5, activation='softmax')(x)

# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
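
# A hedged sketch of the usual next steps for this transfer-learning setup
# (not part of the original snippet): freeze the InceptionV3 base so only the
# newly added head is trained at first, then compile.
for layer in base_model.layers:
    layer.trainable = False

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])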
Example #26
    def train(self, log_dir="./", verbose=1, cnn="simple"):
        # number of classes for each output head
        self.OTPUT_LABELS_1 = len(self.CLASS_REGION)
        self.OTPUT_LABELS_2 = len(self.CLASS_STATE)
        self.OTPUT_LABELS_3 = len(self.CLASS_COUNT_LINE)

        # create input
        input_model = Input(shape=(self.HEIGHT, self.WEIGHT,
                                   self.COLOR_CHANNELS))

        if (cnn == "simple"):
            conv_base = self.create_simple_conv(input_model)
        else:
            conv_base = self.create_conv(input_model)

        # training callbacks
        self.CALLBACKS_LIST = [
            callbacks.ModelCheckpoint(
                filepath=os.path.join(log_dir, 'buff_weights.h5'),
                monitor='val_loss',
                save_best_only=True,
            ),
            callbacks.ReduceLROnPlateau(
                monitor='val_loss',
                factor=self.REDUCE_LRO_N_PLATEAU_FACTOR,
                patience=self.REDUCE_LRO_N_PLATEAU_PATIENCE,
            )
        ]

        # train all
        modelsArr = []
        for i in np.arange(self.ENSEMBLES):
            # create model
            model = self.create_model(
                input_model=input_model,
                conv_base=conv_base,
                dropout_1=self.DROPOUT_1,
                dropout_2=self.DROPOUT_2,
                dense_layers=self.DENSE_LAYERS,
                output_labels1=self.OTPUT_LABELS_1,
                output_labels2=self.OTPUT_LABELS_2,
                output_labels3=self.OTPUT_LABELS_3,
                out_dense_init=self.OUT_DENSE_INIT,
                W_regularizer=self.W_REGULARIZER,
                out_dense_activation=self.OUT_DENSE_ACTIVATION,
                dense_activation=self.DENSE_ACTIVATION,
                BatchNormalization_axis=self.BATCH_NORMALIZATION_AXIS)

            # train
            history = model.fit_generator(
                self.train_generator,
                steps_per_epoch=self.STEPS_PER_EPOCH,
                epochs=self.EPOCHS,
                callbacks=self.CALLBACKS_LIST,
                validation_data=self.validation_generator,
                validation_steps=self.VALIDATION_STEPS,
                verbose=verbose)

            # load best model
            model.load_weights(os.path.join(log_dir, 'buff_weights.h5'))

            # append to models
            modelsArr.append(model)

        # merge ensembles
        if len(modelsArr) > 1:
            self.MODEL = self.ensemble(modelsArr, input_model)
        elif len(modelsArr) == 1:
            self.MODEL = modelsArr[0]

        return self.MODEL
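
    def ensemble(self, models, model_input):
        # Hedged sketch of the merge step used above; the original helper is
        # not shown in this excerpt. It averages each corresponding output
        # head across the trained models, which all share the same input
        # tensor.
        from keras.layers import average
        from keras.models import Model
        outputs = [average([m.outputs[i] for m in models])
                   for i in range(len(models[0].outputs))]
        return Model(inputs=model_input, outputs=outputs, name='ensemble')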
Example #27
params = {
    # entries such as vocab_size, embedding_size, max_len, num_classes and
    # optimizer are referenced in later cells but are not shown in this excerpt
    'model_type': model_name,
    'lstm_layer_1_units': 100,
    'dense_layer_units': 50,
    'dense_layer_activation': 'relu',
    'dropout': 0.1,
    'recurrent_dropout': 0.1,
    
    # training hps
    'batch_size': 32,
    'epochs': 30
}

#exp.log_parameters(params)

# %% In [23]:
from keras_contrib.layers import CRF        # CRF layer (assumed from keras-contrib)
from keras_contrib.losses import crf_loss   # matching CRF loss used in compile below

input = Input(shape=(params['max_len'],))
model = Embedding(input_dim=params['vocab_size'], output_dim=params['embedding_size'], input_length=params['max_len'])(input)
model = Dropout(params['dropout'])(model)
model = Bidirectional(LSTM(units=params['lstm_layer_1_units'], return_sequences=True, recurrent_dropout=params['recurrent_dropout']))(model)
model = TimeDistributed(Dense(params['dense_layer_units'], activation=params['dense_layer_activation']))(model)
crf = CRF(params['num_classes'])  # CRF layer
out = crf(model)

# %% In [24]:
model = Model(input, out)
model.summary()

# %% In [25]:
model.compile(optimizer=params['optimizer'], loss=crf_loss)
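
# Hedged training sketch, not in the original cells: X_tr and y_tr stand in for
# the padded word-index and one-hot tag arrays prepared in earlier cells; the
# names are assumptions.
history = model.fit(X_tr, np.array(y_tr),
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    validation_split=0.1,
                    verbose=1)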

# %% In [26]:
img_width, img_height = 64, 96
epochs = 20
batch_size = 32
train_dir = 'C:\\Users\\USER\\Desktop\\data_2\\model2\\train\\'
test_dir = 'C:\\Users\\USER\\Desktop\\data_2\\model2_cutout\\test\\'
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

# data loading
X_train, y_train = load_data(train_dir)
X_test, y_test0 = load_data(test_dir)
y_test = np.argmax(y_test0, axis=1)
model_input = Input(shape=input_shape)



# model 2
def model_create(model_input):
    x = Conv2D(32, (3, 3), padding='same',activation='relu')(model_input)
    x = Conv2D(32, (3, 3), padding='same',activation='relu')(x)
    x = MaxPooling2D((2, 2), strides=2)(x)
    x = Conv2D(64, (3, 3), padding='same',activation='relu')(x)
    x = Conv2D(64, (3, 3), padding='same',activation='relu')(x)
    x = MaxPooling2D((2, 2), strides=2)(x)
    x = Conv2D(128, (3, 3), padding='same',activation='relu')(x)
    x = Conv2D(128, (3, 3), padding='same',activation='relu')(x)
Example #29
def create_model(X_train_aug, y_train, X_val_aug, y_val, X_test_aug, y_test):

    DROPOUT_CHOICES = np.arange(0.0, 0.9, 0.1)
    UNIT_CHOICES = [100, 200, 500, 800, 1000, 1200]
    GRU_CHOICES = [100, 200, 300, 400, 500, 600]
    BATCH_CHOICES = [16, 32]
    LR_CHOICES = [0.0001, 0.0005, 0.001, 0.0025, 0.005, 0.01]
    params = {
        'dense1':
        hp.choice('dense1', UNIT_CHOICES),
        'dropout1':
        hp.choice('dropout1', DROPOUT_CHOICES),
        'gru1':
        hp.choice('gru1', GRU_CHOICES),
        # nesting the layers ensures they're only un-rolled sequentially
        'gru2':
        hp.choice(
            'gru2',
            [
                False,
                {
                    'gru2_units':
                    hp.choice('gru2_units', GRU_CHOICES),
                    # only make the 3rd layer available if the 2nd one is
                    'gru3':
                    hp.choice('gru3', [
                        False, {
                            'gru3_units': hp.choice('gru3_units', GRU_CHOICES)
                        }
                    ]),
                }
            ]),
        'dense2':
        hp.choice('dense2', UNIT_CHOICES),
        'dropout2':
        hp.choice('dropout2', DROPOUT_CHOICES),
        'lr':
        hp.choice('lr', LR_CHOICES),
        'decay':
        hp.choice('decay', LR_CHOICES),
        'batch_size':
        hp.choice('batch_size', BATCH_CHOICES)
    }

    # hp.choice returns search-space nodes rather than concrete values, so draw
    # one configuration here to keep this excerpt runnable on its own; in a
    # full hyperopt search the sampled params would instead come from fmin.
    from hyperopt.pyll.stochastic import sample
    params = sample(params)

    input = Input(shape=(
        X_train_aug[0].shape[1],
        X_train_aug[0].shape[2],
    ))
    profiles_input = Input(shape=(
        X_train_aug[1].shape[1],
        X_train_aug[1].shape[2],
    ))
    x1 = concatenate([input, profiles_input])
    x2 = concatenate([input, profiles_input])
    x1 = Dense(params['dense1'], activation="relu")(x1)
    x1 = Dropout(params['dropout1'])(x1)
    x2 = Bidirectional(CuDNNGRU(units=params['gru1'],
                                return_sequences=True))(x2)
    if params['gru2']:
        x2 = Bidirectional(
            CuDNNGRU(units=params['gru2']['gru2_units'],
                     return_sequences=True))(x2)
    if params['gru2'] and params['gru2']['gru3']:
        x2 = Bidirectional(
            CuDNNGRU(units=params['gru2']['gru3']['gru3_units'],
                     return_sequences=True))(x2)
    COMBO_MOVE = concatenate([x1, x2])
    w = Dense(params['dense2'], activation="relu")(COMBO_MOVE)
    w = Dropout(params['dropout2'])(w)
    w = tcn.TCN(return_sequences=True)(w)
    y = TimeDistributed(Dense(8, activation="softmax"))(w)
    model = Model([input, profiles_input], y)

    adamOptimizer = Adam(lr=params['lr'],
                         beta_1=0.8,
                         beta_2=0.8,
                         epsilon=None,
                         decay=params['decay'],
                         amsgrad=False)
    model.compile(optimizer=adamOptimizer,
                  loss="categorical_crossentropy",
                  metrics=["accuracy", accuracy])

    model.fit(X_train_aug,
              y_train,
              validation_data=(X_val_aug, y_val),
              epochs=20,
              batch_size=params['batch_size'],
              verbose=1,
              shuffle=True)

    score = model.evaluate(X_test_aug, y_test)

    out = {
        'loss': -score[2],
        'score': score[0],
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    temp_name = tempfile.gettempdir() + '/' + next(
        tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out
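
def restore_model(result):
    # Hedged sketch of reloading the model serialized by create_model above.
    # The custom_objects entries are assumptions based on the layers and
    # metrics used when the model was built (keras-tcn's TCN layer and the
    # custom `accuracy` metric).
    from keras.models import load_model
    temp_name = tempfile.gettempdir() + '/' + next(
        tempfile._get_candidate_names()) + '.h5'
    with open(temp_name, 'wb') as outfile:
        outfile.write(result['model_serial'])
    return load_model(temp_name,
                      custom_objects={'TCN': tcn.TCN, 'accuracy': accuracy})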
def build_centerloss_model(out_dims, feat_dims, input_shape=(128, 128, 1), lambda_center=0.01):
    """
    isCenterloss
    """
    inputs_dim = Input(input_shape)

    x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(inputs_dim)
    x = bn_prelu(x)
    x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = MaxPool2D(pool_size=(2, 2))(x)

    x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = MaxPool2D(pool_size=(2, 2))(x)

    x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = MaxPool2D(pool_size=(2, 2))(x)
    
    x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = MaxPool2D(pool_size=(2, 2))(x)

    x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
    x = bn_prelu(x)
    x = AveragePooling2D(pool_size=(2, 2))(x)

    x_flat = Flatten()(x)

    fc1 = Dense(feat_dims)(x_flat)
    fc1 = bn_prelu(fc1)
    dp_1 = Dropout(0.3)(fc1)

    fc2 = Dense(out_dims)(dp_1)
    fc2 = Activation('softmax')(fc2)

    base_model = Model(inputs=inputs_dim, outputs=fc2)

    # center loss
    lambda_c = lambda_center
    input_target = Input(shape=(1,))
    centers = Embedding(out_dims, feat_dims)(input_target)
    l2_loss = Lambda(lambda x: K.sum(K.square(x[0] - x[1][:, 0]), 1, keepdims=True), name='l2_loss')([fc1, centers])
    model_centers = Model(inputs=[base_model.input, input_target], outputs=[fc2, l2_loss])

    return model_centers
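
def compile_centerloss_model(model_centers, lambda_center=0.01):
    # Hedged sketch, not from the original code, of how a two-output
    # center-loss model like the one above is typically compiled:
    # cross-entropy on the softmax head plus the precomputed l2 distance as a
    # second output, weighted by lambda_center. The second "loss" just passes
    # the predicted distance through, since it is already computed in-graph.
    model_centers.compile(
        optimizer='adam',
        loss=['categorical_crossentropy', lambda y_true, y_pred: y_pred],
        loss_weights=[1.0, lambda_center],
        metrics=['accuracy'])
    # fit then takes inputs [images, integer_labels] and targets
    # [one_hot_labels, np.zeros((n_samples, 1))]
    return model_centers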