Пример #1
0
def encoder_gen(input_shape: tuple, encoder_config: dict):
    """Build the convolutional encoder half of a VAE.

    Args:
        input_shape: (height, width) of the single-channel input images.
        encoder_config: per-layer settings; expects keys "conv_1".."conv_3"
            (each a dict with "filter_num", "kernel_size", "stride"),
            a shared "activation", and "latent_dim".

    Returns:
        An object with attributes `inputs`, `z`, `vae_encoder`, and
        `shape_before_flattening` (the latter is needed to build a
        mirrored decoder).
    """

    class EncoderResult():
        # Simple namespace for bundling the encoder tensors and model.
        pass

    encoder_result = EncoderResult()

    inputs = keras.layers.Input(shape=[input_shape[0], input_shape[1], 1])
    # Pad height by 1 on each side so strided downsampling divides evenly.
    z = keras.layers.ZeroPadding2D(padding=(1, 0))(inputs)
    print("shape of input after padding", z.shape)

    # Apply the three configured convolutional layers in order.
    # (keras.layers.Conv2D replaces the deprecated
    # keras.layers.convolutional.Conv2D access path.)
    for layer_key in ("conv_1", "conv_2", "conv_3"):
        cfg = encoder_config[layer_key]
        z = keras.layers.Conv2D(
            cfg["filter_num"],
            tuple(cfg["kernel_size"]),
            padding='same',
            activation=encoder_config["activation"],
            strides=cfg["stride"]
        )(z)
        print("shape after", layer_key, z.shape)

    # Remember the pre-flatten shape so a decoder can un-flatten later.
    shape_before_flattening = K.int_shape(z)

    z = keras.layers.Flatten()(z)

    # Project to the latent space (mean only; no sampling layer here).
    z = keras.layers.Dense(encoder_config["latent_dim"], name='z_mean')(z)

    # Instantiate the Keras model for the VAE encoder.
    vae_encoder = keras.Model(inputs=[inputs], outputs=[z])

    # Package everything up for the caller.
    encoder_result.inputs = inputs
    encoder_result.z = z
    encoder_result.vae_encoder = vae_encoder
    encoder_result.shape_before_flattening = shape_before_flattening

    return encoder_result
Пример #2
0
    def test_print_summary_expand_nested(self):
        """print_summary with expand_nested=True should render nested
        Functional models inline, framed with |/¯ bars, matching the
        expected 25-line summary exactly.
        """
        shape = (None, None, 3)

        def make_model():
            # Innermost model: 1x1 conv + batch-norm over a 3-channel input.
            x = inputs = keras.Input(shape)
            x = keras.layers.Conv2D(3, 1)(x)
            x = keras.layers.BatchNormalization()(x)
            return keras.Model(inputs, x)

        # Wrap the model twice so the summary has two nesting levels.
        x = inner_inputs = keras.Input(shape)
        x = make_model()(x)
        inner_model = keras.Model(inner_inputs, x)

        inputs = keras.Input(shape)
        model = keras.Model(inputs, inner_model(inputs))

        # Route print_summary output into a temp file we can read back.
        file_name = 'model_2.txt'
        temp_dir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
        fpath = os.path.join(temp_dir, file_name)
        writer = open(fpath, 'w')

        def print_to_file(text):
            print(text, file=writer)

        try:
            layer_utils.print_summary(model,
                                      print_fn=print_to_file,
                                      expand_nested=True)
            self.assertTrue(tf.io.gfile.exists(fpath))
            writer.close()
            reader = open(fpath, 'r')
            lines = reader.readlines()
            reader.close()
            # Expected rendering: each nested Functional model's rows are
            # boxed between ¯-bar rule lines and |-prefixed columns.
            check_str = (
                'Model: "model_2"\n'
                '_________________________________________________________________\n'
                ' Layer (type)                Output Shape              Param #   \n'
                '=================================================================\n'
                ' input_3 (InputLayer)        [(None, None, None, 3)]   0         \n'
                '                                                                 \n'
                ' model_1 (Functional)        (None, None, None, 3)     24        \n'
                '|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n'
                '| input_1 (InputLayer)      [(None, None, None, 3)]   0         |\n'
                '|                                                               |\n'
                '| model (Functional)        (None, None, None, 3)     24        |\n'
                '||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\n'
                '|| input_2 (InputLayer)    [(None, None, None, 3)]   0         ||\n'
                '||                                                             ||\n'
                '|| conv2d (Conv2D)         (None, None, None, 3)     12        ||\n'
                '||                                                             ||\n'
                '|| batch_normalization (BatchN  (None, None, None, 3)  12      ||\n'
                '|| ormalization)                                               ||\n'
                '|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\n'
                '¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n'
                '=================================================================\n'
                'Total params: 24\n'
                'Trainable params: 18\n'
                'Non-trainable params: 6\n'
                '_________________________________________________________________\n'
            )

            fin_str = ''
            for line in lines:
                fin_str += line

            # NOTE(review): assertIn(fin_str, check_str) checks that the
            # whole file content is a substring of the expected text
            # (equality when lengths match) -- confirm the argument order
            # is intentional and not reversed.
            self.assertIn(fin_str, check_str)
            self.assertEqual(len(lines), 25)
        except ImportError:
            pass  # optional dependency missing; silently skip the check
Пример #3
0
# ---------------------------------------------------
#   body of CSPDarknet53
# ---------------------------------------------------
def csp_darknet_body(x):
    """Run the CSPDarknet53 backbone and return its three feature maps
    (strides 8, 16, and 32 relative to the input)."""
    x = DarknetConv2D_BN_Mish(32, (3, 3))(x)
    # First residual stage keeps the non-CSP layout (all_narrow=False).
    x = csp_resblock_body(x, 64, 1, False)
    x = csp_resblock_body(x, 128, 2)
    # Collect the outputs of the last three stages for the detection head.
    taps = []
    for num_filters, num_blocks in ((256, 8), (512, 8), (1024, 4)):
        x = csp_resblock_body(x, num_filters, num_blocks)
        taps.append(x)
    return taps[0], taps[1], taps[2]


if __name__ == '__main__':
    import keras

    image_input = keras.Input(shape=(416, 416, 3))
    # Only the deepest feature map is needed for the backbone summary.
    _feat1, _feat2, deepest_feature = csp_darknet_body(image_input)

    # Backbone model: input image -> stride-32 feature map.
    backbone = keras.Model(image_input, deepest_feature)

    # Trainable params: darknet53 ~40,584,928 vs cspdarknet53 ~26,617,184,
    # i.e. CSPDarknet53 is considerably smaller than Darknet53.
    backbone.summary()
Пример #4
0
# One-hot encode the sex labels for the train and test splits.
sex_train = keras.utils.to_categorical(sex_train, sex_num_class)
sex_test = keras.utils.to_categorical(sex_test, num_classes=sex_num_class)

# age_train = keras.utils.to_categorical(age_train, 100)
# age_test = keras.utils.to_categorical(age_test, 100)

# wide model: dense path over the one-hot categorical features
pclass_input = layers.Input(shape=(5, ))
sex_input = layers.Input(shape=(sex_num_class, ))
# age_input = layers.Input(shape=(100, ))

merged_layers = layers.concatenate([pclass_input, sex_input])
merged_layers = layers.Dense(256, activation='relu')(merged_layers)
predictions = layers.Dense(1)(merged_layers)
wide_model = keras.Model(inputs=[pclass_input, sex_input], outputs=predictions)
print(wide_model.summary())

# deep model: embedding path over a single integer-coded feature
deep_input = layers.Input(shape=(1, ))
embedding = layers.Embedding(10000, 32)(deep_input)
embedding = layers.Flatten()(embedding)
embed_out = layers.Dense(1)(embedding)
deep_model = keras.Model(inputs=deep_input, outputs=embed_out)
print(deep_model.summary())

# Combine wide and deep into one model.
merged_output = layers.concatenate([wide_model.output, deep_model.output])
merged_output = layers.Dense(1)(merged_output)
# wide_model.input is a list (two inputs), so `+ [deep_model.input]`
# yields the full three-input list for the combined model.
combined_model = keras.Model(wide_model.input + [deep_model.input], merged_output)
print(combined_model.summary())
Пример #5
0
def get_nn_complete_model(train, hidden1_neurons=35, hidden2_neurons=15):
    """
    Input:
        train:           train dataframe (used to size the embedding layers)
        hidden1_neurons: number of neurons in the first hidden layer
        hidden2_neurons: number of neurons in the second hidden layer
    Output:
        return 'keras neural network model'
    """
    # Drop state left over from any previously built models.
    K.clear_session()

    # --- Embedded categorical inputs (store id, day-of-week, month) ------
    air_store_id = Input(shape=(1, ), dtype='int32', name='air_store_id')
    air_store_id_emb = Embedding(len(train['air_store_id2'].unique()) + 1,
                                 15,
                                 input_shape=(1, ),
                                 name='air_store_id_emb')(air_store_id)
    air_store_id_emb = keras.layers.Flatten(
        name='air_store_id_emb_flatten')(air_store_id_emb)

    dow = Input(shape=(1, ), dtype='int32', name='dow')
    dow_emb = Embedding(8, 3, input_shape=(1, ), name='dow_emb')(dow)
    dow_emb = keras.layers.Flatten(name='dow_emb_flatten')(dow_emb)

    month = Input(shape=(1, ), dtype='int32', name='month')
    month_emb = Embedding(13, 3, input_shape=(1, ), name='month_emb')(month)
    month_emb = keras.layers.Flatten(name='month_emb_flatten')(month_emb)

    # Seven area-name columns and five genre-name columns (i <= 4), each
    # with its own small embedding.
    # NOTE(review): unlike air_store_id above, these vocab sizes have no
    # +1 headroom -- verify the codes are 0..n-1.
    air_area_name, air_genre_name = [], []
    air_area_name_emb, air_genre_name_emb = [], []
    for i in range(7):
        area_name_col = 'air_area_name' + str(i)
        air_area_name.append(
            Input(shape=(1, ), dtype='int32', name=area_name_col))
        tmp = Embedding(len(train[area_name_col].unique()),
                        3,
                        input_shape=(1, ),
                        name=area_name_col + '_emb')(air_area_name[-1])
        tmp = keras.layers.Flatten(name=area_name_col + '_emb_flatten')(tmp)
        air_area_name_emb.append(tmp)

        if i > 4:
            continue
        area_genre_col = 'air_genre_name' + str(i)
        air_genre_name.append(
            Input(shape=(1, ), dtype='int32', name=area_genre_col))
        tmp = Embedding(len(train[area_genre_col].unique()),
                        3,
                        input_shape=(1, ),
                        name=area_genre_col + '_emb')(air_genre_name[-1])
        tmp = keras.layers.Flatten(name=area_genre_col + '_emb_flatten')(tmp)
        air_genre_name_emb.append(tmp)

    # Fuse the per-column embeddings into one small dense code each.
    air_genre_name_emb = keras.layers.concatenate(air_genre_name_emb)
    air_genre_name_emb = Dense(4,
                               activation='sigmoid',
                               name='final_air_genre_emb')(air_genre_name_emb)

    air_area_name_emb = keras.layers.concatenate(air_area_name_emb)
    air_area_name_emb = Dense(4,
                              activation='sigmoid',
                              name='final_air_area_emb')(air_area_name_emb)

    air_area_code = Input(shape=(1, ), dtype='int32', name='air_area_code')
    air_area_code_emb = Embedding(len(train['air_area_name'].unique()),
                                  8,
                                  input_shape=(1, ),
                                  name='air_area_code_emb')(air_area_code)
    air_area_code_emb = keras.layers.Flatten(
        name='air_area_code_emb_flatten')(air_area_code_emb)

    air_genre_code = Input(shape=(1, ), dtype='int32', name='air_genre_code')
    air_genre_code_emb = Embedding(len(train['air_genre_name'].unique()),
                                   5,
                                   input_shape=(1, ),
                                   name='air_genre_code_emb')(air_genre_code)
    air_genre_code_emb = keras.layers.Flatten(
        name='air_genre_code_emb_flatten')(air_genre_code_emb)

    # --- Plain numeric inputs (one scalar feature each) ------------------
    holiday_flg = Input(shape=(1, ), dtype='float32', name='holiday_flg')
    year = Input(shape=(1, ), dtype='float32', name='year')
    min_visitors = Input(shape=(1, ), dtype='float32', name='min_visitors')
    mean_visitors = Input(shape=(1, ), dtype='float32', name='mean_visitors')
    median_visitors = Input(shape=(1, ),
                            dtype='float32',
                            name='median_visitors')
    max_visitors = Input(shape=(1, ), dtype='float32', name='max_visitors')
    count_observations = Input(shape=(1, ),
                               dtype='float32',
                               name='count_observations')
    rs1_x = Input(shape=(1, ), dtype='float32', name='rs1_x')
    rv1_x = Input(shape=(1, ), dtype='float32', name='rv1_x')
    rs2_x = Input(shape=(1, ), dtype='float32', name='rs2_x')
    rv2_x = Input(shape=(1, ), dtype='float32', name='rv2_x')
    rs1_y = Input(shape=(1, ), dtype='float32', name='rs1_y')
    rv1_y = Input(shape=(1, ), dtype='float32', name='rv1_y')
    rs2_y = Input(shape=(1, ), dtype='float32', name='rs2_y')
    rv2_y = Input(shape=(1, ), dtype='float32', name='rv2_y')
    total_reserv_sum = Input(shape=(1, ),
                             dtype='float32',
                             name='total_reserv_sum')
    total_reserv_mean = Input(shape=(1, ),
                              dtype='float32',
                              name='total_reserv_mean')
    total_reserv_dt_diff_mean = Input(shape=(1, ),
                                      dtype='float32',
                                      name='total_reserv_dt_diff_mean')
    date_int = Input(shape=(1, ), dtype='float32', name='date_int')
    var_max_lat = Input(shape=(1, ), dtype='float32', name='var_max_lat')
    var_max_long = Input(shape=(1, ), dtype='float32', name='var_max_long')
    lon_plus_lat = Input(shape=(1, ), dtype='float32', name='lon_plus_lat')

    # Compress date-related signals into a small learned code.
    date_emb = keras.layers.concatenate(
        [dow_emb, month_emb, year, holiday_flg])
    date_emb = Dense(5, activation='sigmoid', name='date_merged_emb')(date_emb)

    # Everything (numeric features + embeddings) feeds one wide layer.
    cat_layer = keras.layers.concatenate([
        holiday_flg, min_visitors, mean_visitors, median_visitors,
        max_visitors, count_observations, rs1_x, rv1_x, rs2_x, rv2_x, rs1_y,
        rv1_y, rs2_y, rv2_y, total_reserv_sum, total_reserv_mean,
        total_reserv_dt_diff_mean, date_int, var_max_lat, var_max_long,
        lon_plus_lat, date_emb, air_area_name_emb, air_genre_name_emb,
        air_area_code_emb, air_genre_code_emb, air_store_id_emb
    ])

    m = Dense(hidden1_neurons,
              name='hidden1',
              kernel_initializer=keras.initializers.RandomNormal(
                  mean=0.0, stddev=0.05, seed=None))(cat_layer)
    m = keras.layers.LeakyReLU(alpha=0.2)(m)
    m = keras.layers.BatchNormalization()(m)

    # Second hidden layer, then a single relu regression output.
    m1 = Dense(hidden2_neurons, name='sub1')(m)
    m1 = keras.layers.LeakyReLU(alpha=0.2)(m1)
    m = Dense(1, activation='relu')(m1)

    # Input order must match how the training data is fed to fit().
    inp_ten = [
        holiday_flg, min_visitors, mean_visitors, median_visitors,
        max_visitors, count_observations, rs1_x, rv1_x, rs2_x, rv2_x, rs1_y,
        rv1_y, rs2_y, rv2_y, total_reserv_sum, total_reserv_mean,
        total_reserv_dt_diff_mean, date_int, var_max_lat, var_max_long,
        lon_plus_lat, dow, year, month, air_store_id, air_area_code,
        air_genre_code
    ]
    inp_ten += air_area_name
    inp_ten += air_genre_name
    model = keras.Model(inp_ten, m)
    # NOTE(review): 'acc' is an odd metric for an mse regression -- confirm.
    model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])

    return model
Пример #6
0
# compute a 'match' between the first input vector sequence and the question vector sequence
# shape: `(samples, story_maxlen, query_maxlen)`
match = layers.dot([input_encoded_m, question_encoded], axes=(2, 2))
match = layers.Activation('softmax')(match)

# add the match matrix with the second input vector sequence
response = layers.add([match, input_encoded_c
                       ])  # (samples, story_maxlen, query_maxlen)
# reorder dimensions (swap axes 1 and 2)
response = layers.Permute(
    (2, 1))(response)  # (samples, query_maxlen, story_maxlen)

# concatenate the match matrix with the question vector sequence
answer = layers.concatenate([response, question_encoded])

# the original paper uses a matrix multiplication for this reduction step.
# we choose to use a RNN instead.
answer = layers.LSTM(32)(answer)  # (samples, 32)

# one regularization layer -- more would probably be needed.
answer = layers.Dropout(0.3)(answer)
answer = layers.Dense(vocab_size)(answer)  # (samples, vocab_size)
# we output a probability distribution over the vocabulary
answer = layers.Activation('softmax')(answer)

# build the final model (two inputs: story sequence and question)
model = keras.Model([input_sequence, question], answer)
model.compile(optimizer='rmsprop',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
Пример #7
0
def encoder(input_img_batch):
    """Downsample the image batch with one 4x4, stride-2 conv (13 filters)."""
    downsample = Conv2D(13, (4, 4), strides=(2, 2))
    return downsample(input_img_batch)

# NOTE(review): this TF1 placeholder is never fed or used below -- looks like
# a leftover; `decoder` receives `output_channel` from AWGNlayer instead.
outofchannel=tf.placeholder(dtype=float,shape=(None,None,None,None))
def decoder(output_channel):
    """Upsample the channel output back to a 1-channel image with a
    4x4, stride-2 transposed convolution."""
    upsample = Conv2DTranspose(1, (4, 4), strides=(2, 2))
    return upsample(output_channel)

# Wire up the graph: encoder -> AWGN channel layer -> decoder (TF1 style).
encoded=encoder(input_img_batch)
output_channel=layer.AWGNlayer(encoded)
output_img=decoder(output_channel)

# Wrap the full pipeline as a Keras model.
autoencoder=ks.Model(input_img_batch,output_img)


# Training setup: sum-of-squared-error reconstruction loss, Adam optimizer.
loss=tf.reduce_sum(tf.square(output_img-input_img_batch))
optimizer= tf.train.AdamOptimizer(learning_rate=0.001)
training=optimizer.minimize(loss)
init = tf.global_variables_initializer()


# Feed dict with a random batch of 50 training images.
# NOTE(review): randint's upper bound is exclusive, so index 59999 (the
# last image of a 60k set) can never be drawn -- confirm if intentional.
feed={input_img_batch:x_train[np.random.randint(0,59999,50)]}

# Ops to inspect during testing.
# NOTE(review): `ops` and `feed` are never passed to sess.run below --
# this snippet appears truncated after graph initialization.
ops = [input_img_batch,encoded,output_channel,output_img]
with tf.Session() as sess:
    sess.run(init)
Пример #8
0
"""建立模型(RNN)
---
"""

#from tensorflow import keras
#from tensorflow.keras.layers import Input, LSTM, Dropout, Dense
#from tensorflow.keras.models import Sequential
#import tensorflow as tf
import keras
from keras.layers import SimpleRNN, Dense
input = keras.Input(shape=(back_time, 1))
x = SimpleRNN(32, return_sequences=True)(input)
x = SimpleRNN(16, return_sequences=True)(x)
x = SimpleRNN(4)(x)
output = Dense(1)(x)
model = keras.Model(input, output)
model.summary()
"""訓練模型
---
"""

opt = keras.optimizers.Adam(lr=1e-3)
model.compile(loss='mean_squared_error', optimizer=opt)
model.fit(trainX, trainY, epochs=100, verbose=2)
"""預測模型
---
"""

testPredict = model.predict(testX)
"""輸出視覺化
---
Пример #9
0
def build_model(hp):
    """Build a two-branch (image + text) softmax classifier for a
    keras-tuner search; `hp` drives layer widths, optional extra layers,
    dropout rates, and the learning rate."""
    # Image branch: augmentation -> ResNet50 preprocessing -> frozen backbone.
    image_in = keras.Input(shape=img_size_for_model + (3, ))
    image_feat = data_augmentation(image_in)  # randomly augment image
    image_feat = preprocess_input(image_feat)  # ResNet50 image preprocessing
    image_feat = resNet50_model(image_feat)

    # Text branch: one tunable-width dense layer.
    text_in = keras.Input(shape=X_txt.shape[1])
    text_units = hp.Int("units0", min_value=10, max_value=1000, step=10)
    text_feat = keras.layers.Dense(units=text_units,
                                   activation="relu")(text_in)

    merged = tf.keras.layers.Concatenate()([image_feat, text_feat])

    merged = keras.layers.Dense(
        units=hp.Int("units1", min_value=10, max_value=1000, step=10),
        activation="relu")(merged)
    # Optional dropout after the first merged layer.
    if hp.Boolean("incl_dropout0"):
        merged = keras.layers.Dropout(
            hp.Float("dropout0", min_value=0.05, max_value=0.8))(merged)

    # Optionally stack a second dense (+ dropout) layer.
    if hp.Boolean("add_extra_layer0"):
        merged = keras.layers.Dense(
            units=hp.Int("units2", min_value=10, max_value=1000, step=10),
            activation="relu")(merged)
        if hp.Boolean("incl_dropout1"):
            merged = keras.layers.Dropout(
                hp.Float("dropout1", min_value=0.05, max_value=0.8))(merged)

    # Optionally stack a third dense (+ dropout) layer.
    if hp.Boolean("add_extra_layer1"):
        merged = keras.layers.Dense(
            units=hp.Int("units3", min_value=10, max_value=1000, step=10),
            activation="relu")(merged)
        if hp.Boolean("incl_dropout2"):
            merged = keras.layers.Dropout(
                hp.Float("dropout2", min_value=0.05, max_value=0.8))(merged)

    # One output unit per class in the one-hot label vectors.
    class_probs = keras.layers.Dense(len(y_labels_1hot[0]),
                                     activation="softmax")(merged)

    keras_model = keras.Model(inputs=[image_in, text_in],
                              outputs=class_probs)

    keras_model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=hp.Float(
            "lr", min_value=0.0001, max_value=0.01, sampling="log")),
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )

    return keras_model
Пример #10
0
    def __init__(self, shape):
        """Build the model: a 3D-conv branch over the image volume whose
        sigmoid output (`pure_dense`) both stands alone and is merged with
        six embedded scalar demographics (MMSE, sex, age, marriage, APOE4,
        education) to produce a second sigmoid output.

        (Original note: the dropout-branch output feeds the final result
        directly.)

        :param shape: shape of the 3D image input, channels-last.
        """
        self.re_rate = 0.6  # L2 regularization strength for every Conv3D
        self.inputs = layers.Input(shape=shape)

        # Encoder: Conv3D -> BN -> MaxPool twice, then a deeper conv block.
        self.f_block = layers.Conv3D(4, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate),
                                     padding='same')(self.inputs)
        self.bn = layers.BatchNormalization()(self.f_block)
        self.mp1 = layers.MaxPooling3D((2, 2, 2))(self.bn)

        self.f_block1 = layers.Conv3D(8, (3, 3, 3), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.mp1)
        self.bn = layers.BatchNormalization()(self.f_block1)

        self.mp2 = layers.MaxPooling3D((2, 2, 2))(self.bn)

        self.f_block2 = layers.Conv3D(16, (3, 3, 3), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.mp2)
        self.f_block2 = layers.BatchNormalization()(self.f_block2)

        # NOTE(review): this 32-filter block is immediately overwritten
        # below and never reaches the outputs; kept so auto-generated layer
        # names (and any by-name weight loading) stay stable.
        self.b_back2 = layers.Conv3D(32, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate),
                                     padding='same')(self.f_block2)
        self.b_back2 = layers.BatchNormalization()(self.b_back2)

        # Decoder: upsample and fuse with the matching encoder level.
        self.b_back2 = layers.Conv3D(64, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate),
                                     padding='same')(layers.UpSampling3D((2, 2, 2))(self.f_block2))
        self.b_back2 = layers.BatchNormalization()(self.b_back2)

        self.cat2 = layers.concatenate([self.f_block1, self.b_back2])
        self.bn = layers.BatchNormalization()(self.cat2)

        self.b_back1 = layers.Conv3D(32, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate),
                                     padding='same')(layers.UpSampling3D((2, 2, 2))(self.bn))
        self.b_back1 = layers.BatchNormalization()(self.b_back1)

        # Image-branch prediction head.
        self.gb = layers.GlobalAveragePooling3D()(self.b_back1)
        self.gb_drop = layers.Dropout(rate=0.9)(self.gb)

        self.pure_dense = layers.Dense(1, activation='sigmoid')(self.gb_drop)

        def scalar_branch():
            # Embed one integer-coded scalar attribute and flatten it.
            # NOTE(review): vocab size `shape[-1]` (the image channel count)
            # looks accidental -- confirm the intended vocabulary sizes.
            branch_input = layers.Input(shape=(1, ), dtype='int32')
            emb = layers.Embedding(shape[-1], 1)(branch_input)
            emb = layers.Conv1D(4, 1, activation='relu')(emb)
            return branch_input, layers.Flatten()(emb)

        # Identical branches for: mmse, sex, age, marriage, apoe4, education.
        scalar_inputs = []
        merged = self.pure_dense
        for _ in range(6):
            branch_input, flat = scalar_branch()
            scalar_inputs.append(branch_input)
            merged = layers.concatenate([merged, flat])
        self.drop = merged

        # The image prediction is concatenated in a second time before the
        # final head (kept from the original design).
        self.drop = layers.concatenate([self.pure_dense, self.drop])

        self.dense = layers.Dense(1, activation='sigmoid')(self.drop)

        # Fix: keras.Model takes `inputs=`/`outputs=`; the original passed
        # the long-removed `input=`/`output=` keywords (a TypeError on any
        # modern Keras).
        self.model = keras.Model(inputs=[self.inputs] + scalar_inputs,
                                 outputs=[self.pure_dense, self.dense])
Пример #11
0
def resnet101(classes=1000):
    """Build and compile a ResNet-101-style classifier over 224x224 RGB.

    Fixes the original's mixed tab/space indentation in Stage 3 (lines
    around the `if n=='_3_a':` branch), which made the function a
    SyntaxError, and collapses the copy-pasted per-stage blocks into loops.

    Args:
        classes: number of output classes for the softmax head.

    Returns:
        A compiled keras.Model (Adam, categorical cross-entropy).
    """
    inp = Input(shape=(224, 224, 3), name='input_layer')
    x = ZeroPadding2D(padding=(3, 3), name='conv0_pad')(inp)
    x = Conv2D(64, kernel_size=(7, 7), padding='valid', strides=(2, 2),
               activation='relu', name='conv_0')(x)
    x = ZeroPadding2D(padding=(1, 1), name='maxpool0_pad')(x)
    base = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='maxpool_0')(x)

    print('Stage 0:', base.shape)

    # Stage 1 (3 conv blocks). Every iteration projects the stage input
    # through a fresh 1x1 conv + BN shortcut before the residual add.
    for n in ['_1_a', '_1_b', '_1_c']:
        x = conv_block(base, 64, kernel_size=(1, 1), padding='same',
                       strides=(1, 1), name=n + '1')
        x = conv_block(x, 64, kernel_size=(3, 3), padding='same',
                       strides=(1, 1), name=n + '2')
        x1 = conv_block(x, 256, kernel_size=(1, 1), padding='same',
                        strides=(1, 1), name=n + '3')

        x = Conv2D(256, kernel_size=(1, 1), padding='same', strides=(1, 1),
                   name='conv' + n + '4')(base)
        shortcut = BatchNormalization(axis=3, epsilon=1.001e-5,
                                      name='batchnorm' + n + '4')(x)
        base = Add(name='add' + n + '1')([x1, shortcut])
        base = Activation('relu', name='act' + n + '4')(base)

    print('Stage 1:', base.shape)

    # Stage 2 (4 conv blocks); only the first block downsamples (stride 2).
    for n in ['_2_a', '_2_b', '_2_c', '_2_d']:
        stride = (2, 2) if n == '_2_a' else (1, 1)
        x = conv_block(base, 128, kernel_size=(1, 1), padding='same',
                       strides=stride, name=n + '1')
        conv_shortcut = Conv2D(512, kernel_size=(1, 1), padding='same',
                               strides=stride, name='conv' + n + '4')(base)

        x = conv_block(x, 128, kernel_size=(3, 3), padding='same',
                       strides=(1, 1), name=n + '2')
        x1 = conv_block(x, 512, kernel_size=(1, 1), padding='same',
                        strides=(1, 1), name=n + '3')

        # Shortcut
        shortcut = BatchNormalization(axis=3, epsilon=1.001e-5,
                                      name='batchnorm' + n + '4')(conv_shortcut)
        base = Add(name='add' + n + '1')([x1, shortcut])
        base = Activation('relu', name='act' + n + '4')(base)

    print('Stage 2:', base.shape)

    # Stage 3 (22 residual units, 'a'..'v'), each containing twenty 3x3
    # conv_blocks (names n+'2'..n+'21') plus the 1x1 entry/exit convs --
    # kept exactly as the original wrote it, only deduplicated.
    # NOTE(review): this is far deeper than canonical ResNet-101 stage 3
    # (23 units of 3 convs); verify against the intended architecture.
    for n in ['_3_' + alpha for alpha in 'abcdefghijklmnopqrstuv']:
        stride = (2, 2) if n == '_3_a' else (1, 1)
        x = conv_block(base, 256, kernel_size=(1, 1), padding='same',
                       strides=stride, name=n + '1')
        conv_shortcut = Conv2D(1024, kernel_size=(1, 1), padding='same',
                               strides=stride, name='conv' + n + '24')(base)

        for j in range(2, 22):
            x = conv_block(x, 256, kernel_size=(3, 3), padding='same',
                           strides=(1, 1), name=n + str(j))
        x1 = conv_block(x, 1024, kernel_size=(1, 1), padding='same',
                        strides=(1, 1), name=n + '22')

        shortcut = BatchNormalization(axis=3, epsilon=1.001e-5,
                                      name='batchnorm' + n + '4')(conv_shortcut)
        base = Add(name='add' + n + '1')([x1, shortcut])
        base = Activation('relu', name='act' + n + '4')(base)

    print('Stage 3:', base.shape)

    # Stage 4 (3 conv blocks); first block downsamples (stride 2).
    for n in ['_4_a', '_4_b', '_4_c']:
        stride = (2, 2) if n == '_4_a' else (1, 1)
        x = conv_block(base, 512, kernel_size=(1, 1), padding='same',
                       strides=stride, name=n + '1')
        conv_shortcut = Conv2D(2048, kernel_size=(1, 1), padding='same',
                               strides=stride, name='conv' + n + '4')(base)

        x = conv_block(x, 512, kernel_size=(3, 3), padding='same',
                       strides=(1, 1), name=n + '2')
        x1 = conv_block(x, 2048, kernel_size=(1, 1), padding='same',
                        strides=(1, 1), name=n + '3')

        # Shortcut
        shortcut = BatchNormalization(axis=3, epsilon=1.001e-5,
                                      name='batchnorm' + n + '4')(conv_shortcut)
        base = Add(name='add' + n + '1')([x1, shortcut])
        base = Activation('relu', name='act' + n + '4')(base)

    print('Stage 4:', base.shape)

    out = GlobalAveragePooling2D(name='global_avg_1')(base)
    out = Dense(classes, activation='softmax', name='dense_1')(out)

    model = keras.Model(inputs=inp, outputs=out, name="resnet101_model")

    # NOTE(review): `lr` is the legacy kwarg; newer Keras spells it
    # `learning_rate`.
    opt = keras.optimizers.Adam(lr=0.001)
    model.compile(optimizer=opt, loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Пример #12
0
def create_cropped_unet(size_in=(1024, 1024),
                        cropped_size=(256, 256),
                        chans_in=3,
                        chans_out=3,
                        nfeats=32,
                        depth=2,
                        pool_factor=4,
                        pooling='avg',
                        nconvs=1):
    """An experimental variation of the unets above where the context
    information of the created map comes from a larger patch of the
    picture than the transformed image itself. This is a trick to
    make the model trainable on smaller GPU memory.
    """
    pool_classes = {'max': MaxPooling2D, 'avg': AveragePooling2D}
    try:
        PoolingFun = pool_classes[pooling]
    except KeyError:
        raise AttributeError("Invalid pooling function")

    full_input = Input(shape=(*size_in, chans_in))

    # Central patch of the input; the extra "- 2" leaves a margin that the
    # valid-padding convolutions near the output will consume.
    crop_h = (size_in[0] - cropped_size[0]) // 2 - 2
    crop_w = (size_in[1] - cropped_size[1]) // 2 - 2
    center_patch = Cropping2D(cropping=(crop_h, crop_w))(full_input)

    # Contracting path: pool, then convolve with a doubled feature count.
    down_levels = [full_input]
    for level in range(depth):
        pooled = PoolingFun(pool_size=pool_factor)(down_levels[-1])
        down_levels.append(
            convblock(pooled, nfeats * 2**(level + 1), nconvs=nconvs))

    # Bottleneck: 1x1 / valid 3x3 / 1x1 convolutions.
    nfeats_bottom = nfeats * 2**(depth + 1)
    bottom = Conv2D(nfeats_bottom, (1, 1),
                    activation='relu')(down_levels[-1])
    bottom = Conv2D(nfeats_bottom, (3, 3),
                    activation='relu',
                    padding='valid')(bottom)
    bottom = Conv2D(nfeats_bottom, (1, 1), activation='relu')(bottom)

    # Expanding path with cropped skip connections from the down branch.
    up_levels = [bottom]
    for level in range(depth):
        grown = UpSampling2D(size=pool_factor)(up_levels[-1])
        skip_source = down_levels[-(level + 2)]
        skip = Cropping2D(cropping=get_crop(skip_source, grown))(skip_source)
        merged = Concatenate()([grown, skip])
        up_levels.append(
            convblock(merged, nfeats * 2**(depth - level - 1),
                      nconvs=nconvs))

    # Trim to the central patch, refine with valid convs, map to the output.
    head = Cropping2D(
        cropping=get_crop(up_levels[-1], center_patch))(up_levels[-1])
    head = Conv2D(nfeats, (3, 3), activation='relu',
                  padding='valid')(head)
    head = Conv2D(nfeats, (3, 3), activation='relu',
                  padding='valid')(head)
    head = Conv2D(nfeats, (1, 1), activation='relu')(head)
    prediction = Conv2D(chans_out, (1, 1), activation='sigmoid')(head)

    model = keras.Model(inputs=full_input, outputs=prediction)
    return model
Пример #13
0
    def train(self, x_train, y_train, x_val, y_val, epochs):
        """Build the network selected by ``self.network`` and train it.

        For ``'VGG19'`` / ``'RESNET'`` / ``'DENSENET'`` a randomly
        initialized feature extractor (no top) is created and a small MLP
        regression head is trained on top of it.  For ``'ALEXNET'`` a
        self-contained AlexNet-like regressor is built and trained directly.
        The best model (lowest ``val_loss``) is checkpointed to
        ``self.model_dir / self.file_name`` and the fit history is pickled
        to ``history.p`` in the same directory.

        Args:
            x_train, y_train: training images (100x100x3) and regression
                targets.
            x_val, y_val: validation split passed to ``fit``.
            epochs: maximum number of epochs (early stopping with
                patience 10 may end training sooner).

        Returns:
            The Keras ``History`` object of the fit.

        Raises:
            ValueError: if ``self.network`` is not one of the four
                supported identifiers.  (Previously an unknown value fell
                through and crashed later with a ``NameError`` on the
                unbound ``feature_generator``.)
        """
        t0 = time.time()

        # Make sure the checkpoint directory exists.
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.model_file = os.path.join(self.model_dir, self.file_name)

        print('Storing in ', self.model_dir)

        def _fit_and_dump(model):
            # Shared training loop for every branch: early stopping +
            # best-checkpoint saving, then pickle the history next to the
            # model.
            callbacks = [
                keras.callbacks.EarlyStopping(monitor='val_loss',
                                              min_delta=0, patience=10,
                                              verbose=0, mode='auto'),
                keras.callbacks.ModelCheckpoint(self.model_file,
                                                monitor='val_loss',
                                                verbose=1,
                                                save_best_only=True,
                                                mode='min'),
            ]
            fit_start = time.time()
            history = model.fit(x_train,
                                y_train,
                                epochs=epochs,
                                batch_size=32,
                                validation_data=(x_val, y_val),
                                callbacks=callbacks,
                                verbose=True)
            # Context manager so the history file is always closed
            # (the original leaked the file handle).
            with open(os.path.join(self.model_dir, "history.p"), "wb") as fh:
                p.dump(history.history, fh)
            print(self.network, ' Fitting done', time.time() - fit_start)
            return history

        if self.network == 'VGG19':
            feature_generator = keras.applications.VGG19(weights=None,
                                                         include_top=False,
                                                         input_shape=(100, 100,
                                                                      3))
        elif self.network == 'RESNET':
            feature_generator = keras.applications.ResNet50(weights=None,
                                                            include_top=False,
                                                            input_shape=(100,
                                                                         100,
                                                                         3))
        elif self.network == 'DENSENET':
            feature_generator = keras.applications.DenseNet121(
                weights=None, include_top=False, input_shape=(100, 100, 3))
        elif self.network == 'ALEXNET':
            # Self-contained AlexNet-style regressor; trained and returned
            # directly, bypassing the shared MLP head below.
            alexnet_model = Sequential()
            alexnet_model.add(
                Conv2D(filters=96,
                       input_shape=(100, 100, 3),
                       kernel_size=(11, 11),
                       strides=(4, 4),
                       padding="valid",
                       activation="relu"))
            alexnet_model.add(
                MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid"))
            alexnet_model.add(
                Conv2D(filters=256,
                       kernel_size=(5, 5),
                       strides=(1, 1),
                       padding="same",
                       activation="relu"))
            alexnet_model.add(
                MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid"))
            alexnet_model.add(
                Conv2D(filters=384,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding="same",
                       activation="relu"))
            alexnet_model.add(
                Conv2D(filters=384,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding="same",
                       activation="relu"))
            alexnet_model.add(
                Conv2D(filters=256,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding="same",
                       activation="relu"))
            alexnet_model.add(
                MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid"))

            # Regression head on top of the convolutional trunk.
            alexnet_model.add(Flatten())
            alexnet_model.add(
                layers.Dense(256, activation='relu', input_dim=(100, 100, 3)))
            alexnet_model.add(layers.Dropout(0.5))
            alexnet_model.add(layers.Dense(1, activation='linear'))
            sgd_opt = optimizers.SGD(lr=0.0001,
                                     decay=1e-6,
                                     momentum=0.9,
                                     nesterov=True)
            alexnet_model.compile(loss='mean_squared_error',
                                  optimizer=sgd_opt,
                                  metrics=['mse', 'mae'])

            print('Alexnet Setup complete after', time.time() - t0)

            return _fit_and_dump(alexnet_model)
        else:
            # Fail fast instead of hitting an unbound ``feature_generator``
            # further down.
            raise ValueError(
                "Unknown network: {!r}; expected one of "
                "'VGG19', 'RESNET', 'DENSENET', 'ALEXNET'".format(
                    self.network))

        # MLP regression head shared by the three keras.applications
        # backbones above.
        MLP = keras.models.Sequential()
        MLP.add(
            keras.layers.Flatten(
                input_shape=feature_generator.output_shape[1:]))
        MLP.add(
            keras.layers.Dense(256, activation='relu',
                               input_dim=(100, 100, 3)))
        MLP.add(keras.layers.Dropout(0.5))
        MLP.add(keras.layers.Dense(1, activation='linear'))  # REGRESSION

        self.model = keras.Model(inputs=feature_generator.input,
                                 outputs=MLP(feature_generator.output))

        sgd = keras.optimizers.SGD(lr=0.0001,
                                   decay=1e-6,
                                   momentum=0.9,
                                   nesterov=True)

        self.model.compile(loss='mean_squared_error',
                           optimizer=sgd,
                           metrics=['mse', 'mae'])  # MSE for regression

        print(self.network, 'Setup complete after', time.time() - t0)

        return _fit_and_dump(self.model)
Пример #14
0
def _get_model():
  """Return a minimal Keras model: a 3-feature input through one Dense(4)."""
  inp = keras.layers.Input(shape=(3,), name="input")
  out = keras.layers.Dense(4, name="dense")(inp)
  return keras.Model(inp, out)
Пример #15
0
def create_model():
  """Build a 4x upscaling network with a residual DCNN branch.

  The output is the sum of (a) a coarse bilinear 4x upsampling of an RGB
  image assembled from the sub-pixel channels and (b) a learned residual
  produced by a deep CNN operating in sub-pixel space.

  NOTE(review): the R/G/B channel slicing below (R = ch 0, G = mean of
  ch 1:3, B = ch 3 of the space_to_depth output) presumably assumes a
  Bayer-like 2x2 packing of the single-channel input — confirm against
  the data pipeline.

  Returns:
      An uncompiled keras.Model mapping (None, None, 1) -> 3-channel
      output at 4x spatial resolution.
  """
  inputs = keras.Input(shape=(None,None,1))

  # Initial feature extraction on the raw single-channel input.
  ini = keras.layers.Conv2D(filters = 128, #feature map number
                     kernel_size = 3, 
                     strides = 1,  # 2                     
                     padding = 'same', 
                     input_shape = (None,None,1))(inputs)

  x = keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(ini)
  
  ini = keras.layers.Conv2D(filters = 64, #feature map number
                     kernel_size = 3, 
                     strides = 1,  # 2
                     activation = 'relu',
                     padding = 'same', 
                     input_shape = (None,None,128))(x)

  x = keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(ini)
  
  ##Subpixel Construction
  # Pack each 2x2 spatial block into channels (64 -> 256 channels,
  # spatial dims halved); `init` is reused below for the coarse branch.
  sub_layer_2 = Lambda(lambda x:tf.nn.space_to_depth(x,2)) 
  init = sub_layer_2(inputs=x)



  ##Learning Residual (DCNN)
  ####Conv 3x3x64x64 + PReLu
  x = keras.layers.Conv2D(filters = 64, #feature map number
                     kernel_size = 3, 
                     strides = 1,  # 2
                     padding = 'same', 
                     input_shape = (None,None,256))(init)
  
  x = keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(x)

  x = keras.layers.Conv2D(filters = 64, #feature map number
                     kernel_size = 3, 
                     strides = 1,  # 2
                     padding = 'same', 
                     input_shape = (None,None,64))(x)
  
  x = keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(x)

  ####Residual Block
  # Six residual blocks: Conv(128) -> PReLU -> Conv(64) added back to the
  # block input, followed by another PReLU.
  for i in range(6):
    Conv1 = keras.layers.Conv2D(filters = 128, #feature map number
                       kernel_size = 3, 
                       strides = 1,  # 2
                       padding = 'same',
                       input_shape = (None,None,64))(x)
    
    PReLu = keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(Conv1)
    Conv2 = keras.layers.Conv2D(filters = 64, #feature map number
                       kernel_size = 3, 
                       strides = 1,  # 2
                       padding = 'same',
                       input_shape = (None,None,128))(PReLu)
    
    x = keras.layers.Add()([Conv2,x])
    x = keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(x)
  ####Conv 3x3x64x64 + PReLu
  x = keras.layers.Conv2D(filters = 64, #feature map number
                     kernel_size = 3, 
                     strides = 1,  # 2
                     padding = 'same', 
                     input_shape = (None,None,64))(x)
  ##########1->64
  x = keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(x)
  
  x = keras.layers.Conv2D(filters = 64, #feature map number
                     kernel_size = 3, 
                     strides = 1,  # 2
                     padding = 'same', 
                     input_shape = (None,None,64))(x)
  ##########1->64
  x = keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(x)
  
  ####Conv 3x3x64x48
  # 48 channels = 3 (RGB) * 4 * 4, ready for depth_to_space(4) below.
  x = keras.layers.Conv2D(filters = 48, #feature map number
                     kernel_size = 3, 
                     strides = 1,  
                     padding = 'same',                      
                     input_shape = (None,None,64))(x)
  
  ###########Learning Residual (DCNN)############
  

  ##Recovery From Subpixel
  # Unpack channels back to space: 4x spatial upscale, 48 -> 3 channels.
  sub_layer = Lambda(lambda x:tf.nn.depth_to_space(x,4)) 
  Residual_Output = sub_layer(inputs=x)
  
  #Residual_Output_ = keras.layers.Conv2D(filters = 3, #feature map number
  #                   kernel_size = 3, 
  #                   strides = 1,  # 2
  #                   padding = 'same',
  #                   activation ='relu',
  #                   input_shape = (None,None,3))(Residual_Output)

  ##Initial Prediction
  # Assemble a coarse RGB image from the sub-pixel channels of `init`.
  R = Lambda(lambda x: x[:,:,:,0])(init)
  G = Lambda(lambda x: x[:,:,:,1:3])(init)
  G = Lambda(lambda x: K.mean(x, axis=3))(G)
  B = Lambda(lambda x: x[:,:,:,3])(init)
  print(init.shape)
  print(R.shape)
  print(G.shape)
  print(B.shape)
  # Restore the channel axis dropped by the slicing/mean above.
  R = Lambda(lambda x: tf.expand_dims(x, -1))(R)
  G = Lambda(lambda x: tf.expand_dims(x, -1))(G)
  B = Lambda(lambda x: tf.expand_dims(x, -1))(B)
  
  #rgb = tf.keras.backend.stack((R, G,B),axis =  3)
  print(R.shape)
  rg = keras.layers.Concatenate(axis = 3)([R , G])
  rgb = keras.layers.Concatenate(axis = 3)([rg,B])
  print(rgb.shape)
  Coarse_Output = keras.layers.UpSampling2D(size=(4, 4),interpolation="bilinear")(rgb)




  ## Final output = learned residual + coarse bilinear prediction.
  outputs = keras.layers.Add()([Residual_Output,Coarse_Output])

  model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
  return model
Пример #16
0
def create_discriminator(dim, depht=32, name="", learning_factor=0.7):
    """Build and compile the patch discriminator.

    Structure changed relative to CGAN.py (see pdf page 6 figure 2):
    three strided downsampling stages (dim -> dim/8), a dilated-convolution
    chain bridged by a skip concatenation, and a final 1-channel conv map.
    Compiled with MSE loss (LSGAN-style) at half weight and a scaled Adam
    learning rate.
    """

    def conv_unit(tensor, filters, kernel, strides=(1, 1), dilation=(1, 1)):
        # Basic block used throughout: Conv -> InstanceNorm -> LeakyReLU(0.2).
        # InstanceNormalization keeps activations bounded and helps avoid
        # exploding gradients.
        out = keras.layers.Conv2D(filters, kernel,
                                  strides=strides,
                                  dilation_rate=dilation,
                                  padding="same")(tensor)
        out = InstanceNormalization(axis=-1)(out)
        return keras.layers.LeakyReLU(alpha=0.2)(out)

    source = keras.layers.Input(shape=(dim, dim, 3))

    # 1-3] Three stride-2 stages: dim*dim*3 -> dim/2 -> dim/4 -> dim/8,
    # with 2*depht, 4*depht and 8*depht filters respectively.
    feat = conv_unit(source, 2 * depht, (4, 4), strides=(2, 2))
    feat = conv_unit(feat, 4 * depht, (4, 4), strides=(2, 2))
    feat = conv_unit(feat, 8 * depht, (4, 4), strides=(2, 2))

    # 4] Stride-1 conv; its output is also kept as the skip branch that the
    # paper reconnects after the dilated chain.
    pre_dilated = conv_unit(feat, 8 * depht, (3, 3))

    # 5-7] Dilated convolutions with rates 2, 4 and 8 to grow the
    # receptive field without further downsampling.
    feat = conv_unit(pre_dilated, 8 * depht, (3, 3), dilation=(2, 2))
    feat = conv_unit(feat, 8 * depht, (3, 3), dilation=(4, 4))
    post_dilated = conv_unit(feat, 8 * depht, (3, 3), dilation=(8, 8))

    # 8] Reconnect the skip around the dilated chain.
    feat = keras.layers.Concatenate()([pre_dilated, post_dilated])

    # 9] One more conv block after the merge.
    feat = conv_unit(feat, 8 * depht, (3, 3))

    # 10] Final conv down to a single-channel patch verdict.
    verdict = keras.layers.Conv2D(1, (4, 4), strides=(1, 1),
                                  padding="same")(feat)

    # Compile with LSGAN-style MSE loss at half weight.
    model = keras.Model(source, verdict)
    opt = keras.optimizers.Adam(lr=LEARNING_RATE * learning_factor, beta_1=0.5)
    model.compile(loss='mse',
                  optimizer=opt,
                  loss_weights=[0.5],
                  metrics=["accuracy"])

    # Optionally dump the architecture diagram when debugging:
    # plot_model(d, to_file="d_{}.png".format(name), show_shapes=True, show_layer_names=True)

    return model
Пример #17
0
    def get_trgat(self,
                  node_size,
                  rel_size,
                  node_hidden,
                  rel_hidden,
                  triple_size,
                  n_attn_heads=2,
                  dropout_rate=0.,
                  gamma=3,
                  lr=0.005,
                  depth=2,
                  **kwargs):
        """Build the relation-aware graph-attention entity-alignment models.

        Args:
            node_size: number of entity nodes in the joint graph.
            rel_size: number of relation types.
            node_hidden: entity/relation embedding dimension.
            rel_hidden: relation hidden size (currently unused here;
                kept for interface compatibility).
            triple_size: number of triples, passed to the encoder.
            n_attn_heads: attention heads in NR_GraphAttention.
            dropout_rate: dropout applied to the concatenated features.
            gamma: margin of the ranking loss.
            lr: RMSprop learning rate.
            depth: number of attention propagation layers.

        Returns:
            (train_model, feature_model): ``train_model`` consumes the five
            graph inputs plus an alignment batch and outputs the margin
            ranking loss directly (compiled with an identity loss);
            ``feature_model`` outputs the concatenated entity/relation
            features for inference.

        NOTE(review): ``tf.sparse_softmax`` / ``tf.sparse_tensor_dense_matmul``
        and ``reduce_sum(..., keep_dims=...)`` are TF1-era APIs — confirm
        the pinned TensorFlow version.
        """
        # Sparse graph structure inputs; each carries (row, col) index pairs.
        adj_input = Input(shape=(None, 2))
        index_input = Input(shape=(None, 2), dtype='int64')
        val_input = Input(shape=(None, ))
        rel_adj = Input(shape=(None, 2))
        ent_adj = Input(shape=(None, 2))

        # Trainable embedding tables for entities and relations.
        ent_emb = TokenEmbedding(node_size, node_hidden,
                                 trainable=True)(val_input)
        rel_emb = TokenEmbedding(rel_size, node_hidden,
                                 trainable=True)(val_input)

        def avg(tensor, size):
            # Softmax-normalized sparse adjacency average: for each node,
            # average the embeddings of its neighbors given by tensor[0].
            adj = K.cast(K.squeeze(tensor[0], axis=0), dtype="int64")
            adj = tf.SparseTensor(indices=adj,
                                  values=tf.ones_like(adj[:, 0],
                                                      dtype='float32'),
                                  dense_shape=(node_size, size))
            adj = tf.sparse_softmax(adj)
            return tf.sparse_tensor_dense_matmul(adj, tensor[1])

        opt = [rel_emb, adj_input, index_input, val_input]
        ent_feature = Lambda(avg, arguments={'size':
                                             node_size})([ent_adj, ent_emb])
        rel_feature = Lambda(avg, arguments={'size':
                                             rel_size})([rel_adj, rel_emb])

        # Shared graph-attention encoder applied to both feature views.
        encoder = NR_GraphAttention(node_size,
                                    activation="relu",
                                    rel_size=rel_size,
                                    depth=depth,
                                    attn_heads=n_attn_heads,
                                    triple_size=triple_size,
                                    attn_heads_reduction='average',
                                    dropout_rate=dropout_rate)

        out_feature = Concatenate(-1)(
            [encoder([ent_feature] + opt),
             encoder([rel_feature] + opt)])
        out_feature = Dropout(dropout_rate)(out_feature)

        # Alignment batch: rows of (left, right, fake_left, fake_right) ids.
        alignment_input = Input(shape=(None, 4))
        # Gather the 4 feature vectors for every alignment quadruple.
        find = Lambda(lambda x: K.gather(
            reference=x[0], indices=K.cast(K.squeeze(x[1], axis=0), 'int32')))(
                [out_feature, alignment_input])

        def align_loss(tensor):
            # Margin ranking loss over L1 distances of the quadruples:
            # push d(l, r) below d(l, fr) and d(fl, r) by at least gamma.
            def _cosine(x):
                # Unused alternative similarity, kept for experimentation.
                dot1 = K.batch_dot(x[0], x[1], axes=1)
                dot2 = K.batch_dot(x[0], x[0], axes=1)
                dot3 = K.batch_dot(x[1], x[1], axes=1)
                max_ = K.maximum(K.sqrt(dot2 * dot3), K.epsilon())
                return dot1 / max_

            def l1(ll, rr):
                return K.sum(K.abs(ll - rr), axis=-1, keepdims=True)

            def l2(ll, rr):
                # Unused alternative distance, kept for experimentation.
                return K.sum(K.square(ll - rr), axis=-1, keepdims=True)

            l, r, fl, fr = [
                tensor[:, 0, :], tensor[:, 1, :], tensor[:, 2, :], tensor[:,
                                                                          3, :]
            ]
            loss = K.relu(gamma + l1(l, r) -
                          l1(l, fr)) + K.relu(gamma + l1(l, r) - l1(fl, r))
            return tf.reduce_sum(loss, keep_dims=True) / self.batch_size

        loss = Lambda(align_loss)(find)

        inputs = [adj_input, index_input, val_input, rel_adj, ent_adj]
        # The model's output IS the loss, so compile with an identity loss.
        train_model = keras.Model(inputs=inputs + [alignment_input],
                                  outputs=loss)
        train_model.compile(loss=lambda y_true, y_pred: y_pred,
                            optimizer=keras.optimizers.RMSprop(lr))

        feature_model = keras.Model(inputs=inputs, outputs=out_feature)
        return train_model, feature_model
Пример #18
0
def create_generator(dim, depht=32, name=""):
    """Build the generator (structure changed relative to CGAN.py, see pdf).

    U-Net-like encoder/decoder: strided-conv downsampling interleaved with
    resnet stages, transposed-conv upsampling with skip concatenations at
    the half and quarter resolutions, and a tanh 3-channel output.
    """

    def down_unit(tensor, filters, kernel, strides):
        # Conv -> InstanceNorm -> LeakyReLU(0.2) building block.
        out = keras.layers.Conv2D(filters, kernel, strides=strides,
                                  padding="same")(tensor)
        out = InstanceNormalization(axis=-1)(out)
        return keras.layers.LeakyReLU(alpha=0.2)(out)

    def up_unit(tensor, filters, kernel):
        # Conv2DTranspose(stride 2) -> InstanceNorm -> LeakyReLU(0.2).
        out = keras.layers.Conv2DTranspose(filters, kernel,
                                           strides=(2, 2),
                                           padding="same")(tensor)
        out = InstanceNormalization(axis=-1)(out)
        return keras.layers.LeakyReLU(alpha=0.2)(out)

    source = keras.layers.Input(shape=(dim, dim, 3))

    # Encoder: (dim,dim,3) -> (dim/2,dim/2,depht) -> (dim/2,dim/2,4*depht).
    g = down_unit(source, depht, (4, 4), (2, 2))
    g = down_unit(g, 4 * depht, (4, 4), (1, 1))

    # 3 resnet blocks at half resolution; keep the last as a skip.
    g = create_resnet(g)
    g = create_resnet(g)
    skip_half = create_resnet(g)

    # (dim/2,dim/2,4*depht) -> (dim/4,dim/4,8*depht), then 3 resnet blocks;
    # keep the last as a skip.
    g = down_unit(skip_half, 8 * depht, (4, 4), (2, 2))
    g = create_resnet(g)
    g = create_resnet(g)
    skip_quarter = create_resnet(g)

    # (dim/4,dim/4,8*depht) -> (dim/8,dim/8,8*depht) bottleneck + 3 resnets.
    g = down_unit(skip_quarter, 8 * depht, (4, 4), (2, 2))
    for _ in range(3):
        g = create_resnet(g)

    # Decoder: up to dim/4, reconnect the quarter-resolution skip, 3 resnets.
    g = up_unit(g, 8 * depht, (3, 3))
    g = keras.layers.Concatenate()([g, skip_quarter])
    for _ in range(3):
        g = create_resnet(g)

    # Up to dim/2, reconnect the half-resolution skip, 3 resnets.
    g = up_unit(g, 4 * depht, (4, 4))
    g = keras.layers.Concatenate()([g, skip_half])
    for _ in range(3):
        g = create_resnet(g)

    # Back to full resolution, then project to 3 channels with tanh.
    g = up_unit(g, depht, (4, 4))
    g = keras.layers.Conv2DTranspose(3, (4, 4), strides=(1, 1),
                                     padding="same")(g)
    g = keras.layers.Activation("tanh")(g)

    return keras.Model(source, g, name="gen_{}".format(name))
Пример #19
0
import tensorflow as tf
import keras
import numpy as np

# NOTE(review): this snippet also uses `os` and `load_model`, which are not
# imported here — presumably provided by an earlier cell/part of the script.

# path to the trained models
model_file_path = os.path.join('model', 'cnn_model.h5')

model = load_model(model_file_path)
model.summary()

# layer name
layer_name = 'conv2d_1'

# Set up a model that returns the activation values for our target layer
layer = model.get_layer(name=layer_name)
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)

# Working image size for the visualization below.
img_width = 96
img_height = 96


def deprocess_image(img):
    # Turn a raw activation/gradient array into a displayable image.

    # Normalize array: center on 0., ensure variance is 0.15
    img -= img.mean()
    img /= img.std() + 1e-5
    img *= 0.15

    # Center crop
    img = img[25:-25, 25:-25, :]
    # NOTE(review): the function appears truncated here — there is no
    # re-centering/clipping step and no return statement.
Пример #20
0
def domain_adaption(datafolder,
                    outdir,
                    imsize,
                    epochs=20,
                    iterations=5,
                    n_clusters=10,
                    threshold=0.75,
                    datalim=25000,
                    batchsize=16,
                    metric_learning=True,
                    pdl1=False,
                    dab=False,
                    h=False):
    """
    Adapt a neural network to a new kind of images.

    Usually a network previously trained on imagenet.
    See keras applications.

    Each iteration: predict features with the current model, k-means them,
    keep images whose cosine similarity to a cluster center exceeds
    ``threshold``, write a t-SNE figure, and fine-tune a fresh Xception
    backbone on the resulting pseudo-labels.  Weights are saved per
    checkpoint into ``outdir/weights``.

    Args:
        datafolder: folder of unlabeled images, or (when ``pdl1`` is True)
            a list of image paths.
        outdir: output directory — ERASED if it already exists.
        imsize: square network input size.
        epochs: fine-tuning epochs per iteration.
        iterations: number of adapt/fine-tune cycles.
        n_clusters: number of k-means clusters (pseudo-classes).
        threshold: cosine-similarity cutoff for "reliable" images.
        datalim: maximum number of images loaded.
        batchsize: fine-tuning batch size.
        metric_learning: if True, use a normalized CosineDense head.
        pdl1, dab, h: PDL1-project-specific loading / stain options.
    """

    model_dir = os.path.join(outdir, 'model')
    weights_dir = os.path.join(outdir, 'weights')
    info_dir = os.path.join(outdir, 'info')

    # Light geometric augmentation only; all normalization options off.
    datagen = ImageDataGenerator(featurewise_center=False,
                                 samplewise_center=False,
                                 featurewise_std_normalization=False,
                                 samplewise_std_normalization=False,
                                 zca_whitening=False,
                                 rotation_range=20,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2,
                                 shear_range=0.,
                                 zoom_range=0.,
                                 channel_shift_range=0.,
                                 fill_mode='nearest',
                                 cval=0.,
                                 horizontal_flip=False,
                                 vertical_flip=False,
                                 rescale=None,
                                 data_format=K.image_data_format())

    # create directories
    ###############################
    if os.path.exists(outdir):
        # Be careful: this erases all previous work in this directory
        shutil.rmtree(outdir)
    os.makedirs(outdir)
    os.makedirs(model_dir)
    os.makedirs(weights_dir)
    os.makedirs(info_dir)
    ##################################################

    # Gather images in a numpy array
    ########################################################
    if pdl1:
        # Adapted way to load the data according to existing folders in PDL1 projects
        numpy.random.shuffle(datafolder)
        list_images = datafolder[0:datalim]
        data = []
        print("-" * 20)
        print("Loading data:")
        print("-" * 20)
        for im in tqdm(list_images):
            image = imread(im)
            # Optional stain conversions (DAB / haematoxylin channels).
            if dab:
                image = imagetoDAB(image)
            if h:
                image = imagetoDAB(image, h=True)
            image = numpy.asarray(image)
            # Keep only correctly sized RGB tiles.
            # NOTE(review): hard-coded (224, 224, 3) here vs. ``imsize``
            # used for the network input below — confirm they agree.
            if image.shape == (224, 224, 3):
                data.append(image)
        print(data[0])
        unlabeled_images = xce.preprocess_input(numpy.array(data))
    else:
        unlabeled_images = load_data(datafolder, datalim)

    ########################################################

    # Before we start, we instantiate and store xception pre-trained model
    ############################################
    base_model = xce.Xception(include_top=False,
                              weights='imagenet',
                              input_shape=(imsize, imsize, 3),
                              pooling='avg')
    json_string = base_model.to_json()
    with open(os.path.join(model_dir, 'xception.json'), "w") as text_file:
        text_file.write(json_string)
    # Checkpoint 0 = the unadapted imagenet backbone.
    save_model(base_model, weights_dir, 0)
    del base_model
    ##############

    # create a tensorflow session before we start
    # NOTE(review): tf.Session / tf.placeholder are TF1 APIs.
    K.clear_session()
    sess = tf.Session()

    # GPU for similarity matrix computation
    ###################################################
    # Cosine similarity between cluster centers and all features,
    # computed as a matmul of L2-normalized rows.
    center_t = tf.placeholder(tf.float32, (None, None))
    other_t = tf.placeholder(tf.float32, (None, None))
    center_t_norm = tf.nn.l2_normalize(center_t, dim=1)
    other_t_norm = tf.nn.l2_normalize(other_t, dim=1)
    similarity = tf.matmul(center_t_norm,
                           other_t_norm,
                           transpose_a=False,
                           transpose_b=True)
    ###########################################################

    # Main Loop
    ###################################
    for checkpoint in range(1, iterations + 1):

        # Load previous model
        previous_model = load_model(outdir, checkpoint - 1)

        # extract features
        print("-" * 20)
        print("predicting features:")
        print("-" * 20)
        features = previous_model.predict(unlabeled_images)
        features = numpy.array(features)

        # instance of k-means
        print("-" * 20)
        print("fitting k-means:")
        print("-" * 20)
        kmeans = KMeans(n_clusters=n_clusters).fit(features)

        # select best candidates for k-means centers in the dataset
        # (the real sample closest to each centroid, not the centroid itself)
        distances = kmeans.transform(features)
        center_idx = numpy.argmin(distances, axis=0)
        centers = numpy.array([features[i] for i in center_idx])

        # compute similarity matrix
        print("-" * 20)
        print("similarity matrix:")
        similarities = sess.run(similarity, {
            center_t: centers,
            other_t: features
        })
        print("similarity has shape: ", similarities.shape)
        print("similarity: ", similarities)
        print("-" * 20)

        # select images closest to centers
        print("-" * 20)
        print("reliability selection:")
        print("-" * 20)
        reliable_image_idx = numpy.unique(
            numpy.argwhere(similarities > threshold)[:, 1])
        print("checkpoint {}: # reliable images {}".format(
            checkpoint, len(reliable_image_idx)))
        sys.stdout.flush()
        images = numpy.array([unlabeled_images[i] for i in reliable_image_idx])
        int_labels = numpy.array(
            [kmeans.labels_[i] for i in reliable_image_idx])
        labels = to_categorical(int_labels)

        # write a tsne visualization figure, to check if visualization improves
        print("-" * 20)
        print("TSNE figure:")
        tsne = TSNE(n_components=2)
        x2d = tsne.fit_transform(
            numpy.array([features[i] for i in reliable_image_idx]))
        print("TSNE shape: ", x2d.shape)
        print("TSNE: ", x2d)
        print("-" * 20)
        plt.figure(figsize=(6, 5))
        colors = color_cycle(n_clusters)
        for i, c, label in zip(list(range(n_clusters)), colors,
                               [str(id) for id in range(n_clusters)]):
            print("current_label: ", i)
            print("shape of kmeans predictions: ", int_labels.shape)
            print("kmeans predictions: ", int_labels)
            print("points in masked predictions: ", (int_labels == i).sum())
            plt.scatter(x2d[int_labels == i, 0],
                        x2d[int_labels == i, 1],
                        c=c,
                        label=label)
        plt.legend()
        plt.title("TSNE visualization, based on {} reliable images".format(
            len(reliable_image_idx)))
        plt.savefig(
            os.path.join(info_dir, "tsne_iter_{}.png".format(checkpoint - 1)))

        # Fine tune
        print("-" * 20)
        print("Fine tuning:")
        print("-" * 20)
        # Fresh imagenet backbone every iteration; only the pseudo-labels
        # carry information forward.
        base_model = xce.Xception(include_top=False,
                                  weights='imagenet',
                                  input_shape=(imsize, imsize, 3),
                                  pooling='avg')

        # compute head of the classifier, for cosine learning
        renamer = Lambda(lambda t: t, name="features")
        regularizer = Dropout(0.8)
        if metric_learning:
            # L2-normalize (scaled) features and classify with a
            # unit-norm cosine layer.
            normalizer = Lambda(lambda t: K.l2_normalize(1000 * t, axis=-1))
            classifier = networks.CosineDense(n_clusters,
                                              use_bias=False,
                                              kernel_constraint=unit_norm(),
                                              activation="softmax")
            y = renamer(base_model.output)
            y = normalizer(y)
            y = regularizer(y)
            y = classifier(y)
        else:
            classifier = keras.layers.Dense(n_clusters, activation="softmax")
            y = renamer(base_model.output)
            y = regularizer(y)
            y = classifier(y)

        # NOTE(review): legacy singular kwargs (input=/output=);
        # newer Keras requires inputs=/outputs=.
        model = keras.Model(input=base_model.input, output=y)

        model.compile(optimizer=Adam(lr=0.001),
                      loss="categorical_crossentropy")
        # NOTE(review): steps_per_epoch divides by (batchsize + 1) and is a
        # float — looks like an off-by-one; confirm intent.
        model.fit_generator(datagen.flow(images, labels, batch_size=batchsize),
                            steps_per_epoch=len(images) / (batchsize + 1),
                            epochs=epochs)
        # Only the backbone is saved — presumably intentional, since the
        # next iteration reloads it and rebuilds the head; confirm.
        save_model(base_model, weights_dir, checkpoint)
Пример #21
0
def train(
    G,
    user_targets,
    layer_size,
    num_samples,
    batch_size,
    num_epochs,
    learning_rate,
    dropout,
):
    """
    Train a HinSAGE model on the specified graph G with given parameters.

    Args:
        G: A StellarGraph object ready for machine learning
        user_targets: Targets for the "user" nodes. Appears to be a
            two-column one-hot DataFrame where column 1 is the positive
            ("elite") class (see the [:, 1] indexing below) — confirm
            against the caller.
        layer_size: A list of number of hidden nodes in each layer
        num_samples: Number of neighbours to sample at each layer
        batch_size: Size of batch for inference
        num_epochs: Number of epochs to train the model
        learning_rate: Initial Learning rate
        dropout: The dropout (0->1)
    """
    print(G.info())

    # Split "user" nodes into train/test
    # Split nodes into train/test using stratification.
    # NOTE(review): only 25% of the nodes go to training; test_size=None
    # makes the remainder the test set.
    train_targets, test_targets = model_selection.train_test_split(
        user_targets, train_size=0.25, test_size=None
    )

    # The mapper feeds data from sampled subgraph to GraphSAGE model
    generator = HinSAGENodeGenerator(
        G, batch_size, num_samples
    )
    train_gen = generator.flow_from_dataframe(train_targets)
    test_gen = generator.flow_from_dataframe(test_targets)

    # GraphSAGE model
    model = HinSAGE(layer_size, train_gen, dropout=dropout)
    x_inp, x_out = model.default_model(flatten_output=True)

    # Final estimator layer: one softmax unit per target column.
    prediction = layers.Dense(units=train_targets.shape[1], activation="softmax")(x_out)

    # The elite label is only true for a small fraction of the total users,
    # so weight the training loss to ensure that model learns to predict
    # the positive class.
    #class_count = train_targets.values.sum(axis=0)
    #weights = class_count.sum()/class_count
    # Hard-coded (negative, positive) class weights replace the
    # frequency-based estimate commented out above.
    weights = [0.01, 1.0]
    print("Weighting loss by: {}".format(weights))

    # Create Keras model for training
    model = keras.Model(inputs=x_inp, outputs=prediction)
    model.compile(
        optimizer=optimizers.Adam(lr=learning_rate),
        loss=weighted_binary_crossentropy(weights),
        metrics=[metrics.binary_accuracy],
    )

    # Train model
    history = model.fit_generator(
        train_gen,
        epochs=num_epochs,
        verbose=2,
        shuffle=True
    )

    # Evaluate on test set and print metrics
    # Column 1 holds the positive-class probability; threshold at 0.5.
    predictions = model.predict_generator(test_gen)
    binary_predictions = predictions[:, 1] > 0.5
    print("\nTest Set Metrics (on {} nodes)".format(len(predictions)))

    # Calculate metrics using Scikit-Learn
    cm = sk_metrics.confusion_matrix(test_targets.iloc[:, 1], binary_predictions)
    print("Confusion matrix:")
    print(cm)

    accuracy = sk_metrics.accuracy_score(test_targets.iloc[:, 1], binary_predictions)
    precision = sk_metrics.precision_score(test_targets.iloc[:, 1], binary_predictions)
    recall = sk_metrics.recall_score(test_targets.iloc[:, 1], binary_predictions)
    f1 = sk_metrics.f1_score(test_targets.iloc[:, 1], binary_predictions)
    roc_auc = sk_metrics.roc_auc_score(test_targets.iloc[:, 1], binary_predictions)

    print(
        "accuracy = {:0.3}, precision = {:0.3}, recall = {:0.3}, f1 = {:0.3}".format(
            accuracy, precision, recall, f1
        )
    )
    print("ROC AUC = {:0.3}".format(roc_auc))

    # Save model
    # Encode sampling/architecture hyper-parameters in the file name.
    save_str = "_n{}_l{}_d{}_r{}".format(
        "_".join([str(x) for x in num_samples]),
        "_".join([str(x) for x in layer_size]),
        dropout,
        learning_rate,
    )
    model.save("yelp_model" + save_str + ".h5")
Пример #22
0
def fine_tune(datafolder,
              outdir,
              device,
              global_iteration,
              imsize=299,
              epochs=2,
              batchsize=16,
              lr=0.0001,
              metric_learning=True):
    """
    Adapt a neural network to a new kind of images.

    Usually a network previously trained on imagenet.
    See keras applications.

    Args:
        datafolder: folder containing one sub-directory of .png files per class.
        outdir: output folder; ERASED and recreated on every call.
        device: CUDA device id string (value for CUDA_VISIBLE_DEVICES).
        global_iteration: checkpoint index forwarded to save_model.
        imsize: side length of the square input images.
        epochs: number of fine-tuning epochs.
        batchsize: mini-batch size.
        lr: Adam learning rate.
        metric_learning: when True, L2-normalize the features and use a
            cosine classifier head; otherwise use a plain softmax Dense head.

    Raises:
        NoClassFoundError: fewer than two class sub-directories were found.
        NotEnoughSamples: the largest class has fewer than 1000 png files.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = device

    model_dir = os.path.join(outdir, 'model')
    weights_dir = os.path.join(outdir, 'weights')
    info_dir = os.path.join(outdir, 'info')

    # Light geometric augmentation only; pixel preprocessing is delegated
    # to the Xception-specific preprocess_input.
    datagen = ImageDataGenerator(featurewise_center=False,
                                 samplewise_center=False,
                                 featurewise_std_normalization=False,
                                 samplewise_std_normalization=False,
                                 zca_whitening=False,
                                 rotation_range=20,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2,
                                 shear_range=0.,
                                 zoom_range=0.,
                                 channel_shift_range=0.,
                                 fill_mode='nearest',
                                 cval=0.,
                                 horizontal_flip=False,
                                 vertical_flip=False,
                                 rescale=None,
                                 data_format=K.image_data_format(),
                                 preprocessing_function=xce.preprocess_input)

    # create directories
    ###############################
    if os.path.exists(outdir):
        # Be careful, this erases all previous work in this directory
        shutil.rmtree(outdir)
    os.makedirs(outdir)
    os.makedirs(model_dir)
    os.makedirs(weights_dir)
    os.makedirs(info_dir)

    # get information from datafolder
    #################################
    classes = []
    maxfiles = 0
    for name in os.listdir(datafolder):
        classdir = os.path.join(datafolder, name)
        if os.path.isdir(classdir):
            classes.append(name)
            images = [f for f in os.listdir(classdir) if f.endswith(".png")]
            maxfiles = max(maxfiles, len(images))
    n_clusters = len(classes)
    # FIX: the previous messages used a backslash continuation *inside* the
    # string literal, which embedded a long run of spaces in the raised text.
    if n_clusters < 2:
        raise NoClassFoundError(
            "data folder {} has no class directories inside".format(datafolder))
    if maxfiles < 1000:
        raise NotEnoughSamples(
            "Not enough samples found in {}".format(datafolder))

    # Fine tune
    print("-" * 20)
    print("Fine tuning:")
    print("-" * 20)
    K.clear_session()
    base_model = xce.Xception(include_top=False,
                              weights='imagenet',
                              input_shape=(imsize, imsize, 3),
                              pooling='avg')
    # compute head of the classifier, for cosine learning
    renamer = Lambda(lambda t: t, name="features")
    regularizer = Dropout(0.8)
    if metric_learning:
        # NOTE(review): L2 normalization is scale-invariant, so the 1000x
        # factor matters mainly for near-zero vectors (epsilon floor in
        # l2_normalize) — confirm intent.
        normalizer = Lambda(lambda t: K.l2_normalize(1000 * t, axis=-1))
        classifier = networks.CosineDense(n_clusters,
                                          use_bias=False,
                                          kernel_constraint=unit_norm(),
                                          activation="softmax")
        y = renamer(base_model.output)
        y = normalizer(y)
        y = regularizer(y)
        y = classifier(y)
    else:
        classifier = keras.layers.Dense(n_clusters, activation="softmax")
        y = renamer(base_model.output)
        y = regularizer(y)
        y = classifier(y)

    # FIX: `input=`/`output=` are legacy kwargs removed in modern Keras;
    # the supported names (used elsewhere in this file) are
    # `inputs=`/`outputs=`.
    model = keras.Model(inputs=base_model.input, outputs=y)
    # then evaluate representation in a callback
    Monitor = create_monitor(datafolder, batchsize,
                             float(maxfiles) / (batchsize + 1))

    monitor = Monitor()

    model.compile(optimizer=Adam(lr=lr), loss="categorical_crossentropy")
    print("Going to fit on {} classes.".format(n_clusters))
    print("Max number of files in a class folder is: {}.".format(maxfiles))
    print("Expected steps per epoch: {}.".format(
        float(maxfiles) / (batchsize + 1)))
    print("Go for a coffee or something...")
    model.fit_generator(datagen.flow_from_directory(datafolder,
                                                    target_size=(imsize,
                                                                 imsize),
                                                    batch_size=batchsize),
                        steps_per_epoch=float(maxfiles) / (batchsize + 1),
                        epochs=epochs,
                        callbacks=[monitor])
    save_model(base_model, weights_dir, global_iteration)
 def submodel(embedding, word_ids):
     """Build a model mapping word ids to the mean of their embeddings."""
     pooled = keras.layers.GlobalAveragePooling1D()(embedding(word_ids))
     return keras.Model(inputs=[word_ids], outputs=[pooled])
Пример #24
0

def my_dense(x):
    """Project channel 0 of *x* through a 30-unit SELU Dense layer.

    The channel axis dropped by the `x[:, :, 0]` slice is restored with
    expand_dims so the output stays rank-3.
    """
    projection = Dense(30, activation='selu', kernel_regularizer='l1')
    projected = projection(x[:, :, 0])
    return K.expand_dims(projected, axis=-1)


# Wrap the custom dense projection in a Lambda so it slots into the
# functional graph, then run a 20-unit LSTM over the projected sequence.
lstm_l1_mse = Lambda(my_dense)(lstm_input_vec)
lstm_mse = LSTM(20)(lstm_l1_mse)
# Single-unit head: one scalar prediction per input sequence.
predict_lstm_mse = Dense(1)(lstm_mse)

lstm_model_mse = keras.Model(inputs=lstm_input_vec, outputs=predict_lstm_mse)
lstm_model_mse.compile(optimizer="adam", loss="MSE")


def simple_MSE(y_pred, y_true):
    """Plain (unweighted) mean squared error between two numpy arrays."""
    squared_errors = (y_pred - y_true) ** 2
    return squared_errors.mean()


def weighted_MSE(y_pred, y_true):
    """Mean squared error with linearly increasing per-sample weights.

    Sample i carries weight (i + 1) / n, so later samples count more.
    """
    n = len(y_pred)
    sample_weights = (1 + np.arange(n)) / n
    return (sample_weights * (y_pred - y_true) ** 2).mean()


# Mutable module-level state used by code elsewhere in the file.
# NOTE(review): semantics inferred from the names only — confirm against
# the code that populates these.
cqi_queue = []   # presumably a queue of recent CQI observations
prediction = []  # presumably accumulated model predictions
last = []        # presumably the most recently observed values
Пример #25
0
            x = (mask // output_shape[3]) % output_shape[2]
            feature_range = K.tf.range(output_shape[3], dtype='int32')
            f = one_like_mask * feature_range

            updates_size = K.tf.size(updates)
            indices = K.transpose(
                K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
            values = K.reshape(updates, [updates_size])
            ret = K.tf.scatter_nd(indices, values, output_shape)
            return ret

    def compute_output_shape(self, input_shape):
        """Return the unpooled output shape.

        `input_shape` is a pair [updates_shape, mask_shape]; only the mask
        shape matters. Its two spatial dimensions are multiplied by the
        corresponding up-sampling factors.
        """
        mask_shape = input_shape[1]
        row_factor, col_factor = self.up_size[0], self.up_size[1]
        return (mask_shape[0], mask_shape[1] * row_factor,
                mask_shape[2] * col_factor, mask_shape[3])


if __name__ == '__main__':

    import keras
    import numpy as np

    # Smoke test: max-pool with argmax, then unpool, on a random 3x4x4x3
    # batch; predict() just runs the round trip so the result can be
    # inspected.
    input = keras.layers.Input((4, 4, 3))
    o = MaxPoolingWithArgmax2D()(input)
    o2 = MaxUnpooling2D()(o)
    model = keras.Model(inputs=input, outputs=o2)
    model.compile(optimizer="adam", loss='categorical_crossentropy')
    x = np.random.randint(0, 100, (3, 4, 4, 3))  # debug here
    m = model.predict(x)  # debug here
    print(m)
vgg16.summary()  #lists the layers and their names

# Freeze everything except the last convolutional block (block5): only
# those four layers are updated during fine-tuning.
for layer in vgg16.layers:
    if layer.name in [
            'block5_conv1', 'block5_conv2', 'block5_conv3', 'block5_pool'
    ]:
        layer.trainable = True
    else:
        layer.trainable = False

top_model = keras.models.load_model("./spiral_images/top_model")

print("Top Model Layers:")
top_model.summary()

# Stack the pre-trained top classifier on the VGG16 convolutional base.
# FIX: `input=`/`output=` are legacy kwargs removed in modern Keras; the
# supported names (used elsewhere in this file) are `inputs=`/`outputs=`.
model = keras.Model(inputs=vgg16.input, outputs=top_model(vgg16.output))
model.summary()

# note that it is necessary to start with a fully-trained
# classifier, including the top classifier,
# in order to successfully do fine-tuning

# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
model.compile(loss='binary_crossentropy',
              optimizer=keras.optimizers.SGD(lr=1e-5),
              metrics=['accuracy'])
Пример #27
0
 def make_model():
     """Tiny model: 1x1 Conv2D(3) followed by BatchNormalization.

     Uses the `shape` variable from the enclosing scope as input shape.
     """
     inp = keras.Input(shape)
     out = keras.layers.Conv2D(3, 1)(inp)
     out = keras.layers.BatchNormalization()(out)
     return keras.Model(inp, out)
norm2 = keras.layers.BatchNormalization(axis=-1)(pool2)
drop2 = keras.layers.Dropout(rate=0.2)(norm2)

flat = keras.layers.Flatten()(drop2)

# Two dense blocks (BatchNorm + Dropout after each) feeding a 2-way softmax.
hidden1 = keras.layers.Dense(128, activation='relu')(flat)
norm3 = keras.layers.BatchNormalization(axis=-1)(hidden1)
drop3 = keras.layers.Dropout(rate=0.2)(norm3)

hidden2 = keras.layers.Dense(50, activation='relu')(drop3)
norm4 = keras.layers.BatchNormalization(axis=-1)(hidden2)
drop4 = keras.layers.Dropout(rate=0.2)(norm4)

out = keras.layers.Dense(2, activation='softmax')(drop4)

# FIX: the call mixed legacy and modern kwargs (`input=inp, outputs=out`);
# modern Keras only accepts `inputs=`/`outputs=`.
model = keras.Model(inputs=inp, outputs=out)
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
print(model.summary())
##################
# Hold out 20% for testing; labels are one-hot encoded to match the
# 2-way softmax head. random_state pins the split for reproducibility.
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
X_train, X_test, y_train, y_test = train_test_split(dataset,
                                                    to_categorical(
                                                        np.array(label)),
                                                    test_size=0.20,
                                                    random_state=0)

history = model.fit(np.array(X_train),
                    y_train,
Пример #29
0
def test_stateful_metrics():
    """Check that a stateful metric accumulates correctly across batches,
    for both array-based and generator-based fit/evaluate/predict."""
    np.random.seed(1334)

    class BinaryTruePositives(keras.layers.Layer):
        """Stateful Metric to count the total true positives over all batches.

        Assumes predictions and targets of shape `(samples, 1)`.

        # Arguments
            name: String, name for the metric.
        """
        def __init__(self, name='true_positives', **kwargs):
            super(BinaryTruePositives, self).__init__(name=name, **kwargs)
            # stateful=True tells Keras not to average this metric over
            # batches; the running total lives in a backend variable.
            self.stateful = True
            self.true_positives = K.variable(value=0, dtype='int32')

        def reset_states(self):
            K.set_value(self.true_positives, 0)

        def __call__(self, y_true, y_pred):
            """Computes the number of true positives in a batch.

            # Arguments
                y_true: Tensor, batch_wise labels
                y_pred: Tensor, batch_wise predictions

            # Returns
                The total number of true positives seen this epoch at the
                    completion of the batch.
            """
            y_true = K.cast(y_true, 'int32')
            y_pred = K.cast(K.round(y_pred), 'int32')
            correct_preds = K.cast(K.equal(y_pred, y_true), 'int32')
            true_pos = K.cast(K.sum(correct_preds * y_true), 'int32')
            # `* 1` snapshots the variable's pre-update value so the
            # returned tensor is not aliased to the updated variable.
            current_true_pos = self.true_positives * 1
            self.add_update(K.update_add(self.true_positives, true_pos),
                            inputs=[y_true, y_pred])
            return current_true_pos + true_pos

    # Round-trip through serialize/deserialize to verify the custom metric
    # survives (de)serialization.
    metric_fn = BinaryTruePositives()
    config = metrics.serialize(metric_fn)
    metric_fn = metrics.deserialize(
        config, custom_objects={'BinaryTruePositives': BinaryTruePositives})

    # Test on simple model
    inputs = keras.Input(shape=(2, ))
    outputs = keras.layers.Dense(1, activation='sigmoid')(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=['acc', metric_fn])

    samples = 1000
    x = np.random.random((samples, 2))
    y = np.random.randint(2, size=(samples, 1))

    val_samples = 10
    val_x = np.random.random((val_samples, 2))
    val_y = np.random.randint(2, size=(val_samples, 1))

    # Test fit and evaluate
    history = model.fit(x,
                        y,
                        validation_data=(val_x, val_y),
                        epochs=1,
                        batch_size=10)
    outs = model.evaluate(x, y, batch_size=10)
    preds = model.predict(x)

    def ref_true_pos(y_true, y_pred):
        # NumPy reference implementation of the true-positive count.
        return np.sum(np.logical_and(y_pred > 0.5, y_true == 1))

    # Test correctness (e.g. updates should have been run)
    # outs[2] is the metric value (index 0 = loss, 1 = acc).
    np.testing.assert_allclose(outs[2], ref_true_pos(y, preds), atol=1e-5)

    # Test correctness of the validation metric computation
    val_preds = model.predict(val_x)
    val_outs = model.evaluate(val_x, val_y, batch_size=10)
    np.testing.assert_allclose(val_outs[2],
                               ref_true_pos(val_y, val_preds),
                               atol=1e-5)
    np.testing.assert_allclose(val_outs[2],
                               history.history['val_true_positives'][-1],
                               atol=1e-5)

    # Test with generators
    # One (x, y) pair per step so the metric accumulates across many
    # single-sample batches.
    gen = [(np.array([x0]), np.array([y0])) for x0, y0 in zip(x, y)]
    val_gen = [(np.array([x0]), np.array([y0]))
               for x0, y0 in zip(val_x, val_y)]
    history = model.fit_generator(iter(gen),
                                  epochs=1,
                                  steps_per_epoch=samples,
                                  validation_data=iter(val_gen),
                                  validation_steps=val_samples)
    outs = model.evaluate_generator(iter(gen), steps=samples)
    preds = model.predict_generator(iter(gen), steps=samples)

    # Test correctness of the metric re ref_true_pos()
    np.testing.assert_allclose(outs[2], ref_true_pos(y, preds), atol=1e-5)

    # Test correctness of the validation metric computation
    val_preds = model.predict_generator(iter(val_gen), steps=val_samples)
    val_outs = model.evaluate_generator(iter(val_gen), steps=val_samples)
    np.testing.assert_allclose(val_outs[2],
                               ref_true_pos(val_y, val_preds),
                               atol=1e-5)
    np.testing.assert_allclose(val_outs[2],
                               history.history['val_true_positives'][-1],
                               atol=1e-5)
Пример #30
0
def main():
    """Build, train, and checkpoint a convolutional VAE from a JSON config.

    The config id comes from the command line; data paths, architecture,
    optimizer and training hyper-parameters are all read from
    ./model_config/config_<id>.json.
    """
    args = argument_parsing()
    print("Command line args:", args)

    # FIX: context-managed open — the previous open/close pair leaked the
    # file handle if json.load raised.
    with open("./model_config/config_{}.json".format(args.id)) as f:
        model_config = json.load(f)

    train_data = np.load(model_config["data"]["training_data_path"])
    test_data = np.load(model_config["data"]["test_data_path"])

    img_width = train_data.shape[1]
    img_height = train_data.shape[2]

    print("Image shape:", img_width, img_height)

    # Construct VAE Encoder
    encoder_result = encoder_gen((img_width, img_height), model_config["encoder"])

    # Construct VAE Decoder (needs the encoder's pre-flatten shape to
    # mirror the conv stack)
    vae_decoder = decoder_gen(
        (img_width, img_height),
        model_config["decoder"],
        encoder_result.shape_before_flattening
    )

    # End-to-end VAE: encoder output feeds the decoder.
    z = encoder_result.vae_encoder(encoder_result.inputs)
    x_recon = vae_decoder(z)
    vae = keras.Model(inputs=[encoder_result.inputs], outputs=[x_recon])

    # Specify the optimizer
    optimizer = keras.optimizers.Adam(lr=model_config['optimizer']['lr'])

    # Compile model. NOTE(review): `loss` in metrics is presumably a custom
    # metric defined elsewhere in this file — verify.
    vae.compile(loss='mse', optimizer=optimizer, metrics=[loss])
    vae.summary()

    # Add the trailing channel dimension the conv layers expect.
    train_data = train_data.reshape(train_data.shape + (1,))
    test_data = test_data.reshape(test_data.shape + (1,))

    print("train data shape", train_data.shape)
    print("test data shape", test_data.shape)

    # Keep only the best weights (by training loss) on disk.
    checkpoint = ModelCheckpoint(
        './models/model_{}.th'.format(args.id),
        monitor='loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=True
    )
    callbacks_list = [checkpoint]

    # Autoencoder training: inputs are their own targets.
    h = vae.fit(
        x=train_data,
        y=train_data,
        epochs=model_config["train_epochs"],
        batch_size=model_config["batch_size"],
        validation_data=(test_data, test_data),  # tuple per the Keras API
        callbacks=callbacks_list
    )

    plot_training_losses(h, args.id)