def build_model(self, filters, filter_length, optimizer='rmsprop', loss='mse'):
    """Build and compile a symmetric causal-Conv1D encoder/decoder model.

    Three "h" convolutions encode the input; three "g" convolutions decode
    it, with skip (residual-style) additions h2+g3 and h1+g2 linking the
    two halves. Prints the model summary.

    Args:
        filters: number of filters for every intermediate Conv1D layer.
        filter_length: kernel size shared by all Conv1D layers.
        optimizer: optimizer passed straight to ``model.compile``.
        loss: loss passed straight to ``model.compile``.

    Returns:
        The compiled keras Model mapping
        (time_step, input_size) -> (time_step, output_size).
    """
    def causal_relu_conv(n_filters, tensor):
        # every layer in this net shares padding/activation/kernel size
        return Conv1D(n_filters, filter_length,
                      padding='causal', activation='relu')(tensor)

    inputs = Input(shape=(self.time_step, self.input_size))

    h1 = causal_relu_conv(filters, inputs)
    h2 = causal_relu_conv(filters, h1)
    h3 = causal_relu_conv(filters, h2)

    g3 = causal_relu_conv(filters, h3)
    g2 = causal_relu_conv(filters, add([h2, g3]))  # skip connection from h2
    g1 = causal_relu_conv(filters, add([h1, g2]))  # skip connection from h1
    outputs = causal_relu_conv(self.output_size, g1)

    model = Model(inputs, outputs)
    model.compile(optimizer, loss)
    model.summary()
    return model
Esempio n. 2
0
def generator(Z_dim, y_dim, image_size=32):
    """Conditional DCGAN generator: (noise, label) -> RGB image.

    Projects the concatenated noise/label vector to a small spatial seed
    (image_size/16 per side), then upsamples with four BN+ReLU transposed
    convolutions (strides 2,2,1,2) and a final sigmoid deconv, for a total
    upscale factor of 16. Prints the model summary.
    """
    base_filters = 64
    out_channels = 3
    seed_side = int(image_size / 16)  # spatial side before upsampling

    z_in = Input(shape=(Z_dim, ), name='Z_input')
    y_in = Input(shape=(y_dim, ), name='y_input')

    net = concatenate([z_in, y_in])
    net = Dense(base_filters * 8 * seed_side * seed_side)(net)
    net = Reshape(target_shape=[seed_side, seed_side, base_filters * 8])(net)
    net = Activation('relu')(BatchNormalization()(net))

    # (filters, stride) for each transposed-convolution stage
    stages = ((base_filters * 4, 2), (base_filters * 2, 2),
              (base_filters * 2, 1), (base_filters, 2))
    for n_filters, stride in stages:
        net = Conv2DTranspose(n_filters, kernel_size=(5, 5),
                              strides=(stride, stride), padding='same')(net)
        net = Activation('relu')(BatchNormalization()(net))

    G_prob = Conv2DTranspose(out_channels,
                             kernel_size=(5, 5),
                             strides=(2, 2),
                             padding='same',
                             activation='sigmoid')(net)

    G = Model(inputs=[z_in, y_in], outputs=G_prob, name='Generator')
    print('=== Generator ===')
    G.summary()
    print('\n\n')

    return G
Esempio n. 3
0
def build_read_tensor_2d_model(args):
    """Build a Read Tensor 2d CNN model for classifying variants.

    Two asymmetric convolutions (one tall, one wide) followed by pooling,
    a third convolution, and a dense softmax head over ``args.labels``.
    Input channel ordering is selected dynamically. Uses the functional
    API and prints the model summary; optionally loads weights by name.

    Arguments:
        args.window_size: length in base-pairs of sequence centered at the variant.
        args.read_limit: number of reads in the tensor.
        args.channels_in: number of input channels.
        args.labels: the output labels (e.g. SNP, NOT_SNP, INDEL, NOT_INDEL).
        args.channels_last: Theano->False or Tensorflow->True channel ordering flag.
        args.weights_hd5: optional HD5 weight file loaded if it exists.

    Returns:
        The compiled keras model.
    """
    conv_width = 16

    if args.channels_last:
        tensor_shape = (args.read_limit, args.window_size, args.channels_in)
    else:
        tensor_shape = (args.channels_in, args.read_limit, args.window_size)

    read_tensor = Input(shape=tensor_shape, name="read_tensor")

    net = Conv2D(128, (conv_width, 1), padding='valid', activation="relu",
                 kernel_initializer="he_normal")(read_tensor)
    net = Conv2D(64, (1, conv_width), padding='valid', activation="relu",
                 kernel_initializer="he_normal")(net)
    net = MaxPooling2D((3, 1))(net)
    net = Conv2D(64, (1, conv_width), padding='valid', activation="relu",
                 kernel_initializer="he_normal")(net)
    net = MaxPooling2D((3, 3))(net)
    net = Flatten()(net)
    net = Dense(units=32, kernel_initializer='normal', activation='relu')(net)
    prob_output = Dense(units=len(args.labels), kernel_initializer='normal',
                        activation='softmax')(net)

    model = Model(inputs=[read_tensor], outputs=[prob_output])

    # clipnorm guards against exploding gradients on the read tensors
    adamo = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adamo,
                  metrics=[metrics.categorical_accuracy])
    model.summary()

    if os.path.exists(args.weights_hd5):
        model.load_weights(args.weights_hd5, by_name=True)
        print('Loaded model weights from:', args.weights_hd5)

    return model
Esempio n. 4
0
def generator(Z_dim):
    """Generator mapping a Z_dim noise vector to a 28x28x1 sigmoid image.

    Network architecture is exactly the same as in infoGAN
    (https://arxiv.org/abs/1606.03657):
    FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S
    """
    def bn_relu(tensor):
        # shared BatchNorm -> ReLU activation block
        return Activation('relu')(BatchNormalization()(tensor))

    z = Input(shape=(Z_dim, ), name='Z_input')
    h = bn_relu(Dense(1024)(z))
    h = bn_relu(Dense(128 * 7 * 7)(h))
    h = Reshape(target_shape=[7, 7, 128])(h)
    h = bn_relu(Conv2DTranspose(64, kernel_size=(4, 4),
                                strides=(2, 2), padding='same')(h))
    out = Conv2DTranspose(1, kernel_size=(4, 4), strides=(2, 2),
                          activation='sigmoid', padding='same')(h)

    G_model = Model(inputs=z, outputs=out)
    G_model.summary()
    return G_model
Esempio n. 5
0
def discriminator(X_dim, y_dim):
    """Conditional DCGAN discriminator: (image, label) -> real/fake logit.

    Five stride-2 convolutions (the first without BatchNorm) with
    LeakyReLU activations, then the label vector is concatenated onto the
    flattened features before the final linear output. Prints the summary.
    """
    base_filters = 64

    x_in = Input(shape=(X_dim), name='X_input')
    y_in = Input(shape=(y_dim, ), name='Y_input')

    # first conv stage has no BatchNorm
    net = Convolution2D(base_filters, kernel_size=(5, 5),
                        strides=(2, 2), padding='same')(x_in)
    net = LeakyReLU(0.2)(net)

    # remaining stages: conv -> BN -> LeakyReLU with growing filter counts
    for mult in (2, 2, 4, 8):
        net = Convolution2D(base_filters * mult, kernel_size=(5, 5),
                            strides=(2, 2), padding='same')(net)
        net = LeakyReLU(0.2)(BatchNormalization()(net))

    net = Flatten()(net)
    net = concatenate([net, y_in])  # condition on the label
    D_logit = Dense(1, name='Discriminator_Output')(net)

    D = Model(inputs=[x_in, y_in], outputs=D_logit, name='Discriminator')
    print('=== Discriminator ===')
    D.summary()
    print('\n\n')
    return D
Esempio n. 6
0
def discriminator(X_dim):
    """Auto-encoder style discriminator (as used in energy-based GANs).

    Architecture: (64)4c2s-FC32_BR-FC64*14*14_BR-(1)4dc2s_S.
    Encodes the input to a 32-dim code, decodes back to a sigmoid image,
    and returns a model with outputs [reconstruction, code].
    """
    x_in = Input(shape=X_dim, name='X_input')

    # encoder: one strided conv, then a 32-dim bottleneck code
    enc = Convolution2D(64, kernel_size=(4, 4), strides=(2, 2),
                        padding='same', name='d_conv1')(x_in)
    enc = Activation('relu')(enc)
    enc = Flatten()(enc)
    code = Activation('relu')(BatchNormalization()(Dense(32)(enc)))

    # decoder: expand the code back to a 28x28x1 sigmoid reconstruction
    dec = Activation('relu')(BatchNormalization()(Dense(64 * 14 * 14)(code)))
    dec = Reshape(target_shape=[14, 14, 64])(dec)
    out = Conv2DTranspose(1, kernel_size=(2, 2), strides=(2, 2),
                          padding='same', activation='sigmoid')(dec)

    D_model = Model(inputs=x_in, outputs=[out, code])
    # reconstruction loss (e.g. sqrt(2 * l2(out - x_in)) / batch_size)
    # is expected to be computed by the caller from these two outputs.
    D_model.summary()
    return D_model
Esempio n. 7
0
def main():
    """Train a small two-stage CNN classifier on the loaded image dataset.

    Shuffles and normalizes the training images to [0, 1], builds a
    conv/pool x2 + dense + dropout network with a softmax head over
    NUM_CLASSES, trains for 30 epochs and prints the test score.
    """
    training_images, training_labels, test_images, test_labels = load_dataset()

    # shuffle the training set, scale to [0, 1], and add a channel axis
    perm_train = np.random.permutation(training_labels.size)
    training_labels = training_labels[perm_train]
    training_images = training_images[perm_train, :, :] / 255.0
    training_images = np.expand_dims(training_images, -1)
    print(training_images.shape)

    test_images = np.expand_dims(test_images / 255.0, -1)

    # labels stay integer-coded: loss below is sparse_categorical_crossentropy
    BATCH_SIZE = 32 * 8
    WIDTH, HEIGHT = 28, 28

    # network: two conv/pool stages, then a dense head with dropout
    input_layer = Input(shape=(HEIGHT, WIDTH, 1), name='input_layer')
    net = Conv2D(filters=32, kernel_size=3, strides=(1, 1),
                 padding='same', activation='relu')(input_layer)
    net = MaxPooling2D(pool_size=2)(net)
    net = Conv2D(filters=32, kernel_size=3, strides=(1, 1),
                 padding='valid', activation='relu')(net)
    net = MaxPooling2D(pool_size=2)(net)
    net = Flatten()(net)
    net = Dense(units=128, activation='relu')(net)
    net = Dropout(.5)(net)
    output_layer = Dense(units=NUM_CLASSES, activation='softmax')(net)

    model = Model(inputs=input_layer, outputs=output_layer)
    model.compile(optimizer=tf.train.AdamOptimizer(),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    print(model.summary())

    model.fit(x=training_images,
              y=training_labels,
              batch_size=BATCH_SIZE,
              epochs=30,
              verbose=1,
              validation_data=(test_images, test_labels))

    accuracy = model.evaluate(x=test_images,
                              y=test_labels,
                              batch_size=BATCH_SIZE)
    print('test score = {}'.format(accuracy))
Esempio n. 8
0
# check whether 3 models are the same or not
# Variant 1: the Sequential API, with Dense and Activation as separate layers.
model = Sequential()
model.add(Dense(4, input_dim=2,
                name='dense1'))  # 2 nodes reaches 50% accuracy, 8 nodes 100%
model.add(Activation('relu', name='dense1_act'))
model.add(Dense(1, name='dense2'))
model.add(Activation('sigmoid', name='dense2_act'))  # output shaped by sigmoid
model.summary()  # see each layer of a model

# use Model instead of Sequential
# Variant 2: the functional API, with activations fused into the Dense layers.
input_tensor = Input(shape=(2, ), name='input')
hidden = Dense(4, activation='relu', name='dense1_relu')(input_tensor)
output = Dense(1, activation='sigmoid',
               name='dense2_sigm')(hidden)  # output shaped by sigmoid
model1 = Model(inputs=input_tensor, outputs=output)
model1.summary()  # see each layer of a model

# Variant 3 below is deliberately disabled (kept as a string) because it is
# invalid: Model inputs/outputs must be layer-produced tensors, not raw
# backend (K.*) tensors.
"""
# use Model to split layer and activation
input_tensor = Input(shape=(2,))
hidden = Dense(2)(input_tensor)
relu_hid = K.relu(hidden)
dense_out = Dense(1)(relu_hid) # output shaped by sigmoid
sigmoid_out = K.sigmoid(dense_out)

# inputs and outputs must be layer tensor, not just tensor made from K
model2 = Model(inputs=input_tensor, outputs=sigmoid_out)

model2.summary() # see each layer of a model
"""

#######################################################
Esempio n. 9
0
"""
### Residual connection on a convolution layer

For more information about residual networks, see [Deep Residual Learning for Image Recognition](http://arxiv.org/abs/1512.03385).

"""

from tensorflow.contrib.keras.python.keras.layers import add, Input, Conv2D
from tensorflow.contrib.keras.python.keras.models import Model
# input tensor for a 3-channel 256x256 image
# x = Input(shape=(3, 256, 256)) will cause error
x = Input(shape=(256, 256, 3))
# due to keras.backend._config uses channels_last

# 3x3 conv with 3 output channels (same as input channels)
y = Conv2D(3, (3, 3), padding='same')(x)
# this returns x + y.
z = add(
    [x, y]
)  # take in same shape input tensor list, return same shape tensor, in order words, add([x,y]) is row-bind

model = Model(x, z)
model.summary()
Esempio n. 10
0
# NOTE(review): this excerpt starts mid-script — `x`, `sequence_input` and
# `labels_index` are defined earlier, outside this excerpt (presumably an
# embedding pipeline over 1000-token sequences; confirm against the full file).
x = Conv1D(128, 5, activation='relu')(x)  # (?, 35, 128)
# conv1d_t3 = x
# pool over the whole remaining length, collapsing the time axis
x = MaxPooling1D(35)(x)  # (?, 1, 128)
# maxp_t3 = x
x = Flatten()(x)  # (?, ?) not (?, 128) ??
# flatten_t = x
x = Dense(128, activation='relu')(x)  # (?, 128)
# dense_t = x
# softmax over the label set; labels are expected one-hot (categorical loss)
preds = Dense(len(labels_index), activation='softmax')(x)  # (?, 20)

model = Model(sequence_input,
              preds)  # a number of samples as input, preds as output tensors
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
model.summary(
)  # see layer output tensor shape and total params of each layer # model.layers[2].count_params() # calculate each layer's params

# pp model.trainable_weights # display weights shape of each trainable layer
"""
Important Tips:

Contemplating the following two tips deeply and patiently !!!!!!!!!!

Compare model.summary() and pp model.trainable_weights, we can see that how Conv1D weights or filters are used to screen embedding_1 tensor

In fact, for all layers, either weights of Conv1D, Dense, or that of Conv2D, consider them to be filters, to screen previous layer's tensor

How embedding weights transform input_1 (?, 1000) to embedding_1 (?, 1000, 100)? deserve research later
"""

# presumably the save/load target for this model — confirm usage below/above
model_path = "/Users/Natsume/Downloads/data_for_all/word_embeddings/pretrainedWordEmbedding_2.h5"
Esempio n. 11
0
def main():
    """Train a simple DCGAN on the loaded image dataset.

    Builds a discriminator, a generator, and a combined model (generator
    feeding a frozen discriminator), then alternates discriminator and
    generator updates for 100 epochs before displaying a few samples.

    Fixes over the original: all `BATCH_SIZE / 2` uses are now integer
    division (`//`) — under Python 3 true division yields a float, which
    raises TypeError when used as an array dimension (np.random.randn),
    a slice index, or a batch size; also removed the unused per-epoch
    fake sample and the `iter` builtin shadowing.
    """
    training_images, training_labels, test_images, test_labels = load_dataset()

    # shuffle, and scale training images to [-1, 1] to match the tanh
    # output of the generator; test images stay in [0, 1]
    perm_train = np.random.permutation(training_labels.size)
    training_labels = training_labels[perm_train]
    training_images = (training_images[perm_train, :, :] - 127.5) / 127.5
    training_images = np.expand_dims(training_images, -1)
    print(training_images.shape)
    test_images = test_images / 255.0
    test_images = np.expand_dims(test_images, -1)

    BATCH_SIZE = 32 * 8
    HALF_BATCH = BATCH_SIZE // 2  # integer: used as array size / slice index
    WIDTH, HEIGHT = 28, 28
    adam_lr = 0.0002
    adam_beta_1 = 0.5

    #####################################
    ### Defining the Discriminator:
    #####################################
    input_D = Input(shape=(HEIGHT, WIDTH, 1), name='input_D')
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(2, 2),
               padding='same',
               name='conv1_D')(input_D)
    # BatchNorm deliberately omitted in D (see training-loop note below)
    x = Activation('relu')(x)
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(2, 2),
               padding='same',
               name='conv2_D')(x)
    x = Activation('relu')(x)
    x = Flatten()(x)
    x = Dense(128, activation='relu', name='dense1_D')(x)
    output_D = Dense(1, activation='sigmoid', name='output_D')(x)
    model_D = Model(inputs=input_D, outputs=output_D)
    model_D.compile(loss='binary_crossentropy',
                    optimizer=tf.train.AdamOptimizer(learning_rate=adam_lr,
                                                     beta1=adam_beta_1),
                    metrics=['accuracy'])

    #####################################
    ### Defining the Generator:
    #####################################
    LATENT_SIZE = 100
    input_G = Input(shape=(LATENT_SIZE, ), name='input_gen')
    x = Dense(7 * 7 * 32, activation='linear', name='Dense1_G')(input_G)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((7, 7, 32))(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               name='conv1_gen')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               name='conv2_gen')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=1,
               kernel_size=1,
               strides=(1, 1),
               padding='same',
               name='conv3_gen')(x)
    img_G = Activation('tanh')(x)
    model_G = Model(inputs=input_G, outputs=img_G)
    model_G.compile(loss='binary_crossentropy',
                    optimizer=tf.train.AdamOptimizer(learning_rate=adam_lr,
                                                     beta1=adam_beta_1))

    #####################################
    ### Defining the Combined GAN:
    #####################################
    # model_D was already compiled above, so it still trains on its own;
    # setting trainable here only freezes it inside the combined model.
    model_D.trainable = False
    # this Input must be distinct from the Generator's own input tensor
    input_main = Input(shape=(LATENT_SIZE, ), name='input_main')
    combined = Model(inputs=input_main, outputs=model_D(model_G(input_main)))
    combined.compile(loss='binary_crossentropy',
                     optimizer=tf.train.AdamOptimizer(learning_rate=adam_lr,
                                                      beta1=adam_beta_1),
                     metrics=['accuracy'])

    print(combined.summary())

    #####################################
    ### Training:
    #####################################
    bar = InitBar()
    N = training_images.shape[0]
    for epoch in range(100):
        loss_G, acc_G, loss_D, acc_D = 0, 0, 0, 0
        steps = int(np.ceil(float(N) / float(BATCH_SIZE)))
        for batch_iter in range(steps):
            bar(100.0 * batch_iter / float(steps))
            real_image, _ = get_batch(batch_iter, HALF_BATCH,
                                      training_images, training_labels)
            ####################
            ## Discriminator Training
            ####################
            #  Note that if using BN layer in Discriminator, minibatch should
            #  contain only real images or fake images.
            fake_input = np.random.randn(HALF_BATCH, LATENT_SIZE)
            fake_image = model_G.predict(fake_input)
            # half fake (label 0) then half real (label 1), shuffled together
            agg_input = np.concatenate((fake_image, real_image), axis=0)
            agg_output = np.zeros((BATCH_SIZE, ))
            agg_output[HALF_BATCH:] = 1
            perm = np.random.permutation(BATCH_SIZE)
            agg_input = agg_input[perm]
            agg_output = agg_output[perm]
            tr = model_D.train_on_batch(x=agg_input, y=agg_output)
            loss_D += tr[0]
            acc_D += tr[1]
            #####################
            ## Generator Training
            #####################
            # train G (through frozen D) to make fakes classified as real
            fake_input = np.random.randn(BATCH_SIZE, LATENT_SIZE)
            fake_label = np.ones(BATCH_SIZE, )
            tr = combined.train_on_batch(x=fake_input, y=fake_label)
            loss_G += tr[0]
            acc_G += tr[1]
        print('\nG_loss = {}, G_acc = {}\nD_loss = {}, D_acc = {}'.format(
            loss_G / float(steps), acc_G / float(steps), loss_D / float(steps),
            acc_D / float(steps)))

    # display a few generated samples
    for _ in range(10):
        fake_input = np.random.randn(1, LATENT_SIZE)
        fake_image = model_G.predict(fake_input)
        plt.imshow(fake_image[0, :, :, 0])
        plt.show()
# load pretrained model (ImageNet weights, classification head stripped)
model_inception_v3 = inception_v3.InceptionV3(include_top=False,
                                              weights='imagenet',
                                              input_tensor=None,
                                              input_shape=None,
                                              pooling=None)
# add a global spatial average pooling layer
x = model_inception_v3.output
x = GlobalAveragePooling2D()(x)
# add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer over the 5 flower classes
predictions = Dense(5, activation='softmax')(x)
# the model for transfer learning
model = Model(inputs=model_inception_v3.input, outputs=predictions)
# fixed: `print model.summary()` was Python 2 statement syntax — a
# SyntaxError under Python 3, which the rest of this file targets
print(model.summary())

# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in model_inception_v3.layers:
    layer.trainable = False

# load dataset
# flowers_dataset = create_image_dataset(FLAGS.image_dir, training_percentage=FLAGS.training_percentage,
#                                        testing_percentage=FLAGS.testing_percentage)
image_data_generator = ImageDataGenerator()
train_data = image_data_generator.flow_from_directory(
    '/home/meizu/WORK/code/YF_baidu_ML/dataset/flowers/flower_photos/train',
    target_size=(FLAGS.input_image_size, FLAGS.input_image_size),
    batch_size=FLAGS.batch_size,
    class_mode='categorical')
Esempio n. 13
0
# NOTE(review): this excerpt begins mid-statement — the Conv2D(...) call whose
# trailing keyword arguments appear below is opened earlier, outside this
# excerpt (as are `H`, `nch`, `g_input`, `opt`, `shp` and `dropout_rate`).
           kernel_initializer=init_ops.glorot_normal_initializer(),
           name='Convolution_1')(H)
H = BatchNormalization()(H)
H = Activation('relu')(H)
# NOTE(review): `nch / 4` is a float under Python 3, but Conv2D expects an
# integer filter count — likely needs `nch // 4`; confirm target Python version.
H = Conv2D(nch / 4,
           kernel_size=(3, 3),
           padding='same',
           kernel_initializer=init_ops.glorot_normal_initializer(),
           name='Convolution_2')(H)
H = BatchNormalization()(H)
H = Activation('relu')(H)
# collapse to a single channel (1x1 conv), then sigmoid forms the image
H = Conv2D(1, 1, 1, padding='same', kernel_initializer='random_normal')(H)
g_V = Activation('sigmoid')(H)
generator = Model(inputs=g_input, outputs=g_V)
generator.compile(loss='binary_crossentropy', optimizer=opt)
generator.summary()

# Build Discriminative model ...
d_input = Input(shape=shp)
H = Convolution2D(256,
                  kernel_size=(5, 5),
                  strides=(2, 2),
                  padding='same',
                  activation='relu')(d_input)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
# NOTE(review): excerpt is cut off after the next layer — the discriminator
# definition continues beyond this view.
H = Convolution2D(512,
                  kernel_size=(5, 5),
                  strides=(2, 2),
                  padding='same',
                  activation='relu')(H)