Example #1
File: ppgan.py  Project: forkbabu/Keras-GAN
    def __init__(self,
                 max_data=60000,
                 eps=50,
                 gamma=0.000001,
                 mia_attacks=None):
        self.mia_attacks = mia_attacks
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        NoisyAdam = add_gradient_noise(Adam)

        K.image_data_format()

        noise_std = 2 * 128 / max_data * np.sqrt(np.log(1 / gamma)) / eps

        print("Setting noise stadard deviation to " + str(noise_std))
        np.random.seed(0)  # Deterministic output.
        self.random_dim = 100  # For consistency with other GAN implementations.

        def normalize(data):
            return np.reshape((data.astype(np.float32) - 127.5) / 127.5,
                              (-1, *self.img_shape))

        # Load data
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        X_train = normalize(X_train)
        self.X_train = X_train.reshape(60000, 784)

        self.x_out, y_out = extract_training_samples('digits')
        self.x_out = normalize(self.x_out)
        self.x_out = self.x_out.reshape(240000, 784)

        self.X_train = self.X_train[:max_data]
        self.img_shape = self.X_train.shape[1]

        # Generator
        generator = Sequential()
        generator.add(
            Dense(256,
                  input_dim=self.random_dim,
                  kernel_initializer=initializers.RandomNormal(stddev=0.02)))
        generator.add(LeakyReLU(0.2))
        generator.add(Dense(512))
        generator.add(LeakyReLU(0.2))
        generator.add(Dense(1024))
        generator.add(LeakyReLU(0.2))
        generator.add(Dense(self.X_train.shape[1], activation='tanh'))

        generator_optimizer = Adam(lr=0.0002, beta_1=0.5)
        generator.compile(optimizer=generator_optimizer,
                          loss='binary_crossentropy')
        self.generator = generator

        # Discriminator
        # discriminator = Sequential()
        # discriminator.add(Dense(1024, input_dim=self.X_train.shape[1], kernel_initializer=initializers.RandomNormal(stddev=0.02)))
        # discriminator.add(LeakyReLU(0.2))
        # discriminator.add(Dropout(0.3))
        # discriminator.add(Dense(512))
        # discriminator.add(LeakyReLU(0.2))
        # discriminator.add(Dropout(0.3))
        # discriminator.add(Dense(256))
        # discriminator.add(LeakyReLU(0.2))
        # discriminator.add(Dropout(0.3))
        # discriminator.add(Dense(1, activation='sigmoid'))
        dropout = 0.3
        critic_in = Input((self.img_shape, ))
        l0 = Dense(1024,
                   input_shape=(self.img_shape, ),
                   kernel_initializer=initializers.RandomNormal(
                       stddev=0.02))(critic_in)
        l1 = LeakyReLU(alpha=0.2)(l0)
        l2 = Dropout(dropout)(l1)
        l3 = Dense(512)(l2)
        l4 = LeakyReLU(alpha=0.2)(l3)
        l5 = Dropout(dropout)(l4)
        l6 = Dense(256)(l5)
        l7 = LeakyReLU(alpha=0.2)(l6)
        featuremaps = Dropout(dropout)(l7)
        critic_out = Dense(1, name="critic_out",
                           activation='sigmoid')(featuremaps)
        discriminator = Model(inputs=[critic_in], outputs=[critic_out])

        clipnorm = 5.0
        discriminator_optimizer = NoisyAdam(lr=0.0002,
                                            beta_1=0.5,
                                            clipnorm=clipnorm,
                                            standard_deviation=noise_std)
        discriminator.compile(optimizer=discriminator_optimizer,
                              loss='binary_crossentropy')
        self.discriminator = discriminator

        featuremap_model = Model(inputs=[critic_in], outputs=[featuremaps])

        advreg = self.build_advreg(input_shape=(256, ))
        mia_pred = advreg(featuremap_model(critic_in))

        naming_layer = Lambda(lambda x: x, name='mia_pred')
        mia_pred = naming_layer(mia_pred)

        advreg_model = Model(inputs=[critic_in], outputs=[mia_pred])

        # Do not train the critic when updating the adversarial regularizer
        featuremap_model.trainable = False

        advreg_optimizer = Adam(lr=0.0002, beta_1=0.5)
        advreg_model.compile(optimizer=advreg_optimizer,
                             metrics=["accuracy"],
                             loss=self.wasserstein_loss)
        self.advreg_model = advreg_model

        # GAN
        self.discriminator.trainable = False
        gan_input = Input(shape=(self.random_dim, ))
        x = generator(gan_input)
        gan_output = discriminator(x)
        gan = Model(inputs=gan_input, outputs=gan_output)

        gan_optimizer = Adam(lr=0.0002, beta_1=0.5)
        gan.compile(optimizer=gan_optimizer, loss='binary_crossentropy')
        self.gan = gan

        # Losses for plotting
        self.discriminator_losses = []
        self.generator_losses = []

        self.logan_precisions = []
        self.featuremap_precisions = []
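`add_gradient_noise` is imported from elsewhere in this project and is not shown in the snippet. A minimal sketch of what such a wrapper could look like (an assumption, not the project's actual implementation): a class factory that subclasses the given optimizer and perturbs every gradient with zero-mean Gaussian noise, which is where the differentially-private noise level computed above is used.

import keras.backend as K

def add_gradient_noise(base_optimizer):
    class NoisyOptimizer(base_optimizer):
        def __init__(self, standard_deviation=0.0, **kwargs):
            # All remaining arguments (lr, beta_1, clipnorm, ...) go to the base optimizer.
            super(NoisyOptimizer, self).__init__(**kwargs)
            self.standard_deviation = standard_deviation

        def get_gradients(self, loss, params):
            grads = super(NoisyOptimizer, self).get_gradients(loss, params)
            # Add N(0, standard_deviation^2) noise to every gradient tensor.
            return [g + K.random_normal(K.shape(g), mean=0.0,
                                        stddev=self.standard_deviation)
                    for g in grads]

    return NoisyOptimizer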
Example #2
np.random.seed(1000)

# The results are a little better when the dimensionality of the random vector is only 10.
# The dimensionality has been left at 100 for consistency with other GAN implementations.
randomDim = 100

# Load MNIST data
(X_train, y_train), (X_test, y_test) = (mnist.train.images, mnist.train.labels), (mnist.test.images, mnist.test.labels)
X_train = (X_train.astype(np.float32) - 127.5)/127.5
X_train = X_train.reshape(55000, 784)

# Optimizer
adam = Adam(lr=0.0002, beta_1=0.5)

generator = Sequential()
generator.add(Dense(256, input_dim=randomDim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
generator.add(LeakyReLU(0.2))
generator.add(Dense(512))
generator.add(LeakyReLU(0.2))
generator.add(Dense(1024))
generator.add(LeakyReLU(0.2))
generator.add(Dense(784, activation='tanh'))
generator.compile(loss='binary_crossentropy', optimizer=adam)

discriminator = Sequential()
discriminator.add(Dense(1024, input_dim=784, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
discriminator.add(Dense(512))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
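The snippet is cut off in the middle of the discriminator. A plausible completion, mirroring the structure of Example #1 (the remaining layers and the combined GAN below are an assumption, and `Input`/`Model` are assumed to be imported):

discriminator.add(Dense(256))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
discriminator.add(Dense(1, activation='sigmoid'))
discriminator.compile(loss='binary_crossentropy', optimizer=adam)

# Combined network: freeze the discriminator while the generator is trained through it.
discriminator.trainable = False
ganInput = Input(shape=(randomDim, ))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)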
Example #3
def build_generator(dimensions):
    im_dim = dimensions['data_shape'][0]
    if im_dim == 28:
        init = 7
        blocks = 1
    elif im_dim % 16 == 0:
        init = 4
        blocks = im_dim // 16

    generator = Sequential()
    generator.add(
        Dense(init * init * 128,
              input_dim=dimensions['latent_dim'],
              kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    generator.add(LeakyReLU(0.2))
    generator.add(BatchNormalization(momentum=0.8))
    generator.add(Dropout(.3))
    generator.add(Reshape((init, init, 128)))
    generator.add(UpSampling2D(size=(2, 2)))

    for block in range(blocks):
        generator.add(Conv2D(64, kernel_size=(5, 5), padding='same'))
        generator.add(LeakyReLU(0.2))
        generator.add(BatchNormalization(momentum=0.8))
        generator.add(Dropout(.3))
        generator.add(UpSampling2D(size=(2, 2)))

    generator.add(
        Conv2D(dimensions['data_shape'][-1],
               kernel_size=(5, 5),
               padding='same',
               activation='tanh'))
    # generator.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5))
    # model = Sequential()
    #
    # model.add(Reshape((dimensions['latent_sqrt'], dimensions['latent_sqrt'], 1),
    #                   input_shape=(dimensions['latent_dim'],)))
    # model.add(Conv2D(16, 3, strides=(1, 1), padding='valid'))
    #
    # model.add(LeakyReLU(alpha=0.2))
    # model.add(BatchNormalization(momentum=0.8))
    # model.add(Dropout(.3))
    # # model.add(MaxPooling2D())
    #
    # model.add(Conv2D(32, 3, strides=(2, 2), padding='valid'))
    # model.add(LeakyReLU(alpha=0.2))
    # model.add(BatchNormalization(momentum=0.8))
    # model.add(Dropout(.3))
    # # model.add(MaxPooling2D())
    #
    # model.add(Conv2D(64, 5, strides=(2, 2), padding='valid'))
    # model.add(LeakyReLU(alpha=0.2))
    # model.add(BatchNormalization(momentum=0.8))
    # model.add(Dropout(.3))
    # # model.add(MaxPooling2D())
    # # model.add(UpSampling2D())
    #
    # model.add(Flatten())
    # model.add(Dense(np.prod(dimensions['data_shape']), activation='tanh'))
    # model.add(Reshape(dimensions['data_shape']))
    # model.summary()
    generator.summary()

    noise = Input(shape=(dimensions['latent_dim'], ))
    img = generator(noise)

    return Model(noise, img)
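A short usage sketch for `build_generator` (the MNIST-like `dimensions` dict and the numpy import are assumptions): with `data_shape` (28, 28, 1) the branch above picks `init=7` and one upsampling block, so the output is 28x28x1 in [-1, 1].

import numpy as np

dimensions = {'data_shape': (28, 28, 1), 'latent_dim': 100}
G = build_generator(dimensions)
noise = np.random.normal(0, 1, (16, dimensions['latent_dim']))
fake_images = G.predict(noise)   # shape (16, 28, 28, 1)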
Example #4
        z_train = np.concatenate([z_train, z_i], axis=1)
num_batches = int(X_train.shape[0] / BATCH_SIZE)
LATENT_DIM = z_train.shape[1]
# compute mean and covariance of all latent vectors
mu = np.zeros((LATENT_DIM, ))
Sigma = np.cov(z_train.transpose())
exp_replay = []
dLosses = []
gLosses = []

# conditional generator
g_in_ran = Input(shape=(RANDOM_DIM, ))
g_in_lat = Input(shape=(LATENT_DIM, ))
g_in = Concatenate()([g_in_ran, g_in_lat])
g = Dense(256 * 8 * 8,
          kernel_initializer=initializers.RandomNormal(stddev=0.02))(g_in)
g = LeakyReLU(0.2)(g)
g = Reshape((256, 8, 8))(g)
g = UpSampling2D(size=(2, 2))(g)
g = Conv2D(128, kernel_size=(5, 5), padding='same')(g)
g = LeakyReLU(0.2)(g)
g = UpSampling2D(size=(2, 2))(g)
g = Conv2D(64, kernel_size=(5, 5), padding='same')(g)
g = LeakyReLU(0.2)(g)
g = UpSampling2D(size=(2, 2))(g)
g_out = Conv2D(3, kernel_size=(5, 5), padding='same', activation='tanh')(g)
generator = Model([g_in_ran, g_in_lat], g_out)
generator.compile(loss='binary_crossentropy', optimizer=adam)

# conditional discriminator
d_in_img = Input(shape=(3, 64, 64))
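The mean `mu` and covariance `Sigma` computed above are presumably used to sample the conditioning latent vectors fed into the generator's second input; a minimal sketch of that sampling step (everything beyond the names taken from the snippet is an assumption):

# Draw a batch of conditioning vectors from the fitted latent distribution,
# plus standard Gaussian noise for the unconditional input.
z_batch = np.random.multivariate_normal(mu, Sigma, size=BATCH_SIZE)
noise = np.random.normal(0, 1, size=(BATCH_SIZE, RANDOM_DIM))
generated_images = generator.predict([noise, z_batch])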
Example #5
preprocessor = prep.StandardScaler().fit(source)
source = preprocessor.transform(source)
target = preprocessor.transform(target)

#############################
######## train MMD net ######
#############################

calibInput = Input(shape=(inputDim, ))
block1_bn1 = BatchNormalization()(calibInput)
block1_a1 = Activation('relu')(block1_bn1)
block1_w1 = Dense(
    mmdNetLayerSizes[0],
    activation='linear',
    kernel_regularizer=l2(l2_penalty),
    kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block1_a1)
block1_bn2 = BatchNormalization()(block1_w1)
block1_a2 = Activation('relu')(block1_bn2)
block1_w2 = Dense(
    inputDim,
    activation='linear',
    kernel_regularizer=l2(l2_penalty),
    kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block1_a2)
block1_output = add([block1_w2, calibInput])
block2_bn1 = BatchNormalization()(block1_output)
block2_a1 = Activation('relu')(block2_bn1)
block2_w1 = Dense(
    mmdNetLayerSizes[1],
    activation='linear',
    kernel_regularizer=l2(l2_penalty),
    kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block2_a1)
Example #6
from keras.layers import Convolution2D, MaxPooling2D, Conv2D, Conv2DTranspose, BatchNormalization
from keras.utils import np_utils
from keras.layers.advanced_activations import LeakyReLU
from keras import optimizers, initializers

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.logging.set_verbosity(tf.logging.ERROR)

np.random.seed(42)
optadam = optimizers.Adam(lr=0.0002,
                          beta_1=0.5,
                          beta_2=0.999,
                          epsilon=None,
                          decay=0.0,
                          amsgrad=False)
init = initializers.RandomNormal(mean=0.0, stddev=0.02, seed=None)


class Traindata:
    def __init__(self, real_images, fake_images):
        self.real_images = real_images
        self.fake_images = fake_images

        r_ones = np.ones([real_images.shape[0], 1])
        r_zeros = np.zeros([real_images.shape[0], 1])

        f_ones = np.ones([fake_images.shape[0], 1])
        f_zeros = np.zeros([fake_images.shape[0], 1])
        # Create labels for real and fake images
        self.real_lables = np.hstack((r_ones, r_zeros))
        self.fake_lables = np.hstack((f_zeros, f_ones))
def build_cifar10_generator(ngf=64, z_dim=128):
    """ Builds CIFAR10 DCGAN Generator Model
    PARAMS
    ------
    ngf: number of generator filters
    z_dim: number of dimensions in latent vector

    RETURN
    ------
    G: keras sequential
    """
    init = initializers.RandomNormal(stddev=0.02)

    G = Sequential()

    # Dense 1: 2x2x512
    G.add(
        Dense(2 * 2 * ngf * 8,
              input_shape=(z_dim, ),
              use_bias=True,
              kernel_initializer=init))
    G.add(Reshape((2, 2, ngf * 8)))
    G.add(BatchNormalization())
    G.add(LeakyReLU(0.2))

    # Conv 1: 4x4x256
    G.add(
        Conv2DTranspose(ngf * 4,
                        kernel_size=5,
                        strides=2,
                        padding='same',
                        use_bias=True,
                        kernel_initializer=init))
    G.add(BatchNormalization())
    G.add(LeakyReLU(0.2))

    # Conv 2: 8x8x128
    G.add(
        Conv2DTranspose(ngf * 2,
                        kernel_size=5,
                        strides=2,
                        padding='same',
                        use_bias=True,
                        kernel_initializer=init))
    G.add(BatchNormalization())
    G.add(LeakyReLU(0.2))

    # Conv 3: 16x16x64
    G.add(
        Conv2DTranspose(ngf,
                        kernel_size=5,
                        strides=2,
                        padding='same',
                        use_bias=True,
                        kernel_initializer=init))
    G.add(BatchNormalization())
    G.add(LeakyReLU(0.2))

    # Conv 4: 32x32x3
    G.add(
        Conv2DTranspose(3,
                        kernel_size=5,
                        strides=2,
                        padding='same',
                        use_bias=True,
                        kernel_initializer=init))
    G.add(Activation('tanh'))

    print("\nGenerator")
    G.summary()

    return G
Example #8
def normal(shape, name=None):
    #return initializations.normal(shape, scale=0.05, name=name)
    return initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
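This keeps the call signature of the old Keras 1 `initializations.normal` helper while returning a Keras 2 initializer; the `shape` and `name` arguments are simply ignored. A usage sketch (the layer sizes are arbitrary):

# Any shape/name may be passed; the returned RandomNormal initializer ignores them.
layer = Dense(64, kernel_initializer=normal((784, 64)))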
Example #9
    def buildDiscriminator(self, max_filters, kernel_sz):

        # Sequential model
        discriminator = Sequential()

        # First layer is conv2D
        discriminator.add(
            Convolution2D(
                int(max_filters),
                kernel_size=(kernel_sz, kernel_sz),
                strides=(2, 2),
                input_shape=self.image_dimensions,
                padding="same",
                kernel_initializer=initializers.RandomNormal(stddev=0.02)))
        # Leaky Relu activation
        discriminator.add(LeakyReLU(.2))
        #discriminator.add(BatchNormalization(momentum=0.7))
        # Dropout regularization
        discriminator.add(Dropout(0.2))

        discriminator.add(
            Convolution2D(int(max_filters / 2),
                          kernel_size=(kernel_sz, kernel_sz),
                          strides=(2, 2),
                          padding="same"))
        #discriminator.add(ZeroPadding2D(padding=((0,1),(0,1))))
        discriminator.add(LeakyReLU(.2))
        #discriminator.add(BatchNormalization(momentum=0.7))
        discriminator.add(Dropout(0.2))

        discriminator.add(
            Convolution2D(int(max_filters / 2),
                          kernel_size=(kernel_sz, kernel_sz),
                          strides=(2, 2),
                          padding="same"))
        #discriminator.add(ZeroPadding2D(padding=((0,1),(0,1))))
        discriminator.add(LeakyReLU(.2))
        #discriminator.add(BatchNormalization(momentum=0.7))
        discriminator.add(Dropout(0.2))

        discriminator.add(
            Convolution2D(int(max_filters / 4),
                          kernel_size=(kernel_sz, kernel_sz),
                          strides=(2, 2),
                          padding="same"))
        #discriminator.add(ZeroPadding2D(padding=((0,1),(0,1))))
        discriminator.add(LeakyReLU(.2))
        #discriminator.add(BatchNormalization(momentum=0.7))
        discriminator.add(Dropout(0.2))

        discriminator.add(
            Convolution2D(int(max_filters / 8),
                          kernel_size=(kernel_sz, kernel_sz),
                          strides=(2, 2),
                          padding="same"))
        discriminator.add(LeakyReLU(.2))
        #discriminator.add(BatchNormalization(momentum=0.7))
        discriminator.add(Dropout(0.2))
        discriminator.add(Flatten())

        discriminator.add(
            Dense(1,
                  kernel_initializer=initializers.RandomNormal(stddev=0.02)))

        # Print summary of model
        discriminator.summary()

        return discriminator
Example #10
def init_models_2():

    adam = Adam(lr=learning_rate, beta_1 = 0.5)
    rmsprop = RMSprop(lr=learning_rate)

    optim = adam

    generator = Sequential()

    generator.add(Dense(256, input_dim=gen_in_dim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    generator.add(LeakyReLU(alpha=0.2))
    # generator.add(Dropout(gen_dropout))

    generator.add(Dense(512))
    generator.add(LeakyReLU(alpha=0.2))
    # generator.add(Dropout(gen_dropout))

    generator.add(Dense(1024))
    generator.add(LeakyReLU(alpha=0.2))
    # generator.add(Dropout(gen_dropout))
    #
    generator.add(Dense(2048))
    generator.add(LeakyReLU(alpha=0.2))

    # generator.add(Dense(2560))
    # generator.add(LeakyReLU(alpha=0.2))

    generator.add(Dense(output_dim, activation='tanh'))
    generator.compile(optimizer=optim, loss=wasserstein_loss)

    discriminator = Sequential()
    #
    discriminator.add(Dense(2048, input_dim=output_dim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    discriminator.add(LeakyReLU(alpha=0.2))
    discriminator.add(Dropout(disc_dropout))

    discriminator.add(Dense(1024))#, input_dim=output_dim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    discriminator.add(LeakyReLU(alpha=0.2))
    discriminator.add(Dropout(disc_dropout))

    discriminator.add(Dense(512))
    discriminator.add(LeakyReLU(alpha=0.2))
    discriminator.add(Dropout(disc_dropout))

    # discriminator.add(MinibatchDiscrimination(5, 3))

    discriminator.add(Dense(256))
    discriminator.add(LeakyReLU(alpha=0.2))
    discriminator.add(Dropout(disc_dropout))

    discriminator.add(Dense(1))#, activation='sigmoid')) #binary classification (real or fake = 1 or 0 respectively)
    discriminator.compile(optimizer=optim, loss=wasserstein_loss)

    # creating gan
    discriminator.trainable = False
    ganInput = Input(shape=(gen_in_dim,))
    x = generator(ganInput)
    ganOutput = discriminator(x)
    gan = Model(inputs=ganInput, outputs=ganOutput)
    gan.compile(loss=wasserstein_loss, optimizer=optim)

    return generator, discriminator, gan
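Both networks are compiled with a `wasserstein_loss` that is defined elsewhere in this script; the usual Keras definition, with real/fake labels encoded as +1/-1, looks like this (a sketch, assuming `keras.backend` is imported as `K`):

def wasserstein_loss(y_true, y_pred):
    # Mean of label * critic score; minimizing it pushes real and fake scores apart.
    return K.mean(y_true * y_pred)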
Example #11
            cont_tweet += 1

y_test = np.asarray(taggs_test)

y_train = to_categorical(y_train, num_classes=None)
y_test = to_categorical(y_test, num_classes=None)

# Convolutional model
submodels = []
for kw in size_filters:
    submodel = Sequential()
    submodel.add(
        Conv1D(num_filters,
               kw,
               padding='valid',
               kernel_initializer=initializers.RandomNormal(stddev=np.sqrt(2 / kw)),
               input_shape=(tam_fijo, embedding_vecor_length)))
    submodel.add(advanced_activations.PReLU(initializers.Constant(value=0.25)))
    submodel.add(GlobalMaxPooling1D())
    submodels.append(submodel)

model = Sequential()
model.add(Merge(submodels, mode="concat"))
model.add(Dropout(dropout))
model.add(Dense(2, activation='softmax'))

# Log to tensorboard
tensorBoardCallback = TensorBoard(log_dir='./logs22', write_graph=True)
adadelta = optimizers.Adadelta(lr=alpha)
model.compile(loss='categorical_crossentropy',
              optimizer=adadelta,
Example #12
    def __init__(self, max_len, emb_train):
        # Define hyperparameters
        modname = FIXED_PARAMETERS["model_name"]
        learning_rate = FIXED_PARAMETERS["learning_rate"]
        dropout_rate = FIXED_PARAMETERS["dropout_rate"]
        batch_size = FIXED_PARAMETERS["batch_size"]
        max_words = FIXED_PARAMETERS["max_words"]

        print("Loading data...")
        genres_train, sent1_train, sent2_train, labels_train_, scores_train = load_sts_data(
            FIXED_PARAMETERS["train_path"])
        genres_dev, sent1_dev, sent2_dev, labels_dev_, scores_dev = load_sts_data(
            FIXED_PARAMETERS["dev_path"])

        print("Building dictionary...")
        text = sent1_train + sent2_train + sent1_dev + sent2_dev
        tokenizer = Tokenizer(num_words=max_words)
        tokenizer.fit_on_texts(text)
        word_index = tokenizer.word_index

        print("Padding and indexing sentences...")
        sent1_train_seq, sent2_train_seq, labels_train = tokenizing_and_padding(
            FIXED_PARAMETERS["train_path"], tokenizer, max_len)
        sent1_dev_seq, sent2_dev_seq, labels_dev = tokenizing_and_padding(
            FIXED_PARAMETERS["dev_path"], tokenizer, max_len)

        print("Loading embeddings...")
        vocab_size = min(max_words, len(word_index)) + 1
        embedding_matrix = build_emb_matrix(FIXED_PARAMETERS["embedding_path"],
                                            vocab_size, word_index)

        embedding_layer = Embedding(vocab_size,
                                    300,
                                    weights=[embedding_matrix],
                                    input_length=max_len,
                                    trainable=emb_train,
                                    name='VectorLookup')

        sent1_seq_in = Input(shape=(max_len, ),
                             dtype='int32',
                             name='sent1_seq_in')
        embedded_sent1 = embedding_layer(sent1_seq_in)
        embedded_sent1_drop = layers.Dropout(dropout_rate)(embedded_sent1)
        encoded_sent1 = Lambda(lambda x: K.sum(x, axis=1))(embedded_sent1_drop)

        sent2_seq_in = Input(shape=(max_len, ),
                             dtype='int32',
                             name='sent2_seq_in')
        embedded_sent2 = embedding_layer(sent2_seq_in)
        embedded_sent2_drop = layers.Dropout(dropout_rate)(embedded_sent2)
        encoded_sent2 = Lambda(lambda x: K.sum(x, axis=1))(embedded_sent2_drop)

        mul = layers.Multiply()([encoded_sent1, encoded_sent2])
        sub = layers.Subtract()([encoded_sent1, encoded_sent2])
        dif = Lambda(lambda x: K.abs(x))(sub)

        concatenated = layers.concatenate([mul, dif], axis=-1)

        x = Dense(150,
                  activation='sigmoid',
                  kernel_initializer=initializers.RandomNormal(stddev=0.1),
                  bias_initializer=initializers.RandomNormal(
                      stddev=0.1))(concatenated)
        x = Dropout(dropout_rate)(x)
        x = Dense(6,
                  activation='softmax',
                  kernel_initializer=initializers.RandomNormal(stddev=0.1),
                  bias_initializer=initializers.RandomNormal(stddev=0.1))(x)

        gate_mapping = K.variable(
            value=np.array([[0.], [1.], [2.], [3.], [4.], [5.]]))
        preds = Lambda(lambda a: K.dot(a, gate_mapping), name='Prediction')(x)

        model = Model([sent1_seq_in, sent2_seq_in], preds)
        model.summary()

        def pearson(y_true, y_pred):
            """
            Pearson product-moment correlation metric.
            """
            return pearsonr(y_true, y_pred)

        early_stopping = EarlyStopping(monitor='pearson',
                                       patience=20,
                                       mode='max')
        # checkpointer = ModelCheckpoint(filepath=os.path.join(FIXED_PARAMETERS["ckpt_path"], modname) + '.hdf5',
        #                               verbose=1,
        #                               monitor='val_pearson',
        #                               save_best_only=True,
        #                               mode='max')

        Adam = optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999)

        model.compile(optimizer=Adam, loss='mse', metrics=[pearson])

        history = model.fit([sent1_train_seq, sent2_train_seq],
                            labels_train,
                            verbose=1,
                            epochs=300,
                            batch_size=batch_size,
                            callbacks=[early_stopping],
                            validation_data=([sent1_dev_seq,
                                              sent2_dev_seq], labels_dev))
Example #13
train_csv = pd.read_csv('./house-prices-advanced-regression-techniques/train.csv')
train_data = pd.DataFrame(train_csv)

# FULL TRAIN DATA ---> [TRAIN (50%), VALIDATE (25%), TEST (25%)]

# Initialise model that we will add layers to:
model = Sequential()

# Activation functions
acts = ['relu','sigmoid','tanh']

# Initialise weights with w ~ N(mean1, sd1)
mean1, sd1 = 0,  0.1
norm_init = initializers.RandomNormal(mean=mean1,
                                      stddev=sd1,
                                      seed=123)

# Initialise weights with w ~ UNIF(lower_bound, upper_bound)
lower_bound, upper_bound = -.5, .5
unif_init = initializers.RandomUniform(minval=lower_bound,
                                       maxval=upper_bound,
                                       seed=123)

# Kernel L2 Regulariser: reg_constant1 is our regularisation constant
reg_constant1 = 0.01
l2_regulariser = regularizers.l2(l=reg_constant1)

# Kernel L1 Regulariser: reg_constant2 is our regularisation constant
reg_constant2 = 0.01
l1_regulariser = regularizers.l1(l=reg_constant2)
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Train the CNN under three different initialization schemes,
# and record loss over time.
inits = [
    initializers.Zeros(),
    initializers.RandomNormal(mean=0.0, stddev=0.4, seed=10),
    initializers.VarianceScaling(scale=2.0,
                                 mode='fan_in',
                                 distribution='normal',
                                 seed=10),
]

loss_histories = {}
models = {}

for i, init in enumerate(inits):
    init_id = get_init_id(init)

    print("Training CNN with initializer:")
    print('  ' + str(init))
    print('  ' + str(init.get_config()))
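`get_init_id` is not shown in the snippet; a hypothetical helper with the same role could simply derive a readable identifier from the initializer's class name and config:

def get_init_id(init):
    # Hypothetical helper: e.g. "RandomNormal(mean=0.0, stddev=0.4, seed=10)".
    cfg = init.get_config()
    return '{}({})'.format(init.__class__.__name__,
                           ', '.join('{}={}'.format(k, v) for k, v in sorted(cfg.items())))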
Example #15
# Normalizing the data
train_x = np.float32(train_x)
test_x = np.float32(test_x)

train_x = (train_x / 255 - 0.5) * 2
test_x = (test_x / 255 - 0.5) * 2

train_x = np.clip(train_x, -1, 1)
test_x = np.clip(test_x, -1, 1)

########################## Generative model #######################

# latent space dimension
dim = 100
init = initializers.RandomNormal(stddev=0.02)
# building the model
gen_model = Sequential()
gen_model.add(Dense(2 * 2 * 512, input_shape=(dim, ),
                    kernel_initializer=init))  # 2x2x512
gen_model.add(Reshape((2, 2, 512)))
gen_model.add(BatchNormalization())
gen_model.add(LeakyReLU(0.2))

gen_model.add(Conv2DTranspose(256, kernel_size=5, strides=2,
                              padding='same'))  # 4x4x256
gen_model.add(BatchNormalization())
gen_model.add(LeakyReLU(0.2))

gen_model.add(Conv2DTranspose(128, kernel_size=5, strides=2,
                              padding='same'))  # 8x8x128
Example #16
print("train_audio.shape:", x_train_audio.shape)
print("test_audio.shape:", x_test_audio.shape)
print("valid_audio.shape:", x_valid_audio.shape)

############################### Input Layers ##########################

input_train_Video = Input(shape=(316, ), name='input_video')
input_train_Audio = Input(shape=(102, ), name='input_audio')

############################### Separated Layers ##########################

video_branch = Dense(316,
                     input_dim=316,
                     name='encoded_video_branch',
                     kernel_initializer=initializers.RandomNormal(seed=None),
                     bias_initializer='zeros',
                     activation='relu')(input_train_Video)
#video_branch = Dropout(0.5)(video_branch)
audio_branch = Dense(102,
                     input_dim=102,
                     name='encoded_audio_branch',
                     kernel_initializer=initializers.RandomNormal(seed=None),
                     bias_initializer='zeros')(input_train_Audio)
#audio_branch = Dropout(0.5)(audio_branch)

#model.add(Dropout(0.2, input_shape=(60,)))

#video_branch = Dense(316, activation='relu', name ='encoded_video_branch1')(video_branch)
#audio_branch = Dense(102, activation='relu', name = 'encoded_audio_branch1')(audio_branch)
Example #17
    def _create_model(self, sequence_len, method):
        '''Creates the Auto encoder module described in the paper
        '''
        model = Sequential()

        # Encoder
        model.add(
            Conv1D(8,
                   4,
                   activation="linear",
                   input_shape=(sequence_len, 1),
                   padding="same",
                   strides=1,
                   kernel_initializer=initializers.RandomNormal(mean=0.1)))
        model.add(MaxPooling1D(pool_size=2, strides=2, padding="same"))
        model.add(
            Conv1D(4,
                   4,
                   activation="linear",
                   padding="same",
                   strides=1,
                   kernel_initializer=initializers.RandomNormal(mean=0.1)))
        model.add(MaxPooling1D(pool_size=2, strides=2, padding="same"))
        model.add(Flatten())

        # Fully Connected Layers
        model.add(
            Dense(sequence_len,
                  activation='relu',
                  kernel_initializer=initializers.RandomNormal(
                      mean=0.1)))  # try with linear activation on the dense layers

        model.add(
            Dense(128,
                  activation='relu',
                  kernel_initializer=initializers.RandomNormal(mean=0.1)))

        model.add(
            Dense(sequence_len,
                  activation='relu',
                  kernel_initializer=initializers.RandomNormal(mean=0.1)))

        # Decoder
        model.add(Reshape((sequence_len // 4, 4)))
        model.add(UpSampling1D(size=2))
        model.add(
            Conv1D(4,
                   4,
                   activation="linear",
                   padding="same",
                   strides=1,
                   kernel_initializer=initializers.RandomNormal(mean=0.1)))
        model.add(UpSampling1D(size=2))
        model.add(
            Conv1D(1,
                   4,
                   activation="linear",
                   padding="same",
                   strides=1,
                   kernel_initializer=initializers.RandomNormal(mean=0.1)))

        if method == 'SGD':
            model.compile(loss='mse',
                          optimizer=optimizers.SGD(lr=0.0001, momentum=0.9))
        else:
            model.compile(loss='mse', optimizer='adam')

        plot_model(model, to_file='model.png', show_shapes=True)
        return model
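A short usage sketch for the autoencoder (called on the enclosing object; the dummy data is an assumption, `sequence_len` must be divisible by 4 for the `Reshape((sequence_len // 4, 4))` step, and the `plot_model` call inside needs pydot/graphviz installed):

autoencoder = self._create_model(sequence_len=128, method='SGD')
x_windows = np.random.rand(256, 128, 1).astype('float32')   # 256 dummy windows of length 128
autoencoder.fit(x_windows, x_windows, epochs=5, batch_size=32, verbose=0)  # reconstruct the input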
Example #18
data_dim = 28 * 28

x_train = x_train.reshape(60000, 784).astype('float32')[:n_train]
x_test = x_test.reshape(10000, 784).astype('float32')[:n_train]
x_train /= 255
x_test /= 255

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Run the data through a few MLP models and save the activations from
# each layer into a Pandas DataFrame.
rows = []
sigmas = [0.10, 0.14, 0.28]
for stddev in sigmas:
    init = initializers.RandomNormal(mean=0.0, stddev=stddev, seed=seed)
    activation = 'relu'

    model = create_mlp_model(n_hidden_layers, dim_layer, (data_dim, ),
                             n_classes, init, 'zeros', activation)
    compile_model(model)
    output_elts = get_activations(model, x_test)
    n_layers = len(model.layers)
    i_output_layer = n_layers - 1

    for i, out in enumerate(output_elts[:-1]):
        if i > 0 and i != i_output_layer:
            for out_i in out.ravel()[::20]:
                rows.append([i, stddev, out_i])

df = pd.DataFrame(rows,
def build_cifar10_discriminator(ndf=64, image_shape=(32, 32, 3)):
    """ Builds CIFAR10 DCGAN Discriminator Model
    PARAMS
    ------
    ndf: number of discriminator filters
    image_shape: 32x32x3

    RETURN
    ------
    D: keras sequential
    """
    init = initializers.RandomNormal(stddev=0.02)

    D = Sequential()

    # Conv 1: 16x16x64
    D.add(
        Conv2D(ndf,
               kernel_size=5,
               strides=2,
               padding='same',
               use_bias=True,
               kernel_initializer=init,
               input_shape=image_shape))
    D.add(LeakyReLU(0.2))

    # Conv 2: 8x8x128
    D.add(
        Conv2D(ndf * 2,
               kernel_size=5,
               strides=2,
               padding='same',
               use_bias=True,
               kernel_initializer=init))
    D.add(BatchNormalization())
    D.add(LeakyReLU(0.2))

    # Conv 3: 4x4x256
    D.add(
        Conv2D(ndf * 4,
               kernel_size=5,
               strides=2,
               padding='same',
               use_bias=True,
               kernel_initializer=init))
    D.add(BatchNormalization())
    D.add(LeakyReLU(0.2))

    # Conv 4:  2x2x512
    D.add(
        Conv2D(ndf * 8,
               kernel_size=5,
               strides=2,
               padding='same',
               use_bias=True,
               kernel_initializer=init))
    D.add(BatchNormalization())
    D.add(LeakyReLU(0.2))

    # Flatten: 2x2x512 -> (2048)
    D.add(Flatten())

    # Dense Layer
    D.add(Dense(1, kernel_initializer=init))
    D.add(Activation('sigmoid'))

    print("\nDiscriminator")
    D.summary()

    return D
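The generator and discriminator builders above are presumably wired together into a combined adversarial model elsewhere; a minimal sketch of that step (the optimizer settings and the `Input`/`Model`/`Adam` imports are assumptions):

G = build_cifar10_generator(ngf=64, z_dim=128)
D = build_cifar10_discriminator(ndf=64, image_shape=(32, 32, 3))
D.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5))

# Freeze D while the generator is trained through the stacked model.
D.trainable = False
z = Input(shape=(128, ))
gan = Model(z, D(G(z)))
gan.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5))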
Example #20
def create_model(layer_sizes=[20, 20],
                 l2_penalty=1e-2,
                 input_dim=1,
                 optimizer=None,
                 loss=None):
    assert (optimizer is not None), "must provide an optimizer"
    assert (loss is not None), "must provide a loss"

    # input
    calibInput = Input(shape=(input_dim, ))

    # block 1
    block1_bn1 = BatchNormalization()(calibInput)
    block1_a1 = Activation('relu')(block1_bn1)
    block1_w1 = Dense(
        layer_sizes[1],
        activation='linear',
        kernel_regularizer=l2(l2_penalty),
        kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block1_a1)
    block1_bn2 = BatchNormalization()(block1_w1)
    block1_a2 = Activation('relu')(block1_bn2)
    block1_w2 = Dense(
        layer_sizes[0],
        activation='linear',
        kernel_regularizer=l2(l2_penalty),
        kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block1_a2)
    block1_output = add([block1_w2, calibInput])

    # block 2
    block2_bn1 = BatchNormalization()(block1_output)
    block2_a1 = Activation('relu')(block2_bn1)
    block2_w1 = Dense(
        layer_sizes[1],
        activation='linear',
        kernel_regularizer=l2(l2_penalty),
        kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block2_a1)
    block2_bn2 = BatchNormalization()(block2_w1)
    block2_a2 = Activation('relu')(block2_bn2)
    block2_w2 = Dense(
        layer_sizes[0],
        activation='linear',
        kernel_regularizer=l2(l2_penalty),
        kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block2_a2)
    block2_output = add([block2_w2, block1_output])

    # block 3
    block3_bn1 = BatchNormalization()(block2_output)
    block3_a1 = Activation('relu')(block3_bn1)
    block3_w1 = Dense(
        layer_sizes[1],
        activation='linear',
        kernel_regularizer=l2(l2_penalty),
        kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block3_a1)
    block3_bn2 = BatchNormalization()(block3_w1)
    block3_a2 = Activation('relu')(block3_bn2)
    block3_w2 = Dense(
        layer_sizes[0],
        activation='linear',
        kernel_regularizer=l2(l2_penalty),
        kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block3_a2)
    block3_output = add([block3_w2, block2_output])

    calibMMDNet = Model(inputs=calibInput, outputs=block3_output)
    calibMMDNet.compile(optimizer=optimizer, loss=loss)

    return calibMMDNet
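Because of the two asserts, `create_model` must be given both an optimizer and a loss explicitly; a minimal usage sketch (the sizes, learning rate, and loss are assumptions, with `from keras import optimizers`):

net = create_model(layer_sizes=[25, 25],
                   l2_penalty=1e-2,
                   input_dim=25,
                   optimizer=optimizers.Adam(lr=1e-3),
                   loss='mse')
net.summary()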
Example #21
keras.backend.set_session(session)

K.tensorflow_backend.set_session(tf.Session(config=config))

import utility

from srPreprocessing import generate_patches
from srPreprocessing import patch_to_image


def psnr(y_pred, y):
    t = K.mean(K.square(y_pred - y))
    return -10. * K.log(t)


kernel_ini = initializers.RandomNormal(mean=0.0, stddev=1e-4, seed=None)
bias_ini = keras.initializers.Zeros()

adam = optimizers.Adam(lr=0.00001)


def srcnn_mode(net=[64, 32, 3],
               flt=[9, 1, 5],
               kernel_ini=kernel_ini,
               bias_ini=bias_ini):
    model = Sequential()
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               strides=(1, 1),
               data_format="channels_last",
Example #22
    model = Sequential()
    model.add(
        Dense(ne_num,
              input_dim=in_num,
              kernel_initializer=wi.Constant(value=2.0),
              bias_initializer=wi.Constant(value=2.0)))
    plot_weights(weights=model.get_weights(),
                 x=np.arange(0, ne_num, 1),
                 title='Constant(value=2.0)')

    model = Sequential()
    model.add(
        Dense(ne_num,
              input_dim=in_num,
              kernel_initializer=wi.RandomNormal(mean=0.0,
                                                 stddev=0.05,
                                                 seed=seed),
              bias_initializer=wi.RandomNormal(mean=0.0,
                                               stddev=0.05,
                                               seed=seed)))
    plot_weights(weights=model.get_weights(),
                 x=np.arange(0, ne_num, 1),
                 title='RandomNormal(mean=0.0, stddev=0.05, seed=seed)')

    model = Sequential()
    model.add(
        Dense(ne_num,
              input_dim=in_num,
              kernel_initializer=wi.RandomUniform(minval=-0.05,
                                                  maxval=0.05,
                                                  seed=seed),
Example #23
def iterate_hyperparas(use_default=False):
    ## initializer, regularization, activation function, learning rate, batch size
    ## some parameters (e.g. the learning rate) may change during training; ignore that for now
    options = {}
    # defaults = {}
    # defaults['activation_regularizer'] = None
    # defaults['kernel_regularizer'] = 0.001
    # defaults['activation_regularizer'] = 0.1
    ## initializer
    options['initializer'] = []
    options['initializer'].append(initializers.glorot_normal(seed=None))
    options['initializer'].append(initializers.glorot_uniform(seed=None))
    options['initializer'].append(
        initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None))
    options['initializer'].append(
        initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=None))
    options['initializer'].append(
        initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None))
    ## kernel_regularizer
    options['kernel_regularizer'] = []
    options['kernel_regularizer'].append(None)
    options['kernel_regularizer'].append(regularizers.l1(0.0001))
    options['kernel_regularizer'].append(regularizers.l1(0.001))
    options['kernel_regularizer'].append(regularizers.l1(0.01))
    options['kernel_regularizer'].append(regularizers.l1(0.1))
    options['kernel_regularizer'].append(regularizers.l2(0.0001))
    options['kernel_regularizer'].append(regularizers.l2(0.001))
    options['kernel_regularizer'].append(regularizers.l2(0.01))
    options['kernel_regularizer'].append(regularizers.l2(0.1))
    options['kernel_regularizer'].append(regularizers.l1_l2(0.0001))
    options['kernel_regularizer'].append(regularizers.l1_l2(0.001))
    options['kernel_regularizer'].append(regularizers.l1_l2(0.01))
    options['kernel_regularizer'].append(regularizers.l1_l2(0.1))
    ## activity_regularizer
    options['activity_regularizer'] = []
    options['activity_regularizer'].append(None)
    options['activity_regularizer'].append(regularizers.l1(0.0001))
    options['activity_regularizer'].append(regularizers.l1(0.001))
    options['activity_regularizer'].append(regularizers.l1(0.01))
    options['activity_regularizer'].append(regularizers.l1(0.1))
    options['activity_regularizer'].append(regularizers.l2(0.0001))
    options['activity_regularizer'].append(regularizers.l2(0.001))
    options['activity_regularizer'].append(regularizers.l2(0.01))
    options['activity_regularizer'].append(regularizers.l2(0.1))
    options['activity_regularizer'].append(regularizers.l1_l2(0.0001))
    options['activity_regularizer'].append(regularizers.l1_l2(0.001))
    options['activity_regularizer'].append(regularizers.l1_l2(0.01))
    options['activity_regularizer'].append(regularizers.l1_l2(0.1))
    ## bias_regularizer
    options['bias_regularizer'] = []
    options['bias_regularizer'].append(None)
    options['bias_regularizer'].append(regularizers.l1(0.0001))
    options['bias_regularizer'].append(regularizers.l1(0.001))
    options['bias_regularizer'].append(regularizers.l1(0.01))
    options['bias_regularizer'].append(regularizers.l1(0.1))
    options['bias_regularizer'].append(regularizers.l2(0.0001))
    options['bias_regularizer'].append(regularizers.l2(0.001))
    options['bias_regularizer'].append(regularizers.l2(0.01))
    options['bias_regularizer'].append(regularizers.l2(0.1))
    options['bias_regularizer'].append(regularizers.l1_l2(0.0001))
    options['bias_regularizer'].append(regularizers.l1_l2(0.001))
    options['bias_regularizer'].append(regularizers.l1_l2(0.01))
    options['bias_regularizer'].append(regularizers.l1_l2(0.1))
    ## activation
    options['activation'] = []
    options['activation'].append('relu')
    options['activation'].append('elu')
    options['activation'].append('selu')
    options['activation'].append('tanh')
    options['activation'].append('sigmoid')
    ## optimizer and learning rate
    options['optimizer'] = []
    options['optimizer'].append(
        optimizers.SGD(lr=0.001, momentum=0.0, decay=0.0, nesterov=False))
    options['optimizer'].append(
        optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False))
    options['optimizer'].append(
        optimizers.SGD(lr=0.1, momentum=0.0, decay=0.0, nesterov=False))
    options['optimizer'].append(
        optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0))
    options['optimizer'].append(
        optimizers.RMSprop(lr=0.01, rho=0.9, epsilon=None, decay=0.0))
    options['optimizer'].append(
        optimizers.RMSprop(lr=0.1, rho=0.9, epsilon=None, decay=0.0))
    options['optimizer'].append(
        optimizers.Adagrad(lr=0.001, epsilon=None, decay=0.0))
    options['optimizer'].append(
        optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0))
    options['optimizer'].append(
        optimizers.Adagrad(lr=0.1, epsilon=None, decay=0.0))
    options['optimizer'].append(
        optimizers.Adam(lr=0.001,
                        beta_1=0.9,
                        beta_2=0.999,
                        epsilon=None,
                        decay=0.0,
                        amsgrad=False))
    options['optimizer'].append(
        optimizers.Adam(lr=0.01,
                        beta_1=0.9,
                        beta_2=0.999,
                        epsilon=None,
                        decay=0.0,
                        amsgrad=False))
    options['optimizer'].append(
        optimizers.Adam(lr=0.1,
                        beta_1=0.9,
                        beta_2=0.999,
                        epsilon=None,
                        decay=0.0,
                        amsgrad=False))
    options['optimizer'].append(
        optimizers.Nadam(lr=0.002,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=None,
                         schedule_decay=0.004))
    options['optimizer'].append(
        optimizers.Nadam(lr=0.02,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=None,
                         schedule_decay=0.004))
    options['optimizer'].append(
        optimizers.Nadam(lr=0.2,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=None,
                         schedule_decay=0.004))
    ## batch size
    options['batch_size'] = [32, 128, 1024, 8192]

    return options
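`iterate_hyperparas` only returns the candidate values; one straightforward way to consume it is to draw a random configuration from every option list (a sketch, not part of the original code):

import random

options = iterate_hyperparas()
config = {key: random.choice(val) if isinstance(val, list) else val
          for key, val in options.items()}
print(config['activation'], config['batch_size'])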
Example #24
import gym
import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model as lin
from sklearn.preprocessing import PolynomialFeatures as Poly
from sklearn.preprocessing import MinMaxScaler as MMS

from keras import initializers, optimizers
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard

weight_init = initializers.RandomNormal(0, 0.3)
bias_init = initializers.Constant(0.1)
model = Sequential()
model.add(
    Dense(10,
          input_dim=1,
          activation='relu',
          kernel_initializer=weight_init,
          bias_initializer=bias_init))
model.add(
    Dense(10,
          activation='relu',
          kernel_initializer=weight_init,
          bias_initializer=bias_init))
model.add(Dense(1, kernel_initializer=weight_init, bias_initializer=bias_init))
model.compile(optimizer=optimizers.RMSprop(0.01), loss='mse')

x = np.linspace(-2 * np.pi, 2 * np.pi, 1000).reshape(-1, 1)
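The snippet stops right after defining the input grid; a plausible continuation, given the model built above, is to regress y = sin(x) and plot the fit (the epoch count and plotting details are assumptions):

y = np.sin(x)
model.fit(x, y, epochs=200, batch_size=64, verbose=0)

plt.plot(x, y, label='sin(x)')
plt.plot(x, model.predict(x), label='MLP fit')
plt.legend()
plt.show()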
Example #25
def model_train(name, input_array, output_array, training, data,
                output_index_array, with_graph):

    print('================== In model training =====================')
    '''

    layer construction : we need to tune them

    '''

    model = Sequential()
    inputLen = input_array.shape[1]
    ## provide a normal distribution
    ## we need to tune stddev for good scaling
    normal = initializers.RandomNormal(mean=0.0, stddev=0.1, seed=10)
    ## provide a uniform distribution
    ## we need to tune stddev for good scaling
    uniform = initializers.RandomUniform(minval=-0.3, maxval=0.3, seed=10)

    output_layer = Dense(units=100,
                         input_dim=input_array.shape[1],
                         kernel_initializer=normal,
                         bias_initializer=normal,
                         activation='relu')  #only positive value
    #BN is used to accelerate Sigmoid
    #BN_1 = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.01, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None)
    dense_3 = Dense(units=1000,
                    input_dim=inputLen,
                    kernel_initializer=normal,
                    bias_initializer=normal,
                    activation='relu')
    #dense_2 = Dense(units = 100, kernel_initializer=normal,
    #                bias_initializer=normal , activation = 'relu')
    dense_1 = Dense(units=output_array.shape[1],
                    kernel_initializer=normal,
                    activation='linear')
    layer_list = [output_layer, dense_3, dense_1]
    '''

    training

    '''

    ## create model
    for layer in layer_list:
        model.add(layer)

    # show the prediction for the last 5 samples to check the scaling
    print('==========showing the prediction of the last 5 samples==========')
    training['prediction'] = model.predict(input_array[-5:-1, :])
    print(training['prediction'])
    print('=========================================================')

    ##compile model
    ## lr is the learning rate: we need to tune it for good learning
    ## loss is the loss function; the metric only shows how good the training result is
    ## mse: mean squared error, mae: mean absolute error
    adam = optimizers.Adam(lr=0.001,
                           beta_1=0.5,
                           beta_2=0.999,
                           epsilon=None,
                           decay=0.02,
                           amsgrad=False)
    model.compile(optimizer=adam, loss='mse', metrics=['mae'])

    ## time evaluation:
    import time
    start_time = time.time()
    ##training mode

    ##Callbacks:
    # tensorboard viewing browser
    # checkpoint : save weight every period
    training['Callbacks'] = callbacks.TensorBoard(log_dir='./logs',
                                                  histogram_freq=10,
                                                  batch_size=32,
                                                  write_graph=True,
                                                  write_grads=False,
                                                  write_images=False,
                                                  embeddings_freq=0,
                                                  embeddings_layer_names=None,
                                                  embeddings_metadata=None)
    filepath = "log/" + name + "weights.{epoch:02d}-{val_loss:.2f}.hdf5"
    training['Checkpoint'] = callbacks.ModelCheckpoint(filepath,
                                                       monitor='val_loss',
                                                       verbose=0,
                                                       save_best_only=True,
                                                       save_weights_only=False,
                                                       mode='auto',
                                                       period=1)

    training['History'] = model.fit(
        input_array,
        output_array,
        validation_split=training['Validation_split'],
        epochs=training['Epochs'],
        batch_size=training['BatchSize'],
        verbose=1,
        callbacks=[])  #verbose for show training process

    print("trainning cost :: --- %s seconds ---" % (time.time() - start_time))
    '''

    save the result of this training

    '''
    # serialize model to JSON
    model_json = model.to_json()
    with open("log/" + name + ".json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("log/" + name + ".h5")
    print("Saved model to disk")

    if with_graph == True:
        show_train_history(name, training['History'], 'mean_absolute_error',
                           'val_mean_absolute_error')

    return model
Example #26
def buildDiscriminator(layerSize=1024, leak=.2, drop=.3, dropReduce=.05):
    global discriminator
    discriminator = Sequential()
    discriminator.add(Dense(layerSize, input_dim=IMAGE_SIZE*IMAGE_SIZE, kernel_initializer=initializers.RandomNormal(stddev=.02)))
    discriminator.add(LeakyReLU(.2))
    discriminator.add(Dropout(drop))
    discriminator.add(Dense(int(layerSize / 2)))
    discriminator.add(LeakyReLU(leak))
    discriminator.add(Dropout(drop - dropReduce))
    discriminator.add(Dense(int(layerSize / 4)))
    discriminator.add(LeakyReLU(leak))
    discriminator.add(Dropout(drop - (dropReduce * 2)))
    discriminator.add(Dense(1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=adam)
Example #27
def test_normal(tensor_shape):
    _runner(initializers.RandomNormal(mean=0, stddev=1), tensor_shape,
            target_mean=0., target_std=1)
def CompareCNN_LASSO( groundtruth, DNA_seq, counts):
    print('')    
    print('Ground Truth Motif is: ',groundtruth)
    methylation_level = []
    DNA_len100 = []
    m1 = 0
    m0 = 0
    for n in range(len(DNA_seq)):
        hasKmers = False
        for t in groundtruth:
            if t in DNA_seq[n]:
                methylation_level.append(1)
                DNA_len100.append(DNA_seq[n])
                hasKmers = True
                break
        if not hasKmers:
            methylation_level.append(0)
            DNA_len100.append(DNA_seq[n])
    print('Ratio of this motif: ',sum(methylation_level)/len(methylation_level))
    DNA = preprocess_data(DNA_len100)
    train_data,train_labels = Formalize_Data(DNA, methylation_file_path, target_length, cell_type)
    #train_labels = train_labels - np.full((train_labels.shape),np.mean(train_labels))
    #train_labels = to_categorical(np.array(methylation_level))
    train_labels = np.array(methylation_level)
    init = initializers.RandomNormal(mean=0, stddev=0.5, seed=None)
    k_r = kr.l2(1e-6)
    
    print('Start Training on CNN')
    nfilt = 1
    filtlength = 6
    num_filters = 1
    maxPsize = 100
    seqlen = target_length
    model = Sequential()
    model.add(Conv1D(filters=num_filters, kernel_size=filtlength,
                     kernel_initializer='ones', padding='same',
                     input_shape=(seqlen, 4), activation='relu'))
    model.add(GlobalAveragePooling1D())
    model.add(Dense(1, kernel_initializer='ones', activation='sigmoid'))
    model.compile(optimizer=Adam(lr=0.001),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    callbacks = [EarlyStopping(monitor='val_loss', patience=10, mode='min')]
    history = model.fit(train_data, train_labels, epochs=500, callbacks=callbacks,
                        validation_split=0.25, shuffle=False,
                        batch_size=100, verbose=0)
    print("\t CNN Train Accuracy: ", history.history['acc'][-1])
    print("\t CNN Test Accuracy: ", history.history['val_acc'][-1])


    #RUN LASSO
    x = counts.as_matrix()
    y = np.array(methylation_level)
    data = np.concatenate((x, y.reshape(-1,1)), axis=1)
    np.random.shuffle(data)
    train = data[:48000]
    test = data[48000:]
    train_features = train[:,:-1]
    train_methy_levels = train[:,-1]
    test_features = test[:,:-1]
    test_methy_levels = test[:,-1]
    
    C_list = [10**-3,10**-1,1]
    print("")
    print('Training for SVM')
    for c in C_list:
        clf = LinearSVC(penalty='l2', loss='hinge', C=c)
        clf.fit(train_features, train_methy_levels)
        a_score = clf.score(test_features, test_methy_levels)
        print('\tParameter: ', c, 'Test accuracy: ', a_score,
              'Train accuracy: ', clf.score(train_features, train_methy_levels))
        
    print("========================================================")
Example #29
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

print('Evaluate IRNN...')
model = Sequential()
model.add(
    SimpleRNN(hidden_units,
              kernel_initializer=initializers.RandomNormal(stddev=0.001),
              recurrent_initializer=initializers.Identity(gain=1.0),
              activation='relu',
              input_shape=x_train.shape[1:]))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
              optimizer=rmsprop,
              metrics=['accuracy'])

model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
Example #30
def DenseNet(blocks,
             include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000):

    if input_tensor is None:
        img_input = Input(shape=(224, 224, 3))
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    #print bn_axis
    #print K.image_data_format()
    x = ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(x)
    x = Activation('relu', name='conv1/relu')(x)
    x = ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = MaxPooling2D(3, strides=2, name='pool1')(x)

    x = dense_block(x, blocks[0], name='conv2')
    x = transition_block(x, 0.5, name='pool2')
    x = dense_block(x, blocks[1], name='conv3')
    x = transition_block(x, 0.5, name='pool3')
    x = dense_block(x, blocks[2], name='conv4')
    x = transition_block(x, 0.5, name='pool4')
    x = dense_block(x, blocks[3], name='conv5')

    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)

    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D(name='max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    '''
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    '''
    inputs = img_input
    # Create model.
    if blocks == [6, 12, 24, 16]:
        model = Model(inputs, x, name='densenet121')
    elif blocks == [6, 12, 32, 32]:
        model = Model(inputs, x, name='densenet169')
    elif blocks == [6, 12, 48, 32]:
        model = Model(inputs, x, name='densenet201')
    else:
        model = Model(inputs, x, name='densenet')

    #x = Dense(1024, activation='relu')(x)
    x = model.output
    #x = Flatten()(x)
    predictions = Dense(
        classes,
        activation='softmax',
        use_bias=False,
        kernel_initializer=initializers.RandomNormal(mean=0.0,
                                                     stddev=0.05,
                                                     seed=None),
        #bias_initializer=initializers.Zeros(),
        kernel_constraint=max_norm(5.),
        #bias_constraints=max_norm(5.),
        #kernel_regularizer=regularizers.l2(0.01),
        activity_regularizer=regularizers.l2(0.01))(x)

    denseNet_model = Model(inputs=model.input, outputs=predictions)
    return denseNet_model
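`dense_block` and `transition_block` are assumed to be defined elsewhere in the file; given that, the `blocks` list selects the variant, so a DenseNet-121-style classifier would be built as follows (a usage sketch):

densenet121 = DenseNet([6, 12, 24, 16],
                       include_top=True,
                       input_shape=(224, 224, 3),
                       classes=1000)
densenet121.summary()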