Example #1
def segnet(nClasses, optimizer=None, input_height=256, input_width=256):
    kernel = 3
    filter_size = 64
    pool_size = 2

    model = models.Sequential()
    model.add(Layer(input_shape=(input_height, input_width, 3)))

    # encoder
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(Convolution2D(128, kernel, kernel, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(Convolution2D(256, kernel, kernel, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(Convolution2D(512, kernel, kernel, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Convolution2D(512, kernel, kernel, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    # decoder
    model.add(Convolution2D(512, kernel, kernel, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Convolution2D(512, kernel, kernel, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Convolution2D(nClasses, 1, 1, border_mode='same'))

    model.outputHeight = model.output_shape[-3]
    model.outputWidth = model.output_shape[-2]

    model.add(
        Reshape((nClasses, model.outputHeight * model.outputWidth),
                input_shape=(nClasses, model.outputHeight, model.outputWidth)))

    model.add(Permute((2, 1)))
    model.add(Activation('softmax'))

    if optimizer is not None:
        model.compile(loss="categorical_crossentropy",
                      optimizer=optimizer,
                      metrics=['accuracy'])

    return model
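A hedged usage sketch for segnet() (the class count and optimizer below are illustrative assumptions, and the Keras imports the function relies on — models, Convolution2D, BatchNormalization, etc. — are assumed to be in scope):

model = segnet(nClasses=12, optimizer='adadelta', input_height=256, input_width=256)
print(model.outputHeight, model.outputWidth)  # spatial size fed into the final Reshape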
Example #2
def define_model(num_layers=1,
                 num_neurons=[16],
                 input_shape=(0, ),
                 loss="L1",
                 optimizer="RMSprop",
                 optimizer_lr=0,
                 dropout=0,
                 regularizer="L1",
                 reg_rate=0,
                 batch_norm=False):

    assert input_shape[0] != 0
    assert num_layers == len(num_neurons)

    model = models.Sequential()

    for i in range(num_layers):
        if reg_rate:
            if regularizer == "l1":
                model.add(
                    layers.Dense(
                        num_neurons[i],
                        kernel_regularizer=keras.regularizers.l1(reg_rate),
                        activation='relu',
                        input_shape=input_shape))
            elif regularizer == "l2":
                model.add(
                    layers.Dense(
                        num_neurons[i],
                        kernel_regularizer=keras.regularizers.l2(reg_rate),
                        activation='relu',
                        input_shape=input_shape))
            elif regularizer == "l1_l2":
                model.add(
                    layers.Dense(
                        num_neurons[i],
                        kernel_regularizer=keras.regularizers.l1_l2(reg_rate),
                        activation='relu',
                        input_shape=input_shape))
            else:
                print(
                    "WARNING: Invalid regularizer given. Using L1 regularization with 0.01 Regularization Rate."
                )
                model.add(
                    layers.Dense(num_neurons[i],
                                 kernel_regularizer=regularizers.l1(0.01),
                                 activation='relu',
                                 input_shape=input_shape))
        else:
            model.add(
                layers.Dense(num_neurons[i],
                             activation='relu',
                             input_shape=input_shape))

        # Optionally add dropout and batch normalization after each hidden layer.
        if dropout:
            model.add(layers.Dropout(dropout))
        if batch_norm:
            model.add(BatchNormalization())

    model.add(layers.Dense(1))

    if optimizer_lr == 0:
        optimizer_lr = 0.01

    if optimizer == "sgd":
        optimizer = optimizers.sgd(lr=optimizer_lr)
    elif optimizer == "RMSprop":
        optimizer = optimizers.RMSprop(lr=optimizer_lr)
    elif optimizer == "Adagrad":
        optimizer = optimizers.Adagrad(lr=optimizer_lr)
    else:
        print("!!WARNING: Incompatible Optimizer provided. Using RMSprop!!")
        optimizer = optimizers.RMSprop()

    #model.summary()

    loss_fn = ""
    if loss == "L1":
        loss_fn = keras.losses.mean_absolute_error
    elif loss == "L2":
        loss_fn = keras.losses.mean_squared_error
    elif loss == "logcosh":
        loss_fn = keras.losses.logcosh
    elif loss == "huber":
        loss_fn = tf.losses.huber_loss
    else:
        print(
            "!!WARNING: Incompatible loss function given. Accepted values are {L1, L2, huber, logcosh}. Using L2 loss!!"
        )
        loss_fn = losses.mean_squared_error
    model.compile(optimizer=optimizer, loss=loss_fn, metrics=["mae"])

    return model
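A hedged usage sketch for define_model (the feature count and layer sizes are illustrative assumptions; the keras/tensorflow imports the function relies on are assumed to be in scope):

model = define_model(num_layers=2,
                     num_neurons=[32, 16],
                     input_shape=(13, ),
                     loss="L2",
                     optimizer="RMSprop",
                     optimizer_lr=0.001,
                     dropout=0.2,
                     regularizer="l2",
                     reg_rate=0.01,
                     batch_norm=True)
model.summary()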
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_dogs_dir, fname)
    shutil.copyfile(src, dst)

print('total training cat images:', len(os.listdir(train_cats_dir)))
print('total training dog images:', len(os.listdir(train_dogs_dir)))
print('total validation cat images:', len(os.listdir(validation_cats_dir)))
print('total validation dog images:', len(os.listdir(validation_dogs_dir)))
print('total test cat images:', len(os.listdir(test_cats_dir)))
print('total test dog images:', len(os.listdir(test_dogs_dir)))

from keras import layers
from keras import models

model = models.Sequential()
model.add(
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
#adam optimizers
from keras import optimizers
                    min_index=trainnum,
                    max_index=None,
                    step=step,
                    batch_size=batch_size)

# Compute val_steps: how many batches to draw from val_gen
val_steps = (valnum - lookback) // batch_size
# Number of batches to draw for the training set
train_steps = trainnum // batch_size

from keras.callbacks import EarlyStopping

early_stopping = EarlyStopping(monitor="val_loss", patience=30)

# Build the model
model1 = models.Sequential()
model1.add(
    layers.Dense(512,
                 activation="relu",
                 input_shape=(lookback // step, data.shape[-1])))
model1.add(layers.Conv1D(filters=1024, kernel_size=5, activation="relu"))
model1.add(layers.MaxPooling1D(5))
model1.add(layers.Conv1D(filters=1024, kernel_size=3, activation="relu"))
model1.add(layers.GlobalMaxPool1D())
model1.add(layers.Dropout(0.5))
model1.add(layers.Dense(8, activation="softmax"))
model1.summary()

model1.compile(optimizer=optimizers.RMSprop(),
               loss="categorical_crossentropy",
               metrics=["acc"])
# -*- coding:utf-8 -*-

from keras.datasets import mnist
from keras import models

from keras import layers

(train_image, train_labels), (test_images, test_labels) = mnist.load_data()

# print(train_image.shape)
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28, )))
network.add(layers.Dense(10, activation='softmax'))

# Compilation step
network.compile(optimizer='rmsprop',
                loss='categorical_crossentropy',
                metrics=['accuracy'])

# Image data preprocessing
train_image = train_image.reshape((60000, 28 * 28))
train_image = train_image.astype('float32') / 255

test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255

# One-hot encode the labels
from keras.utils import to_categorical

train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
Example #6
def vectorize_sequence(sequences, dimension=10000):
    results = np.zeros((len(sequences), dimension))

    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1
    return results


# vectorize
x_train = vectorize_sequence(train_data)
x_test = vectorize_sequence(test_data)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')

# original model
original_model = models.Sequential()
original_model.add(layers.Dense(16, activation='relu', input_shape=(10000, )))
original_model.add(layers.Dense(16, activation='relu'))
original_model.add(layers.Dense(1, activation='sigmoid'))
original_model.compile(optimizer='rmsprop',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

original_hist = original_model.fit(x_train,
                                   y_train,
                                   epochs=20,
                                   batch_size=512,
                                   validation_data=(x_test, y_test))

# Model with L2 regularization added
l2_model = models.Sequential()
Example #7
def CapsNet(input_shape, n_class, routings):
    """
    A Capsule Network on MNIST.
    :param input_shape: data shape, 3d, [width, height, channels]
    :param n_class: number of classes
    :param routings: number of routing iterations
    :return: Two Keras Models, the first one used for training, and the second one for evaluation.
            `eval_model` can also be used for training.
    """
    x = layers.Input(shape=input_shape)

    # Layer 1: Just a conventional Conv2D layer
    conv1 = layers.Conv2D(filters=256,
                          kernel_size=9,
                          strides=1,
                          padding='valid',
                          activation='relu',
                          name='conv1')(x)

    # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
    primarycaps = PrimaryCap(conv1,
                             dim_capsule=8,
                             n_channels=32,
                             kernel_size=9,
                             strides=2,
                             padding='valid')

    # Layer 3: Capsule layer. Routing algorithm works here.
    digitcaps = CapsuleLayer(num_capsule=n_class,
                             dim_capsule=16,
                             routings=routings,
                             name='digitcaps')(primarycaps)

    # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
    # If using tensorflow, this will not be necessary. :)
    out_caps = Length(name='capsnet')(digitcaps)

    # Decoder network.
    y = layers.Input(shape=(n_class, ))
    masked_by_y = Mask()(
        [digitcaps, y]
    )  # The true label is used to mask the output of capsule layer. For training
    masked = Mask(
    )(digitcaps)  # Mask using the capsule with maximal length. For prediction

    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(512, activation='relu', input_dim=16 * n_class))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))

    # Models for training and evaluation (prediction)
    train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
    eval_model = models.Model(x, [out_caps, decoder(masked)])

    # manipulate model
    noise = layers.Input(shape=(n_class, 16))
    noised_digitcaps = layers.Add()([digitcaps, noise])
    masked_noised_y = Mask()([noised_digitcaps, y])
    manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
    return train_model, eval_model, manipulate_model
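A hedged usage sketch with MNIST-shaped inputs as in the docstring (assumes PrimaryCap, CapsuleLayer, Length and Mask are importable from the accompanying capsule-layer module):

train_model, eval_model, manipulate_model = CapsNet(input_shape=(28, 28, 1),
                                                    n_class=10,
                                                    routings=3)
train_model.summary()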
Example #8
seg_model.load_weights(weight_path)
seg_model.save('seg_model.h5')

pred_y = seg_model.predict(valid_x)
print(pred_y.shape, pred_y.min(axis=0).max(), pred_y.max(axis=0).min(), pred_y.mean())

fig, ax = plt.subplots(1, 1, figsize = (6, 6))
ax.hist(pred_y.ravel(), np.linspace(0, 1, 20))
ax.set_xlim(0, 1)
ax.set_yscale('log', nonposy='clip')




if IMG_SCALING is not None:
    fullres_model = models.Sequential()
    fullres_model.add(layers.AvgPool2D(IMG_SCALING, input_shape = (None, None, 3)))
    fullres_model.add(seg_model)
    fullres_model.add(layers.UpSampling2D(IMG_SCALING))
else:
    fullres_model = seg_model
fullres_model.save('fullres_model.h5')




def raw_prediction(img, path=test_image_dir):
    c_img = imread(os.path.join(path, img))
    c_img = np.expand_dims(c_img, 0)/255.0
    cur_seg = fullres_model.predict(c_img)[0]
    return cur_seg, c_img[0]
Example #9
def build_model(**params):
    # TODO: get all these from **params
    CNN = 'resnet'
    INCLUDE_TOP = False
    LEARNABLE_CNN_LAYERS = params['learnable_cnn_layers']
    RNN_TYPE = 'LSTM'
    RNN_SIZE = 1024
    WORDVEC_SIZE = params['wordvec_size']
    ACTIVATION = 'relu'
    USE_CGRU = params['use_cgru']
    CGRU_SIZE = params['cgru_size']
    REDUCE_MEAN = params['reduce_visual']
    max_words = params['max_words']

    if CNN == 'vgg16':
        cnn = applications.vgg16.VGG16(include_top=INCLUDE_TOP)
    elif CNN == 'resnet':
        cnn = applications.resnet50.ResNet50(include_top=INCLUDE_TOP)
        # Pop the mean pooling layer
        cnn = models.Model(inputs=cnn.inputs, outputs=cnn.layers[-2].output)

    for layer in cnn.layers[:-LEARNABLE_CNN_LAYERS]:
        layer.trainable = False

    # Context Vector input
    # normalized to [0,1] the values:
    # left, top, right, bottom, (box area / image area)
    input_ctx = layers.Input(shape=(5, ))
    ctx = layers.BatchNormalization()(input_ctx)
    repeat_ctx = layers.RepeatVector(max_words)(ctx)

    # Global Image featuers (convnet output for the whole image)
    input_img_global = layers.Input(shape=(IMG_HEIGHT, IMG_WIDTH,
                                           IMG_CHANNELS))
    image_global = cnn(input_img_global)

    # Add a residual CGRU layer
    if USE_CGRU:
        image_global = layers.Conv2D(CGRU_SIZE, (1, 1),
                                     padding='same',
                                     activation='relu')(image_global)
        res_cgru = SpatialCGRU(image_global, CGRU_SIZE)
        image_global = layers.add([image_global, res_cgru])

    if REDUCE_MEAN:
        image_global = layers.Lambda(lambda x: tf.reduce_mean(x, axis=1))(
            image_global)
        image_global = layers.Lambda(lambda x: tf.reduce_mean(x, axis=1))(
            image_global)
    else:
        image_global = layers.Conv2D(WORDVEC_SIZE // 4, (3, 3),
                                     activation='relu')(image_global)
        image_global = layers.Conv2D(WORDVEC_SIZE // 2, (3, 3),
                                     activation='relu')(image_global)
        image_global = layers.Flatten()(image_global)

    image_global = layers.Concatenate()([image_global, ctx])
    image_global = layers.Dense(1024, activation='relu')(image_global)

    image_global = layers.BatchNormalization()(image_global)
    image_global = layers.Dense(WORDVEC_SIZE // 2,
                                activation=ACTIVATION)(image_global)
    image_global = layers.BatchNormalization()(image_global)
    image_global = layers.RepeatVector(max_words)(image_global)

    # Local Image featuers (convnet output for just the bounding box)
    input_img_local = layers.Input(shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
    image_local = cnn(input_img_local)

    if USE_CGRU:
        image_local = layers.Conv2D(CGRU_SIZE, (1, 1),
                                    padding='same',
                                    activation='relu')(image_local)
        res_cgru = SpatialCGRU(image_local, CGRU_SIZE)
        image_local = layers.add([image_local, res_cgru])

    if REDUCE_MEAN:
        image_local = layers.Lambda(lambda x: tf.reduce_mean(x, axis=1))(
            image_local)
        image_local = layers.Lambda(lambda x: tf.reduce_mean(x, axis=1))(
            image_local)
    else:
        image_local = layers.Conv2D(WORDVEC_SIZE // 4, (3, 3),
                                    activation='relu')(image_local)
        image_local = layers.Conv2D(WORDVEC_SIZE // 2, (3, 3),
                                    activation='relu')(image_local)
        image_local = layers.Flatten()(image_local)

    image_local = layers.Concatenate()([image_local, ctx])
    image_local = layers.Dense(1024, activation='relu')(image_local)

    image_local = layers.BatchNormalization()(image_local)
    image_local = layers.Dense(WORDVEC_SIZE // 2,
                               activation=ACTIVATION)(image_local)
    image_local = layers.BatchNormalization()(image_local)
    image_local = layers.RepeatVector(max_words)(image_local)

    language_model = models.Sequential()

    input_words = layers.Input(shape=(max_words, ), dtype='int32')
    language = layers.Embedding(words.VOCABULARY_SIZE,
                                WORDVEC_SIZE,
                                input_length=max_words)(input_words)

    x = layers.concatenate([image_global, image_local, repeat_ctx, language])
    if RNN_TYPE == 'LSTM':
        x = layers.LSTM(RNN_SIZE)(x)
    else:
        x = layers.GRU(RNN_SIZE)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(words.VOCABULARY_SIZE, activation='softmax')(x)

    return models.Model(
        inputs=[input_img_global, input_img_local, input_words, input_ctx],
        outputs=x)
Example #10
    def getModel(self, modelNo, learningRate=0.001):

        model_input = layers.Input(shape=(28, 28, 1))

        if modelNo == 1:
            #model 1
            model = models.Sequential()
            model.add(
                layers.Conv2D(16, (3, 3),
                              activation=activations.relu,
                              input_shape=(28, 28, 1)))
            model.add(layers.MaxPooling2D((2, 2)))
            model.add(layers.Conv2D(32, (3, 3), activation=activations.relu))
            model.add(layers.MaxPooling2D((2, 2)))
            model.add(layers.Conv2D(32, (3, 3), activation=activations.relu))
            model.add(layers.Flatten())
            model.add(layers.Dense(32, activation=activations.relu))
            model.add(
                layers.Dense(self.outputShape, activation=activations.softmax))
            model.summary()
            model.compile(optimizer=optimizers.rmsprop(lr=learningRate),
                          loss=losses.categorical_crossentropy,
                          metrics=[metrics.categorical_accuracy])

            model.name = "basic CNN, 32"
            return model

        if modelNo == 2:
            #model 1
            model = models.Sequential()
            model.add(
                layers.Conv2D(32, (3, 3),
                              activation=activations.relu,
                              input_shape=(28, 28, 1)))
            model.add(layers.MaxPooling2D((2, 2)))
            model.add(layers.Conv2D(64, (3, 3), activation=activations.relu))
            model.add(layers.MaxPooling2D((2, 2)))
            model.add(layers.Conv2D(64, (3, 3), activation=activations.relu))
            model.add(layers.Flatten())
            model.add(layers.Dense(64, activation=activations.relu))
            model.add(
                layers.Dense(self.outputShape, activation=activations.softmax))
            model.summary()
            model.compile(optimizer=optimizers.rmsprop(lr=learningRate),
                          loss=losses.categorical_crossentropy,
                          metrics=[metrics.categorical_accuracy])

            model.name = "basic CNN, 64"
            return model

        if modelNo == 3:
            return self.getConvPoolCNNCModel(model_input, learningRate)
        if modelNo == 4:
            return self.getAllCNNC(model_input, learningRate)
        if modelNo == 5:
            return self.NINCNN(model_input, learningRate)

        if modelNo == 6:
            #model 1
            model = models.Sequential()
            model.add(
                layers.Conv2D(32, (3, 3),
                              activation=activations.relu,
                              input_shape=(28, 28, 1)))
            model.add(layers.MaxPooling2D((2, 2)))
            model.add(layers.Dropout(0.2))

            model.add(layers.Conv2D(64, (3, 3), activation=activations.relu))
            model.add(layers.MaxPooling2D((2, 2)))

            model.add(layers.Conv2D(64, (3, 3), activation=activations.relu))
            model.add(layers.Flatten())

            model.add(layers.Dense(64, activation=activations.relu))
            model.add(
                layers.Dense(self.outputShape, activation=activations.softmax))

            model.summary()
            model.compile(optimizer=optimizers.rmsprop(lr=learningRate),
                          loss=losses.categorical_crossentropy,
                          metrics=[metrics.categorical_accuracy])

            model.name = "basic CNN, 64, Dropout 0.2"
            return model

        if modelNo == 7:
            #model 1
            model = models.Sequential()
            model.add(
                layers.Conv2D(32, (3, 3),
                              kernel_regularizer=regularizers.l2(0.01),
                              activation=activations.relu,
                              input_shape=(28, 28, 1)))
            model.add(layers.MaxPooling2D((2, 2)))

            model.add(
                layers.Conv2D(64, (3, 3),
                              kernel_regularizer=regularizers.l2(0.01),
                              activation=activations.relu))
            model.add(layers.MaxPooling2D((2, 2)))

            model.add(
                layers.Conv2D(64, (3, 3),
                              kernel_regularizer=regularizers.l2(0.01),
                              activation=activations.relu))

            model.add(layers.Flatten())
            model.add(layers.Dense(64, activation=activations.relu))
            model.add(
                layers.Dense(self.outputShape, activation=activations.softmax))

            model.summary()
            model.compile(optimizer=optimizers.rmsprop(lr=learningRate),
                          loss=losses.categorical_crossentropy,
                          metrics=[metrics.categorical_accuracy])

            model.name = "basic CNN, 64, L2 .01"
            return model

        if modelNo == 8:
            #model 1
            model = models.Sequential()
            model.add(
                layers.Conv2D(32, (3, 3),
                              kernel_regularizer=regularizers.l2(0.01),
                              activation=activations.relu,
                              input_shape=(28, 28, 1)))
            model.add(layers.MaxPooling2D((2, 2)))
            model.add(layers.Dropout(0.2))

            model.add(
                layers.Conv2D(64, (3, 3),
                              kernel_regularizer=regularizers.l2(0.01),
                              activation=activations.relu))
            model.add(layers.MaxPooling2D((2, 2)))
            model.add(layers.Dropout(0.2))

            model.add(
                layers.Conv2D(64, (3, 3),
                              kernel_regularizer=regularizers.l2(0.01),
                              activation=activations.relu))

            model.add(layers.Flatten())
            model.add(layers.Dense(64, activation=activations.relu))
            model.add(
                layers.Dense(self.outputShape, activation=activations.softmax))

            model.summary()
            model.compile(optimizer=optimizers.rmsprop(lr=learningRate),
                          loss=losses.categorical_crossentropy,
                          metrics=[metrics.categorical_accuracy])

            model.name = "basic CNN, 64, Dropout 0.2, L2 .01"
            return model

        if modelNo == 9:
            return self.wideNet(model_input, learningRate)
        if modelNo == 10:
            model = ResnetBuilder.build_resnet_18((1, 28, 28),
                                                  self.outputShape)
            model.summary()
            model.compile(optimizer=optimizers.rmsprop(lr=learningRate),
                          loss=losses.categorical_crossentropy,
                          metrics=[metrics.categorical_accuracy])

            model.name = "build_resnet_18"
            return model
        if modelNo == 11:
            model = ResnetBuilder.build_resnet_50((1, 28, 28),
                                                  self.outputShape)
            model.summary()
            model.compile(optimizer=optimizers.rmsprop(lr=learningRate),
                          loss=losses.categorical_crossentropy,
                          metrics=[metrics.categorical_accuracy])

            model.name = "build_resnet_50"
            return model
Example #11
def Conv1DRegressorIn1(flag):
    K.clear_session()
    current_neighbor = space['neighbor']
    current_idx_idx = space['idx_idx']
    current_batch_size = space['batch_size']

    current_conv1D_filter_num1 = space['conv1D_filter_num1']
    current_conv1D_filter_num2 = space['conv1D_filter_num2']
    current_conv1D_filter_num3 = space['conv1D_filter_num3']
    current_dropout_rate_dense = space['dropout_rate_dense']

    summary = True
    verbose = 0

    #
    # setHyperParams
    #
    ## hypers for data
    neighbor = {{choice([50, 60, 70, 80, 90, 100, 110, 120, 130, 140])}}
    idx_idx = {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8])}}
    idx_lst = [
        [x for x in range(158) if x not in [24, 26]],  # drop useless features
        [
            x for x in range(158) if x not in [24, 26] +
            [x for x in range(1, 6)] + [x for x in range(16, 22)] + [40, 42]
        ],  # drop useless features + redundant features
        [
            x for x in range(158)
            if x not in [24, 26] + [x for x in range(0, 22)]
        ],  # drop useless features + orientation features
        [x for x in range(158)
         if x not in [24, 26] + [22, 23, 26, 37, 38]],  # drop useless features + depth features
        [
            x for x in range(158) if x not in [24, 26] +
            [x for x in range(27, 37)] + [x for x in range(40, 46)]
        ],  # drop useless features + secondary-structure info
        # [x for x in range(158) if x not in [24, 26] + [x for x in range(27, 34)] + [x for x in range(40, 46)]],  # drop useless features + secondary-structure info 1
        # [x for x in range(158) if x not in [24, 26] + [x for x in range(34, 37)] + [x for x in range(40, 46)]],  # drop useless features + secondary-structure info 2
        [x for x in range(158) if x not in [24, 26] + [46, 47]],  # drop useless features + experimental conditions
        [
            x for x in range(158) if x not in [24, 26] + [39] +
            [x for x in range(57, 61)] + [x for x in range(48, 57)] +
            [x for x in range(61, 81)] + [x for x in range(140, 155)]
        ],  # drop useless features + all atom encodings
        # [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(48, 57)] + [x for x in range(140, 145)]],  # drop useless features + atom encoding 1
        # [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(61, 77)] + [x for x in range(145, 153)]],  # drop useless features + atom encoding 2
        # [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(77, 81)] + [x for x in range(153, 155)]],  # drop useless features + atom encoding 3
        [
            x for x in range(158)
            if x not in [24, 26] + [x for x in range(81, 98)]
        ],  # drop useless features + rosetta_energy
        [
            x for x in range(158) if x not in [24, 26] +
            [x for x in range(98, 140)] + [x for x in range(155, 158)]
        ]  # drop useless features + msa
    ]
    idx = idx_lst[idx_idx]
    ## hypers for net
    lr = 1e-4  # 0.0001
    batch_size = {{choice([1, 32, 64, 128])}}
    epochs = 200
    conv1D_filter_num1 = {{choice([16, 32])}}
    conv1D_filter_num2 = {{choice([16, 32, 64])}}
    conv1D_filter_num3 = {{choice([32, 64])}}
    dropout_rate_dense = {{choice([0.1, 0.2, 0.3, 0.4, 0.5])}}
    metrics = ('mae', pearson_r, rmse)

    def _data(fold_num, neighbor, idx):
        train_data_pth = '/dl/sry/mCNN/dataset/deepddg/npz/wild/cross_valid/cro_fold%s_train_center_CA_PCA_False_neighbor_140.npz' % fold_num
        val_data_pth = '/dl/sry/mCNN/dataset/deepddg/npz/wild/cross_valid/cro_fold%s_valid_center_CA_PCA_False_neighbor_140.npz' % fold_num

        ## train data
        train_data = np.load(train_data_pth)
        x_train = train_data['x']
        y_train = train_data['y']
        ddg_train = train_data['ddg'].reshape(-1)
        ## select kneighbor atoms
        x_train_kneighbor_lst = []
        for sample in x_train:
            dist_arr = sample[:, 0]
            indices = sorted(dist_arr.argsort()[:neighbor])
            x_train_kneighbor_lst.append(sample[indices, :])
        x_train = np.array(x_train_kneighbor_lst)
        ## idx
        x_train = x_train[:, :, idx]

        ## val data
        val_data = np.load(val_data_pth)
        x_val = val_data['x']
        y_val = val_data['y']
        ddg_val = val_data['ddg'].reshape(-1)
        ## select kneighbor atoms
        x_val_kneighbor_lst = []
        for sample in x_val:
            dist_arr = sample[:, 0]
            indices = sorted(dist_arr.argsort()[:neighbor])
            x_val_kneighbor_lst.append(sample[indices, :])
        x_val = np.array(x_val_kneighbor_lst)
        ##  idx
        x_val = x_val[:, :, idx]

        # sort row default is chain, pass

        # reshape and one-hot
        y_train = to_categorical(y_train)
        y_val = to_categorical(y_val)
        # normalization
        train_shape = x_train.shape
        val_shape = x_val.shape
        col_train = train_shape[-1]
        col_val = val_shape[-1]
        x_train = x_train.reshape((-1, col_train))
        x_val = x_val.reshape((-1, col_val))
        mean = x_train.mean(axis=0)
        std = x_train.std(axis=0)
        std[np.argwhere(std == 0)] = 0.01
        x_train -= mean
        x_train /= std
        x_val -= mean
        x_val /= std
        x_train = x_train.reshape(train_shape)
        x_val = x_val.reshape(val_shape)
        print('x_train: %s'
              '\ny_train: %s'
              '\nddg_train: %s'
              '\nx_val: %s'
              '\ny_val: %s'
              '\nddg_val: %s' % (x_train.shape, y_train.shape, ddg_train.shape,
                                 x_val.shape, y_val.shape, ddg_val.shape))
        return x_train, y_train, ddg_train, x_val, y_val, ddg_val

    #
    # cross_valid
    #
    hyper_param_tag = '%s_%s_%s_%s_%s_%s_%s' % (
        current_neighbor, current_idx_idx, current_batch_size,
        current_conv1D_filter_num1, current_conv1D_filter_num2,
        current_conv1D_filter_num3, current_dropout_rate_dense)
    modeldir = '/dl/sry/projects/from_hp/mCNN/src/Network/deepddg/opt_all_simpleNet_v4/model/%s-%s' % (
        hyper_param_tag, time.strftime("%Y.%m.%d.%H.%M.%S", time.localtime()))
    os.makedirs(modeldir, exist_ok=True)
    opt_lst = []

    for k_count in range(1, 11):
        print('\n** fold %s is processing **\n' % k_count)
        filepth = '%s/fold_%s_weights-best.h5' % (modeldir, k_count)
        my_callbacks = [
            callbacks.ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.33,
                patience=5,
                verbose=verbose,
                mode='min',
                min_lr=1e-8,
            ),
            callbacks.EarlyStopping(monitor='val_loss',
                                    patience=10,
                                    verbose=verbose),
            callbacks.ModelCheckpoint(filepath=filepth,
                                      monitor='val_mean_absolute_error',
                                      verbose=verbose,
                                      save_best_only=True,
                                      mode='min',
                                      save_weights_only=True)
        ]

        x_train, y_train, ddg_train, x_val, y_val, ddg_val = _data(
            k_count, neighbor, idx)
        row_num, col_num = x_train.shape[1:3]
        #
        # build net
        #
        network = models.Sequential()
        network.add(
            layers.SeparableConv1D(filters=conv1D_filter_num1,
                                   kernel_size=5,
                                   activation='relu',
                                   input_shape=(row_num, col_num)))
        network.add(layers.MaxPooling1D(pool_size=2))
        network.add(
            layers.SeparableConv1D(filters=conv1D_filter_num2,
                                   kernel_size=5,
                                   activation='relu'))
        network.add(layers.MaxPooling1D(pool_size=2))
        network.add(
            layers.SeparableConv1D(filters=conv1D_filter_num3,
                                   kernel_size=3,
                                   activation='relu'))
        network.add(layers.MaxPooling1D(pool_size=2))
        network.add(layers.Flatten())
        network.add(layers.Dense(128, activation='relu'))
        network.add(layers.Dropout(dropout_rate_dense))
        network.add(layers.Dense(16, activation='relu'))
        network.add(layers.Dropout(0.3))
        network.add(layers.Dense(1))
        if summary:
            trainable_count = int(
                np.sum([
                    K.count_params(p) for p in set(network.trainable_weights)
                ]))
            non_trainable_count = int(
                np.sum([
                    K.count_params(p)
                    for p in set(network.non_trainable_weights)
                ]))

            print('Total params: {:,}'.format(trainable_count +
                                              non_trainable_count))
            print('Trainable params: {:,}'.format(trainable_count))
            print('Non-trainable params: {:,}'.format(non_trainable_count))
            # print(network.summary())
        # rmsp = optimizers.RMSprop(lr=0.0001,  decay=0.1)
        rmsp = optimizers.RMSprop(lr=lr)
        network.compile(
            optimizer=rmsp,  # 'rmsprop',  # SGD,adam,rmsprop
            loss='mae',
            metrics=list(metrics))  # mae: mean absolute error
        result = network.fit(
            x=x_train,
            y=ddg_train,
            batch_size=batch_size,
            epochs=epochs,
            verbose=verbose,
            callbacks=my_callbacks,
            validation_data=(x_val, ddg_val),
            shuffle=True,
        )
        # print('\n----------History:\n%s'%result.history)
        #
        # save
        #
        save_train_cv(network, modeldir, result.history, k_count)
        opt_lst.append(np.mean(
            result.history['val_mean_absolute_error'][-10:]))
    opt_loss = np.mean(opt_lst)
    #
    # print hyper combination group and current loss value
    #
    print('\n@current_hyper_tag: %s'
          '\n@current optmized_loss: %s' % (hyper_param_tag, opt_loss))
    # return {'loss': validation_loss, 'status': STATUS_OK, 'model':model}
    return {'loss': opt_loss, 'status': STATUS_OK}
Example #12
def buildGRUNet(shape):
    model = models.Sequential()
    model.add(layers.GRU(32, input_shape=shape))
    model.add(layers.Dense(1))
    model.summary()
    return model
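A hedged usage sketch for buildGRUNet (the timestep and feature counts are illustrative assumptions):

gru_net = buildGRUNet(shape=(120, 14))  # (timesteps, features), hypothetical values
gru_net.compile(optimizer='rmsprop', loss='mae')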
Example #13
a, b = zip(*c)
a_array, b_array = np.array(a), np.array(b)

# partition data into train, validation, and test data
train_stim = a_array[:1500, :, :, :]
val_stim = a_array[1501:1751, :, :, :]
test_stim = a_array[1751:1999, :, :, :]
train_labels = b_array[:1500]
val_labels = b_array[1501:1751]
test_labels = b_array[1751:1999]

#building custom model for the analysis.

for epochs in [100, 500, 1000, 1500]:
    for n_hidden in range(5, 10):
        cont_model = models.Sequential()
        cont_model.add(layers.Conv2D(2**n_hidden, (3, 3), activation='relu', input_shape=(224, 224, 3)))
        cont_model.add(layers.MaxPooling2D((2, 2)))
        cont_model.add(layers.Conv2D(2**(n_hidden + 1), (3, 3), activation='relu'))
        cont_model.add(layers.MaxPooling2D((2, 2)))
        cont_model.add(layers.Conv2D(2**(n_hidden + 2), (3, 3), activation='relu'))
        cont_model.add(layers.MaxPooling2D((2, 2)))
        cont_model.add(layers.Conv2D(2**(n_hidden + 3), (3, 3), activation='relu'))
        cont_model.add(layers.MaxPooling2D((2, 2)))
        cont_model.add(layers.Flatten())
        cont_model.add(layers.Dropout(0.5))
        cont_model.add(layers.Dense(512, activation='relu'))
        cont_model.add(layers.Dense(1, activation='sigmoid'))

        cont_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    def fit(self, train_df, duration_col='LOS', event_col='OUT'):
        """
        :param train_df: DataFrame, with the duration and the event column
        :param duration_col: the column name for duration
        :param event_col: the column name for event
        """
        self._duration_col = duration_col
        self._event_col = event_col
        train_x = train_df.drop(columns=[duration_col, event_col]).values
        train_y = train_df[[duration_col, event_col]].values
        x_standardized = self.standard_scaler.fit_transform(train_x)

        def coxph_partial_log_likelihood_batch(y_true, y_pred, batch_size):
            # y_pred in this context consists of each feature vector dotted with
            #  beta, with a 1 padded
            y_observed_times = y_true[:, 0]
            y_event_indicators = y_true[:, 1]

            R_batch = K.cast(
                K.greater_equal(
                    K.repeat_elements(K.expand_dims(y_observed_times, axis=0),
                                      batch_size, 0),
                    K.repeat_elements(K.expand_dims(y_observed_times, axis=-1),
                                      batch_size, -1)), 'float32')

            x_transpose_beta = y_pred[:, 0]
            return -K.mean((x_transpose_beta - K.log(
                K.flatten(
                    K.dot(R_batch,
                          K.expand_dims(K.exp(x_transpose_beta), axis=-1))))) *
                           y_event_indicators)

        # yes, the code works even when batch size is not the full dataset
        batch_size = len(train_x)
        coxph_partial_log_likelihood = lambda y_true, y_pred: \
                coxph_partial_log_likelihood_batch(y_true, y_pred, batch_size)

        l1_weight = self.lmbda * self.alpha
        l2_weight = self.lmbda * (1 - self.alpha) / 2.
        coxph_neural = models.Sequential()
        coxph_neural.add(
            layers.Dense(self.first_layer_size,
                         activation=None,
                         input_shape=(x_standardized.shape[1], ),
                         use_bias=True))
        coxph_neural.add(
            layers.Dense(1,
                         activation=None,
                         input_shape=(self.first_layer_size, ),
                         kernel_regularizer=regularizers.L1L2(
                             l1_weight, l2_weight),
                         use_bias=False))
        coxph_neural.add(
            layers.Lambda(
                lambda x: K.concatenate([x, K.ones_like(x)], axis=-1)))

        if self.verbose:
            coxph_neural.summary()
        coxph_neural.compile(optimizer='Adam',
                             loss=coxph_partial_log_likelihood)
        coxph_neural.fit(x_standardized,
                         train_y,
                         epochs=self.epochs,
                         batch_size=batch_size,
                         verbose=self.verbose)

        self.dense_params = coxph_neural.get_weights()[0]
        self.bias = coxph_neural.get_weights()[1].flatten()
        self.betas = coxph_neural.get_weights()[2].flatten()

        observed_times = train_y[:, 0]
        event_indicators = train_y[:, 1]
        transformed_train_x = x_standardized.dot(self.dense_params) + self.bias

        # For each observed time, how many times the event occurred
        event_counts = Counter()
        for t, r in zip(observed_times, event_indicators):
            event_counts[t] += int(r)

        # Sorted list of observed times
        self.sorted_unique_times = np.sort(list(event_counts.keys()))
        self.num_unique_times = len(self.sorted_unique_times)
        self.log_baseline_hazard = np.zeros(self.num_unique_times)

        # Calculate the log baseline hazard
        for time_idx, t in enumerate(self.sorted_unique_times):
            logsumexp_args = []
            for subj_idx, observed_time in enumerate(observed_times):
                if observed_time >= t:
                    logsumexp_args.append(
                        np.inner(self.betas, transformed_train_x[subj_idx]))
            if event_counts[t] > 0:
                self.log_baseline_hazard[time_idx] \
                    = np.log(event_counts[t]) - logsumexp(logsumexp_args)
            else:
                self.log_baseline_hazard[time_idx] \
                    = -np.inf - logsumexp(logsumexp_args)
Example #15
File: univ_tester.py  Project: ricaiu/PhD
x1_test = tf.expand_dims(x1_test, axis=-1)
x2_test = tf.expand_dims(x2_test, axis=-1)

T = []
for i in range(Nconf):
    T.append([0,1])#2d
for i in range(Nconf):
    T.append([1,0])#3d


y_label = np.array(T)



network = models.Sequential() #initialize the neural network


network.add(layers.Conv2D(Channel, kernel_size=Nf, strides=Stride, activation='relu', input_shape=input_shape))
#network.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
#network.add(layers.Conv2D(Channel, kernel_size=Nf, strides=Stride, activation='relu', padding='valid'))



# dropout: randomly switches off some neurons
indexlayer = 0
if dropout:
    network.add(layers.Dropout(dropout_const))
    indexlayer += 1
network.add(layers.Flatten())
Example #16
from keras import layers
from keras import models
from keras.datasets import cifar10
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np

model1 = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(32, activation='relu'),
    layers.Dense(3, activation='softmax')
])
model1.summary()
print("")

model2 = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(32, activation='relu'),
    layers.Dense(3, activation='softmax')
])
model2.summary()
print("")

model3 = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
def CapsNet(input_shape, n_class, num_routing):

    #Input
    x = layers.Input(shape=input_shape)
    #conv_1
    conv_1 = layers.Conv2D(filters=256,
                           kernel_size=3,
                           strides=1,
                           padding='valid')(x)
    conv_1 = layers.BatchNormalization(momentum=0.9)(conv_1)
    conv_1 = layers.Activation('relu')(conv_1)
    # conv_2
    conv_2 = layers.Conv2D(filters=64,
                           kernel_size=3,
                           strides=2,
                           padding='valid')(conv_1)
    conv_2 = layers.BatchNormalization(momentum=0.9)(conv_2)
    conv_2 = layers.Activation('relu')(conv_2)

    primarycaps = PrimaryCap(conv_2,
                             dim_capsule=8,
                             n_channels=32,
                             kernel_size=7,
                             strides=1,
                             padding='valid')

    digtalcaps = CapsuleLayer(num_capsule=n_class,
                              dim_capsule=16,
                              routings=num_routing,
                              name='digitcaps')(primarycaps)

    # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
    # If using tensorflow, this will not be necessary. :)
    outcaps = Length(name='outcaps')(digtalcaps)

    #decoder network
    label = layers.Input(shape=(n_class, ))
    masked_by_label = Mask()(
        [digtalcaps, label]
    )  #The true label is used to mask the output of capsule layer. For training
    masked = Mask(
    )(digtalcaps)  # Mask using the capsule with maximal length. For prediction
    #print(masked.shape)

    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name='decoder')
    decoder.add(
        layers.Dense(512,
                     activation='relu',
                     input_dim=16 * n_class,
                     kernel_regularizer=regularizers.l2(0.01)))
    decoder.add(
        layers.Dense(1024,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.001)))
    decoder.add(layers.Dense(np.prod(input_shape),
                             activation='sigmoid'))  # np.prod multiplies the shape's elements, i.e. the total number of pixels
    decoder.add(
        layers.Reshape(target_shape=input_shape, name='out_reconstruction'))

    #models for training and evaluation(prediction)
    train_model = models.Model([x, label], [outcaps, decoder(masked_by_label)])
    eval_model = models.Model(x, [outcaps, decoder(masked)])
    """
    #manipulate model
    noise = layers.Input(shape=(n_class,16))
    print([noise.shape,digtalcaps.shape])
    noise_digitcaps = layers.Add()([digtalcaps,noise])
    masked_noised_y = Mask()([noise_digitcaps,label])
    manipulate_model = models.Model([x,label,noise],decoder(masked_noised_y))
    """

    return train_model, eval_model
    car_label_strlist = np.loadtxt("car_label3.csv",
                                   delimiter=',',
                                   dtype='str')
    #load vgg16
    pre_model = VGG16(weights='imagenet',
                      include_top=False,
                      input_shape=(224, 224, 3),
                      backend=keras.backend,
                      layers=keras.layers,
                      models=keras.models,
                      utils=keras.utils)
    pre_model.trainable = True
    pre_model.summary()

    # add output layer for VGG16 output (4096 -> 1000 => 4096 -> 1000 -> 100)
    vgg_model = models.Sequential()
    vgg_model.add(pre_model)
    vgg_model.add(layers.Flatten())
    vgg_model.add(layers.Dense(4096, activation='relu'))
    vgg_model.add(layers.Dense(1024, activation='relu'))
    vgg_model.add(layers.Dense(20, activation='softmax'))  # practical

    vgg_model.summary()
    vgg_model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.RMSprop(lr=2e-5),
                      metrics=['acc'])
    vgg_model.load_weights("y_seventh_weights.h5")

    #vgg_model.load_weights("CarDatabaseShare/y_up_car_weight.hdf5")'''
    # Load Yolo
    net = cv.dnn.readNet("CarDatabaseShare/yolov3.weights",
Example #19
    output.extend([hotlocalsingles(i) for i in filename[-8:-4]])

chars = np.array(chars)[:, :, :, np.newaxis]
output = np.array(output)
print(str(chars.shape))
print(str(output.shape))


def reset_weights(model):
    session = backend.get_session()
    for layer in model.layers:
        if hasattr(layer, 'kernel_initializer'):
            layer.kernel.initializer.run(session=session)


conv_model = models.Sequential()
conv_model.add(
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(160, 106, 1)))
conv_model.add(layers.MaxPooling2D((2, 2)))
conv_model.add(layers.Conv2D(64, (3, 3), activation='relu'))
conv_model.add(layers.MaxPooling2D((2, 2)))
conv_model.add(layers.Conv2D(128, (3, 3), activation='relu'))
conv_model.add(layers.MaxPooling2D((2, 2)))
conv_model.add(layers.Conv2D(128, (3, 3), activation='relu'))
conv_model.add(layers.MaxPooling2D((2, 2)))
conv_model.add(layers.Flatten())
conv_model.add(layers.Dropout(0.5))
conv_model.add(layers.Dense(512, activation='relu'))
conv_model.add(layers.Dense(36, activation='softmax'))

LEARNING_RATE = 1e-4
Example #20
ori_test_labels = test_labels.copy()
from keras.utils import to_categorical
train_images = ori_train_images.copy()

train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255
train_labels = to_categorical(ori_train_labels.copy())

test_images = ori_test_images.copy()
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255
test_labels = to_categorical(ori_test_labels.copy())

# Step 3: Design Model

net = models.Sequential()
net.add(layers.Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))  # need a 4D tensor: (batch, 28, 28, 1)
net.add(layers.MaxPooling2D((2, 2)))
net.add(layers.Conv2D(64, kernel_size=(3, 3), activation='relu'))
net.add(layers.MaxPooling2D(pool_size=(2, 2)))
net.add(layers.Conv2D(64, kernel_size=(3, 3), activation='relu'))
net.add(layers.Flatten())
net.add(layers.Dense(64, activation='relu'))
net.add(layers.Dense(10, activation='softmax'))

net.summary()

# Step 4: Training Data
net.compile(optimizer=optimizers.Adam(),
            loss=losses.categorical_crossentropy,
            metrics=[metrics.categorical_accuracy])
Example #21
# ## Make neural network architecture and train the network

# In[10]:

# Final Model Architecture:

##### TODO: change parameters to improve the accuracy
##### (Batch_size, nb_epoch, activation='relu', 'sigmoid', 'tanh')
##### Add or remove layers
from keras import layers
from keras import models
from keras import optimizers

activation = 'relu'

modelN = models.Sequential()
modelN.add(
    layers.Conv2D(32, (3, 3),
                  padding='same',
                  activation=activation,
                  input_shape=(48, 48, 1)))
modelN.add(layers.Conv2D(32, (3, 3), padding='same', activation=activation))
modelN.add(layers.MaxPooling2D(pool_size=(2, 2)))

modelN.add(layers.Conv2D(128, (3, 3), padding='same', activation=activation))
modelN.add(layers.Conv2D(128, (3, 3), padding='same', activation=activation))
modelN.add(layers.MaxPooling2D(pool_size=(2, 2)))

modelN.add(layers.Flatten()
           )  # this converts our 3D feature maps to 1D feature vectors
modelN.add(layers.Dense(64, activation=activation))
def create_network(number_of_features):

    # Start neural network
    network = models.Sequential()

    # Add fully connected layer with a ReLU activation function
    network.add(
        layers.Dense(units=32,
                     activation='relu',
                     input_shape=(number_of_features, )))

    # Add fully connected layer with a ReLU activation function
    network.add(layers.Dense(units=32, activation='relu'))
    # Add fully connected layer with a ReLU activation function
    network.add(layers.Dense(units=16, activation='relu'))
    # Add fully connected layer with a sigmoid activation function
    network.add(layers.Dense(units=1, activation='sigmoid'))

    # Compile neural network
    network.compile(
        loss='binary_crossentropy',  # Cross-entropy
        optimizer='rmsprop',  # Root Mean Square Propagation
        metrics=['accuracy'])  # Accuracy performance metric

    # Return compiled network
    return network
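A hedged usage sketch for create_network (the feature count is an illustrative assumption):

network = create_network(number_of_features=20)  # hypothetical feature count
network.summary()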


#%%
# #%% Entity embedding approaches
# #keep the result can be reproduce
# os.environ['PYTHONHASHSEED']='0'
# np.random.seed(42)
# rn.seed(12345)
# session_conf=tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1,
#                             inter_op_parallelism_threads=1)
# tf.random.set_seed(1234)
# sess=tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(),config=session_conf)
# tf.compat.v1.keras.backend.set_session(sess)

# #build the embedding layers
# model=Sequential()
# model.add(Embedding(12,5,input_length=1))
# model.compile('Adam','mape')

# #convert job feature with one-hot encoding by indices of the featuer
# label_encoder=ce.OrdinalEncoder(cols=['Job'])
# input_array=label_encoder.fit_transform(df.Job)
# input_array=input_array-1

# # unique_category_count = 12
# # inputs = tf.one_hot(input_array, unique_category_count)
# # output_array_o=model.predict(inputs)

# output_array=model.predict(input_array)
# weight=model.get_weights()
# #%%
# import tensorflow as tf

# category_indices = [0, 1, 2, 2, 1, 0]
# unique_category_count = 3
# inputs = tf.one_hot(category_indices, unique_category_count)
# print(inputs.numpy())
Example #23
def normal_convnet():
    model = models.Sequential()
    model.add(
        layers.Conv2D(32, (3, 3), activation='relu',
                      input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(5, activation='softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(lr=1e-4),
                  metrics=['acc'])
    # All images will be rescaled by 1./255
    train_datagen = ImageDataGenerator(rescale=1. / 255)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    train_dir = '/home/rcarbajsa/edan95/datasets/flowers_split/train'
    validation_dir = '/home/rcarbajsa/edan95/datasets/flowers_split/validation'
    test_dir = '/home/rcarbajsa/edan95/datasets/flowers_split/test'
    train_generator = train_datagen.flow_from_directory(
        # This is the target directory
        train_dir,
        # All images will be resized to 150x150
        target_size=(150, 150),
        batch_size=15,
        # Since we use categorical_crossentropy loss, we need categorical labels
        class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=5,
        class_mode='categorical')
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=173,
                                  epochs=30,
                                  validation_data=validation_generator,
                                  validation_steps=173)
    model.save('flowers1.h5')
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    plt.figure()
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
    test_generator = test_datagen.flow_from_directory(test_dir,
                                                      target_size=(150, 150),
                                                      batch_size=20,
                                                      class_mode='categorical')
    test_loss, test_acc = model.evaluate_generator(test_generator, steps=50)
    print('test acc:', test_acc)
def dense_mnist_LBL(nbr_of_layers=50,
                    epochs=5,
                    results_path="",
                    save_models=False,
                    models_path=""):
    """
    Function for generating
    """
    from keras import models, layers
    import numpy as np
    train_images, train_labels, test_images, test_labels = get_mnist_data_flattened(
    )

    network_a = models.Sequential()
    network_a.add(layers.Dense(512, activation='relu',
                               input_shape=(28 * 28, )))
    network_a.add(layers.Dense(10, activation='softmax'))
    network_a.compile(optimizer='rmsprop',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

    # number of internal layers to add, alternating between two model handles
    results = []
    network_a.summary()
    network_a.fit(train_images, train_labels, epochs=epochs, batch_size=128)

    for i in range(nbr_of_layers):
        if (i % 2 == 0):
            network_b = grow_network_mnist(network_a, 32, 10)
            network_b.compile(optimizer='rmsprop',
                              loss='categorical_crossentropy',
                              metrics=['accuracy'])
            network_b.summary()
            network_b.fit(train_images,
                          train_labels,
                          epochs=epochs,
                          batch_size=128,
                          verbose=2)
            if (save_models):
                model_name = "MNIST_LBL_layer" + str(
                    i + 2) + "trainedfor" + str(epochs) + "e.h5"
                network_b.save(models_path + model_name)
            results.append(network_b.evaluate(test_images, test_labels))
            del (network_a)
        else:
            network_a = grow_network_mnist(network_b)
            network_a.compile(optimizer='rmsprop',
                              loss='categorical_crossentropy',
                              metrics=['accuracy'])
            network_a.summary()
            network_a.fit(train_images,
                          train_labels,
                          epochs=epochs,
                          batch_size=128,
                          verbose=2)
            if (save_models):
                model_name = "MNIST_LBL_layer" + str(
                    i + 2) + "trainedfor" + str(epochs) + "e.h5"
                network_a.save(models_path + model_name)
            results.append(network_a.evaluate(test_images, test_labels))
            del (network_b)
            print('b')
    if (i % 2 == 1):
        network = network_a
    else:
        network = network_b
    result_name = 'MNIST_LBL_res_' + str(epochs) + 'epochs_' + str(
        nbr_of_layers) + 'layers'
    np.save(results_path + result_name, results)
    return network
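

# Minimal sketch (assumption, not the original helper): grow_network_mnist is only
# called above, never defined in this snippet. It is assumed to return a new
# Sequential model that reuses the trained hidden layers of the old network and
# inserts one extra hidden Dense layer in front of a fresh softmax output.
def grow_network_mnist(old_network, new_units=32, n_outputs=10):
    from keras import models, layers
    new_network = models.Sequential()
    # keep every layer except the old output layer, together with its weights
    for layer in old_network.layers[:-1]:
        new_network.add(layer)
    # add the new hidden layer and a fresh output layer
    new_network.add(layers.Dense(new_units, activation='relu'))
    new_network.add(layers.Dense(n_outputs, activation='softmax'))
    return new_network
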
print(data.shape)

# shuffle array order

shuffler = np.random.permutation(len(data))
labels_shuffled = labels[shuffler]
data_shuffled = data[shuffler]

data_shuffled = data_shuffled.reshape(-1, 128, 157, 1)

#create CNN
#link to source for architecture: https://docs.google.com/document/d/1ydh6-a05urM-wbKd0n8rrnw7aTUooVMV7kAA1d9tIOY/edit?usp=sharing

# (7×7, 16),(5×5, 32),(3×3, 64),(3×3, 128),(3×3, 256)
#CNN architecture -- same as above
model2 = models.Sequential()
model2.add(
    layers.Conv2D(16, (7, 7), activation='relu',
                  input_shape=(128, 157, 1)))  # input shape will need to be changed
model2.add(layers.MaxPooling2D((2, 2)))
model2.add(layers.Conv2D(32, (5, 5), activation='relu'))
model2.add(layers.MaxPooling2D((2, 2)))
model2.add(layers.Conv2D(64, (3, 3), activation='relu'))
model2.add(layers.MaxPooling2D((2, 2)))
model2.add(layers.Conv2D(128, (3, 3), activation='relu'))
model2.add(layers.MaxPooling2D((2, 2)))
model2.add(layers.Conv2D(256, (3, 3), activation='relu'))

#adding BLSTM
model2.add(layers.Flatten())  #flatten CNN output
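
# An LSTM needs 3D input (batch, timesteps, features), so the Flatten above cannot
# feed a BLSTM directly. Minimal sketch (assumption, not the original code): keep the
# time axis of the spectrogram instead, e.g. replace the Flatten with:
#
#   model2.add(layers.Permute((2, 1, 3)))        # (3, 5, 256) -> (5, 3, 256): time first
#   model2.add(layers.Reshape((5, 3 * 256)))     # one 768-dim feature vector per frame
#   model2.add(layers.Bidirectional(layers.LSTM(128)))
#   model2.add(layers.Dense(n_classes, activation='softmax'))  # n_classes: hypothetical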
Example #26
0
    headlines = np.array(headlines)
    targets = np.array(targets)
    print("shape info:", headlines.shape, '\n', targets.shape)

    X_train, X_test, y_train, y_test = train_test_split(headlines,
                                                        targets,
                                                        test_size=0.2,
                                                        random_state=0)

    return X_train, X_test, y_train, y_test


if __name__ == '__main__':
    X_train, X_test, y_train, y_test = preprocess()

    clf = models.Sequential()
    clf.add(layers.Dense(256, activation='relu', input_shape=(X_train.shape[1],)))
    clf.add(layers.Dense(256, activation='relu'))
    clf.add(layers.Dense(256, activation='relu'))
    clf.add(layers.Dropout(rate=0.2))
    clf.add(layers.Dense(256, activation='relu'))
    clf.add(layers.Dropout(rate=0.2))
    clf.add(layers.Dense(41))  # 41 is the number of text categories
    clf.add(layers.Activation('softmax'))
    clf.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    clf.fit(X_train, y_train, epochs=35, batch_size=100, verbose=2)

    # test model

    aa = clf.predict(X_test)  # inspect the predictions on the test set
Example #27
0
from keras.datasets import mnist
from keras.utils import to_categorical
from keras import models, layers
import matplotlib.pyplot as plt
import numpy as np

(x_train, y_train), (x_test, y_test) = mnist.load_data()

# scale the pixels to [0, 1] and add a channel axis so the Conv2D layers accept them
x_train = x_train.astype('float32').reshape(-1, 28, 28, 1) / 255.
x_test = x_test.astype('float32').reshape(-1, 28, 28, 1) / 255.

noise_factor = 0.25
x_train_noisy = x_train + noise_factor * np.random.normal(
    loc=0.0, scale=1.0, size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(
    loc=0.0, scale=1.0, size=x_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)

autoencoder = models.Sequential()
#encoding
autoencoder.add(
    layers.Conv2D(80,
                  kernel_size=(3, 3),
                  activation='relu',
                  input_shape=(28, 28, 1)))
autoencoder.add(layers.Conv2D(40, kernel_size=(3, 3), activation='relu'))

#decoding

autoencoder.add(
    layers.Conv2DTranspose(40, kernel_size=(3, 3), activation='relu'))
autoencoder.add(
    layers.Conv2DTranspose(80, kernel_size=(3, 3), activation='relu'))
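
# The snippet stops before the reconstruction layer and the training call. Minimal
# sketch (assumption, not the original code) of how the denoising autoencoder could
# be completed: map back to one channel and fit noisy inputs against clean targets.
autoencoder.add(
    layers.Conv2D(1, kernel_size=(3, 3), padding='same', activation='sigmoid'))
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.fit(x_train_noisy, x_train,
                epochs=5, batch_size=128,
                validation_data=(x_test_noisy, x_test))
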
  # Custom activations: for x ~ N(0, 1), a = exp(-0.5) ≈ 0.606531 makes sin(x) - a*x
  # uncorrelated with x, and 0.0644529 is the variance of sin(x) - a*x, so
  # c*(sin(x) - a*x) has zero mean and unit variance. The three functions below differ
  # only in the trailing 1/sqrt(fan-in) factor (784, 200, 300), matching the fan-in of
  # the layer each is attached to (assuming szIm = 28 * 28 = 784).
  def function31(x):
    a = 0.606531
    c = 1.0 / np.sqrt(0.0644529)
    return (c * (K.sin(x) - a * x)) / np.sqrt(784)
  
  def function32(x):
    a=0.606531
    c=1.0/np.sqrt(0.0644529)
    return ( c*(K.sin(x)-a*x) )/np.sqrt(200)
  
  def function33(x):
    a=0.606531
    c=1.0/np.sqrt(0.0644529)
    return ( c*(K.sin(x)-a*x) )/np.sqrt(300)
  
  nn = models.Sequential()   
  nn.add(layers.Dense(units=200,activation=function31 ,input_shape=(szIm,), kernel_initializer=initializers.random_normal(mean=0.0, stddev=1.0, seed=8),use_bias=False))
  nn.add(layers.Dense(units=300,activation=function32 ,use_bias=False )) # , input_shape=(szIm,)))  
  nn.add(layers.Dense(units=250,activation=function33,use_bias=False )) # , input_shape=(szIm,))) 
#  nn.add(layers.Dense(units=150,activation=function3,use_bias=False )) # , input_shape=(szIm,)))  
  nn.add(layers.Dense(units=10, activation='softmax',use_bias=False))

  nn.compile(optimizer='rmsprop',
             loss     ='categorical_crossentropy',
             metrics  =['accuracy'])

  #time_callback = TimeHistory()
  tic=time.time()
  history = nn.fit(x = train_images, 
                   y = train_labels, 
                   validation_split=0.2,
Example #29
0
n_classes = 4

#Import a pre-trained CNN, but just the convolutional layers
vgg_conv = VGG16(weights='imagenet',
                 include_top=False,
                 input_shape=(224, 224, 3))

#Freeze all convolutional layers except the last four
for layer in vgg_conv.layers[:-4]:
    layer.trainable = False

for layer in vgg_conv.layers:
    print(layer, layer.trainable)

#Build new model by incorporating VGG model
new_model = models.Sequential()
new_model.add(vgg_conv)

#Add new dense layers, flattening input first
new_model.add(layers.Flatten())
new_model.add(layers.Dense(1024, activation='relu'))
new_model.add(layers.Dropout(0.5))
new_model.add(layers.Dense(n_classes, activation='softmax'))

new_model.summary()

train_dir = "/media/unraid/Datasets/shape_gen/train"
test_dir = "/media/unraid/Datasets/shape_gen/test"

#Count the number of .png files in the test and train data sets
n_train = 0
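
# Minimal sketch (assumption, not the original code): one way to count the .png
# files under train_dir and test_dir, e.g. to derive steps_per_epoch later.
import os

def count_pngs(root):
    return sum(f.endswith('.png') for _, _, files in os.walk(root) for f in files)

# n_train = count_pngs(train_dir)
# n_test = count_pngs(test_dir)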
Example #30
0
def modelLRResnet50():
    model = models.Sequential()
    conv_base = modelResnet50()
    model.add(conv_base)
    return model
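
# Minimal sketch (assumption, not the original code): modelLRResnet50() above only
# wraps the convolutional base returned by the (undefined here) modelResnet50()
# helper. A usable classifier would typically freeze that base and add a dense head:
def modelLRResnet50_with_head(n_classes=4):          # hypothetical variant
    conv_base = modelResnet50()                      # assumed to return a ResNet50 base
    conv_base.trainable = False                      # train only the new head at first
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(n_classes, activation='softmax'))
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model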