Example 1
def get_model(idrop=0.2,
              edrop=0.1,
              odrop=0.25,
              rdrop=0.2,
              weight_decay=1e-4,
              lr=1e-3):
    model = Sequential()
    model.add(
        Embedding(NB_WORDS,
                  128,
                  embeddings_regularizer=l2(weight_decay),
                  input_length=MAXLEN))
    if edrop:
        model.add(Dropout(edrop))
    model.add(
        LSTM(128,
             kernel_regularizer=l2(weight_decay),
             recurrent_regularizer=l2(weight_decay),
             bias_regularizer=l2(weight_decay),
             dropout=idrop,
             recurrent_dropout=rdrop))
    if odrop:
        model.add(Dropout(odrop))
    model.add(
        Dense(1,
              kernel_regularizer=l2(weight_decay),
              bias_regularizer=l2(weight_decay)))
    optimizer = Adam(lr)
    model.compile(loss='mse', metrics=["mse"], optimizer=optimizer)
    return model
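
The helper reads the module-level constants NB_WORDS and MAXLEN, which are not defined in this excerpt. A minimal usage sketch, with illustrative values for both:

# NB_WORDS and MAXLEN are assumptions for illustration, not part of the example
NB_WORDS, MAXLEN = 20000, 80

model = get_model(idrop=0.25, rdrop=0.25, weight_decay=1e-4, lr=1e-3)
model.summary()
# x_train: (n_samples, MAXLEN) integer word indices; y_train: (n_samples,) targets
# model.fit(x_train, y_train, batch_size=32, epochs=5, validation_split=0.1)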
Example 2
 def _build_model(self):
     model = Sequential()
     model.add(Dense(3, input_dim=2, activation='tanh'))
     model.add(Dense(3, activation='tanh'))
     model.add(Dense(self.env.action_space.n, activation='linear'))
     model.compile(loss='mse', optimizer=Adam(lr=self.alpha, decay=self.alpha_decay))
     return model
Example 3
def get_model(idrop=0.2,
              edrop=0.1,
              odrop=0.25,
              rdrop=0.2,
              weight_decay=WEIGHT_DECAY):
    model = Sequential()
    model.add(
        Embedding(
            NB_WORDS,
            128,
            embeddings_regularizer=l2(weight_decay),
            input_length=MAXLEN))  # , batch_input_shape=(batch_size, maxlen)))
    if edrop:
        model.add(Dropout(edrop))
    model.add(
        LSTM(128,
             kernel_regularizer=l2(weight_decay),
             recurrent_regularizer=l2(weight_decay),
             bias_regularizer=l2(weight_decay),
             dropout=idrop,
             recurrent_dropout=rdrop))
    if odrop:
        model.add(Dropout(odrop))
    model.add(
        Dense(1,
              kernel_regularizer=l2(weight_decay),
              bias_regularizer=l2(weight_decay),
              activation='sigmoid'))
    optimizer = Adam(1e-3)
    model.compile(loss='binary_crossentropy',
                  metrics=["binary_accuracy"],
                  optimizer=optimizer)
    return model
Example 4
    def build(self):
        """
        Builds the tiny yolo v2 network.
        :param input: input image batch to the network
        :return: logits output from network
        """
        self.model = Sequential()
        self.model.add(Convolution2D(16, (3, 3), input_shape=(416, 416, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(32, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(64, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(128, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(256, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

        self.model.add(Convolution2D(512, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 1), padding='valid'))

        self.model.add(Convolution2D(1024, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())
        self.model.add(Convolution2D(1024, (3, 3), padding='same'))
        self.model.add(LeakyReLU())
        self.model.add(BatchNormalization())

        self.model.add(Convolution2D(125, (1, 1), activation=None))

        if self.config.optimizer == 'adam':
            opt = Adam()
        elif self.config.optimizer == 'sgd':
            opt = SGD()
        else:
            raise ValueError('unsupported optimizer: %s' % self.config.optimizer)

        if self.config.loss == 'categorical_crossentropy':
            loss = 'categorical_crossentropy'
        elif self.config.loss == 'yolov2_loss':
            raise NotImplementedError('yolov2_loss is not implemented')
        else:
            raise ValueError('unsupported loss: %s' % self.config.loss)

        self.model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
        self.model.summary()
        return self.model
Example 5
def build_read_tensor_2d_model(args):
    '''Build Read Tensor 2d CNN model for classifying variants.

    2d convolutions followed by dense connections.
    Dynamically sets input channels based on args via defines.total_input_channels_from_args(args).
    Uses the functional API. Supports Theano or TensorFlow channel ordering.
    Prints out the model summary.

    Arguments
        args.window_size: Length in base-pairs of sequence centered at the variant to use as input.
        args.labels: The output labels (e.g. SNP, NOT_SNP, INDEL, NOT_INDEL)
        args.channels_last: Theano->False or TensorFlow->True channel ordering flag

    Returns
        The Keras model
    '''
    if args.channels_last:
        in_shape = (args.read_limit, args.window_size, args.channels_in)
    else:
        in_shape = (args.channels_in, args.read_limit, args.window_size)

    read_tensor = Input(shape=in_shape, name="read_tensor")
    read_conv_width = 16
    x = Conv2D(128, (read_conv_width, 1),
               padding='valid',
               activation="relu",
               kernel_initializer="he_normal")(read_tensor)
    x = Conv2D(64, (1, read_conv_width),
               padding='valid',
               activation="relu",
               kernel_initializer="he_normal")(x)
    x = MaxPooling2D((3, 1))(x)
    x = Conv2D(64, (1, read_conv_width),
               padding='valid',
               activation="relu",
               kernel_initializer="he_normal")(x)
    x = MaxPooling2D((3, 3))(x)
    x = Flatten()(x)
    x = Dense(units=32, kernel_initializer='normal', activation='relu')(x)
    prob_output = Dense(units=len(args.labels),
                        kernel_initializer='normal',
                        activation='softmax')(x)

    model = Model(inputs=[read_tensor], outputs=[prob_output])

    adamo = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)
    my_metrics = [metrics.categorical_accuracy]

    model.compile(loss='categorical_crossentropy',
                  optimizer=adamo,
                  metrics=my_metrics)
    model.summary()

    if os.path.exists(args.weights_hd5):
        model.load_weights(args.weights_hd5, by_name=True)
        print('Loaded model weights from:', args.weights_hd5)

    return model
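
Since everything is read off the args namespace, the builder can be exercised without the full argument parser. A minimal invocation sketch; all values below are illustrative assumptions, and the empty weights_hd5 path simply skips the weight-loading branch:

from types import SimpleNamespace

# hypothetical arguments, chosen so the two (3, x) poolings divide cleanly
args = SimpleNamespace(window_size=128, read_limit=128, channels_in=15,
                       channels_last=True, labels=['SNP', 'NOT_SNP'],
                       weights_hd5='')
model = build_read_tensor_2d_model(args)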
Example 6
    def _build_model(self):

        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss=self._huber_loss,
                      optimizer=Adam(lr=self.learning_rate))
        return model
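
self._huber_loss is referenced above but not shown. A minimal sketch of one common Keras-backend formulation (the clip_delta value and the tf.where selection are assumptions, not the original helper):

import tensorflow as tf
from keras import backend as K

def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
    # quadratic for small errors, linear beyond clip_delta
    error = y_true - y_pred
    cond = K.abs(error) <= clip_delta
    squared_loss = 0.5 * K.square(error)
    linear_loss = 0.5 * K.square(clip_delta) + clip_delta * (K.abs(error) - clip_delta)
    return K.mean(tf.where(cond, squared_loss, linear_loss))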
Example 7
 def __init__(self, learning_rate, layers, functions, optimizer_name,
              beta=0.0, dropout=1.0):
     # NOTE: Keras Dropout takes the fraction of units to DROP, so the
     # default dropout=1.0 would drop every unit; pass a rate such as 0.5.
     
     self.n_input = layers[0]
     self.n_hidden = layers[1:-1]
     self.n_output = layers[-1]
     
     self.model = Sequential()
     
     if len(self.n_hidden) == 0:
         # single layer
         self.model.add(Dense(self.n_output, activation=functions[0],
                          kernel_regularizer=regularizers.l2(beta),
                          input_shape=(self.n_input,)))
         
     elif len(self.n_hidden) == 1:
         # hidden layer
         self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                              kernel_regularizer=regularizers.l2(beta),
                              input_shape=(self.n_input,)))
         self.model.add(Dropout(dropout))
         # output layer
         self.model.add(Dense(self.n_output, activation=functions[1],
                              kernel_regularizer=regularizers.l2(beta)))
         
     else:
         # the first hidden layer
         self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                              kernel_regularizer=regularizers.l2(beta),
                              input_shape=(self.n_input,)))
         self.model.add(Dropout(dropout))
         # the second hidden layer
         self.model.add(Dense(self.n_hidden[1], activation=functions[1],
                              kernel_regularizer=regularizers.l2(beta)))
         self.model.add(Dropout(dropout))
         # the output layer
         self.model.add(Dense(self.n_output, activation=functions[2],
                              kernel_regularizer=regularizers.l2(beta)))
     
     self.model.summary()
     
     if optimizer_name == 'Adam':
         optimizer = Adam(learning_rate)
     else:
         raise ValueError('unsupported optimizer: %s' % optimizer_name)
     
     #self.model.compile(loss='mean_squared_error',
     #                   optimizer=optimizer,
     #                   metrics=['accuracy'])
     
     self.model.compile(loss='categorical_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
Example 8
def finetune(base_model, n_class, lr=1e-3):
    """Refine a given model by removing the last layer and adding a custom new one.

    # Arguments
        base_model: model to start from
        n_class: number of classes
        lr: learning rate for the fine-tuned model

    # Returns
        Model where only the last layer is trained
    """

    for layer in base_model.layers:
        layer.trainable = False

    x = base_model.layers[-2].output
    x = Dense(n_class, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=x)
    model.compile(optimizer=Adam(lr=lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
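
A usage sketch; VGG16 is an illustrative choice, and any base model whose second-to-last layer feeds a Dense classifier works the same way:

from keras.applications import VGG16

base = VGG16(weights='imagenet', include_top=True)
model = finetune(base, n_class=10, lr=1e-4)
# only the new softmax layer is updated during training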
Example 9
    def build(self):
        d_input = Input(shape=(2, ))

        x = Dense(64, activation='relu')(d_input)
        x = Dropout(0.5)(x)

        x = Dense(64, activation='relu')(x)
        x = Dropout(0.5)(x)

        x = Dense(64, activation='relu')(x)
        x = Dropout(0.5)(x)

        x = Dense(64, activation='relu')(x)
        x = Dropout(0.5)(x)

        d_output = Dense(3, activation="relu")(x)

        model = Model(inputs=[d_input], outputs=[d_output])
        optimizer = Adam(lr=0.00005)
        model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])

        return model
Example 10
def run(model):
    # Download kitti dataset
    helper.maybe_download_training_img(DATA_DIRECTORY)

    x, y = helper.get_data(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)

    if model is None:
        inputs = Input(shape=(IMAGE_SHAPE[0], IMAGE_SHAPE[1], 3))

        # Block 1
        block1_conv1 = Conv2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block1_conv1')(inputs)
        block1_conv2 = Conv2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block1_conv2')(block1_conv1)
        block1_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block1_pool')(block1_conv2)

        # Block 2
        block2_conv1 = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block2_conv1')(block1_pool)
        block2_conv2 = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block2_conv2')(block2_conv1)
        block2_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block2_pool')(block2_conv2)

        # Block 3
        block3_conv1 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv1')(block2_pool)
        block3_conv2 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv2')(block3_conv1)
        block3_conv3 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv3')(block3_conv2)
        block3_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block3_pool')(block3_conv3)

        # Block 4
        block4_conv1 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv1')(block3_pool)
        block4_conv2 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv2')(block4_conv1)
        block4_conv3 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv3')(block4_conv2)
        block4_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block4_pool')(block4_conv3)

        # Block 5
        block5_conv1 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv1')(block4_pool)
        block5_conv2 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv2')(block5_conv1)
        block5_conv3 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv3')(block5_conv2)
        block5_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block5_pool')(block5_conv3)

        pool5_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block5_pool)
        upsample_1 = Conv2DTranspose(2,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding="same")(pool5_conv1x1)

        pool4_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block4_pool)
        add_1 = Add()([upsample_1, pool4_conv1x1])

        upsample_2 = Conv2DTranspose(2,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding="same")(add_1)
        pool3_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block3_pool)
        add_2 = Add()([upsample_2, pool3_conv1x1])

        upsample_3 = Conv2DTranspose(2,
                                     kernel_size=(16, 16),
                                     strides=(8, 8),
                                     padding="same")(add_2)
        output = Dense(2, activation='softmax')(upsample_3)

        model = Model(inputs, output, name='multinet_seg')

        adam = Adam(lr=LEARNING_RATE)
        model.compile(loss='categorical_crossentropy',
                      optimizer=adam,
                      metrics=['accuracy'])

    model.fit(x, y, batch_size=BATCH_SIZE, epochs=EPOCHS)
    model.save('trained_model' + str(time.time()) + '.h5')
Example 11
## Non-trainable layer settings
freeze = [
    'input_1', 'conv1_1', 'conv1_2', 'pool1', 'conv2_1', 'conv2_2', 'pool2',
    'conv3_1', 'conv3_2', 'conv3_3', 'pool3'
]

for L in model.layers:
    if L.name in freeze:
        L.trainable = False

## Callback Settings for keras
callbacks = [
    ModelCheckpoint('./checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                    verbose=1,
                    save_weights_only=True),
    LearningRateScheduler(schedule)
]

optim = Adam(lr=base_lr)
model.compile(optimizer=optim,
              loss=MultiboxLoss(NUM_CLASSES, neg_pos_ratio=2.0).compute_loss)

history = model.fit_generator(gen.generate(True),
                              steps_per_epoch=gen.train_batches,
                              epochs=nb_epoch,
                              verbose=1,
                              workers=1,
                              callbacks=callbacks,
                              validation_data=gen.generate(False),
                              validation_steps=gen.val_batches)
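
The schedule passed to LearningRateScheduler is not defined in this excerpt. A minimal step-decay sketch, reusing the base_lr from above (the decay factor and interval are illustrative assumptions):

def schedule(epoch):
    # halve the learning rate every 20 epochs
    return base_lr * (0.5 ** (epoch // 20))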
Example 12
def train():
    # Prepare Training Data
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train = (X_train.astype(np.float32) - 127.5) / 127.5
    X_train = X_train[:, :, :, None]
    X_test = X_test[:, :, :, None]

    # Initialize Models
    d = discriminator_model()
    g = generator_model()
    q = q_model()
    d_on_g = generator_containing_discriminator(g, d)
    q_on_g = generator_containing_discriminator(g, q)

    # Initialize Optimizers
    d_optim = Adam(lr=LR, beta_1=0.5, beta_2=0.999, epsilon=EPSILON)
    g_optim = Adam(lr=LR, beta_1=0.5, beta_2=0.999, epsilon=EPSILON)
    q_optim = Adam(lr=LR, beta_1=0.5, beta_2=0.999, epsilon=EPSILON)

    # Compile Models with loss functions
    g.compile(loss='binary_crossentropy', optimizer="SGD")
    d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim)
    q_on_g.compile(loss=disc_mutual_info_loss, optimizer=g_optim)

    d.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=d_optim)

    q.trainable = True
    q.compile(loss=disc_mutual_info_loss, optimizer=q_optim)

    try:
        # Main Training Loop
        for epoch in range(100):
            print("Epoch is", epoch)
            print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))

            for index in range(int(X_train.shape[0] / BATCH_SIZE)):
                # Get Real and Generated Images
                noise = sample_zc()

                real_images = X_train[index * BATCH_SIZE:(index + 1) *
                                      BATCH_SIZE]
                generated_images = g.predict(noise,
                                             batch_size=BATCH_SIZE,
                                             verbose=0)

                # Train Discriminator and Q network
                training_images = np.concatenate(
                    (real_images, generated_images))
                labels = [1] * BATCH_SIZE + [0] * BATCH_SIZE
                latent_code = np.concatenate(noise[1:], axis=1)

                d_loss = d.train_on_batch(training_images, labels)
                q_loss = q.train_on_batch(generated_images, latent_code)

                # Train Generator using Fake/Real Signal
                noise = sample_zc()

                d.trainable = False
                g_d_loss = d_on_g.train_on_batch(noise, [1] * BATCH_SIZE)
                d.trainable = True

                # Train Generator using Mutual Information Lower Bound
                noise = sample_zc()
                latent_code = np.concatenate(noise[1:], axis=1)

                q.trainable = False
                g_q_loss = q_on_g.train_on_batch(noise, latent_code)
                q.trainable = True

                print(
                    "batch %d d_loss : %.3f q_loss: %.3f g_loss_d: %.3f g_loss_q: %.3f"
                    % (index, d_loss, q_loss, g_d_loss, g_q_loss))

                # Generate Sample Images
                if index % 20 == 0:
                    image = make_image(g)

                    Image.fromarray(image.astype(np.uint8)).save(
                        str(epoch) + "_" + str(index) + ".png")

                # Save weights
                if index % 10 == 9:
                    g.save_weights('g.kerasweights', True)
                    d.save_weights('d.kerasweights', True)
                    q.save_weights('q.kerasweights', True)

    except KeyboardInterrupt:
        pass

    print("\rFinished training")
Example 13
# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)

try:
    dprint('loading weights from {}'.format(C.base_net_weights))
    model_rpn.load_weights(C.base_net_weights, by_name=True)
    model_classifier.load_weights(C.base_net_weights, by_name=True)
except Exception:
    dprint(
        'Could not load pretrained model weights. Weights can be found at {} and {}'
        .format(
            'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5',
            'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
        ))

optimizer = Adam(lr=1e-5)
optimizer_classifier = Adam(lr=1e-5)
model_rpn.compile(
    optimizer=optimizer,
    loss=[losses.rpn_loss_cls(num_anchors),
          losses.rpn_loss_regr(num_anchors)])
model_classifier.compile(
    optimizer=optimizer_classifier,
    loss=[
        losses.class_loss_cls,
        losses.class_loss_regr(len(classes_count) - 1)
    ],
    metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
model_all.compile(optimizer='sgd', loss='mae')

epoch_length = 1000
Example 14
    model = mk_model(arch, data_type, classes)
    model.summary()  # check the model configuration

    #sys.exit()

    # visualize_filters(model, 'before')  # visualize the weights

    # Write the architecture of the created model to network-<filename>.png
    vis_utils.plot_model(model,
                         to_file='network-' + filename + '.png',
                         show_shapes=True,
                         show_layer_names=True)

    # Configure the training process
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(),
                  metrics=['accuracy'])

    # Train the model
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              verbose=1,
              validation_data=(x_test, y_test))

    # visualize_filters(model, 'after')  # visualize the weights

    # Evaluate the model
    print('Evaluate')
    score = model.evaluate(x_test, y_test, verbose=1)
Example 15
# flattening the conv2d tensor is required before linking it to a dense layer;
# the Flatten layer needs no additional arguments
flattened = layers.Flatten(name='flatten')(conv2d_tensor)

# create a dense layer on top of the flattened tensor
dense_2_cls = layers.Dense(2, activation='softmax', name='dense_2_cls')(flattened)

# create a simple model: input layer -> conv2d -> flatten -> dense
model = Model(input_tensor, dense_2_cls, name='con2d_simdl')

# see the model
model.summary()

# compile the model
lr = 0.001
model.compile(optimizer=Adam(lr=lr),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

"""
def compile(self,\n',
  '              optimizer,\n',
  '              loss,\n',
  '              metrics=None,\n',
  '              loss_weights=None,\n',
  '              sample_weight_mode=None,\n',
  '              **kwargs):\n',
  '    Configures the model for training.\n',
  '\n',
  '    Arguments:\n',
  '        optimizer: str (name of optimizer) or optimizer object.\n',
  '            See [optimizers](/optimizers).\n',
Example 16
print(np.min(X_train), np.max(X_train))

print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')


def make_trainable(net, val):
    net.trainable = val
    for l in net.layers:
        l.trainable = val


shp = X_train.shape[1:]
dropout_rate = 0.25
opt = Adam(lr=1e-4)
dopt = Adam(lr=1e-3)

# Build Generative model ...
nch = 200
g_input = Input(shape=[100])
H = Dense(nch * 14 * 14,
          kernel_initializer=init_ops.glorot_normal_initializer())(g_input)
H = BatchNormalization()(H)
H = Activation('relu')(H)
H = Reshape([14, 14, nch])(H)
H = UpSampling2D(size=(2, 2))(H)
H = Conv2D(nch // 2,  # integer division: Conv2D needs an int filter count
           kernel_size=(3, 3),
           padding='same',
           kernel_initializer=init_ops.glorot_normal_initializer(),