Example #1
def classifier(base_layers,
               input_rois,
               num_rois,
               nb_classes=21,
               trainable=False):

    # compile times on Theano tend to be very high, so we use smaller ROI pooling regions as a workaround

    pooling_regions = 14
    input_shape = (num_rois, 14, 14, 1024)

    out_roi_pool = RoiPoolingConv(pooling_regions,
                                  num_rois)([base_layers, input_rois])
    out = classifier_layers(out_roi_pool,
                            input_shape=input_shape,
                            trainable=True)

    out = TimeDistributed(Flatten())(out)

    out_class = TimeDistributed(Dense(nb_classes,
                                      activation='softmax',
                                      kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1),
                                     activation='linear',
                                     kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
Example #2
def mk_model_with_bn():
    model = Sequential()
    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(
        Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=128,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(filters=128,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(
        Convolution2D(filters=256,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(filters=256,
                      kernel_size=(3, 3),
                      kernel_initializer=kernel_initializer()))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(256, kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM, kernel_initializer='he_normal'))
    model.add(Activation('softmax'))

    return model
Example #3
    def _build_model(self):
        model = Sequential()
        model.add(Dense(3, input_dim=2, activation='tanh'))
        model.add(Dense(3, activation='tanh'))
        model.add(Dense(self.env.action_space.n, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=self.alpha, decay=self.alpha_decay))
        return model
Example #4
def build_read_tensor_2d_model(args):
    '''Build Read Tensor 2d CNN model for classifying variants.

    2d convolutions followed by dense connection.
    Dynamically sets input channels based on args via defines.total_input_channels_from_args(args)
    Uses the functional API. Supports theano or tensorflow channel ordering.
    Prints out model summary.

    Arguments
        args.window_size: Length in base-pairs of sequence centered at the variant to use as input.
        args.labels: The output labels (e.g. SNP, NOT_SNP, INDEL, NOT_INDEL)
        args.channels_last: Theano->False or Tensorflow->True channel ordering flag

    Returns
        The keras model
    '''
    if args.channels_last:
        in_shape = (args.read_limit, args.window_size, args.channels_in)
    else:
        in_shape = (args.channels_in, args.read_limit, args.window_size)

    read_tensor = Input(shape=in_shape, name="read_tensor")
    read_conv_width = 16
    x = Conv2D(128, (read_conv_width, 1),
               padding='valid',
               activation="relu",
               kernel_initializer="he_normal")(read_tensor)
    x = Conv2D(64, (1, read_conv_width),
               padding='valid',
               activation="relu",
               kernel_initializer="he_normal")(x)
    x = MaxPooling2D((3, 1))(x)
    x = Conv2D(64, (1, read_conv_width),
               padding='valid',
               activation="relu",
               kernel_initializer="he_normal")(x)
    x = MaxPooling2D((3, 3))(x)
    x = Flatten()(x)
    x = Dense(units=32, kernel_initializer='normal', activation='relu')(x)
    prob_output = Dense(units=len(args.labels),
                        kernel_initializer='normal',
                        activation='softmax')(x)

    model = Model(inputs=[read_tensor], outputs=[prob_output])

    adamo = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)
    my_metrics = [metrics.categorical_accuracy]

    model.compile(loss='categorical_crossentropy',
                  optimizer=adamo,
                  metrics=my_metrics)
    model.summary()

    if os.path.exists(args.weights_hd5):
        model.load_weights(args.weights_hd5, by_name=True)
        print('Loaded model weights from:', args.weights_hd5)

    return model
Example #5
def main():
    x_train, y_train, x_test, y_test = load_data()

    model = Sequential()

    model.add(
        Conv2D(32,
               kernel_size=(11, 11),
               strides=4,
               padding="same",
               activation='relu',
               input_shape=(48, 48, 1)))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2, padding="valid"))
    model.add(
        Conv2D(32,
               kernel_size=(5, 5),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2, padding="valid"))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(Flatten())  # flatten conv features before the dense head
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(7, activation='softmax'))

    model.compile(loss='categorical_crossentropy',  # multi-class softmax output
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              batch_size=128,
              epochs=5,
              verbose=1,
              validation_data=(x_test, y_test))

    model.save(expanduser("~/emotion/alex_net.h5"))

    accuracy, fbeta = test_model(model, x_test, y_test)
    print("Accuracy: %s" % accuracy)
    print("F-Beta: %s" % fbeta)
Example #6
def atari_qnet(input_shape, num_actions, net_name, net_size):
    net_name = net_name.lower()

    # input state
    state = Input(shape=input_shape)

    # convolutional layers
    conv1_32 = Conv2D(32, (8, 8), strides=(4, 4), activation='relu')
    conv2_64 = Conv2D(64, (4, 4), strides=(2, 2), activation='relu')
    conv3_64 = Conv2D(64, (3, 3), strides=(1, 1), activation='relu')

    # if recurrent net then change input shape
    if 'drqn' in net_name:
        # recurrent net (drqn)
        lambda_perm_state = lambda x: K.permute_dimensions(x, [0, 3, 1, 2])
        perm_state = Lambda(lambda_perm_state)(state)
        dist_state = Lambda(lambda x: K.stack([x], axis=4))(perm_state)

        # extract features with `TimeDistributed` wrapped convolutional layers
        dist_conv1 = TimeDistributed(conv1_32)(dist_state)
        dist_conv2 = TimeDistributed(conv2_64)(dist_conv1)
        dist_convf = TimeDistributed(conv3_64)(dist_conv2)
        feature = TimeDistributed(Flatten())(dist_convf)
    elif 'dqn' in net_name:
        # fully connected net (dqn)
        # extract features with convolutional layers
        conv1 = conv1_32(state)
        conv2 = conv2_64(conv1)
        convf = conv3_64(conv2)
        feature = Flatten()(convf)

    # network type. Dense for dqn; LSTM or GRU for drqn
    if 'lstm' in net_name:
        net_type = LSTM
    elif 'gru' in net_name:
        net_type = GRU
    else:
        net_type = Dense

    # dueling or regular dqn/drqn
    if 'dueling' in net_name:
        value1 = net_type(net_size, activation='relu')(feature)
        adv1 = net_type(net_size, activation='relu')(feature)
        value2 = Dense(1)(value1)
        adv2 = Dense(num_actions)(adv1)
        mean_adv2 = Lambda(lambda x: K.mean(x, axis=1))(adv2)
        ones = K.ones([1, num_actions])
        lambda_exp = lambda x: K.dot(K.expand_dims(x, axis=1), -ones)
        exp_mean_adv2 = Lambda(lambda_exp)(mean_adv2)
        sum_adv = add([exp_mean_adv2, adv2])
        exp_value2 = Lambda(lambda x: K.dot(x, ones))(value2)
        q_value = add([exp_value2, sum_adv])
    else:
        hid = net_type(net_size, activation='relu')(feature)
        q_value = Dense(num_actions)(hid)

    # build model
    return Model(inputs=state, outputs=q_value)
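For reference, the dueling branch above implements the standard aggregation Q(s, a) = V(s) + A(s, a) - mean_a A(s, a); the K.ones/K.dot Lambdas only broadcast the scalar value and the mean advantage across actions. A minimal equivalent sketch with a single Lambda (an alternative formulation, not the author's code):

# equivalent dueling aggregation, assuming `value2` and `adv2` as defined above
from keras.layers import Lambda
import keras.backend as K

q_value = Lambda(
    lambda va: va[0] + va[1] - K.mean(va[1], axis=1, keepdims=True)
)([value2, adv2])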
Example #7
def model_fn(features, targets, mode, params):
    """Model function for Estimator."""

    # 1. Configure the model via TensorFlow operations
    # First, build all the model, a good idea is using Keras or tf.layers
    # since these are high-level API's
    conv1 = Conv2D(32, (5, 5), activation='relu',
                   input_shape=(28, 28, 1))(features)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, (5, 5), activation='relu')(pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    flat = Flatten()(pool2)
    dense = Dense(1024, activation='relu')(flat)

    preds = Dense(10)(dense)  # linear logits; tf.losses.softmax_cross_entropy applies softmax itself

    # 2. Define the loss function for training/evaluation
    loss = None
    train_op = None

    # Calculate Loss (for both TRAIN and EVAL modes)
    if mode != learn.ModeKeys.INFER:
        loss = tf.losses.softmax_cross_entropy(onehot_labels=targets,
                                               logits=preds)

    # 3. Define the training operation/optimizer

    # Configure the Training Op (for TRAIN mode)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=params["learning_rate"],
            optimizer="Adam",
        )

    # 4. Generate predictions
    predictions_dict = {
        "classes": tf.argmax(input=preds, axis=1),
        "probabilities": tf.nn.softmax(preds, name="softmax_tensor")
    }

    # 5. Define how you want to evaluate the model
    metrics = {
        "accuracy":
        tf.metrics.accuracy(tf.argmax(input=preds, axis=1),
                            tf.argmax(input=targets, axis=1))
    }

    # 6. Return predictions/loss/train_op/eval_metric_ops in ModelFnOps object
    return model_fn_lib.ModelFnOps(mode=mode,
                                   predictions=predictions_dict,
                                   loss=loss,
                                   train_op=train_op,
                                   eval_metric_ops=metrics)
Example #8
    def _build_model(self):

        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss=self._huber_loss,
                      optimizer=Adam(lr=self.learning_rate))
        return model
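model.compile above references self._huber_loss, which is not part of this snippet. A minimal sketch of what it might look like, assuming the usual Keras-backend Huber formulation (the clip_delta name and default are illustrative):

import tensorflow as tf
import keras.backend as K

def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
    # quadratic for small errors, linear for large ones (stabilizes Q-learning)
    error = y_true - y_pred
    cond = K.abs(error) <= clip_delta
    squared_loss = 0.5 * K.square(error)
    linear_loss = clip_delta * (K.abs(error) - 0.5 * clip_delta)
    return K.mean(tf.where(cond, squared_loss, linear_loss))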
Example #9
    def _decoder(self, n_hidden_recog_1, n_hidden_recog_2, n_hidden_gener_1,
                 n_hidden_gener_2, n_input, n_z):
        # Generate probabilistic decoder (decoder network), which
        # maps points in latent space onto a Bernoulli distribution in data space.
        # The transformation is parametrized and can be learned.
        net = Dense(units=n_hidden_gener_1, activation='softplus')(self.z)
        net = Dense(units=n_hidden_gener_2, activation='softplus')(net)
        x_reconstr_mean = Dense(units=n_input, activation='linear')(net)

        return x_reconstr_mean
Example #10
def simple_acnet(input_shape, num_actions, net_arch):
    # input state
    state = Input(shape=input_shape)
    layer = state
    for num_hid in net_arch:
        layer = Dense(num_hid, activation='relu')(layer)
    logits = Dense(num_actions, kernel_initializer='zeros')(layer)
    value = Dense(1)(layer)

    # build model
    return Model(inputs=state, outputs=[value, logits])
Example #11
    def _encoder(self, n_hidden_recog_1, n_hidden_recog_2, n_hidden_gener_1,
                 n_hidden_gener_2, n_input, n_z):
        # Generate probabilistic encoder (recognition network), which
        # maps inputs onto a normal distribution in latent space.
        # The transformation is parametrized and can be learned.

        net = Dense(units=n_hidden_recog_1, activation='softplus')(self.x)
        net = Dense(units=n_hidden_recog_2, activation='softplus')(net)
        z_mean = Dense(units=n_z, activation='linear')(net)
        z_log_sigma_sq = Dense(units=n_z, activation='linear')(net)

        return z_mean, z_log_sigma_sq
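The encoder returns z_mean and z_log_sigma_sq, while the decoder in Example #9 consumes self.z; the sampling step that connects them is not shown. A minimal sketch of the usual reparameterization trick, assuming a Lambda-based wiring (names are illustrative):

import keras.backend as K
from keras.layers import Lambda

def _sample_z(args):
    z_mean, z_log_sigma_sq = args
    eps = K.random_normal(shape=K.shape(z_mean))  # eps ~ N(0, I)
    # z = mu + sigma * eps, so gradients flow through mu and sigma
    return z_mean + K.exp(0.5 * z_log_sigma_sq) * eps

# hypothetical wiring: self.z = Lambda(_sample_z)([z_mean, z_log_sigma_sq])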
Example #12
def Squeeze_excitation_layer(input_x):
    ratio = 4
    out_dim = int(np.shape(input_x)[-1])
    squeeze = GlobalAveragePooling2D()(input_x)
    excitation = Dense(units=int(out_dim / ratio))(squeeze)
    excitation = Activation('relu')(excitation)
    excitation = Dense(units=out_dim)(excitation)
    excitation = Activation('sigmoid')(excitation)
    excitation = layers.Reshape([-1, 1, out_dim])(excitation)
    scale = layers.multiply([input_x, excitation])

    return scale
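The SE block above can be appended to any 2D feature map; a short usage sketch (shapes and names are illustrative, not from the original code):

from keras.layers import Input, Conv2D
from keras.models import Model

inp = Input(shape=(32, 32, 3))
feat = Conv2D(64, (3, 3), padding='same', activation='relu')(inp)
feat = Squeeze_excitation_layer(feat)  # channel-wise recalibration
model = Model(inputs=inp, outputs=feat)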
Example #13
def create_two_stream_classifier(
        num_fc_neurons,
        dropout_rate,
        num_classes=24):  # classifier_weights_path=None
    classifier = Sequential()
    classifier.add(
        Dense(num_fc_neurons, name='fc7', input_shape=(num_fc_neurons * 2, )))
    #classifier.add(BatchNormalization(axis=1, name='fc7_bn'))
    classifier.add(Activation('relu', name='fc7_ac'))
    classifier.add(Dropout(dropout_rate))
    classifier.add(Dense(num_classes, activation='softmax',
                         name='predictions'))
    return classifier
Example #14
def discriminator_model():
    model = Sequential()
    model.add(Conv2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (5, 5)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Example #15
def generator_model():
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(Activation('tanh'))
    model.add(Dense(128*7*7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((7, 7, 128), input_shape=(128*7*7,)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(1, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    return model
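The two DCGAN models above are usually trained adversarially; a minimal sketch of the common chaining helper, assuming the standard pattern (not shown in the snippet):

from keras.models import Sequential

def generator_containing_discriminator(g, d):
    # stack G and a frozen D so that training this model updates only G
    model = Sequential()
    model.add(g)
    d.trainable = False
    model.add(d)
    return model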
Example #16
    def init_model(self):
        with tf.variable_scope('resnet'):
            x = self.input_img
            x = tf.cast(x, tf.float32) / 255

            #split (1,2,3)
            x = self.split_block(x, 's1')

            #4
            x = self.conv_block(x, 3, [32, 32, 128], stage=4, block='a')
            x = self.identity_block(x, 3, [32, 32, 128], stage=4, block='b')
            x = self.identity_block(x, 3, [32, 32, 128], stage=4, block='c')
            x = self.identity_block(x, 3, [32, 32, 128], stage=4, block='d')
            x = self.identity_block(x, 3, [32, 32, 128], stage=4, block='e')
            x = self.identity_block(x, 3, [32, 32, 128], stage=4, block='f')

            #5
            x = self.conv_block(x, 3, [64, 64, 256], stage=5, block='a')
            x = self.identity_block(x, 3, [64, 64, 256], stage=5, block='b')
            x = self.identity_block(x, 3, [64, 64, 256], stage=5, block='c')

            #pool
            x = AveragePooling3D((7, 4, 2), name='avg_pool')(x)
            x = Flatten()(x)

            self.output_feature = x
            logger.info('feature shape:{}'.format(self.output_feature.shape))

            #fc
            x = Dense(self.args.classes, activation='softmax', name='fc')(x)

            self.output = x

        logger.info('network inited!')
Example #17
def q_model():
    model = shared_dq_model()

    h = Dense(DISC_DIM + CONT_DIM)(model.output)
    h = Activation('softmax')(h)

    return Model(inputs=model.input, outputs=[h], name="q_network")
Example #18
def inference(input_shape, N_CLASSES):
    '''build the model
    args:
        input_shape: shape of one input image, e.g. (width, height, channels=3)
        N_CLASSES: number of output classes
    return:
        the Keras model; its softmax output has shape [batch_size, N_CLASSES]
    '''

    inputs = Input(shape=input_shape)

    K.set_learning_phase(
        False)  # all new operations will be in test mode from now on

    ## Conv layer 1
    x = dn_layer(inputs=inputs, name='layer1')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='layer1_maxpool')(x)

    ## Conv layer 2
    x = dn_layer(inputs=x, num_filters=32, name='layer2')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='layer2_maxpool')(x)

    ## Conv layer 3
    x = dn_layer(inputs=x, num_filters=64, name='layer3')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='layer3_maxpool')(x)

    x = GlobalAveragePooling2D(name='GlobalAveragePooling2D')(x)

    #x = Flatten(name='flatten')(x)
    x = Dense(N_CLASSES, activation='softmax', name='predictions')(x)

    model = Model(inputs=inputs, outputs=x)
    return model
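dn_layer is defined elsewhere in the source; a hedged guess at its composition, assuming the common Conv2D + BatchNormalization + ReLU block (the default filter count is an assumption):

from keras.layers import Conv2D, BatchNormalization, Activation

def dn_layer(inputs, num_filters=16, name=None):
    # assumed block: Conv2D -> BatchNormalization -> ReLU
    x = Conv2D(num_filters, (3, 3), padding='same', name=name + '_conv')(inputs)
    x = BatchNormalization(name=name + '_bn')(x)
    return Activation('relu', name=name + '_relu')(x)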
Example #19
def generator(Z_dim, y_dim, image_size=32):
    gf_dim = 64
    s16 = int(image_size / 16)
    c_dim = 3
    z_in = Input(shape=(Z_dim, ), name='Z_input')
    y_in = Input(shape=(y_dim, ), name='y_input')
    inputs = concatenate([z_in, y_in])

    G_h = Dense(gf_dim * 8 * s16 * s16)(inputs)
    G_h = Activation('relu')(BatchNormalization()(
        Reshape(target_shape=[s16, s16, gf_dim * 8])(G_h)))
    G_h = Activation('relu')(BatchNormalization()(Conv2DTranspose(
        gf_dim * 4, kernel_size=(5, 5), strides=(2, 2), padding='same')(G_h)))
    G_h = Activation('relu')(BatchNormalization()(Conv2DTranspose(
        gf_dim * 2, kernel_size=(5, 5), strides=(2, 2), padding='same')(G_h)))
    G_h = Activation('relu')(BatchNormalization()(Conv2DTranspose(
        gf_dim * 2, kernel_size=(5, 5), strides=(1, 1), padding='same')(G_h)))
    G_h = Activation('relu')(BatchNormalization()(Conv2DTranspose(
        gf_dim, kernel_size=(5, 5), strides=(2, 2), padding='same')(G_h)))
    G_prob = Conv2DTranspose(c_dim,
                             kernel_size=(5, 5),
                             strides=(2, 2),
                             padding='same',
                             activation='sigmoid')(G_h)

    G = Model(inputs=[z_in, y_in], outputs=G_prob, name='Generator')
    print('=== Generator ===')
    G.summary()
    print('\n\n')

    return G
Example #20
def discriminator_model():
    model = shared_dq_model()

    h = Dense(1)(model.output)
    h = Activation('sigmoid')(h)

    return Model(inputs=model.input, outputs=[h], name="discriminator")
Example #21
def model_fn(features, targets, mode, params):

    logits = Dense(10, input_dim=784)(features["x"])
    
    loss = tf.losses.softmax_cross_entropy(
            onehot_labels=targets, logits=logits)
    
    train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=params["learning_rate"],
            optimizer="SGD")
    
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits)
    }
    
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
                     tf.argmax(input=logits, axis=1),
                     tf.argmax(input=targets, axis=1))
    }
     
    return model_fn_lib.ModelFnOps(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)
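A hedged usage sketch for this model_fn under the old tf.contrib.learn API the snippet targets (the hyperparameter value and data names are illustrative):

from tensorflow.contrib import learn

estimator = learn.Estimator(model_fn=model_fn,
                            params={"learning_rate": 0.01})
# estimator.fit(x=train_x, y=train_y_onehot, steps=1000, batch_size=128)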
Example #22
def model_fn(features, labels, mode, params):

    conv1 = Conv2D(32, (5, 5), activation='relu', input_shape=(28, 28, 1))(features["x"])
    
    conv2 = Conv2D(64, (5, 5), activation='relu')(conv1)
    
    conv3 = Conv2D(128, (2, 2), activation='relu')(conv2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    
    conv4 = Conv2D(256, (5, 5), activation='relu')(pool3)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    
    flat = Flatten()(pool4)
    
    dense1 = Dense(1024, activation='relu')(flat)
    dense2 = Dense(1024, activation='relu')(dense1)
    dense3 = Dense(1024, activation='relu')(dense2)
    dense4 = Dense(1024, activation='relu')(dense3)

    logits = Dense(10)(dense4)  # linear logits; the loss below applies softmax

    loss = tf.losses.softmax_cross_entropy(
            onehot_labels=labels, logits=logits)
    
    train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=params["learning_rate"],
            optimizer="SGD")
    
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits)
    }
    
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
                     tf.argmax(input=logits, axis=1),
                     tf.argmax(input=labels, axis=1))
    }
     
    return model_fn_lib.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)
Example #23
def atari_acnet(input_shape, num_actions, net_name, net_size):
    net_name = net_name.lower()

    # input state
    state = Input(shape=input_shape)

    # convolutional layers
    conv1_32 = Conv2D(32, (8, 8), strides=(4, 4), activation='relu')
    conv2_64 = Conv2D(64, (4, 4), strides=(2, 2), activation='relu')
    conv3_64 = Conv2D(64, (3, 3), strides=(1, 1), activation='relu')

    # if recurrent net then change input shape
    if 'lstm' in net_name or 'gru' in net_name:
        # recurrent net
        lambda_perm_state = lambda x: K.permute_dimensions(x, [0, 3, 1, 2])
        perm_state = Lambda(lambda_perm_state)(state)
        dist_state = Lambda(lambda x: K.stack([x], axis=4))(perm_state)

        # extract features with `TimeDistributed` wrapped convolutional layers
        dist_conv1 = TimeDistributed(conv1_32)(dist_state)
        dist_conv2 = TimeDistributed(conv2_64)(dist_conv1)
        dist_convf = TimeDistributed(conv3_64)(dist_conv2)
        feature = TimeDistributed(Flatten())(dist_convf)

        # specify net type for the following layer
        if 'lstm' in net_name:
            net_type = LSTM
        elif 'gru' in net_name:
            net_type = GRU
    elif 'fully connected' in net_name:
        # fully connected net
        # extract features with convolutional layers
        conv1 = conv1_32(state)
        conv2 = conv2_64(conv1)
        convf = conv3_64(conv2)
        feature = Flatten()(convf)

        # specify net type for the following layer
        net_type = Dense

    # actor (policy) and critic (value) stream
    hid = net_type(net_size, activation='relu')(feature)
    logits = Dense(num_actions, kernel_initializer='zeros')(hid)
    value = Dense(1)(hid)

    # build model
    return Model(inputs=state, outputs=[value, logits])
Example #24
def generator(Z_dim):
    # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
    # Architecture : FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S
    z = Input(shape=(Z_dim, ), name='Z_input')
    net = Activation('relu')(BatchNormalization()(Dense(1024)(z)))
    net = Activation('relu')(BatchNormalization()(Dense(128 * 7 * 7)(net)))
    net = Reshape(target_shape=[7, 7, 128])(net)
    net = Activation('relu')(BatchNormalization()(Conv2DTranspose(
        64, kernel_size=(4, 4), strides=(2, 2), padding='same')(net)))
    out = Conv2DTranspose(1,
                          kernel_size=(4, 4),
                          strides=(2, 2),
                          activation='sigmoid',
                          padding='same')(net)
    G_model = Model(inputs=z, outputs=out)
    G_model.summary()
    return G_model
Example #25
def mk_model(arch, data_type, classes):
    if data_type == 'mnist':
        channels = 1
    elif data_type == 'aerial':
        channels = 3

    if arch == 'lenet-5':
        model = Sequential()  #initialize the model

        #convolution layer 1
        model.add(Conv2D(
            32, 5, padding='same',
            input_shape=(28, 28, channels)))  #output_shape=(None,28,28,32)
        #filters=32, kernel_size=(5,5), strides=(1,1), use_bias=True
        #dilation_rate=(1,1), kernel_initializer='glorot_uniform', bias_initializer='zeros'
        #padding='same' pads the input so the output shape matches it
        #output_shape=(None(60000),28,28,32)
        model.add(Activation('relu'))
        model.add(MaxPooling2D(padding='same'))  #output_shape=(None,14,14,32)
        #pool_size=(2,2), strides=(2,2)

        #convolution layer 2
        model.add(Conv2D(64, 5, padding='same'))  #output_shape=(None,14,14,64)
        model.add(Activation('relu'))
        model.add(MaxPooling2D(padding='same'))  #output_shape=(None,7,7,64)

        #flatten
        model.add(Flatten())  #output_shape=(None,3136(7*7*64))

        #fully connected layer 1
        model.add(Dense(1024))  #output_shape=(None,1024)
        model.add(Activation('relu'))
        model.add(Dropout(0.5))  #drop rate (e.g. 0.2 means 80% of the connections remain)

        #fully connected layer 2
        model.add(Dense(classes))  #output_shape=(None,classes)
        model.add(Activation('softmax'))

        return model

    elif arch == 'alexnet':
        model = Sequential()  #initialize the model
Example #26
def fit_lstm(train, batch_size, nb_epoch, neurons):
    X, y = train[:, 0:-1], train[:, -1]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    # fit one epoch at a time so the stateful LSTM can be reset between epochs
    for i in range(nb_epoch):
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
        model.reset_states()
    return model
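fit_lstm expects a 2D supervised array whose last column is the target; a minimal usage sketch with synthetic data (shapes are illustrative):

import numpy as np

# 100 rows of [lag features..., target]; batch_size must divide the row count
train = np.random.rand(100, 5)
model = fit_lstm(train, batch_size=1, nb_epoch=10, neurons=4)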
Example #27
def bidirectional_model():
    inputs = Input(shape=(maxlen, ), dtype='int32')
    x = Embedding(max_features, 128, input_length=maxlen)(inputs)
    x = Bidirectional(LSTM(64))(x)
    x = Dropout(0.5)(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=x)
    # try using different optimizers and different optimizer configs
    model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
    return model
Example #28
def build_model(num_output_classes):
    conv1size = 32
    conv2size = 64
    convfiltsize = 4
    densesize = 128
    poolsize = (2, 2)
    imgdepth = 3
    dropout = 0.3
    if IMG_COLORMODE == 'grayscale':
        imgdepth = 1
    inpshape = IMG_TGT_SIZE + (imgdepth, )
    inputs = Input(shape=inpshape)
    conv1 = Convolution2D(conv1size,
                          convfiltsize,
                          strides=(1, 1),
                          padding='valid',
                          activation='relu',
                          name='conv1',
                          data_format='channels_last')(inputs)
    pool1 = MaxPooling2D(pool_size=poolsize, name='pool1')(conv1)
    drop1 = Dropout(dropout)(pool1)
    conv2 = Convolution2D(conv2size,
                          convfiltsize,
                          strides=(1, 1),
                          padding='valid',
                          activation='relu',
                          name='conv2',
                          data_format='channels_last')(drop1)
    pool2 = MaxPooling2D(pool_size=poolsize, name='pool2')(conv2)
    drop2 = Dropout(dropout)(pool2)
    flat2 = Flatten()(drop2)
    dense = Dense(densesize, name='dense')(flat2)
    denseact = Activation('relu')(dense)
    output = Dense(num_output_classes, name='output')(denseact)
    outputact = Activation('softmax')(output)

    model = Model(inputs=inputs, outputs=outputact)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #29
def simple_model():
    model = Sequential()
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(filters=64, kernel_size=(3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(LABEL_NUM))
    model.add(Activation('softmax'))

    return model
Example #30
def cnn_model_fn():
    '''
    define the model using the Keras functional API
    '''
    # input shape is (img_rows, img_cols, fea_channel)
    inputs = Input(shape=(img_rows, img_cols, 1))
    x = Conv2D(32, kernel_size=(3, 3), activation='relu')(inputs)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    pred = Dense(num_classes, activation='softmax')(x)
    # note: in Keras 2 the Model arguments are named inputs and outputs
    model = Model(inputs=inputs, outputs=pred)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model