Example #1
    def __init__(self):
        super(LatentDiscriminator, self).__init__()
        self.fc_11 = Dense(name='ld_fc_11',
                           units=512,
                           activation=None,
                           use_bias=True)
        self.fc_12 = Dense(name='ld_fc_12',
                           units=512,
                           activation=tf.nn.sigmoid,
                           use_bias=True)
        self.drop_1 = Dropout(name='ld_drop_1', rate=0.5)
        self.fc_21 = Dense(name='ld_fc_21',
                           units=256,
                           activation=None,
                           use_bias=True)
        self.fc_22 = Dense(name='ld_fc_22',
                           units=256,
                           activation=tf.nn.sigmoid,
                           use_bias=True)
        self.drop_2 = Dropout(name='ld_drop_2', rate=0.5)
        self.fc_3 = Dense(name='ld_fc_3',
                          units=128,
                          activation=tf.nn.sigmoid,
                          use_bias=True)
        self.classifier = Dense(name='ld_classifier',
                                units=2,
                                activation=tf.nn.softmax,
                                use_bias=False)
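
Only the constructor is shown above; a minimal sketch of how these layers might be wired in a `call` method follows (the gating interpretation is an assumption, since the original forward pass is not part of the snippet):

    def call(self, z, training=False):
        # assumed wiring: the paired linear/sigmoid Dense layers of equal width
        # suggest multiplicative gating, but the original call() is not shown
        h = self.fc_11(z) * self.fc_12(z)
        h = self.drop_1(h, training=training)
        h = self.fc_21(h) * self.fc_22(h)
        h = self.drop_2(h, training=training)
        h = self.fc_3(h)
        return self.classifier(h)  # 2-way softmax over real/fake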
Example #2
    def construct_model(self, input_x, input_y):
        ct = self.cnn_trainable
        x = self.inception_part(input_x, ct)
        x = self.resnet_3d_part(x, ct)
        x = AveragePooling3D(pool_size=(self.frm_num // 4, 7, 7),
                             strides=(1, 1, 1),
                             padding='valid',
                             data_format=self.DATA_FORMAT,
                             name='global_pool')(x)
        print(x)
        x = tf.reshape(x, shape=(-1, 512))
        print(x)
        x = Dropout(0.3, name='dropout')(x)
        self.fc8 = Dense(400, trainable=ct, name='fc8')
        self.fc8_output = self.fc8(x)
        print(self.fc8_output)
        # use the 400-way fc8 logits, not the 512-d pooled features, as logits
        self.loss = sparse_softmax_cross_entropy_with_logits(
            logits=self.fc8_output, labels=self.input_y)

        self.top1_acc = in_top_k(predictions=self.fc8_output,
                                 targets=self.input_y,
                                 k=1)
        self.top1_acc = tf.reduce_mean(tf.cast(self.top1_acc, tf.float32),
                                       name='top1_accuracy')
        self.top5_acc = in_top_k(predictions=self.fc8_output,
                                 targets=self.input_y,
                                 k=5)
        self.top5_acc = tf.reduce_mean(tf.cast(self.top5_acc, tf.float32),
                                       name='top5_accuracy')
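
The example defines the loss and accuracy ops but stops there; a minimal TF1-style training step might look like the sketch below (the optimizer choice and the `input_x`/`batch_*` names are assumptions, not part of the original):

    # hypothetical training step for the graph built above
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
    train_op = optimizer.minimize(tf.reduce_mean(model.loss))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        _, top1, top5 = sess.run(
            [train_op, model.top1_acc, model.top5_acc],
            feed_dict={model.input_x: batch_x, model.input_y: batch_y})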
Example #3
def dropout(inputs, drop_rate):
    """
    Applies Dropout to the input

    Parameters
    ----------
    inputs: Input tensor
    drop_rate: float between 0 and 1. Fraction of the input units to drop.
    """
    return Dropout(rate=drop_rate)(inputs)
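
A quick usage sketch of the wrapper (the input tensor below is illustrative, not from the original):

    # illustrative use of the dropout() wrapper above
    features = tf.keras.Input(shape=(128,))
    regularized = dropout(features, drop_rate=0.5)

Note that a Keras `Dropout` layer is only active when invoked with `training=True` (as `model.fit` does internally); at inference it passes inputs through unchanged.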
Example #4
def initialize_model(config, num_people, current_layer, is_training):
    for count, layer_conf in enumerate(config.model.layers):
        name = get_or_none(layer_conf, "name")
        with tf.variable_scope(layer_conf.scope, reuse=tf.AUTO_REUSE):
            if layer_conf.HasField("convolutional"):
                current_layer = relu(
                    conv2d(
                        current_layer,
                        layer_conf.convolutional.filters,
                        max(layer_conf.convolutional.kernel_size.width,
                            layer_conf.convolutional.kernel_size.height),
                        #data_format='channels_last',
                        padding="same",
                        scope="conv"))

            elif layer_conf.HasField("pool"):
                if layer_conf.pool.type == "max":
                    current_layer = MaxPooling2D(
                        (layer_conf.pool.size.width,
                         layer_conf.pool.size.height),
                        strides=(layer_conf.pool.size.width,
                                 layer_conf.pool.size.height),
                        name=name)(current_layer)
                else:
                    raise ValueError("Unsupported pool type: " +
                                     layer_conf.pool.type)

            elif layer_conf.HasField("dense"):
                current_layer = Dense(layer_conf.dense.units,
                                      activation=str_to_activation(
                                          layer_conf.dense.activation),
                                      name=name)(current_layer)

            elif layer_conf.HasField("flatten"):
                current_layer = Flatten(name=name)(current_layer)

            elif layer_conf.HasField("dropout"):
                current_layer = Dropout(layer_conf.dropout.rate * is_training,
                                        name=name)(current_layer)
            elif layer_conf.HasField("transfer"):
                if count != 0:
                    ValueError("Transfer layer must occur first.")
                    # We're handling this outside now
            else:
                ValueError("Unsupported layer.")

    return current_layer
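
A sketch of how this builder might be invoked (the parsed `model_config` protobuf and the image placeholder are assumptions for illustration); passing `is_training` as 1.0 or 0.0 keeps the `rate * is_training` dropout trick working:

    # hypothetical invocation of the builder above
    images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
    logits = initialize_model(model_config, num_people=10,
                              current_layer=images, is_training=1.0)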
Example #5
inputs = Input(shape=(new_x_train.shape[1], new_x_train.shape[2]))
#these first layers are 'data augmentation' layers
x = MyAddScale(name='scale_augment')(inputs)
x = MyAdd2DRotation(name='rotate_augment')(x)
x = MyAddShift(name='shift_augment')(x)
x = MyAddJitter(name='jitter_augment')(x)
#This is the ursa layer to create a feature vector
x = MyUrsaMin(Nstars, name='cluster')(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
#these last layers do classification
x = Dense(512, activation='relu', name='dense512')(x)
x = BatchNormalization()(x)
x = Dense(256, activation='relu', name='dense256')(x)
x = BatchNormalization()(x)
x = Dropout(rate=0.3)(x)
x = Dense(10, activation='softmax')(x)

model = Model(inputs=inputs, outputs=x)
if gpus > 1:
    # use tf.keras's multi-GPU wrapper to match the tf.keras optimizer below
    from tensorflow.keras.utils import multi_gpu_model
    model = multi_gpu_model(model, gpus=gpus)

rmsprop = tf.keras.optimizers.RMSprop(lr=.001, rho=.9, decay=.0001)
model.compile(optimizer=rmsprop,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()

history = model.fit(new_x_train,
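
The snippet is truncated mid-call; the `fit` invocation presumably continues with labels and training options along these lines (every argument below beyond `new_x_train` is an assumption):

    # assumed continuation of the truncated fit() call above
    history = model.fit(new_x_train, y_train,
                        batch_size=32, epochs=50,
                        validation_split=0.1)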
Example #6
File: training.py Project: VSZM/ConnectX
BN1 = BatchNormalization()
BN2 = BatchNormalization()
BN3 = BatchNormalization()
BN4 = BatchNormalization()
BN5 = BatchNormalization()
BN6 = BatchNormalization()

CONV1 = Conv2D(NUM_CHANNELS, kernel_size=3, strides=1, padding='same')
CONV2 = Conv2D(NUM_CHANNELS, kernel_size=3, strides=1, padding='same')
CONV3 = Conv2D(NUM_CHANNELS, kernel_size=3, strides=1)
CONV4 = Conv2D(NUM_CHANNELS, kernel_size=3, strides=1)

FC1 = Dense(128)
FC2 = Dense(64)
FC3 = Dense(7)

DROP1 = Dropout(0.3)
DROP2 = Dropout(0.3)


# 6x7 input
# https://github.com/PaddlePaddle/PARL/blob/0915559a1dd1b9de74ddd2b261e2a4accd0cd96a/benchmark/torch/AlphaZero/submission_template.py#L496
def modified_cnn(inputs, **kwargs):
    relu = tf.nn.relu
    log_softmax = tf.nn.log_softmax

    layer_1_out = relu(BN1(CONV1(inputs)))
    layer_2_out = relu(BN2(CONV2(layer_1_out)))
    layer_3_out = relu(BN3(CONV3(layer_2_out)))
    layer_4_out = relu(BN4(CONV4(layer_3_out)))

    # width: 7 - 4 = 3 after the two 'valid' 3x3 convolutions; height: 6 - 4 = 2 likewise
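
The snippet cuts off before the dense head, but the layers defined at the top (FC1-FC3, DROP1/DROP2, BN5/BN6) suggest a continuation along these lines, modeled loosely on the AlphaZero template linked above (the exact wiring is an assumption):

    # assumed continuation: flatten the 3x2 feature map and apply the dense head
    flat = tf.reshape(layer_4_out, [-1, NUM_CHANNELS * 3 * 2])
    fc_1_out = DROP1(relu(BN5(FC1(flat))))
    fc_2_out = DROP2(relu(BN6(FC2(fc_1_out))))
    return log_softmax(FC3(fc_2_out))  # log-probabilities over the 7 columns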
Example #7
    def __init__(self):
        super(Discriminator, self).__init__()
        arg = {'activation': tf.nn.relu, 'padding': 'same'}
        self.conv_11 = Conv2D(name='di_conv_11',
                              filters=64,
                              kernel_size=(5, 5),
                              strides=(2, 2),
                              **arg)
        self.conv_12 = Conv2D(name='di_conv_12',
                              filters=64,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.conv_13 = Conv2D(name='di_conv_13',
                              filters=64,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.pool_1 = MaxPooling2D(name='di_pool_1',
                                   pool_size=(5, 5),
                                   strides=(2, 2),
                                   padding='same')
        self.drop_1 = Dropout(0.5)

        self.conv_21 = Conv2D(name='di_conv_21',
                              filters=128,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.conv_22 = Conv2D(name='di_conv_22',
                              filters=128,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.conv_23 = Conv2D(name='di_conv_23',
                              filters=128,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.pool_2 = MaxPooling2D(name='di_pool_2',
                                   pool_size=(3, 3),
                                   strides=(2, 2),
                                   padding='same')
        self.drop_2 = Dropout(0.5)

        self.conv_31 = Conv2D(name='di_conv_31',
                              filters=256,
                              kernel_size=(3, 3),
                              strides=(2, 2),
                              **arg)
        self.conv_32 = Conv2D(name='di_conv_32',
                              filters=256,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.conv_33 = Conv2D(name='di_conv_33',
                              filters=256,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.pool_3 = MaxPooling2D(name='di_pool_3',
                                   pool_size=(3, 3),
                                   strides=(2, 2),
                                   padding='same')
        self.drop_3 = Dropout(0.5)

        self.flattener = Flatten()
        self.drop_4 = Dropout(0.5)
        self.classifier_1 = Dense(name='di_cls_1',
                                  units=512,
                                  activation=tf.nn.relu,
                                  use_bias=True)
        self.drop_5 = Dropout(0.5)
        self.classifier_2 = Dense(name='di_cls_2',
                                  units=256,
                                  activation=tf.nn.relu,
                                  use_bias=True)
        self.classifier_3 = Dense(name='di_cls_3',
                                  units=2,
                                  activation=None,
                                  use_bias=True)
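
Only the constructor appears in the example; the sketch below shows one plausible `call` implied by the layer ordering (the wiring is an assumption, not part of the original):

    def call(self, x, training=False):
        # assumed forward pass following the order the layers are declared in
        x = self.drop_1(self.pool_1(self.conv_13(self.conv_12(self.conv_11(x)))),
                        training=training)
        x = self.drop_2(self.pool_2(self.conv_23(self.conv_22(self.conv_21(x)))),
                        training=training)
        x = self.drop_3(self.pool_3(self.conv_33(self.conv_32(self.conv_31(x)))),
                        training=training)
        x = self.drop_4(self.flattener(x), training=training)
        x = self.drop_5(self.classifier_1(x), training=training)
        return self.classifier_3(self.classifier_2(x))  # 2-way logits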
Example #8
    def __init__(self):
        super(Encoder, self).__init__()
        arg = {'activation': tf.nn.relu, 'padding': 'same'}
        self.conv_11 = Conv2D(name='e_conv_11',
                              filters=64,
                              kernel_size=7,
                              strides=(2, 2),
                              **arg)
        self.conv_12 = Conv2D(name='e_conv_12',
                              filters=64,
                              kernel_size=7,
                              strides=(2, 2),
                              **arg)
        self.pool_1 = MaxPooling2D(name='e_pool_1',
                                   pool_size=4,
                                   strides=(2, 2),
                                   padding='same')
        self.compress_11 = AveragePooling2D(name='e_comp_11',
                                            pool_size=5,
                                            strides=(3, 3),
                                            padding='same')
        self.compress_12 = Flatten()
        self.compress_13 = Dense(name='e_comp_13',
                                 units=128,
                                 activation=None,
                                 use_bias=False)
        #  activity_regularizer=tf.keras.regularizers.l2(l=0.01))
        self.batch_norm_1 = BatchNormalization(name='e_bn_1')
        self.drop_1 = Dropout(name='e_drop_1', rate=0.5)

        self.conv_21 = Conv2D(name='e_conv_21',
                              filters=128,
                              kernel_size=5,
                              strides=(1, 1),
                              **arg)
        self.conv_22 = Conv2D(name='e_conv_22',
                              filters=128,
                              kernel_size=5,
                              strides=(1, 1),
                              **arg)
        self.pool_2 = MaxPooling2D(name='e_pool_2',
                                   pool_size=4,
                                   strides=(2, 2),
                                   padding='same')
        self.compress_21 = AveragePooling2D(name='e_comp_21',
                                            pool_size=5,
                                            strides=(3, 3),
                                            padding='same')
        self.compress_22 = Flatten()
        self.compress_23 = Dense(name='e_comp_23',
                                 units=128,
                                 activation=None,
                                 use_bias=False)
        #  activity_regularizer=tf.keras.regularizers.l2(l=0.01))
        self.batch_norm_2 = BatchNormalization(name='e_bn_2')
        self.drop_2 = Dropout(name='e_drop_2', rate=0.5)

        self.conv_31 = Conv2D(name='e_conv_31',
                              filters=256,
                              kernel_size=3,
                              strides=(1, 1),
                              **arg)
        self.conv_32 = Conv2D(name='e_conv_32',
                              filters=256,
                              kernel_size=3,
                              strides=(1, 1),
                              **arg)
        self.pool_3 = MaxPooling2D(name='e_pool_3',
                                   pool_size=2,
                                   strides=(2, 2),
                                   padding='same')
        self.compress_31 = AveragePooling2D(name='e_comp_31',
                                            pool_size=3,
                                            strides=(1, 1),
                                            padding='same')
        self.compress_32 = Flatten()
        self.compress_33 = Dense(name='e_comp_33',
                                 units=128,
                                 activation=None,
                                 use_bias=False)
        #  activity_regularizer=tf.keras.regularizers.l2(l=0.01))
        self.batch_norm_3 = BatchNormalization(name='e_bn_3')
        self.drop_3 = Dropout(name='e_drop_3', rate=0.5)
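
As in the previous example, only `__init__` is shown; the three parallel `compress_*` branches suggest a 128-d code taken after each conv block. The `call` below is a hedged reconstruction (both the wiring and the way the three codes combine are assumptions):

    def call(self, x, training=False):
        # assumed forward pass; each block feeds a compressed 128-d skip code
        x = self.pool_1(self.conv_12(self.conv_11(x)))
        z1 = self.compress_13(self.compress_12(self.compress_11(x)))
        z1 = self.drop_1(self.batch_norm_1(z1, training=training), training=training)

        x = self.pool_2(self.conv_22(self.conv_21(x)))
        z2 = self.compress_23(self.compress_22(self.compress_21(x)))
        z2 = self.drop_2(self.batch_norm_2(z2, training=training), training=training)

        x = self.pool_3(self.conv_32(self.conv_31(x)))
        z3 = self.compress_33(self.compress_32(self.compress_31(x)))
        z3 = self.drop_3(self.batch_norm_3(z3, training=training), training=training)

        return z1 + z2 + z3  # combining by summation is a guess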