Example #1
 def __build_encoder(self):
     with tf.name_scope('x'):
         self.x = tf.placeholder(tf.float32, self.img_shape)
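         # derive the batch size dynamically from the placeholder so the graph accepts any batch size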
         self.batch_size = tf.shape(self.x, out_type=tf.int32)[0]
     feature_map = self.x
     with tf.name_scope('encoder'):
         with tf.variable_scope('encoder_w'):
             for i_block in range(self.num_block):
                 with tf.name_scope('block' + str(i_block)):
                     feature_map = res_block('block' + str(i_block) + '_w',
                                             feature_map,
                                             self.num_layer_per_block,
                                             self.num_filter[i_block],
                                             self.filter_size, self.padding,
                                             self.reg, self.activation_fn)
                 with tf.name_scope('pool' + str(i_block)):
                     feature_map = tf.nn.max_pool(feature_map, [1, 2, 2, 1],
                                                  [1, 2, 2, 1], 'SAME')
             for i_fc in range(self.num_fc_layer):
                 with tf.name_scope('fc' + str(i_fc)):
                     feature_map = dense('fc' + str(i_fc) + '_w',
                                         feature_map, self.fc_dim[i_fc],
                                         self.reg, self.activation_fn)
             with tf.name_scope('mu_z'):
                 self.mu_z = dense('mu_z_w', feature_map, self.latent_dim,
                                   self.reg)
             with tf.name_scope('sd_z'):
                 self.logsd_z = dense('logsd_z_w', feature_map,
                                      self.latent_dim, self.reg)
                 self.sd_z = tf.exp(self.logsd_z)
                 self.mean_sd_z = tf.reduce_mean(self.sd_z, 0)
Example #2
 def compute_est_samples(z, params=None, reuse=tf.AUTO_REUSE):
     with tf.variable_scope("estimator"):
         with arg_scope([nn.dense], params=params):
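            # arg_scope threads the shared `params` into every nn.dense call below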
             with tf.variable_scope("decoder", reuse=reuse):
                 h_dec_1 = nn.dense(z, vae_z_dim, 128, "dense1", nonlinearity=nonlin)
                 h_dec_2 = nn.dense(h_dec_1, 128, 128, "dense2", nonlinearity=nonlin)
                 x_mean = nn.dense(h_dec_2, 128, x_dim, "dense3", nonlinearity=None)
                 return x_mean
Example #3
def classifier(inp, is_training, init=False, reuse=False, getter=None, category=125):
    with tf.variable_scope('discriminator_model', reuse=reuse, custom_getter=getter):
        counter = {}
        #x = tf.reshape(inp, [-1, 32, 32, 3])
        x = tf.reshape(inp, [-1, 200, 30, 3])
        x = tf.layers.dropout(x, rate=0.2, training=is_training, name='dropout_0')

        x = nn.conv2d(x, 96, nonlinearity=leakyReLu, init=init, counters=counter)                #  64*200*30*96
        x = nn.conv2d(x, 96, nonlinearity=leakyReLu, init=init, counters=counter)                #  64*200*30*96
        #x = nn.conv2d(x, 96, stride=[2, 2], nonlinearity=leakyReLu, init=init, counters=counter) 
        x = nn.conv2d(x, 96, stride=[5, 2], nonlinearity=leakyReLu, init=init, counters=counter) #  64*40*15*96
        
        x = tf.layers.dropout(x, rate=0.5, training=is_training, name='dropout_1')               #  64*40*15*96

        x = nn.conv2d(x, 192, nonlinearity=leakyReLu, init=init, counters=counter)               #  64*40*15*192
        x = nn.conv2d(x, 192, nonlinearity=leakyReLu, init=init, counters=counter)               #  64*40*15*192
        #x = nn.conv2d(x, 192, stride=[2, 2], nonlinearity=leakyReLu, init=init, counters=counter)
        x = nn.conv2d(x, 192, stride=[5, 2], nonlinearity=leakyReLu, init=init, counters=counter)#  64*8*8*192

        x = tf.layers.dropout(x, rate=0.5, training=is_training, name='dropout_2')               #  64*8*8*192

        x = nn.conv2d(x, 192, pad='VALID', nonlinearity=leakyReLu, init=init, counters=counter)  #  64*6*6*192
        x = nn.nin(x, 192, counters=counter, nonlinearity=leakyReLu, init=init)                  #  64*6*6*192
        x = nn.nin(x, 192, counters=counter, nonlinearity=leakyReLu, init=init)                  #  64*6*6*192
        x = tf.layers.max_pooling2d(x, pool_size=6, strides=1, name='avg_pool_0')                #  64*1*1*192 (max pooling despite the 'avg_pool' name)
        x = tf.squeeze(x, [1, 2])                                                                #  64*192

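        # keep the pooled features as an intermediate representation (e.g., for feature matching)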
        intermediate_layer = x

        #logits = nn.dense(x, 10, nonlinearity=None, init=init, counters=counter, init_scale=0.1)
        logits = nn.dense(x, category, nonlinearity=None, init=init, counters=counter, init_scale=0.1) # 64*125
        print('logits:', logits)

        return logits, intermediate_layer
Example #4
    def build(self, input_shape):
        B, H, W, C = input_shape
        if self.out_ch is None:
            self.out_ch = C
        self.normalize_1 = normalize('norm1')
        self.normalize_2 = normalize('norm2')

        self.dense = nn.dense(name='temb_proj',
                              num_units=self.out_ch,
                              spec_norm=self.spec_norm)
        self.conv2d_1 = nn.conv2d(name='conv1',
                                  num_units=self.out_ch,
                                  spec_norm=self.spec_norm)

        self.conv2d_2 = nn.conv2d(name='conv2',
                                  num_units=self.out_ch,
                                  init_scale=0.,
                                  spec_norm=self.spec_norm,
                                  use_scale=self.use_scale)
        if self.conv_shortcut:
            self.conv2d_shortcut = nn.conv2d(name='conv_shortcut',
                                             num_units=self.out_ch,
                                             spec_norm=self.spec_norm)
        else:
            self.nin_shortcut = nn.nin(name='nin_shortcut',
                                       num_units=self.out_ch,
                                       spec_norm=self.spec_norm)
Example #5
def discriminator(inp, is_training, init=False, reuse=False, getter=None):
    with tf.variable_scope('discriminator_model', reuse=reuse, custom_getter=getter):
        counter = {}
        x = tf.reshape(inp, [-1, 32, 32, 3])

        x = tf.layers.dropout(x, rate=0.2, training=is_training, name='dropout_0')

        x = nn.conv2d(x, 96, nonlinearity=leakyReLu, init=init, counters=counter)
        x = nn.conv2d(x, 96, nonlinearity=leakyReLu, init=init, counters=counter)
        x = nn.conv2d(x, 96, stride=[2, 2], nonlinearity=leakyReLu, init=init, counters=counter)

        x = tf.layers.dropout(x, rate=0.5, training=is_training, name='dropout_1')

        x = nn.conv2d(x, 192, nonlinearity=leakyReLu, init=init, counters=counter)
        x = nn.conv2d(x, 192, nonlinearity=leakyReLu, init=init, counters=counter)
        x = nn.conv2d(x, 192, stride=[2, 2], nonlinearity=leakyReLu, init=init, counters=counter)

        x = tf.layers.dropout(x, rate=0.5, training=is_training, name='dropout_2')

        x = nn.conv2d(x, 192, pad='VALID', nonlinearity=leakyReLu, init=init, counters=counter)
        x = nn.nin(x, 192, counters=counter, nonlinearity=leakyReLu, init=init)
        x = nn.nin(x, 192, counters=counter, nonlinearity=leakyReLu, init=init)
        x = tf.layers.max_pooling2d(x, pool_size=6, strides=1,
                                    name='avg_pool_0')
        x = tf.squeeze(x, [1, 2])

        intermediate_layer = x

        logits = nn.dense(x, 10, nonlinearity=None, init=init, counters=counter, init_scale=0.1)

        return logits, intermediate_layer
Example #6
 def __build_decoder(self):
     with tf.name_scope('sample'):
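         # reparameterization trick: z = mu_z + sd_z * eps, with eps ~ N(0, I)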
         self.noise = tf.random_normal([self.batch_size, self.latent_dim],
                                       0.0, 1.0, tf.float32)
         self.z = self.noise * self.sd_z + self.mu_z
     feature_map = self.z
     with tf.name_scope('decoder'):
         with tf.variable_scope('decoder_w'):
             for i_fc in range(self.num_fc_layer):
                 with tf.name_scope('fc' + str(i_fc)):
                     feature_map = dense('fc' + str(i_fc) + '_w',
                                         feature_map,
                                         self.fc_dim[-1 - i_fc], self.reg,
                                         self.activation_fn)
             feature_map_dim = self.num_filter[-1] * \
                 self.smallest_size * self.smallest_size
             with tf.name_scope('fc' + str(self.num_fc_layer)):
                 feature_map = dense('fc' + str(self.num_fc_layer) + '_w',
                                     feature_map, feature_map_dim, self.reg,
                                     self.activation_fn)
                 feature_map = tf.reshape(feature_map, [
                     -1, self.smallest_size, self.smallest_size,
                     self.num_filter[-1]
                 ])
             for i_block in range(self.num_block):
                 with tf.name_scope('upsample' + str(i_block)):
                     feature_map = upsample(feature_map)
                 with tf.name_scope('block' + str(i_block)):
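                     # the final block maps back to 3 channels to reconstruct an RGB image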
                     if i_block == self.num_block - 1:
                         num_filter = 3
                     else:
                         num_filter = self.num_filter[-2 - i_block]
                     feature_map = res_block('block' + str(i_block) + '_w',
                                             feature_map,
                                             self.num_layer_per_block,
                                             num_filter, self.filter_size,
                                             self.padding, self.reg,
                                             self.activation_fn)
             with tf.name_scope('x_hat'):
                 self.x_hat = tf.nn.sigmoid(feature_map)
             with tf.variable_scope('log_gamma'):
                 self.log_gamma = tf.get_variable('log_gamma', [],
                                                  tf.float32,
                                                  tf.zeros_initializer(),
                                                  trainable=True)
             with tf.name_scope('gamma'):
                 self.gamma = tf.exp(self.log_gamma)
Example #7
    def build(self, input_shape):
        # timestep embedding
        self.temb_dense_0 = nn.dense(name='temb/dense0',
                                     num_units=self.ch * 4,
                                     spec_norm=self.spec_norm)
        self.temb_dense_1 = nn.dense(name='temb/dense1',
                                     num_units=self.ch * 4,
                                     spec_norm=self.spec_norm)
        self.temb_dense_2 = nn.dense(name='temb/dense2',
                                     num_units=self.ch * self.ch_mult[-1],
                                     spec_norm=False)

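        # S tracks the current (square) spatial resolution, assuming NHWC inputs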
        S = input_shape[-3]
        self.res_levels = []
        self.attn_s = dict()
        self.downsample_s = []

        # downsample
        self.conv2d_in = nn.conv2d(name='conv_in',
                                   num_units=self.ch,
                                   spec_norm=self.spec_norm)
        for i_level in range(self.num_resolutions):
            res_s = []
            if self.use_attention and S in self.attn_resolutions:
                self.attn_s[str(S)] = []
            for i_block in range(self.num_res_blocks):
                res_s.append(
                    resnet_block(name='level_{}_block_{}'.format(
                        i_level, i_block),
                                 out_ch=self.ch * self.ch_mult[i_level]))
                if self.use_attention and S in self.attn_resolutions:
                    self.attn_s[str(S)].append(
                        attn_block(
                            name='down_{}_attn_{}'.format(i_level, i_block)))
            self.res_levels.append(res_s)

            if i_level != self.num_resolutions - 1:
                self.downsample_s.append(
                    downsample(name='downsample_{}'.format(i_level),
                               with_conv=self.resamp_with_conv))
                S = S // 2

        # end
        self.normalize_out = normalize(name='norm_out')
        self.fc_out = nn.dense(name='dense_out', num_units=1, spec_norm=False)
Example #8
    def compute_est_ll(x, params=None, reuse=tf.AUTO_REUSE):
        with tf.variable_scope("estimator", reuse=reuse):
            logvae_x_var = tf.get_variable(
                "logvae_x_var", (),
                tf.float32,
                trainable=True,
                initializer=tf.constant_initializer(-1))

            with arg_scope([nn.dense], params=params):
                with tf.variable_scope("encoder", reuse=reuse):
                    h_enc_1 = nn.dense(
                        x, x_dim, 500 * 2, "dense1", nonlinearity=nonlin)
                    # h_enc_1 = nn.batch_norm(h_enc_1, "bn1", 129, 2)
                    h_enc_2 = nn.dense(
                        h_enc_1,
                        500 * 2,
                        200 * 2,
                        "dense2",
                        nonlinearity=nonlin)
                    # h_enc_2 = nn.batch_norm(h_enc_2, "bn2", 128, 2)
                    z_mean = nn.dense(
                        h_enc_2,
                        200 * 2,
                        vae_z_dim,
                        "dense3",
                        nonlinearity=None)
                    z_logvar = nn.dense(
                        h_enc_2,
                        200 * 2,
                        vae_z_dim,
                        "dense4",
                        nonlinearity=None)
                epsilon = tf.random_normal(tf.shape(z_mean), dtype=tf.float32)
                z = z_mean + tf.exp(0.5 * z_logvar) * epsilon

                with tf.variable_scope("decoder", reuse=reuse):
                    h_dec_1 = nn.dense(
                        z, vae_z_dim, 200 * 2, "dense1", nonlinearity=nonlin)
                    # h_dec_1 = nn.batch_norm(h_dec_1, "bn1", 127, 2)
                    h_dec_2 = nn.dense(
                        h_dec_1,
                        200 * 2,
                        500 * 2,
                        "dense2",
                        nonlinearity=nonlin)
                    # h_dec_2 = nn.batch_norm(h_dec_2, "bn2", 128, 2)
                    x_mean = nn.dense(
                        h_dec_2, 500 * 2, x_dim, "dense3", nonlinearity=None)
                    x_mean = tf.nn.tanh(x_mean)

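        # ELBO = E_q[log p(x|z)] - KL(q(z|x) || p(z)); the reconstruction term is a
        # diagonal Gaussian log-likelihood with a shared, learned variance vae_x_var.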
        vae_x_var = tf.exp(logvae_x_var)
        elbo = tf.reduce_mean(
            tf.reduce_sum(
                -0.5 * np.log(2 * np.pi) - 0.5 * tf.log(vae_x_var) -
                tf.layers.flatten(tf.square(x - x_mean)) / (2 * vae_x_var),
                axis=1) -
            tf.reduce_sum(
                -0.5 * (1 + z_logvar - tf.square(z_mean) - tf.exp(z_logvar)),
                axis=1))
        return elbo, x_mean  # x_mean was already passed through tanh above
Example #9
    def compute_est_ll(x, params=None, reuse=tf.AUTO_REUSE):
        with tf.variable_scope("estimator"):
            with arg_scope([nn.dense], params=params):
                with tf.variable_scope("encoder", reuse=reuse):
                    h_enc_1 = nn.dense(x,
                                       x_dim,
                                       500 * 2,
                                       "dense1",
                                       nonlinearity=nonlin)
                    # h_enc_1 = nn.batch_norm(h_enc_1, "bn1", 129, 2)
                    h_enc_2 = nn.dense(h_enc_1,
                                       500 * 2,
                                       200 * 2,
                                       "dense2",
                                       nonlinearity=nonlin)
                    # h_enc_2 = nn.batch_norm(h_enc_2, "bn2", 128, 2)
                    z_mean = nn.dense(h_enc_2,
                                      200 * 2,
                                      vae_z_dim,
                                      "dense3",
                                      nonlinearity=None)
                    z_logvar = nn.dense(h_enc_2,
                                        200 * 2,
                                        vae_z_dim,
                                        "dense4",
                                        nonlinearity=None)
                epsilon = tf.random_normal(tf.shape(z_mean), dtype=tf.float32)
                z = z_mean + tf.exp(0.5 * z_logvar) * epsilon

                with tf.variable_scope("decoder", reuse=reuse):
                    h_dec_1 = nn.dense(z,
                                       vae_z_dim,
                                       200 * 2,
                                       "dense1",
                                       nonlinearity=nonlin)
                    # h_dec_1 = nn.batch_norm(h_dec_1, "bn1", 127, 2)
                    h_dec_2 = nn.dense(h_dec_1,
                                       200 * 2,
                                       500 * 2,
                                       "dense2",
                                       nonlinearity=nonlin)
                    # h_dec_2 = nn.batch_norm(h_dec_2, "bn2", 128, 2)
                    x_mean = nn.dense(h_dec_2,
                                      500 * 2,
                                      x_dim,
                                      "dense3",
                                      nonlinearity=None)

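        # ELBO: Bernoulli reconstruction via sigmoid cross-entropy (x is assumed
        # to be in [0, 1]), minus the analytic Gaussian KL term.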
        elbo = tf.reduce_mean(
            tf.reduce_sum(-tf.nn.sigmoid_cross_entropy_with_logits(
                logits=x_mean, labels=x),
                          axis=1) -
            tf.reduce_sum(-0.5 * (1 + z_logvar - tf.square(z_mean) -
                                  tf.exp(z_logvar)),
                          axis=1))
        return elbo, tf.nn.sigmoid(x_mean)
Example #10
    def compute_est_ll(x, params=None, reuse=tf.AUTO_REUSE):
        with tf.variable_scope("estimator"):
            with arg_scope([nn.dense], params=params):
                with tf.variable_scope("encoder", reuse=reuse):
                    h_enc_1 = nn.dense(x,
                                       2,
                                       128,
                                       "dense1",
                                       nonlinearity=nonlin)
                    # h_enc_1 = nn.batch_norm(h_enc_1, "bn1", 128, 2)
                    h_enc_2 = nn.dense(h_enc_1,
                                       128,
                                       128,
                                       "dense2",
                                       nonlinearity=nonlin)
                    # h_enc_2 = nn.batch_norm(h_enc_2, "bn2", 128, 2)
                    z_mean = nn.dense(h_enc_2,
                                      128,
                                      vae_z_dim,
                                      "dense3",
                                      nonlinearity=None)
                    z_logvar = nn.dense(h_enc_2,
                                        128,
                                        vae_z_dim,
                                        "dense4",
                                        nonlinearity=None)
                epsilon = tf.random_normal(tf.shape(z_mean), dtype=tf.float32)
                z = z_mean + tf.exp(0.5 * z_logvar) * epsilon

                with tf.variable_scope("decoder", reuse=reuse):
                    h_dec_1 = nn.dense(z,
                                       vae_z_dim,
                                       128,
                                       "dense1",
                                       nonlinearity=nonlin)
                    # h_dec_1 = nn.batch_norm(h_dec_1, "bn1", 128, 2)
                    h_dec_2 = nn.dense(h_dec_1,
                                       128,
                                       128,
                                       "dense2",
                                       nonlinearity=nonlin)
                    # h_dec_2 = nn.batch_norm(h_dec_2, "bn2", 128, 2)
                    x_mean = nn.dense(h_dec_2,
                                      128,
                                      x_dim,
                                      "dense3",
                                      nonlinearity=None)

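        # same ELBO with a Gaussian likelihood; here vae_x_var is assumed to be a
        # fixed variance constant defined at module level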
        elbo = tf.reduce_mean(
            tf.reduce_sum(-0.5 * np.log(2 * np.pi) - 0.5 * np.log(vae_x_var) -
                          tf.square(x - x_mean) / (2 * vae_x_var),
                          axis=1) -
            tf.reduce_sum(-0.5 * (1 + z_logvar - tf.square(z_mean) -
                                  tf.exp(z_logvar)),
                          axis=1))
        return elbo, x_mean
Example #11
def discriminator(inp, is_training, init=False, reuse=False, getter=None):
    with tf.variable_scope('discriminator_model', reuse=reuse, custom_getter=getter):

        counter = {}
        x = inp

        # `~is_training` assumes a boolean tensor (so `~` maps to tf.logical_not);
        # with a plain Python bool, `~True == -2`, which is still truthy.
        x = gaussian_noise_layer(x, std=0.3, deterministic=~is_training)
        x = nn.dense(x, 1000, nonlinearity=tf.nn.relu, init=init, counters=counter, train_scale=False, init_w=init_w)

        x = gaussian_noise_layer(x, std=0.5, deterministic=~is_training)
        x = nn.dense(x, 500, nonlinearity=tf.nn.relu, init=init, counters=counter, train_scale=False, init_w=init_w)

        x = gaussian_noise_layer(x, std=0.5, deterministic=~is_training)
        x = nn.dense(x, 250, nonlinearity=tf.nn.relu, init=init, counters=counter, train_scale=False, init_w=init_w)

        x = gaussian_noise_layer(x, std=0.5, deterministic=~is_training)
        x = nn.dense(x, 250, nonlinearity=tf.nn.relu, init=init, counters=counter, train_scale=False, init_w=init_w)

        x = gaussian_noise_layer(x, std=0.5, deterministic=~is_training)
        x = nn.dense(x, 250, nonlinearity=tf.nn.relu, init=init, counters=counter, train_scale=False, init_w=init_w)

        inter_layer = x

        x = gaussian_noise_layer(x, std=0.5, deterministic=~is_training)
        logits = nn.dense(x, 1, nonlinearity=None, init=init, counters=counter, train_scale=True, init_w=init_w)

        return logits, inter_layer
Example #12
    def build_forward(self):
        config = self.config
        image = self.p_image  # [1, 3, H, W]
        image_shape2d = tf.shape(image)[2:]

        # [N, 3, box_size, box_size]
        boxes = self.boxes
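        # treat the boxes as fixed inputs: no gradient flows back to their source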
        boxes = tf.stop_gradient(boxes)
        box_images = crop_and_resize(
            image, boxes, tf.zeros([tf.shape(boxes)[0]], dtype=tf.int32),
            config.box_size)
        # box_images = roi_align(image, boxes, config.box_size)

        # [N, C, FS, FS]
        c2, c3, c4, c5 = resnet_fpn_backbone(
            box_images,
            config.resnet_num_block,
            use_gn=False,
            resolution_requirement=32.0,
            use_dilations=False,
            use_deformable=False,
            tf_pad_reverse=True,
            freeze=config.freeze,
            use_basic_block=config.use_basic_block,
            use_se=config.use_se)
        # box_size must be divisible by 32 (e.g., 224); use integer division
        # so the reshape gets int dimensions
        c5 = tf.reshape(
            c5, [-1, 2048, config.box_size // 32, config.box_size // 32])

        # fully-connected for classification
        with tf.variable_scope("dcr_classification"):
            #dim = config.dcr_fc_dim
            initializer = tf.variance_scaling_initializer()

            hidden = c5
            #hidden = dense(c5, dim, W_init=initializer, activation=tf.nn.relu, scope="fc")
            #hidden = dense(hidden, dim, W_init=initializer, activation=tf.nn.relu, scope="fc7")

            classification = dense(
                hidden,
                config.num_class,
                W_init=tf.random_normal_initializer(stddev=0.01),
                scope="class")  # [K, num_class]

        self.logits = classification
        self.yp = tf.nn.softmax(classification)
Example #13
def generator(z_seed, is_training, init=False, reuse=False):
    with tf.variable_scope('generator_model', reuse=reuse):
        counter = {}
        with tf.variable_scope('dense1'):
            x = tf.layers.dense(z_seed, 500, name='fc1', activation=None)
            x = tf.layers.batch_normalization(x, training=is_training)
            # x = tf.nn.softplus(x)
            x = tf.nn.relu(x)
        with tf.variable_scope('dense2'):
            x = tf.layers.dense(x, 500, name='fc1', activation=None)
            x = tf.layers.batch_normalization(x, training=is_training)
            # x = tf.nn.softplus(x)
            x = tf.nn.relu(x)

        with tf.variable_scope('dense3'):
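            # weight-normalized output layer mapping to 28*28 pixels in [0, 1]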
            x = nn.dense(x, 28**2, tf.sigmoid, init=init, counters=counter, train_scale=True)

        return x
Example #14
def model_spec(x,
               keep_prob=0.5,
               deterministic=False,
               init=False,
               use_weight_normalization=False,
               use_batch_normalization=False,
               use_mean_only_batch_normalization=False):
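    # a conv-net classifier for 32x32 inputs (e.g., CIFAR-10): three conv blocks
    # with max-pooling and dropout, two NiN layers, global average pooling, and
    # a 10-way linear output; init=True is typically passed once for
    # data-dependent weight-normalization initialization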
    x = nn.gaussian_noise(x,
                          deterministic=deterministic,
                          name='gaussian_noise')

    x = nn.conv2d(
        x,
        num_filters=96,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv1',
        nonlinearity=nn.lRelu)

    x = nn.conv2d(
        x,
        num_filters=96,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv2',
        nonlinearity=nn.lRelu)

    x = nn.conv2d(
        x,
        num_filters=96,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv3',
        nonlinearity=nn.lRelu)

    x = tf.nn.max_pool(x,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='max_pool_1')
    x = nn.dropout(x,
                   keep_prob=keep_prob,
                   deterministic=deterministic,
                   name='drop1')

    x = nn.conv2d(
        x,
        num_filters=192,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv4',
        nonlinearity=nn.lRelu)

    x = nn.conv2d(
        x,
        num_filters=192,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv5',
        nonlinearity=nn.lRelu)

    x = nn.conv2d(
        x,
        num_filters=192,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv6',
        nonlinearity=nn.lRelu)

    x = tf.nn.max_pool(x,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='max_pool_2')
    x = nn.dropout(x,
                   keep_prob=keep_prob,
                   deterministic=deterministic,
                   name='drop2')

    x = nn.conv2d(
        x,
        num_filters=192,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        pad='VALID',
        name='conv7',
        nonlinearity=nn.lRelu)

    x = nn.NiN(
        x,
        num_units=192,
        nonlinearity=nn.lRelu,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='Nin1')

    x = nn.NiN(
        x,
        num_units=192,
        nonlinearity=nn.lRelu,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='Nin2')

    x = nn.globalAvgPool(x, name='Globalavgpool1')

    x = nn.dense(
        x,
        num_units=10,
        nonlinearity=None,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='output_dense')

    return x
Example #15
def model_spec(x,
               keep_prob=0.5,
               deterministic=False,
               init=False,
               use_weight_normalization=False,
               use_batch_normalization=False,
               use_mean_only_batch_normalization=False,
               use_xavier_initialization=False):

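    # three stages of paired 5x5 convs with PReLU, each followed by max-pooling,
    # then a 2-D embedding layer (e.g., for plotting) before a bias-free
    # 10-way output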
    # Stage 1
    x = nn.conv2d(
        x,
        num_filters=32,
        filter_size=[5, 5],
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv1_1',
        pad='SAME',
        nonlinearity=nn.PRelu,
        use_xavier_initialization=use_xavier_initialization)

    x = nn.conv2d(
        x,
        num_filters=32,
        filter_size=[5, 5],
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv1_2',
        pad='SAME',
        nonlinearity=nn.PRelu,
        use_xavier_initialization=use_xavier_initialization)
    x = tf.nn.max_pool(x,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='max_pool_1_1')

    # Stage 2
    x = nn.conv2d(
        x,
        num_filters=64,
        filter_size=[5, 5],
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv2_1',
        pad='SAME',
        nonlinearity=nn.PRelu,
        use_xavier_initialization=use_xavier_initialization)

    x = nn.conv2d(
        x,
        num_filters=64,
        filter_size=[5, 5],
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv2_2',
        pad='SAME',
        nonlinearity=nn.PRelu,
        use_xavier_initialization=use_xavier_initialization)
    x = tf.nn.max_pool(x,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='max_pool_2_1')

    # Stage 3
    x = nn.conv2d(
        x,
        num_filters=128,
        filter_size=[5, 5],
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv3_1',
        pad='SAME',
        nonlinearity=nn.PRelu,
        use_xavier_initialization=use_xavier_initialization)

    x = nn.conv2d(
        x,
        num_filters=128,
        filter_size=[5, 5],
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='conv3_2',
        pad='SAME',
        nonlinearity=nn.PRelu,
        use_xavier_initialization=use_xavier_initialization)
    x = tf.nn.max_pool(x,
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding='SAME',
                       name='max_pool_3_1')

    # embedding layer
    x = tf.reshape(x, [x.get_shape()[0], -1])

    embed = nn.dense(
        x,
        2,
        nonlinearity=None,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='embedding_layer',
        use_xavier_initialization=use_xavier_initialization)

    x = nn.PRelu(embed, name='embedding_layer/PRelu')

    x = nn.dense(
        x,
        10,
        nonlinearity=None,
        init=init,
        use_weight_normalization=use_weight_normalization,
        use_batch_normalization=use_batch_normalization,
        use_mean_only_batch_normalization=use_mean_only_batch_normalization,
        deterministic=deterministic,
        name='output_dense',
        use_xavier_initialization=use_xavier_initialization,
        use_bias=False)

    return x, embed