コード例 #1
0
def get_model(X, batch_size, image_dimension):
    """Build a small CNN: two conv blocks followed by a two-layer MLP.

    :param X: symbolic input batch
    :param batch_size: number of examples per batch
    :param image_dimension: height/width of the (square) input images
    :return: (train_output, test_output, trainable_parameters); the
        outputs are sigmoid probabilities with a single unit.
    """
    shape = (batch_size, 3, image_dimension, image_dimension)
    parameters = []

    # conv block 1: 32 filters of size (3, 3) -> 2x2 maxpool -> relu
    train_out, test_out, layer_params, shape = convolutional(
        X, X, shape, 32, (3, 3))
    parameters.extend(layer_params)

    train_out, test_out, layer_params, shape = maxpool(
        train_out, test_out, shape, (2, 2))

    train_out, test_out, layer_params, shape = activation(
        train_out, test_out, shape, 'relu')

    # conv block 2: 32 filters of size (5, 5) -> 2x2 maxpool -> relu
    train_out, test_out, layer_params, shape = convolutional(
        train_out, test_out, shape, 32, (5, 5))
    parameters.extend(layer_params)

    train_out, test_out, layer_params, shape = maxpool(
        train_out, test_out, shape, (2, 2))

    train_out, test_out, layer_params, shape = activation(
        train_out, test_out, shape, 'relu')

    # MLP layer 1: flatten the feature maps to (batch, features),
    # then a 500-unit linear layer with relu
    train_out = train_out.flatten(2)
    test_out = test_out.flatten(2)

    flat_shape = (shape[0], shape[1] * shape[2] * shape[3])
    train_out, test_out, layer_params, shape = linear(
        train_out, test_out, flat_shape, 500)
    parameters.extend(layer_params)

    train_out, test_out, layer_params, shape = activation(
        train_out, test_out, shape, 'relu')

    # MLP layer 2: single output unit with sigmoid
    train_out, test_out, layer_params, shape = linear(
        train_out, test_out, shape, 1)
    parameters.extend(layer_params)

    train_out, test_out, layer_params, shape = activation(
        train_out, test_out, shape, 'sigmoid')

    return train_out, test_out, parameters
コード例 #2
0
ファイル: conv_3_layers.py プロジェクト: tfjgeorge/ift6266
def get_model(X, batch_size, image_dimension):
    """Build a CNN with three identical conv blocks and a two-layer MLP.

    :param X: symbolic input batch
    :param batch_size: number of examples per batch
    :param image_dimension: height/width of the (square) input images
    :return: (train_output, test_output, trainable_parameters); the
        outputs are sigmoid probabilities with a single unit.
    """
    shape = (batch_size, 3, image_dimension, image_dimension)
    parameters = []

    # three identical blocks: 32 (3, 3) filters -> 2x2 maxpool -> relu
    out, out_test = X, X
    for _ in range(3):
        out, out_test, layer_params, shape = convolutional(
            out, out_test, shape, 32, (3, 3))
        parameters += layer_params
        out, out_test, layer_params, shape = maxpool(
            out, out_test, shape, (2, 2))
        out, out_test, layer_params, shape = activation(
            out, out_test, shape, 'relu')

    # MLP layer 1: flatten to (batch, features), then 500 hidden units + relu
    out = out.flatten(2)
    out_test = out_test.flatten(2)

    out, out_test, layer_params, shape = linear(
        out, out_test,
        (shape[0], shape[1] * shape[2] * shape[3]), 500)
    parameters += layer_params

    out, out_test, layer_params, shape = activation(
        out, out_test, shape, 'relu')

    # MLP layer 2: single sigmoid output unit
    out, out_test, layer_params, shape = linear(out, out_test, shape, 1)
    parameters += layer_params

    out, out_test, layer_params, shape = activation(
        out, out_test, shape, 'sigmoid')

    return out, out_test, parameters
コード例 #3
0
def discriminator_block(x,
                        is_training,
                        filters,
                        activation_='lrelu',
                        kernel_size=(3, 3),
                        normalization='spectral',
                        residual=True):
    """Residual discriminator block: conv -> conv -> optional skip -> lrelu.

    The second conv_block has no activation so the residual addition is
    performed on pre-activation features.
    """
    with tf.variable_scope(None, discriminator_block.__name__):
        h = conv_block(x, filters, activation_, kernel_size, 'same',
                       normalization, is_training, 0.)
        h = conv_block(h, filters, None, kernel_size, 'same',
                       normalization, is_training, 0.)
        if residual:
            # skip connection assumes x and h have matching shapes
            h += x
        # NOTE(review): final activation is hard-coded to 'lrelu' rather
        # than `activation_` — confirm this is intended.
        return activation(h, 'lrelu')
コード例 #4
0
def first_block(x,
                target_size,
                noise_dim,
                upsampling='deconv',
                normalization='batch',
                is_training=True):
    """First generator block: project the noise vector to a spatial tensor.

    :param x: input noise tensor
    :param target_size: spatial size of the produced feature map
    :param noise_dim: dimensionality of the noise vector
    :param upsampling: 'deconv' (transposed conv) or 'dense' (fully connected)
    :param normalization: 'batch', 'layer' or None
    :param is_training: forwarded to the normalization layer
    :return: relu-activated feature map with 1024 channels
    :raises ValueError: on an unknown `upsampling` or `normalization` value
    """
    if upsampling == 'deconv':
        # treat the noise vector as a 1x1 "image" and grow it spatially
        # with a transposed convolution
        _x = reshape(x, (1, 1, noise_dim))
        _x = conv2d_transpose(_x,
                              1024,
                              target_size,
                              strides=(1, 1),
                              padding='valid')
    elif upsampling == 'dense':
        # fully-connected projection, then reshape to (h, w, c)
        _x = dense(x, target_size[0] * target_size[1] * 1024)
        _x = reshape(_x, (target_size[1], target_size[0], 1024))
    else:
        # was a bare `raise ValueError`; same type, now with context
        raise ValueError(f'unknown upsampling mode: {upsampling!r}')

    if normalization == 'batch':
        _x = batch_norm(_x, is_training=is_training)
    elif normalization == 'layer':
        _x = layer_norm(_x, is_training=is_training)
    elif normalization is None:
        pass
    else:
        raise ValueError(f'unknown normalization mode: {normalization!r}')
    _x = activation(_x, 'relu')
    return _x
コード例 #5
0
ファイル: sn_layers.py プロジェクト: salty-vanilla/sngan_tf
    def call(self, x, *args, **kwargs):
        """Forward pass of a spectrally-normalized dense layer.

        During training the largest singular value of the weight matrix is
        estimated via get_max_singular_value (presumably one power-iteration
        step — internals not visible here) and the weights are divided by it;
        the normalized weights and the iteration vector are persisted with
        tf.assign so inference can reuse them.  At test time the stored
        normalized weights `w_sn` are used directly.
        """
        with tf.variable_scope(self.scope_name) as vs:
            if not self.is_training:
                # reuse the variables created when the training graph was built
                vs.reuse_variables()
            control_inputs = []
            if self.is_training:
                w_mat = tf.transpose(self.w)
                # sigma: estimated max singular value; _u: updated iteration vector
                sigma, _u = get_max_singular_value(w_mat, self.u)
                w = self.w / sigma

                # persist the updated vector and normalized weights for inference
                control_inputs.append(tf.assign(self.u, _u))
                control_inputs.append(tf.assign(self.w_sn, w))
            else:
                w = self.w_sn
        # NOTE(review): unlike the conv variant below, this exits the variable
        # scope before applying the op; the control dependency still forces
        # both assigns to run before the matmul.
        with tf.control_dependencies(control_inputs):
            _h = tf.nn.bias_add(tf.matmul(x, w), self.bias)
            return activation(_h, self.activation)
コード例 #6
0
def discriminator_block(x,
                        filters,
                        activation_='lrelu',
                        kernel_size=(3, 3),
                        is_training=True,
                        normalization=None,
                        residual=True):
    """Residual discriminator block: conv -> conv -> optional skip -> activation.

    The second conv has no activation, so the residual sum is done on
    pre-activation features; optional layer normalization is applied last.
    """
    with tf.variable_scope(None, discriminator_block.__name__):
        _x = conv_block(x, filters, activation_, kernel_size, is_training,
                        'same', normalization, 0., 'conv_first')
        # NOTE(review): this second call passes 'conv_first' again and None
        # for normalization — looks like a copy-paste; confirm whether
        # 'conv_last' / `normalization` was intended.
        _x = conv_block(_x, filters, None, kernel_size, is_training, 'same',
                        None, 0., 'conv_first')
        if residual:
            # skip connection assumes x and _x have matching shapes
            # (i.e. `filters` equals x's channel count) — TODO confirm
            _x += x
        _x = activation(_x, activation_)
        if normalization == 'layer':
            _x = layer_norm(_x, is_training=is_training)
        return _x
コード例 #7
0
ファイル: transformer.py プロジェクト: talkhouli/sockeye
    def __call__(self, x) -> mx.sym.Symbol:
        """
        Position-wise feed-forward network with activation.

        :param x: Symbol of shape (batch_size, seq_len, num_hidden)
        :return: Symbol of shape (batch_size, seq_len, num_hidden)
        """
        # expand to the inner feed-forward dimension
        hidden = mx.sym.FullyConnected(data=x,
                                       num_hidden=self.num_hidden,
                                       weight=self.w_i2h,
                                       bias=self.b_i2h,
                                       flatten=False)
        hidden = layers.activation(hidden, act_type=self.act_type)
        if self.dropout > 0.0:
            hidden = mx.sym.Dropout(hidden, p=self.dropout)
        # project back to the model dimension
        return mx.sym.FullyConnected(data=hidden,
                                     num_hidden=self.num_model,
                                     weight=self.w_h2o,
                                     bias=self.b_h2o,
                                     flatten=False)
コード例 #8
0
ファイル: sn_layers.py プロジェクト: salty-vanilla/sngan_tf
    def call(self, x, *args, **kwargs):
        """Forward pass of a spectrally-normalized conv2d layer.

        During training the kernel is flattened to a matrix, its largest
        singular value is estimated via get_max_singular_value (presumably
        power iteration — internals not visible here), and the kernel is
        divided by it; the normalized kernel and iteration vector are
        persisted with tf.assign.  At test time the stored normalized
        kernel `w_sn` is used directly.
        """
        with tf.variable_scope(self.scope_name) as vs:
            if not self.is_training:
                # reuse the variables created when the training graph was built
                vs.reuse_variables()

            control_inputs = []
            if self.is_training:
                # flatten the 4-D kernel to (out_channels, rest) so its
                # largest singular value can be estimated
                w_mat = tf.transpose(self.w, (3, 2, 0, 1))
                w_mat = tf.reshape(w_mat, (w_mat.shape[0], -1))
                sigma, _u = get_max_singular_value(w_mat, self.u)
                w = self.w / sigma

                # persist the updated vector and normalized kernel for inference
                control_inputs.append(tf.assign(self.u, _u))
                control_inputs.append(tf.assign(self.w_sn, w))
            else:
                w = self.w_sn
            # the control dependency forces both assigns to run before the conv
            with tf.control_dependencies(control_inputs):
                _h = tf.nn.bias_add(
                    tf.nn.conv2d(x,
                                 w,
                                 strides=self.strides,
                                 padding=self.padding), self.bias)
                return activation(_h, self.activation)
コード例 #9
0
ファイル: model.py プロジェクト: tfjgeorge/ift6268
def get_model(X, batch_size, image_dimension):
    """Build a VGG-style all-convolutional classifier for 10 classes.

    :param X: symbolic input batch
    :param batch_size: number of examples per batch
    :param image_dimension: (height, width) of the input images
    :return: (train_output, test_output, trainable_parameters,
        accumulator_parameters); the outputs are class probabilities.
    """
    shape = (batch_size, 3, image_dimension[0], image_dimension[1])
    parameters = []
    accumulators = []

    # block 1: 64 (3, 3) filters -> 2x2 maxpool -> relu
    out, out_test, layer_params, shape = convolutional(X, X, shape, 64, (3, 3))
    parameters += layer_params
    out, out_test, layer_params, shape = maxpool(out, out_test, shape, (2, 2))
    out, out_test, layer_params, shape = activation(out, out_test, shape, 'relu')

    # block 2: 128 (3, 3) filters -> 2x2 maxpool -> relu
    out, out_test, layer_params, shape = convolutional(out, out_test, shape, 128, (3, 3))
    parameters += layer_params
    out, out_test, layer_params, shape = maxpool(out, out_test, shape, (2, 2))
    out, out_test, layer_params, shape = activation(out, out_test, shape, 'relu')

    # block 3: two 256 (3, 3) convolutions -> 2x2 maxpool -> relu
    out, out_test, layer_params, shape = convolutional(out, out_test, shape, 256, (3, 3))
    parameters += layer_params
    out, out_test, layer_params, shape = activation(out, out_test, shape, 'relu')
    out, out_test, layer_params, shape = convolutional(out, out_test, shape, 256, (3, 3))
    parameters += layer_params
    out, out_test, layer_params, shape = maxpool(out, out_test, shape, (2, 2))
    out, out_test, layer_params, shape = activation(out, out_test, shape, 'relu')

    # "fully connected" layers expressed as 1x1 convolutions
    out, out_test, layer_params, shape = convolutional(out, out_test, shape, 1024, (1, 1))
    parameters += layer_params
    out, out_test, layer_params, shape = activation(out, out_test, shape, 'relu')
    out, out_test, layer_params, shape = convolutional(out, out_test, shape, 1024, (1, 1))
    parameters += layer_params

    # 4x4 average pooling, then a 1x1 convolution down to the 10 classes
    out, out_test, layer_params, shape = avgpool(out, out_test, shape, (4, 4))
    out, out_test, layer_params, shape = convolutional(out, out_test, shape, 10, (1, 1))
    parameters += layer_params

    # batch normalization; its running statistics live in `accumulators`
    out, out_test, layer_params, shape, bn_accumulators = batch_norm(out, out_test, shape)
    accumulators += bn_accumulators
    parameters += layer_params

    # convert class scores to probabilities
    out = multi_dim_softmax(out)
    out_test = multi_dim_softmax(out_test)

    return out, out_test, parameters, accumulators
コード例 #10
0
ファイル: nets.py プロジェクト: JAVersteeg/Deep-SAD-PyTorch
def build_cnn_discriminator(discriminator_input,
                            img_size,
                            return_encoder=False,
                            scope='discriminator',
                            reuse=None,
                            training=None):
    """Build a convolutional discriminator / encoder network.

    The input is repeatedly convolved and 2x-downscaled until the configured
    initial resolution is reached.  Unless training_method == 'gan', a dense
    bottleneck produces the encoder output.

    :param discriminator_input: input image tensor
    :param img_size: image size; first two elements are spatial — assumed
        (height, width, channels) from the usage here, TODO confirm
    :param return_encoder: if True, also return the encoder output
    :param scope: variable scope name
    :param reuse: passed to tf.variable_scope
    :param training: forwarded to the layer helpers
    :return: discriminator output, plus the encoder output when
        `return_encoder` is True
    """
    config = options.get_options()
    max_resolution = max(img_size[0], img_size[1])
    # number of 2x downscaling steps between full and initial resolution
    initial_lod = int(np.log2(max_resolution)) - int(
        np.log2(config.initial_resolution))
    initial_res1 = int(img_size[0] / (2**initial_lod))
    initial_res2 = int(
        img_size[1] /
        (2**initial_lod))  # N.B. assumes the resolutions are powers of 2
    res_str = lambda res: f'{initial_res1 * 2 ** res}x{initial_res2 * 2 ** res}'

    num_features_base = 4096  # number of filters (pre-max!) in the first block
    num_features_max = 512

    def num_features(res):
        # filter count doubles as resolution halves, capped at num_features_max
        return min(int(num_features_base / (2**res)), num_features_max)

    x = discriminator_input
    res = initial_lod

    with tf.variable_scope(scope, reuse=reuse):
        while res > 0:
            nf = num_features(res)
            with tf.variable_scope(res_str(res)):
                with tf.variable_scope('conv1'):
                    x = layers.conv2d(x, nf, 3, training=training)
                    x = layers.activation(layers.apply_bias(x))
                with tf.variable_scope('conv2_down'):
                    x = layers.conv2d_downscale2d(layers.blur2d(x),
                                                  nf,
                                                  3,
                                                  training=training)
                    x = layers.activation(layers.apply_bias(x))
            res -= 1

        x = tf.identity(x, name='encoder_output')
        enc_out = layers.flatten(x)
        nf = num_features(res)  # only used by the disabled GAN head below

        if config.training_method != 'gan':
            with tf.variable_scope('bottleneck'):
                enc_out = layers.dense(x,
                                       config.autoencoder_latent_dim,
                                       gain=1,
                                       training=training)
                enc_out = layers.activation(layers.apply_bias(enc_out))

        # GAN head (not used in the autoencoder configuration):
        # with tf.variable_scope('minibatch_sim'):
        #     x, num_params = layers.minibatch_similarity(x, training=training)
        # with tf.variable_scope('conv'):
        #     x = layers.conv2d(x, nf, 3, training=training)
        #     x = layers.activation(layers.apply_bias(x))
        # with tf.variable_scope('dense1'):
        #     x = layers.dense(x, nf, training=training)
        #     x = layers.activation(layers.apply_bias(x))
        # with tf.variable_scope('dense2'):
        #     x = layers.dense(x, 1, gain=1, training=training)
        #     x = layers.apply_bias(x)

        # BUG FIX: `discriminator` was only assigned inside the commented-out
        # GAN head above, so both return paths raised NameError.  Define it
        # from the current features so the function is usable again.
        discriminator = tf.identity(x, name='discriminator_output')

    if return_encoder:
        return discriminator, enc_out
    else:
        return discriminator
コード例 #11
0
ファイル: nets.py プロジェクト: JAVersteeg/Deep-SAD-PyTorch
def build_cnn_generator(generator_input,
                        img_size,
                        scope='generator',
                        return_mapping=False,
                        reuse=None,
                        training=None,
                        **kwargs):
    """Build a progressive-style CNN generator.

    The latent input is projected with a dense layer to the initial
    resolution, then upscaled by a factor of 2 per resolution block until
    `img_size` is reached; a final 1x1 convolution maps the features to
    img_size[-1] output channels.

    :param generator_input: latent input tensor
    :param img_size: output size; the last element is the channel count
    :param scope: variable scope name
    :param return_mapping: if True, also return the raw generator input
    :param reuse: passed to tf.variable_scope
    :param training: forwarded to the layer helpers
    :param kwargs: ignored (a warning is printed when non-empty)
    :return: generated image tensor (and the input when `return_mapping`)
    """
    if kwargs:
        print(f'Warning! Unused network kwargs ignored: {kwargs}')

    config = options.get_options()
    max_resolution = max(img_size[0], img_size[1])
    # number of 2x upscaling steps from the initial to the full resolution
    initial_lod = int(np.log2(max_resolution)) - int(
        np.log2(config.initial_resolution))  # level of detail
    initial_res1 = int(img_size[0] / (2**initial_lod))
    initial_res2 = int(
        img_size[1] /
        (2**initial_lod))  # N.B. assumes the resolutions are powers of 2
    res_str = lambda res: f'{initial_res1 * 2 ** res}x{initial_res2 * 2 ** res}'

    num_features_base = 4096  # number of filters (pre-max!) in the first block
    num_features_max = 512

    def num_features(res):  # how many filters there are in a certain layer
        return min(int(num_features_base / (2**res)), num_features_max)

    x = generator_input
    res = 0

    with tf.variable_scope(scope, reuse=reuse):
        while res <= initial_lod:
            nf = num_features(res)
            with tf.variable_scope(res_str(res)):
                if res == 0:
                    # lowest resolution: dense projection reshaped to a
                    # (initial_res1, initial_res2, nf) feature map
                    with tf.variable_scope('dense'):
                        initial_num_units = initial_res1 * initial_res2
                        x = layers.dense(x,
                                         nf * initial_num_units,
                                         gain=np.sqrt(2 / initial_num_units),
                                         training=training)
                        x = tf.reshape(x, (-1, initial_res1, initial_res2, nf))
                        x = layers.activation(layers.apply_bias(x))
                else:
                    # higher resolutions: 2x upscale fused with a convolution
                    with tf.variable_scope('conv1_up'):
                        x = layers.blur2d(
                            layers.upscale2d_conv2d(
                                x, nf, 3,
                                training=training))  # taken from StyleGAN
                        x = layers.activation(layers.apply_bias(x))

                with tf.variable_scope('conv2'):
                    x = layers.conv2d(x, nf, 3, training=training)
                    x = layers.activation(layers.apply_bias(x))

                # print('gen')
                # print((x.shape, res_str(res)))

            res += 1

        # map the features to the requested number of output channels
        x = layers.conv2d(x, img_size[-1], 1, gain=1, training=training)
        generator = tf.identity(x, name='generator_output')

    if return_mapping:
        return generator, generator_input
    else:
        return generator
コード例 #12
0
ファイル: model.py プロジェクト: tfjgeorge/ift6268
def get_model(X, batch_size, image_dimension):
    """Build a VGG-style all-convolutional classifier for 10 classes.

    :param X: symbolic input batch
    :param batch_size: number of examples per batch
    :param image_dimension: (height, width) of the input images
    :return: (train_output, test_output, trainable_parameters,
        accumulator_parameters); the outputs are class probabilities.
    """
    cur_shape = (batch_size, 3, image_dimension[0], image_dimension[1])
    trainable = []
    accumulators = []

    def conv(h, h_test, shape, n_filters, size):
        # convolution whose parameters are recorded for training
        h, h_test, p, shape = convolutional(h, h_test, shape, n_filters, size)
        trainable.extend(p)
        return h, h_test, shape

    # block 1: 64 (3, 3) filters -> 2x2 maxpool -> relu
    h, h_test, cur_shape = conv(X, X, cur_shape, 64, (3, 3))
    h, h_test, _, cur_shape = maxpool(h, h_test, cur_shape, (2, 2))
    h, h_test, _, cur_shape = activation(h, h_test, cur_shape, 'relu')

    # block 2: 128 (3, 3) filters -> 2x2 maxpool -> relu
    h, h_test, cur_shape = conv(h, h_test, cur_shape, 128, (3, 3))
    h, h_test, _, cur_shape = maxpool(h, h_test, cur_shape, (2, 2))
    h, h_test, _, cur_shape = activation(h, h_test, cur_shape, 'relu')

    # block 3: two 256 (3, 3) convolutions -> 2x2 maxpool -> relu
    h, h_test, cur_shape = conv(h, h_test, cur_shape, 256, (3, 3))
    h, h_test, _, cur_shape = activation(h, h_test, cur_shape, 'relu')
    h, h_test, cur_shape = conv(h, h_test, cur_shape, 256, (3, 3))
    h, h_test, _, cur_shape = maxpool(h, h_test, cur_shape, (2, 2))
    h, h_test, _, cur_shape = activation(h, h_test, cur_shape, 'relu')

    # "fully connected" layers expressed as 1x1 convolutions
    h, h_test, cur_shape = conv(h, h_test, cur_shape, 1024, (1, 1))
    h, h_test, _, cur_shape = activation(h, h_test, cur_shape, 'relu')
    h, h_test, cur_shape = conv(h, h_test, cur_shape, 1024, (1, 1))

    # 4x4 average pooling, then project down to the 10 class scores
    h, h_test, _, cur_shape = avgpool(h, h_test, cur_shape, (4, 4))
    h, h_test, cur_shape = conv(h, h_test, cur_shape, 10, (1, 1))

    # batch normalization; its running statistics live in `accumulators`
    h, h_test, p, cur_shape, bn_acc = batch_norm(h, h_test, cur_shape)
    accumulators.extend(bn_acc)
    trainable.extend(p)

    # convert class scores to probabilities
    h = multi_dim_softmax(h)
    h_test = multi_dim_softmax(h_test)

    return h, h_test, trainable, accumulators