Example #1
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        self.check_input_shape()
        # NHWC input: [batch, height, width, channels]
        self.in_N, self.in_h, self.in_w, self.in_depth = self.input_tensor.get_shape().as_list()
        
        # init weights
        self.weights_shape = [self.kernel_size, self.kernel_size, self.in_depth, self.output_depth]
        self.strides = [1, self.stride_size, self.stride_size, 1]
        with tf.variable_scope(self.name):
            self.weights = variables.weights(self.weights_shape)
            self.biases = variables.biases(self.output_depth)
        
        with tf.name_scope(self.name):
            conv = tf.nn.conv2d(self.input_tensor, self.weights, strides=self.strides, padding=self.pad)
            conv = tf.reshape(tf.nn.bias_add(conv, self.biases), conv.get_shape().as_list())

            # the activation may be given as a name string or as a callable
            if isinstance(self.act, str):
                self.activations = activations.apply(conv, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(conv)
                
            if self.keep_prob<1.0:
                self.activations = tf.nn.dropout(self.activations, keep_prob=self.keep_prob)
            
            tf.summary.histogram('activations', self.activations)
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations
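For reference, the spatial size produced by tf.nn.conv2d depends on the padding mode. A minimal pure-Python sketch of the standard formulas (the helper name is illustrative, not part of the snippet), assuming square kernels and strides as in the layer above:

    import math

    def conv2d_output_size(in_size, kernel_size, stride, padding):
        # SAME pads so that every input position is covered; VALID keeps
        # only positions where the kernel fits entirely inside the input
        if padding == 'SAME':
            return math.ceil(in_size / stride)
        elif padding == 'VALID':
            return math.ceil((in_size - kernel_size + 1) / stride)
        raise ValueError(padding)

    # e.g. a 28x28 input with a 5x5 kernel and stride 2:
    assert conv2d_output_size(28, 5, 2, 'SAME') == 14
    assert conv2d_output_size(28, 5, 2, 'VALID') == 12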
Example #2
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        self.check_input_shape()
        # 5-D input tensor for the 3D convolution below
        self.in_N, self.in_h, self.in_w, self.in_c, self.in_depth = \
            self.input_tensor.get_shape().as_list()

        # init weights
        self.weights_shape = [
            self.kernel_size, self.kernel_size, self.kernel_channels,
            self.in_depth, self.output_depth
        ]
        self.strides = [
            1, self.stride_size, self.stride_size, self.stride_size, 1
        ]
        with tf.variable_scope(self.name):
            self.weights = variables.weights(self.weights_shape,
                                             initializer=self.weights_init,
                                             name=self.name)
            self.biases = variables.biases(self.output_depth,
                                           initializer=self.bias_init,
                                           name=self.name)

        with tf.name_scope(self.name):
            # modified so that batch normalization follows the convolutional layer
            if not self.final_layer:
                conv = tf.nn.conv3d(self.input_tensor,
                                    self.weights,
                                    strides=self.strides,
                                    padding=self.pad)
                bn = tf.contrib.layers.batch_norm(conv,
                                                  decay=self.momentum,
                                                  updates_collections=None,
                                                  epsilon=self.epsilon,
                                                  scale=False,
                                                  is_training=self.training)

                if isinstance(self.act, str):
                    self.activations = activations.apply(bn, self.act)
                elif hasattr(self.act, '__call__'):
                    self.activations = self.act(bn)
                tf.summary.histogram('activations', self.activations)
                tf.summary.histogram('weights', self.weights)
                tf.summary.histogram('biases', self.biases)
            else:
                conv = tf.nn.conv3d(self.input_tensor,
                                    self.weights,
                                    strides=self.strides,
                                    padding=self.pad)
                self.activations = conv
        return self.activations
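For orientation: tf.nn.conv3d expects a 5-D input [batch, depth, height, width, channels] and filters [fd, fh, fw, in_channels, out_channels], with strides[0] == strides[4] == 1, which is why the snippet builds five-element weights_shape and strides lists. A purely illustrative shape check (the values are made up, not taken from the snippet's configuration):

    import tensorflow as tf  # TF 1.x

    x = tf.ones([2, 16, 16, 16, 4])   # [batch, d, h, w, in_channels]
    w = tf.ones([3, 3, 3, 4, 8])      # [fd, fh, fw, in_ch, out_ch]
    y = tf.nn.conv3d(x, w, strides=[1, 1, 1, 1, 1], padding='SAME')
    print(y.shape)                    # (2, 16, 16, 16, 8)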
Example #3
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        self.check_input_shape()
        in_N, in_h, in_w, in_depth = self.input_tensor.get_shape().as_list()
        self.input_depth = in_depth
        inp_shape = self.input_tensor.get_shape().as_list()
        if self.pad == 'SAME':
            output_shape = tf.stack([
                self.batch_size, inp_shape[1] * self.stride_size,
                inp_shape[2] * self.stride_size, self.output_depth
            ])
        elif self.pad == 'VALID':
            output_shape = tf.stack([
                self.batch_size,
                (inp_shape[1] - 1) * self.stride_size + self.kernel_size,
                (inp_shape[2] - 1) * self.stride_size + self.kernel_size,
                self.output_depth
            ])

        # conv2d_transpose filters are laid out as
        # [height, width, out_channels, in_channels]
        self.weights_shape = [
            self.kernel_size, self.kernel_size, self.output_depth,
            self.input_depth
        ]

        self.strides = [1, self.stride_size, self.stride_size, 1]
        with tf.variable_scope(self.name):
            self.weights = variables.weights(self.weights_shape,
                                             initializer=self.weights_init,
                                             name=self.name)
            self.biases = variables.biases(self.output_depth,
                                           initializer=self.bias_init,
                                           name=self.name)

        with tf.name_scope(self.name):
            deconv = tf.nn.conv2d_transpose(self.input_tensor,
                                            self.weights,
                                            output_shape=output_shape,
                                            strides=self.strides,
                                            padding=self.pad)
            deconv = tf.reshape(tf.nn.bias_add(deconv, self.biases),
                                [-1] + deconv.get_shape().as_list()[1:])

            if isinstance(self.act, str):
                self.activations = activations.apply(deconv, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(deconv)

            if self.keep_prob < 1.0:
                self.activations = tf.nn.dropout(self.activations,
                                                 keep_prob=self.keep_prob)
            tf.summary.histogram('activations', self.activations)

        return self.activations
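The output_shape arithmetic above inverts the forward conv2d size formulas. A small pure-Python sketch (the helper is hypothetical, not part of the layer) that checks the VALID case against the forward relation:

    import math

    def deconv_output_size(in_size, kernel_size, stride, padding):
        # mirrors the output_shape arithmetic in the snippet above
        if padding == 'SAME':
            return in_size * stride
        elif padding == 'VALID':
            return (in_size - 1) * stride + kernel_size
        raise ValueError(padding)

    # a VALID forward conv maps n -> ceil((n - k + 1) / s); the transpose
    # maps that size back to n, up to stride rounding
    n, k, s = 12, 5, 2
    fwd = math.ceil((n - k + 1) / s)  # 4
    assert deconv_output_size(fwd, k, s, 'VALID') == 11  # 11 and 12 both map to 4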
Example #4
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        self.check_input_shape()
        # NHWC input: [batch, height, width, channels]
        self.in_N, self.in_h, self.in_w, self.in_depth = \
            self.input_tensor.get_shape().as_list()

        # init weights
        self.weights_shape = [
            self.kernel_size, self.kernel_size, self.in_depth,
            self.output_depth
        ]
        self.strides = [1, self.stride_size, self.stride_size, 1]
        with tf.name_scope(self.name):
            self.weights = variables.weights(self.weights_shape,
                                             initializer=self.weights_init,
                                             name=self.name)
            self.biases = variables.biases(self.output_depth,
                                           initializer=self.bias_init,
                                           name=self.name)

        with tf.name_scope(self.name):
            conv = tf.nn.conv2d(self.input_tensor,
                                self.weights,
                                strides=self.strides,
                                padding=self.pad)
            conv = tf.reshape(tf.nn.bias_add(conv, self.biases),
                              conv.get_shape().as_list())

            if self.batch_norm:
                self.momentum = self.batch_norm_params['momentum']
                self.epsilon = self.batch_norm_params['epsilon']
                self.training = self.batch_norm_params['training']
                self.bn_name = self.batch_norm_params['name']
                conv = tf.contrib.layers.batch_norm(conv,
                                                    decay=self.momentum,
                                                    updates_collections=None,
                                                    epsilon=self.epsilon,
                                                    scale=True,
                                                    is_training=self.training,
                                                    scope=self.bn_name)

            if isinstance(self.act, str):
                self.activations = activations.apply(conv, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(conv)


            tf.summary.histogram('activations', self.activations)
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations
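tf.contrib was removed in TensorFlow 2.x; for reference, a hedged equivalent of the conv + batch-norm block above using the core tf.layers API (still TF 1.x; the function and its arguments are a sketch, not the repository's code):

    import tensorflow as tf  # TF 1.x

    def conv_bn(x, weights, strides, pad, momentum, epsilon, training):
        # conv2d followed by batch normalization, as in the snippet
        conv = tf.nn.conv2d(x, weights, strides=strides, padding=pad)
        return tf.layers.batch_normalization(conv,
                                             momentum=momentum,
                                             epsilon=epsilon,
                                             training=training)

    # Unlike updates_collections=None above (which applies the moving-average
    # updates in place), tf.layers defers them, so the train op must depend on
    # tf.GraphKeys.UPDATE_OPS:
    #   update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    #   with tf.control_dependencies(update_ops):
    #       train_op = optimizer.minimize(loss)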
Example #5
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        inp_shape = self.input_tensor.get_shape().as_list()

        if len(inp_shape) != 2:
            # flatten any higher-rank input to [batch, features]
            import numpy as np
            self.input_dim = np.prod(inp_shape[1:])
            self.input_tensor = tf.reshape(self.input_tensor,
                                           [inp_shape[0], self.input_dim])
        else:
            self.input_dim = inp_shape[1]
        self.weights_shape = [self.input_dim, self.output_dim]
        self.weights = variables.weights(self.weights_shape,
                                         initializer=self.weights_init,
                                         name=self.name)
        self.biases = variables.biases(self.output_dim,
                                       initializer=self.bias_init,
                                       name=self.name)

        with tf.name_scope(self.name):
            linear = tf.nn.bias_add(tf.matmul(self.input_tensor, self.weights),
                                    self.biases,
                                    name=self.name)
            # the activation may be given as a name string or as a callable
            if isinstance(self.act, str):
                self.activations = activations.apply(linear, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(linear)

            # gate dropout behind a tf.cond; both branches must return a
            # Tensor, hence the multiply-by-1 wrapping of keep_prob
            def dropout_check_false():
                return tf.constant(1.0)

            def dropout_check_true():
                return tf.multiply(self.keep_prob, 1)

            # only drop units when keep_prob is actually below 1.0;
            # otherwise 1.0 is passed through and dropout is a no-op
            dropout_check = self.keep_prob < tf.constant(1.0)
            dropout = tf.cond(dropout_check, dropout_check_true,
                              dropout_check_false)

            self.activations = tf.nn.dropout(self.activations,
                                             keep_prob=dropout)
            tf.summary.histogram('activations', self.activations)
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations
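A note on the dropout gate above: both branches of tf.cond must return tensors of the same type, which is why the snippet wraps keep_prob in tf.multiply instead of returning a bare Python float. A minimal standalone sketch of the pattern (all names here are illustrative):

    import tensorflow as tf  # TF 1.x

    keep_prob = tf.placeholder_with_default(0.5, shape=[])
    # use keep_prob while it actually drops units, otherwise a no-op 1.0
    rate = tf.cond(keep_prob < 1.0,
                   lambda: keep_prob * 1.0,  # branch must yield a Tensor
                   lambda: tf.constant(1.0))
    x = tf.ones([4, 3])
    y = tf.nn.dropout(x, keep_prob=rate)

    with tf.Session() as sess:
        print(sess.run(y))                    # kept units scaled by 1/0.5
        print(sess.run(y, {keep_prob: 1.0}))  # identity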
Example #6
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        inp_shape = self.input_tensor.get_shape().as_list()

        if len(inp_shape) != 2:
            import numpy as np
            self.input_dim = np.prod(inp_shape[1:])
            self.input_tensor = tf.reshape(self.input_tensor,
                                           [inp_shape[0], self.input_dim])
        else:
            self.input_dim = inp_shape[1]
        self.weights_shape = [self.input_dim, self.output_dim]
        self.weights = variables.weights(self.weights_shape, name=self.name)
        self.biases = variables.biases(self.output_dim, name=self.name)

        with tf.name_scope(self.name):
            linear = tf.nn.bias_add(tf.matmul(self.input_tensor, self.weights),
                                    self.biases,
                                    name=self.name)
            # the activation may be given as a name string or as a callable
            if isinstance(self.act, str):
                self.activations = activations.apply(linear, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(linear)

            if self.keep_prob < 1.0:
                self.activations = tf.nn.dropout(self.activations,
                                                 keep_prob=self.keep_prob)
            tf.summary.histogram('activations', self.activations)
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations
Example #7
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        inp_shape = self.input_tensor.get_shape().as_list()

        if len(inp_shape) != 2:
            import numpy as np
            self.input_dim = np.prod(inp_shape[1:])
            self.input_tensor = tf.reshape(self.input_tensor,
                                           [inp_shape[0], self.input_dim])
        else:
            self.input_dim = inp_shape[1]
        self.weights_shape = [self.input_dim, self.output_dim]
        with tf.name_scope(self.name):
            self.weights = variables.weights(self.weights_shape,
                                             initializer=self.weights_init,
                                             name=self.name)
            self.biases = variables.biases(self.output_dim,
                                           initializer=self.bias_init,
                                           name=self.name)

        with tf.name_scope(self.name):
            linear = tf.nn.bias_add(tf.matmul(self.input_tensor, self.weights),
                                    self.biases,
                                    name=self.name)
            if self.batch_norm:
                self.momentum = self.batch_norm_params['momentum']
                self.epsilon = self.batch_norm_params['epsilon']
                self.training = self.batch_norm_params['training']
                self.bn_name = self.batch_norm_params['name']
                linear = tf.contrib.layers.batch_norm(
                    linear,
                    decay=self.momentum,
                    updates_collections=None,
                    epsilon=self.epsilon,
                    scale=True,
                    is_training=self.training,
                    scope=self.bn_name)

            # the activation may be given as a name string or as a callable
            if isinstance(self.act, str):
                self.activations = activations.apply(linear, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(linear)

            # dropout is disabled in this variant; see Example #5 for the
            # tf.cond-gated version
            tf.summary.histogram('activations', self.activations)
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations