Example #1
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        self.check_input_shape()
        self.in_N, self.in_h, self.in_w, self.in_depth = self.input_tensor.get_shape().as_list()
        
        # init weights
        self.weights_shape = [self.kernel_size, self.kernel_size, self.in_depth, self.output_depth]
        self.strides = [1, self.stride_size, self.stride_size, 1]
        with tf.variable_scope(self.name):
            self.weights = variables.weights(self.weights_shape)
            self.biases = variables.biases(self.output_depth)
        
        with tf.name_scope(self.name):
            conv = tf.nn.conv2d(self.input_tensor, self.weights,
                                strides=self.strides, padding=self.pad)
            # reshape with -1 so an unknown (None) batch dimension still works
            conv = tf.reshape(tf.nn.bias_add(conv, self.biases),
                              [-1] + conv.get_shape().as_list()[1:])

            if isinstance(self.act, str): 
                self.activations = activations.apply(conv, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(conv)
                
            if self.keep_prob<1.0:
                self.activations = tf.nn.dropout(self.activations, keep_prob=self.keep_prob)
            
            tf.summary.histogram('activations', self.activations)
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations
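
The output spatial size of the convolution above is determined entirely by `self.pad`. A minimal sketch of the standard `tf.nn.conv2d` size formulas, with hypothetical example sizes (not taken from the layer above):

import math

# SAME pads the input so that out = ceil(in / stride);
# VALID uses no padding, so out = ceil((in - kernel + 1) / stride)
def conv2d_output_size(in_size, kernel_size, stride, pad):
    if pad == 'SAME':
        return math.ceil(in_size / stride)
    elif pad == 'VALID':
        return math.ceil((in_size - kernel_size + 1) / stride)
    raise ValueError(pad)

assert conv2d_output_size(28, 5, 1, 'SAME') == 28
assert conv2d_output_size(28, 5, 1, 'VALID') == 24
assert conv2d_output_size(28, 5, 2, 'SAME') == 14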
Example #2
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        self.check_input_shape()
        self.in_N, self.in_h, self.in_w, self.in_c, self.in_depth = self.input_tensor.get_shape(
        ).as_list()

        # init weights
        self.weights_shape = [
            self.kernel_size, self.kernel_size, self.kernel_channels,
            self.in_depth, self.output_depth
        ]
        self.strides = [
            1, self.stride_size, self.stride_size, self.stride_size, 1
        ]
        with tf.variable_scope(self.name):
            self.weights = variables.weights(self.weights_shape,
                                             initializer=self.weights_init,
                                             name=self.name)
            self.biases = variables.biases(self.output_depth,
                                           initializer=self.bias_init,
                                           name=self.name)

        with tf.name_scope(self.name):
            # modified so that batch normalization follows the convolutional layer
            if not self.final_layer:
                conv = tf.nn.conv3d(self.input_tensor,
                                    self.weights,
                                    strides=self.strides,
                                    padding=self.pad)
                bn = tf.contrib.layers.batch_norm(conv,
                                                  decay=self.momentum,
                                                  updates_collections=None,
                                                  epsilon=self.epsilon,
                                                  scale=False,
                                                  is_training=self.training)

                if isinstance(self.act, str):
                    self.activations = activations.apply(bn, self.act)
                elif hasattr(self.act, '__call__'):
                    self.activations = self.act(bn)
                tf.summary.histogram('activations', self.activations)
                tf.summary.histogram('weights', self.weights)
                tf.summary.histogram('biases', self.biases)
            else:
                conv = tf.nn.conv3d(self.input_tensor,
                                    self.weights,
                                    strides=self.strides,
                                    padding=self.pad)
                self.activations = conv
        return self.activations
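
Unlike the 2-D variant, this layer drives `tf.nn.conv3d`, whose tensor layouts are easy to get wrong. A standalone sketch with hypothetical shapes: the input is NDHWC, the filter is [filter_depth, filter_height, filter_width, in_channels, out_channels], and strides take the form [1, d, h, w, 1]:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16, 64, 64, 3])  # [N, D, H, W, C]
w = tf.get_variable('w3d_demo', [3, 3, 3, 3, 32])      # [kd, kh, kw, C_in, C_out]
y = tf.nn.conv3d(x, w, strides=[1, 1, 1, 1, 1], padding='SAME')
print(y.get_shape().as_list())                          # [None, 16, 64, 64, 32]

Note that passing `updates_collections=None` to `tf.contrib.layers.batch_norm`, as the layer above does, makes the moving-average updates run in place, so no separate update op has to be executed alongside the training step.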
Example #3
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        self.check_input_shape()
        in_N, in_h, in_w, in_depth = self.input_tensor.get_shape().as_list()
        self.input_depth = in_depth
        inp_shape = self.input_tensor.get_shape().as_list()
        if self.pad == 'SAME':
            output_shape = tf.stack([
                self.batch_size, inp_shape[1] * self.stride_size,
                inp_shape[2] * self.stride_size, self.output_depth
            ])
        elif self.pad == 'VALID':
            output_shape = tf.stack([
                self.batch_size,
                (inp_shape[1] - 1) * self.stride_size + self.kernel_size,
                (inp_shape[2] - 1) * self.stride_size + self.kernel_size,
                self.output_depth
            ])

        self.weights_shape = [
            self.kernel_size, self.kernel_size, self.output_depth,
            self.input_depth
        ]

        self.strides = [1, self.stride_size, self.stride_size, 1]
        with tf.variable_scope(self.name):
            self.weights = variables.weights(self.weights_shape,
                                             initializer=self.weights_init,
                                             name=self.name)
            self.biases = variables.biases(self.output_depth,
                                           initializer=self.bias_init,
                                           name=self.name)

        with tf.name_scope(self.name):
            deconv = tf.nn.conv2d_transpose(self.input_tensor,
                                            self.weights,
                                            output_shape=output_shape,
                                            strides=self.strides,
                                            padding=self.pad)
            deconv = tf.reshape(tf.nn.bias_add(deconv, self.biases),
                                [-1] + deconv.get_shape().as_list()[1:])

            if isinstance(self.act, str):
                self.activations = activations.apply(deconv, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(deconv)

            if self.keep_prob < 1.0:
                self.activations = tf.nn.dropout(self.activations,
                                                 keep_prob=self.keep_prob)
            tf.summary.histogram('activations', self.activations)

        return self.activations
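
The two `output_shape` branches invert the forward-convolution size formulas: SAME recovers in * stride, VALID recovers (in - 1) * stride + kernel. A quick check with hypothetical numbers:

def deconv_output_size(in_size, kernel_size, stride, pad):
    if pad == 'SAME':
        return in_size * stride
    elif pad == 'VALID':
        return (in_size - 1) * stride + kernel_size
    raise ValueError(pad)

assert deconv_output_size(14, 5, 2, 'SAME') == 28   # undoes a SAME conv 28 -> 14
assert deconv_output_size(24, 5, 1, 'VALID') == 28  # undoes a VALID conv 28 -> 24

This is also why `weights_shape` lists `output_depth` before `input_depth`: `tf.nn.conv2d_transpose` expects its filter as [height, width, output_channels, input_channels].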
Example #4
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        self.check_input_shape()
        self.in_N, self.in_h, self.in_w, self.in_depth = self.input_tensor.get_shape(
        ).as_list()

        # init weights
        self.weights_shape = [
            self.kernel_size, self.kernel_size, self.in_depth,
            self.output_depth
        ]
        self.strides = [1, self.stride_size, self.stride_size, 1]
        with tf.name_scope(self.name):
            self.weights = variables.weights(self.weights_shape,
                                             initializer=self.weights_init,
                                             name=self.name)
            self.biases = variables.biases(self.output_depth,
                                           initializer=self.bias_init,
                                           name=self.name)

        with tf.name_scope(self.name):
            conv = tf.nn.conv2d(self.input_tensor,
                                self.weights,
                                strides=self.strides,
                                padding=self.pad)
            # reshape with -1 so an unknown (None) batch dimension still works
            conv = tf.reshape(tf.nn.bias_add(conv, self.biases),
                              [-1] + conv.get_shape().as_list()[1:])

            if self.batch_norm:
                self.momentum = self.batch_norm_params['momentum']
                self.epsilon = self.batch_norm_params['epsilon']
                self.training = self.batch_norm_params['training']
                self.bn_name = self.batch_norm_params['name']
                conv = tf.contrib.layers.batch_norm(conv,
                                                    decay=self.momentum,
                                                    updates_collections=None,
                                                    epsilon=self.epsilon,
                                                    scale=True,
                                                    is_training=self.training,
                                                    scope=self.bn_name)

            if isinstance(self.act, str):
                self.activations = activations.apply(conv, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(conv)

            tf.summary.histogram('activations', self.activations)
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations
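
The batch-norm branch reads four keys out of `self.batch_norm_params`. A hedged configuration sketch; the keys match what `forward()` reads above, but the concrete values are illustrative only:

import tensorflow as tf

is_training = tf.placeholder(tf.bool, shape=[], name='is_training')

batch_norm_params = {
    'momentum': 0.9,          # decay for the moving mean/variance
    'epsilon': 1e-5,          # added to the variance for numerical stability
    'training': is_training,  # batch statistics vs. moving averages
    'name': 'conv1_bn',       # variable scope for the batch-norm variables
}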
Example #5
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        inp_shape = self.input_tensor.get_shape().as_list()

        if len(inp_shape) != 2:
            import numpy as np
            self.input_dim = np.prod(inp_shape[1:])
            # reshape with -1 so an unknown (None) batch dimension still works
            self.input_tensor = tf.reshape(self.input_tensor,
                                           [-1, self.input_dim])
        else:
            self.input_dim = inp_shape[1]
        self.weights_shape = [self.input_dim, self.output_dim]
        self.weights = variables.weights(self.weights_shape,
                                         initializer=self.weights_init,
                                         name=self.name)
        self.biases = variables.biases(self.output_dim,
                                       initializer=self.bias_init,
                                       name=self.name)

        with tf.name_scope(self.name):
            linear = tf.nn.bias_add(tf.matmul(self.input_tensor, self.weights),
                                    self.biases,
                                    name=self.name)
            if isinstance(self.act, str):
                self.activations = activations.apply(linear, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(linear)

            def dropout_check_false():
                return tf.constant(1.0)

            def dropout_check_true():
                return tf.multiply(self.keep_prob, 1)

            # keep_prob is an in-graph tensor here, so the decision has to be
            # made inside the graph: tf.cond selects keep_prob when it is
            # <= 1.0 and falls back to a no-op 1.0 otherwise
            dropout_check = self.keep_prob <= tf.constant(1.0)
            dropout = tf.cond(dropout_check, dropout_check_true,
                              dropout_check_false)

            self.activations = tf.nn.dropout(self.activations,
                                             keep_prob=dropout)
            tf.summary.histogram('activations', self.activations)
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations
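
Here `self.keep_prob` is an in-graph tensor rather than a Python float (compare Example #6), so the keep-or-skip decision has to be made with `tf.cond` inside the graph. A self-contained sketch of the same pattern, with hypothetical names and shapes:

import tensorflow as tf

keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')
x = tf.ones([4, 8])

# use keep_prob when it is <= 1.0, otherwise fall back to 1.0,
# which makes tf.nn.dropout an identity op
rate = tf.cond(keep_prob <= 1.0,
               lambda: keep_prob,
               lambda: tf.constant(1.0))
y = tf.nn.dropout(x, keep_prob=rate)

with tf.Session() as sess:
    train_out = sess.run(y, feed_dict={keep_prob: 0.5})  # dropout active
    eval_out = sess.run(y, feed_dict={keep_prob: 1.0})   # identity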
Example #6
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        inp_shape = self.input_tensor.get_shape().as_list()

        if len(inp_shape) != 2:
            import numpy as np
            self.input_dim = np.prod(inp_shape[1:])
            # reshape with -1 so an unknown (None) batch dimension still works
            self.input_tensor = tf.reshape(self.input_tensor,
                                           [-1, self.input_dim])
        else:
            self.input_dim = inp_shape[1]
        self.weights_shape = [self.input_dim, self.output_dim]
        self.weights = variables.weights(self.weights_shape, name=self.name)
        self.biases = variables.biases(self.output_dim, name=self.name)

        with tf.name_scope(self.name):
            linear = tf.nn.bias_add(tf.matmul(self.input_tensor, self.weights),
                                    self.biases,
                                    name=self.name)
            if isinstance(self.act, str):
                self.activations = activations.apply(linear, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(linear)

            if self.keep_prob < 1.0:
                self.activations = tf.nn.dropout(self.activations,
                                                 keep_prob=self.keep_prob)
            tf.summary.histogram('activations', self.activations)
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations
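
The flattening step collapses everything but the batch dimension. A worked example with a hypothetical shape: a [32, 7, 7, 64] feature map becomes a [32, 3136] matrix before the matmul:

import numpy as np

inp_shape = [32, 7, 7, 64]  # e.g. the output of a conv stack
input_dim = int(np.prod(inp_shape[1:]))
assert input_dim == 7 * 7 * 64 == 3136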
Example #7
    def forward(self, input_tensor):

        # input tensor shape = (samples, featuremap_h, featuremap_w, channels)
        # get the input tensor and its dimensions
        self.input_tensor = input_tensor
        inp_shape = self.input_tensor.get_shape().as_list()

        # flatten incoming tensors that are not already 2-D to [batch, inputs]
        if len(inp_shape) != 2:
            # number of flattened inputs per sample
            self.input_dim = np.prod(inp_shape[1:])
            # reshape with -1 so an unknown (None) batch dimension still works
            self.input_tensor = tf.reshape(self.input_tensor,
                                           [-1, self.input_dim])

        # already flat: the second dimension is the input size
        else:
            self.input_dim = inp_shape[1]

        self.weights_shape = [self.input_dim, self.output_dim]

        with tf.name_scope(self.name):
            # initialize random weights and biases
            with tf.name_scope(self.name + '_params'):
                if self.param_dir is None:
                    if self.init_DH:
                        # He-style initialization: stddev = sqrt(2 / fan_in)
                        self.weights_init = tf.truncated_normal_initializer(
                            stddev=np.sqrt(2 / self.input_dim))
                        self.bias_init = tf.constant_initializer(0.0)
                    # create the parameters via variables.py
                    self.weights = variables.weights(
                        self.weights_shape,
                        initializer=self.weights_init,
                        name='weights')
                    self.biases = variables.biases(self.output_dim,
                                                   initializer=self.bias_init,
                                                   name='biases')

                else:
                    # load pretrained parameters saved as .npy files
                    self.biases = tf.Variable(tf.convert_to_tensor(
                        np.load(self.param_dir + '-B.npy'), np.float32),
                                              name='biases')
                    self.weights = tf.Variable(tf.convert_to_tensor(
                        np.load(self.param_dir + '-W.npy'), np.float32),
                                               name='weights')

            # pre-activation scores
            linear = tf.nn.bias_add(tf.matmul(self.input_tensor, self.weights),
                                    self.biases,
                                    name=self.name)
            # apply the activation selected by self.act
            if self.act == 'relu':
                self.activations = tf.nn.relu(linear)
            elif self.act == 'lrelu':
                self.activations = tf.maximum(linear, 0.01 * linear)
            else:
                self.activations = linear

            # dropout
            if self.use_dropout:
                self.activations = tf.nn.dropout(self.activations,
                                                 keep_prob=self.keep_prob)

        return self.activations
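
The `param_dir` branch above assumes weights and biases stored side by side as '<prefix>-W.npy' and '<prefix>-B.npy'. A hedged sketch of producing files in that convention; the prefix and shapes are hypothetical:

import numpy as np

param_dir = 'fc1'  # hypothetical prefix
weights = np.random.randn(3136, 512).astype(np.float32)
biases = np.zeros(512, dtype=np.float32)

np.save(param_dir + '-W.npy', weights)  # -> fc1-W.npy
np.save(param_dir + '-B.npy', biases)   # -> fc1-B.npy

# forward() would then pick these up with self.param_dir = 'fc1'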
Example #8
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        inp_shape = self.input_tensor.get_shape().as_list()

        if len(inp_shape) != 2:
            import numpy as np
            self.input_dim = np.prod(inp_shape[1:])
            # reshape with -1 so an unknown (None) batch dimension still works
            self.input_tensor = tf.reshape(self.input_tensor,
                                           [-1, self.input_dim])
        else:
            self.input_dim = inp_shape[1]
        self.weights_shape = [self.input_dim, self.output_dim]
        with tf.name_scope(self.name):
            self.weights = variables.weights(self.weights_shape,
                                             initializer=self.weights_init,
                                             name=self.name)
            self.biases = variables.biases(self.output_dim,
                                           initializer=self.bias_init,
                                           name=self.name)

        with tf.name_scope(self.name):
            linear = tf.nn.bias_add(tf.matmul(self.input_tensor, self.weights),
                                    self.biases,
                                    name=self.name)
            if self.batch_norm:
                self.momentum = self.batch_norm_params['momentum']
                self.epsilon = self.batch_norm_params['epsilon']
                self.training = self.batch_norm_params['training']
                self.bn_name = self.batch_norm_params['name']
                linear = tf.contrib.layers.batch_norm(
                    linear,
                    decay=self.momentum,
                    updates_collections=None,
                    epsilon=self.epsilon,
                    scale=True,
                    is_training=self.training,
                    scope=self.bn_name)

            if isinstance(self.act, str):
                self.activations = activations.apply(linear, self.act)
            elif hasattr(self.act, '__call__'):
                self.activations = self.act(linear)

            # dropout is disabled in this variant
            tf.summary.histogram('activations', self.activations)
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations
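
Both this example and Example #4 switch batch normalization through a `training` flag. A self-contained sketch (hypothetical shapes and names) of how that flag changes behavior: batch statistics during training, accumulated moving averages at evaluation time:

import numpy as np
import tensorflow as tf

is_training = tf.placeholder(tf.bool, shape=[])
x = tf.placeholder(tf.float32, [None, 4])
bn = tf.contrib.layers.batch_norm(x, decay=0.9, updates_collections=None,
                                  epsilon=1e-5, scale=True,
                                  is_training=is_training, scope='demo_bn')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    data = np.random.randn(8, 4).astype(np.float32)
    sess.run(bn, feed_dict={x: data, is_training: True})   # train: batch stats
    sess.run(bn, feed_dict={x: data, is_training: False})  # eval: moving stats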
Example #9
    def forward(self, input_tensor):
        self.input_tensor = input_tensor
        self.batch_size = tf.shape(self.input_tensor)[0]
        self.check_input_shape()
        # static shape: [num_images, featuremap_h, featuremap_w, num_input_channels]
        self.in_N, self.in_h, self.in_w, self.in_depth = self.input_tensor.get_shape(
        ).as_list()
        # the batch dimension is usually unknown statically, so read it dynamically
        self.in_N = tf.shape(self.input_tensor)[0]
        # static ints for the initializer below (np.sqrt cannot take a tensor)
        self.input_dim = self.in_h
        self.input_depth = self.in_depth

        # init weights
        self.weights_shape = [
            self.kernel_size, self.kernel_size, self.in_depth,
            self.output_depth
        ]
        self.strides = [1, self.stride_size, self.stride_size, 1]

        with tf.name_scope(self.name):
            #initialice random weights and biases
            with tf.name_scope(self.name + '_params'):
                if self.param_dir is None:
                    if self.init_DH:
                        # He-style initialization: stddev = sqrt(2 / fan_in)
                        self.weights_init = tf.truncated_normal_initializer(
                            stddev=np.sqrt(2 /
                                           (self.input_dim * self.input_dim *
                                            self.input_depth)))
                        self.bias_init = tf.constant_initializer(0.0)
                    #call variables.py and set parameters
                    self.weights = variables.weights(
                        self.weights_shape,
                        initializer=self.weights_init,
                        name='weights')
                    self.biases = variables.biases(self.output_depth,
                                                   initializer=self.bias_init,
                                                   name='biases')

                else:
                    # load pretrained parameters saved as .npy files
                    self.biases = tf.Variable(tf.convert_to_tensor(
                        np.load(self.param_dir + '-B.npy'), np.float32),
                                              name='biases')
                    self.weights = tf.Variable(tf.convert_to_tensor(
                        np.load(self.param_dir + '-W.npy'), np.float32),
                                               name='weights')

            conv = tf.nn.conv2d(self.input_tensor,
                                self.weights,
                                strides=self.strides,
                                padding=self.pad)

            conv += self.biases

            self.conv = conv

            if self.act == 'relu':
                self.activations = tf.nn.relu(conv)
            elif self.act == 'lrelu':
                self.activations = tf.maximum(self.conv, 0.01 * self.conv)
            else:
                self.activations = conv

            # dropout and the activations histogram are disabled in this variant
            tf.summary.histogram('weights', self.weights)
            tf.summary.histogram('biases', self.biases)

        return self.activations
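
The `init_DH` branch draws weights from a truncated normal with stddev = sqrt(2 / fan_in), in the spirit of He initialization, though note it computes fan-in from the input's spatial size rather than the more usual kernel_size * kernel_size * in_depth. A worked check with hypothetical dimensions:

import numpy as np

input_dim, input_depth = 28, 1  # e.g. a 28x28 single-channel input
stddev = np.sqrt(2 / (input_dim * input_dim * input_depth))
print(round(float(stddev), 4))  # 0.0505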