Example 1
    def set_model(self, input_figs, z, batch_size, is_training = True, reuse = False):
        assert self.en_channels[0] == input_figs.get_shape().as_list()[3]
        
        # reshape z
        with tf.variable_scope(self.name_scopes[0], reuse = reuse):
            h = linear('_r', z, get_dim(input_figs))
            h = batch_norm('reshape', h, decay_rate = 0.99,
                           is_training = is_training)
            h = tf.nn.relu(h)
        height = input_figs.get_shape().as_list()[1]
        width = input_figs.get_shape().as_list()[2]        
        h = tf.reshape(h, [-1, height, width, self.en_channels[0]])
        h = tf.concat([h, input_figs], 3)
        
        # convolution
        encoded_list = []
        
        # encode
        with tf.variable_scope(self.name_scopes[1], reuse = reuse):
            for i, out_dim in enumerate(self.en_channels[1:]):
                h = conv(i, h, out_dim, 4, 4, 2)
                if i == 0:
                    # no batch normalization on the first encoder layer
                    encoded_list.append(h)
                    h = lrelu(h)
                else:
                    h = batch_norm(i, h, 0.99, is_training)
                    encoded_list.append(h)
                    h = lrelu(h)
                    
        # deconvolution: drop the deepest activation from the skip list,
        # since it feeds the decoder directly rather than a skip connection
        encoded_list.pop()
        h = tf.nn.relu(h)
        
        with tf.variable_scope(self.name_scopes[2], reuse = reuse):
            for i, out_chan in enumerate(self.dec_channels[:-1]):
                # get out shape
                h_shape = h.get_shape().as_list()
                out_width = 2 * h_shape[2] 
                out_height = 2 * h_shape[1]
                out_shape = [batch_size, out_height, out_width, out_chan]
                
                # deconvolution
                deconved = deconv(i, h, out_shape, 4, 4, 2)

                # batch normalization
                h = batch_norm(i, deconved, 0.99, is_training)
                if i <= 2:
                    h = tf.nn.dropout(h, 0.5)
                # skip connection: concatenate the matching encoder activation
                h = tf.concat([h, encoded_list.pop()], 3)
                # activation
                h = tf.nn.relu(h)
            # final deconvolution back to the full image resolution
            height = 2 * h.get_shape().as_list()[1]
            width = 2 * h.get_shape().as_list()[2]
            out_shape = [batch_size, height, width, self.dec_channels[-1]]
            h = deconv(i + 1, h, out_shape, 4, 4, 2)
        return tf.nn.tanh(h)
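The decoder mirrors the encoder through skip connections: every encoder activation is pushed onto encoded_list, the deepest one is discarded because it feeds the decoder directly, and each upsampling step concatenates the matching encoder activation back in along the channel axis. A minimal, self-contained sketch of that pattern, using stock tf.layers in place of the repo's conv/deconv helpers (layer names and channel counts here are illustrative assumptions):

import tensorflow as tf

def unet_skeleton(x, enc_channels=(64, 128, 256), dec_channels=(128, 64, 3)):
    skips = []
    h = x
    # encode: save every activation for the later skip connections
    for i, ch in enumerate(enc_channels):
        h = tf.layers.conv2d(h, ch, 4, strides=2, padding='same',
                             activation=tf.nn.leaky_relu, name='enc%d' % i)
        skips.append(h)
    skips.pop()  # the deepest feature map feeds the decoder directly
    # decode: upsample, then concatenate the matching encoder activation
    for i, ch in enumerate(dec_channels[:-1]):
        h = tf.layers.conv2d_transpose(h, ch, 4, strides=2, padding='same',
                                       activation=tf.nn.relu, name='dec%d' % i)
        h = tf.concat([h, skips.pop()], axis=3)
    return tf.nn.tanh(tf.layers.conv2d_transpose(h, dec_channels[-1], 4,
                                                 strides=2, padding='same',
                                                 name='dec_out'))

# e.g. a 32x32 RGB input round-trips to a 32x32, 3-channel output
y = unet_skeleton(tf.placeholder(tf.float32, [None, 32, 32, 3]))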
Example 2
    def set_model(self, input_img, is_training = True, reuse = False):
        assert self.layer_channels[0] == input_img.get_shape().as_list()[-1]
        h = input_img

        # convolution
        with tf.variable_scope(self.name_scopes[0], reuse = reuse):
            for i, out_chan in enumerate(self.layer_channels[1:]):
                h = conv(i, h, out_chan, 5, 5, 2)
                h = lrelu(h)

        # fully connected layer: map the flattened features to a single logit
        h = flatten(h)
        with tf.variable_scope(self.name_scopes[1], reuse = reuse):
            h = linear('disc_fc', h, 1)

        return h
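The discriminator returns raw logits with no final sigmoid. In a typical GAN setup the activation is folded into the loss for numerical stability; a usage sketch under that assumption (discriminator, real_images, and fake_images are hypothetical stand-ins for however this class is instantiated):

import tensorflow as tf

# score real and fake batches with shared weights via the reuse flag
d_real = discriminator.set_model(real_images, is_training=True)
d_fake = discriminator.set_model(fake_images, is_training=True, reuse=True)

d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_real, labels=tf.ones_like(d_real)) +
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_fake, labels=tf.zeros_like(d_fake)))
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_fake, labels=tf.ones_like(d_fake)))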
Example 3
    def set_model(self, inputs, batch_size, is_training = True, reuse = False):
        assert self.layer_channels[0] == inputs.get_shape().as_list()[-1]
        
        h = inputs

        # convolution
        with tf.variable_scope(self.name_scopes[0], reuse = reuse):
            for i, out_chan in enumerate(self.layer_channels[1:]):
                # convolution
                conved = conv(i, h, out_chan, 5, 5, 2)

                # batch normalization
                bn_conved = batch_norm(i, conved, 0.99, is_training)

                # activation
                h = lrelu(bn_conved)
                
        # fully connected layer: project the flattened features to out_dim
        with tf.variable_scope(self.name_scopes[1], reuse = reuse):
            encoded = linear('fc', flatten(h), self.out_dim)
            
        return encoded
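All of these set_model methods take a reuse flag and pass it to tf.variable_scope, which is the TF1 idiom for sharing weights between two copies of the same subgraph. A self-contained demonstration with a stand-in dense layer (scope and layer names are illustrative):

import tensorflow as tf

def encode(x, reuse=False):
    with tf.variable_scope('enc', reuse=reuse):
        return tf.layers.dense(x, 8, name='fc')

a = encode(tf.zeros([1, 4]))              # creates enc/fc/kernel and enc/fc/bias
b = encode(tf.zeros([1, 4]), reuse=True)  # binds to the same two variables
print(len(tf.trainable_variables()))      # -> 2, not 4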
Example 4
    def build_graph(self):
        def pad(board):
            # zero-pad the NCHW board 8 pixels on each side, then append an
            # extra channel that is 1 on the padded border and 0 on the
            # playable 9x9 area (the edge channel has batch dimension 1, so
            # this concat assumes a batch of one)
            board = tf.pad(board, [[0, 0], [0, 0], [8, 8], [8, 8]],
                           constant_values=0.0)
            edges = tf.constant(0, dtype=tf.float32, shape=[1, 1, 9, 9])
            edges = tf.pad(edges, [[0, 0], [0, 0], [8, 8], [8, 8]],
                           constant_values=1.0)
            board = tf.concat([board, edges], 1)

            return board

        # activation function
        activation = tf.nn.selu

        self.observation = tf.placeholder(tf.float32,
                                          self.obs_space,
                                          name='inputs')

        board = pad(self.observation)

        # Channels first to channels last, as conv2d only supports channels last
        board = tf.transpose(board, [0, 2, 3, 1])

        conv1 = U.conv(board,
                       name='conv1',
                       filters=16,
                       kernel_size=7,
                       strides=1,
                       use_bias=True,
                       activation=activation,
                       padding="valid",
                       summary=self.summary)

        conv2 = U.conv(conv1,
                       name='conv2',
                       filters=16,
                       kernel_size=5,
                       strides=1,
                       use_bias=True,
                       activation=activation,
                       padding="valid",
                       summary=self.summary)

        conv3 = U.conv(conv2,
                       name='conv3',
                       filters=44,
                       kernel_size=5,
                       strides=1,
                       use_bias=True,
                       activation=activation,
                       padding="valid",
                       summary=self.summary)

        conv4 = U.conv(conv3,
                       name='conv4',
                       filters=16,
                       kernel_size=3,
                       strides=1,
                       use_bias=True,
                       activation=activation,
                       padding="valid",
                       summary=self.summary)

        out = U.conv(conv4,
                     name='out',
                     filters=1,
                     kernel_size=1,
                     strides=1,
                     use_bias=True,
                     activation=None,
                     padding="valid",
                     summary=self.summary)

        output = tf.layers.flatten(out)

        #self.actions = tf.identity(output, 'outputs')
        self.actions = output
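The 8-pixel padding in pad exactly offsets the shrinkage of the valid-padding convolution stack: at stride 1 a valid conv removes k - 1 rows and columns, so kernels of 7, 5, 5, 3 and 1 remove 6 + 4 + 4 + 2 + 0 = 16 in total, i.e. 8 per side, and the flattened output lines up one-to-one with the unpadded 9x9 board positions. A quick check of that arithmetic:

# valid convs shrink each spatial dim by (k - 1) at stride 1, so the total
# shrinkage must equal the 2 * 8 pixels added on each axis by pad()
kernels = [7, 5, 5, 3, 1]
shrink = sum(k - 1 for k in kernels)
assert shrink == 2 * 8
print('total shrinkage:', shrink)  # -> 16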