Example #1

    def define_graph(self):
        # Sets up the model graph

        # The variables to train
        self.train_vars = []

        # Sets up the layers
        with tf.name_scope('net'):
            with tf.name_scope('setup'):
                # Convolution
                with tf.name_scope('convolutions'):
                    self.conv_ws = []
                    self.conv_bs = []
                    last_out_height = self.height
                    last_out_width = self.width
                    # last_out_seqlen = self.seqlen

                    for i in range(len(self.kernel_sizes)):
                        self.conv_ws.append(
                            w([
                                self.kernel_sizes[i], self.kernel_sizes[i],
                                self.feature_maps[i], self.feature_maps[i + 1]
                            ]))
                        self.conv_bs.append(b([self.feature_maps[i + 1]]))
                        last_out_height = conv_out_size(
                            last_out_height, 'SAME', self.kernel_sizes[i], 1)
                        last_out_width = conv_out_size(last_out_width, 'SAME',
                                                       self.kernel_sizes[i], 1)

                with tf.name_scope('fully-connected'):
                    # Add an initial size to go from the last conv layer to the first fully-connected one.
                    # Use // 2 for the height and width because there is a 2x2 pooling layer.
                    self.fc_layer_sizes.insert(0, (last_out_height // 2) *
                                               (last_out_width // 2) *
                                               self.feature_maps[-1])

                    self.fc_ws = []
                    self.fc_bs = []
                    for i in range(len(self.fc_layer_sizes) - 1):
                        self.fc_ws.append(
                            w([
                                self.fc_layer_sizes[i],
                                self.fc_layer_sizes[i + 1]
                            ]))
                        self.fc_bs.append(b([self.fc_layer_sizes[i + 1]]))

                self.train_vars += self.conv_ws
                self.train_vars += self.conv_bs
                self.train_vars += self.fc_ws
                self.train_vars += self.fc_bs
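
Example #1 (and the examples below) call three helpers that are not shown on this page: w, b, and conv_out_size. A minimal sketch of plausible definitions, assuming TF 1.x, truncated-normal weight initialization, and constant bias initialization (the initializer choices and the stddev/bias values are assumptions, not the original code):

import tensorflow as tf


def w(shape, name=None):
    # Weight variable; truncated-normal init with a small stddev is assumed.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.01), name=name)


def b(shape, name=None):
    # Bias variable initialized to a small constant (assumed value).
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)


def conv_out_size(in_size, padding, kernel_size, stride):
    # Spatial output size of a convolution, following TF padding rules.
    if padding == 'SAME':
        return (in_size + stride - 1) // stride
    if padding == 'VALID':
        return (in_size - kernel_size + stride) // stride
    raise ValueError('unknown padding: %s' % padding)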
Example #2

    def define_graph(self):
        """
        Sets up the model graph in TensorFlow.
        """

        ##
        # Input data
        ##
        with tf.name_scope('input'):
            self.input_frames = tf.placeholder(
                tf.float32, shape=[None, self.height, self.width, self.conv_layer_fms[0]])

            # use variable batch_size for more flexibility
            self.batch_size = tf.shape(self.input_frames)[0]

        ##
        # Layer setup
        ##

        with tf.name_scope('setup'):
            # convolution
            with tf.name_scope('convolutions'):
                conv_ws = []
                conv_bs = []
                last_out_height = self.height
                last_out_width = self.width
                for i in range(len(self.kernel_sizes)):
                    conv_ws.append(w([self.kernel_sizes[i],
                                      self.kernel_sizes[i],
                                      self.conv_layer_fms[i],
                                      self.conv_layer_fms[i + 1]]))
                    conv_bs.append(b([self.conv_layer_fms[i + 1]]))

                    last_out_height = conv_out_size(
                        last_out_height, c.PADDING_D, self.kernel_sizes[i], 1)
                    last_out_width = conv_out_size(
                        last_out_width, c.PADDING_D, self.kernel_sizes[i], 1)

            # fully-connected
            with tf.name_scope('fully-connected'):
                # Add an initial size to go from the last conv layer to the first fully-connected one.
                # Use // 2 for the height and width because there is a 2x2 pooling layer.
                self.fc_layer_sizes.insert(
                    0, (last_out_height // 2) * (last_out_width // 2) * self.conv_layer_fms[-1])

                fc_ws = []
                fc_bs = []
                for i in range(len(self.fc_layer_sizes) - 1):
                    fc_ws.append(w([int(self.fc_layer_sizes[i]),
                                    int(self.fc_layer_sizes[i + 1])]))
                    fc_bs.append(b([int(self.fc_layer_sizes[i + 1])]))

        ##
        # Forward pass calculation
        ##

        def generate_predictions():
            """
            Runs self.input_frames through the network to generate a prediction from 0
            (generated img) to 1 (real img).

            @return: A tensor of predictions of shape [self.batch_size x 1].
            """
            with tf.name_scope('calculation'):
                preds = tf.zeros([self.batch_size, 1])
                last_input = self.input_frames

                # convolutions
                with tf.name_scope('convolutions'):
                    for i in range(len(conv_ws)):
                        # Convolve layer and activate with ReLU
                        preds = tf.nn.conv2d(
                            last_input, conv_ws[i], [1, 1, 1, 1], padding=c.PADDING_D)
                        preds = tf.nn.relu(preds + conv_bs[i])

                        last_input = preds

                # pooling layer
                with tf.name_scope('pooling'):
                    preds = tf.nn.max_pool(preds, [1, 2, 2, 1], [1, 2, 2, 1], padding=c.PADDING_D)

                # flatten preds for dense layers
                shape = preds.get_shape().as_list()
                # -1 can be used as one dimension to size dynamically
                preds = tf.reshape(preds, [-1, shape[1] * shape[2] * shape[3]])

                # fully-connected layers
                with tf.name_scope('fully-connected'):
                    for i in range(len(fc_ws)):
                        preds = tf.matmul(preds, fc_ws[i]) + fc_bs[i]

                        # Activate with ReLU (or Sigmoid for last layer)
                        if i == len(fc_ws) - 1:
                            preds = tf.sigmoid(preds)
                        else:
                            preds = tf.nn.relu(preds)

                # clip preds to [0.1, 0.9] for stability
                with tf.name_scope('clip'):
                    preds = tf.clip_by_value(preds, 0.1, 0.9)

                return preds

        self.preds = generate_predictions()
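
A minimal usage sketch for this graph, assuming a hypothetical DiscriminatorModel class that sets height, width, conv_layer_fms, kernel_sizes, and fc_layer_sizes in its constructor (the class name and setup are assumptions):

import numpy as np
import tensorflow as tf

model = DiscriminatorModel()  # hypothetical wrapper around define_graph
model.define_graph()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # A random batch of 8 frames with the expected input shape.
    frames = np.random.rand(8, model.height, model.width,
                            model.conv_layer_fms[0]).astype(np.float32)
    preds = sess.run(model.preds, feed_dict={model.input_frames: frames})
    print(preds.shape)  # (8, 1), values clipped to [0.1, 0.9]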
Example #3

    def define_graph(self):
        """
        Sets up the model graph in TensorFlow.
        """

        ##
        # Input data
        ##
        with tf.name_scope('input'):
            self.input_frames = tf.placeholder(
                tf.float32,
                shape=[None, self.height, self.width, self.conv_layer_fms[0]])

            # use variable batch_size for more flexibility
            self.batch_size = tf.shape(self.input_frames)[0] // (c.HIST_LEN + 1)
            # self.batch_size = c.BATCH_SIZE

        ##
        # Layer setup
        ##

        with tf.name_scope('setup'):
            # convolution
            with tf.name_scope('convolutions'):
                conv_ws = []
                conv_bs = []
                last_out_height = self.height
                last_out_width = self.width
                for i in range(len(self.kernel_sizes)):
                    conv_ws.append(
                        w([
                            self.kernel_sizes[i], self.kernel_sizes[i],
                            self.conv_layer_fms[i], self.conv_layer_fms[i + 1]
                        ]))
                    conv_bs.append(b([self.conv_layer_fms[i + 1]]))

                    last_out_height = conv_out_size(last_out_height,
                                                    c.PADDING_D,
                                                    self.kernel_sizes[i], 1)
                    last_out_width = conv_out_size(last_out_width, c.PADDING_D,
                                                   self.kernel_sizes[i], 1)

            # lstm
            with tf.name_scope('lstm'):
                hidden_dim = ((last_out_height // 2) *
                              (last_out_width // 2) * self.conv_layer_fms[-1])

            # fully-connected
            with tf.name_scope('fully-connected'):
                # Add hidden_dim (computed above with // 2 for the 2x2 pooling layer)
                # as the input size of the first fully-connected layer.
                self.fc_layer_sizes.insert(0, hidden_dim)

                fc_ws = []
                fc_bs = []
                for i in range(len(self.fc_layer_sizes) - 1):
                    fc_ws.append(
                        w([self.fc_layer_sizes[i],
                           self.fc_layer_sizes[i + 1]]))
                    fc_bs.append(b([self.fc_layer_sizes[i + 1]]))

        ##
        # Forward pass calculation
        ##

        def generate_predictions():
            """
            Runs self.input_frames through the network to generate a prediction from 0
            (generated img) to 1 (real img).

            @return: A tensor of predictions of shape [self.batch_size x 1].
            """
            with tf.name_scope('calculation'):
                preds = tf.zeros([self.batch_size, 1])
                last_input = self.input_frames

                # convolutions
                with tf.name_scope('convolutions'):
                    for i in range(len(conv_ws)):
                        # Convolve layer and activate with ReLU
                        preds = tf.nn.conv2d(last_input,
                                             conv_ws[i], [1, 1, 1, 1],
                                             padding=c.PADDING_D)
                        preds = tf.nn.relu(preds + conv_bs[i])

                        last_input = preds

                # pooling layer
                with tf.name_scope('pooling'):
                    preds = tf.nn.max_pool(preds, [1, 2, 2, 1], [1, 2, 2, 1],
                                           padding=c.PADDING_D)

                # flatten preds for dense layers
                shape = preds.get_shape().as_list()
                # -1 can be used as one dimension to size dynamically
                preds = tf.reshape(
                    preds,
                    [-1, (c.HIST_LEN + 1), shape[1] * shape[2] * shape[3]])

                print('preds:', preds.get_shape())
                # conv_outputs = tf.stack(tf.split(preds, self.batch_size), 1)
                conv_outputs = tf.transpose(preds, [1, 0, 2])

                print('conv_outputs:', conv_outputs.get_shape())
                print('hidden_dim:', hidden_dim)

                # lstm layers
                with tf.name_scope('lstm'):
                    # `rnn` is assumed to be tensorflow.contrib.rnn. Build a
                    # fresh cell per layer: reusing one cell object across
                    # layers breaks variable creation in later TF 1.x releases.
                    cells = [rnn.DropoutWrapper(rnn.BasicLSTMCell(hidden_dim),
                                                output_keep_prob=c.KEEP_PROB)
                             for _ in range(c.LAYERS)]
                    stacked_lstm = rnn.MultiRNNCell(cells)
                    initial_state = stacked_lstm.zero_state(
                        self.batch_size, tf.float32)
                    outputs, state = tf.nn.dynamic_rnn(
                        stacked_lstm,
                        conv_outputs,
                        initial_state=initial_state,
                        time_major=True,
                        dtype=tf.float32,
                        scope='lstm_' + str(self.scale_index))
                    preds = outputs[-1]

                # fully-connected layers
                with tf.name_scope('fully-connected'):
                    for i in range(len(fc_ws)):
                        preds = tf.matmul(preds, fc_ws[i]) + fc_bs[i]

                        # Activate with ReLU (or Sigmoid for last layer)
                        if i == len(fc_ws) - 1:
                            preds = tf.sigmoid(preds)
                        else:
                            preds = tf.nn.relu(preds)

                # clip preds to [0.1, 0.9] for stability
                with tf.name_scope('clip'):
                    preds = tf.clip_by_value(preds, 0.1, 0.9)

                return preds

        self.preds = generate_predictions()
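
The reshape/transpose pair before the LSTM is the subtle part of this example: the conv output stacks all (c.HIST_LEN + 1) frames of each sequence along the batch axis, so it is first reshaped to [batch, time, features] and then transposed to [time, batch, features] for dynamic_rnn with time_major=True; outputs[-1] is then the last time step. A numpy-only walkthrough with assumed sizes (4 sequences, 5 frames each, 128 conv features):

import numpy as np

flat = np.zeros((4 * 5, 128))        # conv output: frames stacked on batch axis
seq = flat.reshape(-1, 5, 128)       # [batch, time, features] = (4, 5, 128)
time_major = seq.transpose(1, 0, 2)  # [time, batch, features] = (5, 4, 128)
print(time_major.shape)              # (5, 4, 128); outputs[-1] of dynamic_rnn
                                     # would then be the final step, [4, hidden]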
Example #4

    def define_graph(self):
        """
        Sets up the model graph in TensorFlow.
        """

        self.train_vars = []  # the variables to train in the optimization step

        ##
        # Layer setup
        ##

        with tf.name_scope('setup'):
            # convolution
            with tf.name_scope('convolutions'):
                self.conv_ws = []
                self.conv_bs = []
                last_out_height = self.height
                last_out_width = self.width
                with tf.name_scope('weights'):
                    for i in range(len(self.kernel_sizes)):
                        self.conv_ws.append(
                            w([
                                self.kernel_sizes[i], self.kernel_sizes[i],
                                self.conv_layer_fms[i],
                                self.conv_layer_fms[i + 1]
                            ], 'dis_con_' + str(self.scale_index) + '_' +
                              str(i)))

                with tf.name_scope('biases'):
                    for i in range(len(self.kernel_sizes)):
                        self.conv_bs.append(b([self.conv_layer_fms[i + 1]]))

                        # Track the spatial output size of each conv layer.
                        last_out_height = conv_out_size(
                            last_out_height, self.padding,
                            self.kernel_sizes[i], 1)
                        last_out_width = conv_out_size(last_out_width,
                                                       self.padding,
                                                       self.kernel_sizes[i], 1)

            # fully-connected
            with tf.name_scope('fully-connected'):
                # Add an initial size to go from the last conv layer to the first fully-connected one.
                # No // 2 on the height and width here: this variant has no 2x2 pooling layer.
                self.fc_layer_sizes.insert(
                    0,
                    last_out_height * last_out_width * self.conv_layer_fms[-1])

                self.fc_ws = []
                self.fc_bs = []
                with tf.name_scope('weights'):
                    for i in range(len(self.fc_layer_sizes) - 1):
                        self.fc_ws.append(
                            w([
                                self.fc_layer_sizes[i],
                                self.fc_layer_sizes[i + 1]
                            ], 'dis_fc_' + str(self.scale_index) + '_' +
                              str(i)))
                with tf.name_scope('biases'):
                    for i in range(len(self.fc_layer_sizes) - 1):
                        self.fc_bs.append(b([self.fc_layer_sizes[i + 1]]))

            self.train_vars += self.conv_ws
            self.train_vars += self.conv_bs
            self.train_vars += self.fc_ws
            self.train_vars += self.fc_bs
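
The dis_con_/dis_fc_ variable names suggest this is the discriminator of a GAN, which is why collecting self.train_vars matters: passing it as var_list restricts the optimizer to these variables, so the generator's weights stay frozen during the discriminator step. A sketch of the likely wiring (the loss tensor and learning rate are assumptions):

# Hypothetical training step; `loss` is assumed to be defined elsewhere.
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
train_op = optimizer.minimize(loss, var_list=self.train_vars)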