Example #1
    def build_activator(self, input_tensor, features: int, activator="", leaky_relu_alpha=0.1, base_name=""):
        features = int(features)
        if activator is None or activator == "":
            # No activation requested; pass the tensor through unchanged.
            return input_tensor
        elif activator == "relu":
            output = tf.nn.relu(input_tensor, name=base_name + "_relu")
        elif activator == "sigmoid":
            output = tf.nn.sigmoid(input_tensor, name=base_name + "_sigmoid")
        elif activator == "tanh":
            output = tf.nn.tanh(input_tensor, name=base_name + "_tanh")
        elif activator == "leaky_relu":
            output = tf.maximum(input_tensor, leaky_relu_alpha * input_tensor, name=base_name + "_leaky")
        elif activator == "prelu":
            with tf.variable_scope("prelu"):
                alphas = tf.Variable(tf.constant(0.1, shape=[features]), name=base_name + "_prelu")
                if self.save_weights:
                    util.add_summaries("prelu_alpha", self.name, alphas, save_stddev=False, save_mean=False)
                output = tf.nn.relu(input_tensor) + tf.multiply(alphas, (input_tensor - tf.abs(input_tensor))) * 0.5
        elif activator == "selu":
            output = tf.nn.selu(input_tensor, name=base_name + "_selu")
        else:
            raise NameError("Not implemented activator: %s" % activator)

        self.complexity += (self.pix_per_input * features)

        return output
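A minimal standalone sketch of the "prelu" branch above (TensorFlow 1.x; the tensor shape and channel count are assumptions for illustration). Since relu(x) keeps only the positive part and (x - |x|) / 2 is exactly the negative part, scaling the latter by a per-channel alpha reproduces PReLU:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 8, 8, 64], name="x")
alphas = tf.Variable(tf.constant(0.1, shape=[64]), name="prelu_alpha")

pos = tf.nn.relu(x)                    # x where x > 0, else 0
neg = alphas * (x - tf.abs(x)) * 0.5   # alpha * x where x < 0, else 0
prelu = pos + neg                      # f(x) = x if x > 0 else alpha * x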
Example #2
    def build_conv(self, name, input_tensor, cnn_size, input_feature_num,
                   output_feature_num):
        with tf.variable_scope(name):
            w = util.weight(
                [cnn_size, cnn_size, input_feature_num, output_feature_num],
                stddev=self.weight_dev,
                name="conv_W",
                initializer=self.initializer)
            h = self.conv2d(input_tensor,
                            w,
                            self.cnn_stride,
                            bias=None,
                            activator=None,
                            name=name)

            if self.save_weights:
                util.add_summaries("weight",
                                   self.name,
                                   w,
                                   save_stddev=True,
                                   save_mean=True)

            if self.save_images and cnn_size > 1 and input_feature_num == 1:
                weight_transposed = tf.transpose(w, [3, 0, 1, 2])
                with tf.name_scope("image"):
                    tf.summary.image(self.name,
                                     weight_transposed,
                                     max_outputs=self.log_weight_image_num)

        return w, h
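The transpose at the end is the standard trick for logging first-layer filters as images; a short sketch under the same assumptions (TF 1.x, a single-channel 3x3 kernel, hypothetical names):

import tensorflow as tf

# A kernel of shape [k, k, 1, out] becomes a batch of `out` single-channel
# k-by-k images that tf.summary.image can render.
w = tf.Variable(tf.truncated_normal([3, 3, 1, 32], stddev=0.1), name="conv_W")
w_as_images = tf.transpose(w, [3, 0, 1, 2])  # [3, 3, 1, 32] -> [32, 3, 3, 1]
tf.summary.image("first_layer_filters", w_as_images, max_outputs=32)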
Example #3
    def add_optimizer_op(self, loss, lr_input):

        if self.optimizer == "gd":
            optimizer = tf.train.GradientDescentOptimizer(lr_input)
        elif self.optimizer == "adadelta":
            optimizer = tf.train.AdadeltaOptimizer(lr_input)
        elif self.optimizer == "adagrad":
            optimizer = tf.train.AdagradOptimizer(lr_input)
        elif self.optimizer == "adam":
            optimizer = tf.train.AdamOptimizer(lr_input, beta1=self.beta1, beta2=self.beta2, epsilon=self.epsilon)
        elif self.optimizer == "momentum":
            optimizer = tf.train.MomentumOptimizer(lr_input, self.momentum)
        elif self.optimizer == "rmsprop":
            optimizer = tf.train.RMSPropOptimizer(lr_input, momentum=self.momentum)
        else:
            print("Optimizer arg should be one of [gd, adadelta, adagrad, adam, momentum, rmsprop].")
            return None

        if self.clipping_norm > 0 or self.save_weights:
            trainables = tf.trainable_variables()
            grads = tf.gradients(loss, trainables)

            if self.save_weights:
                for i in range(len(grads)):
                    util.add_summaries("", self.name, grads[i], header_name=grads[i].name + "/", save_stddev=True,
                                       save_mean=True)

        if self.clipping_norm > 0:
            clipped_grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.clipping_norm)
            grad_var_pairs = zip(clipped_grads, trainables)
            training_optimizer = optimizer.apply_gradients(grad_var_pairs)
        else:
            training_optimizer = optimizer.minimize(loss)

        return training_optimizer
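A self-contained sketch of the clipping path above (TF 1.x; the variable and loss are invented for illustration):

import tensorflow as tf

v = tf.Variable([3.0, 4.0], name="v")
loss = tf.reduce_sum(tf.square(v))

trainables = tf.trainable_variables()
grads = tf.gradients(loss, trainables)

# Rescale all gradients together so their global L2 norm is at most 5.0.
clipped, _ = tf.clip_by_global_norm(grads, clip_norm=5.0)
train_op = tf.train.AdamOptimizer(1e-3).apply_gradients(zip(clipped, trainables))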
Example #4
    def conv2d(self, x, w, stride, bias=None, activator=None, leaky_relu_alpha=0.1, name=""):
        conv = tf.nn.conv2d(x, w, strides=[stride, stride, 1, 1], padding="SAME", name=name + "_conv")

        self.complexity += int(w.shape[0] * w.shape[1] * w.shape[2] * w.shape[3])

        if bias is not None:
            conv = tf.add(conv, bias, name=name + "_add")
            self.complexity += int(bias.shape[0])

        if activator is not None:
            if activator == "relu":
                conv = tf.nn.relu(conv, name=name + "_relu")
            elif activator == "sigmoid":
                conv = tf.nn.sigmoid(conv, name=name + "_sigmoid")
            elif activator == "tanh":
                conv = tf.nn.tanh(conv, name=name + "_tanh")
            elif activator == "leaky_relu":
                conv = tf.maximum(conv, leaky_relu_alpha * conv, name=name + "_leaky")
            elif activator == "prelu":
                with tf.variable_scope("prelu"):
                    alphas = tf.Variable(tf.constant(0.1, shape=[w.get_shape()[3]]), name=name + "_prelu")
                    if self.save_weights:
                        util.add_summaries("prelu_alpha", self.name, alphas, save_stddev=False, save_mean=False)
                    conv = tf.nn.relu(conv) + tf.multiply(alphas, (conv - tf.abs(conv))) * 0.5
            else:
                raise NameError("Not implemented activator: %s" % activator)
            # The bias may be None here, so count the activation cost from
            # the kernel's output-channel dimension rather than bias.shape[0].
            self.complexity += int(w.shape[3])

        return conv
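The complexity counter above tracks per-pixel multiply-accumulates; as plain arithmetic (the values are assumptions for illustration):

k, c_in, c_out = 3, 64, 64
per_pixel_macs = k * k * c_in * c_out  # 36,864 multiply-accumulates per output pixel
per_pixel_bias = c_out                 # 64 additions when a bias is present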
Example #5
    def build_conv_and_bias(self,
                            name,
                            input_tensor,
                            cnn_size,
                            input_feature_num,
                            output_feature_num,
                            use_activator=True,
                            use_dropout=True):
        with tf.variable_scope(name):
            w = util.weight(
                [cnn_size, cnn_size, input_feature_num, output_feature_num],
                stddev=self.weight_dev,
                name="conv_W",
                initializer=self.initializer)
            b = util.bias([output_feature_num], name="conv_B")
            h = self.conv2d(
                input_tensor,
                w,
                self.cnn_stride,
                bias=b,
                activator=self.activator if use_activator else None,
                name=name)

            if use_dropout and self.dropout != 1.0:
                h = tf.nn.dropout(h, self.dropout_input, name="dropout")

            if self.save_weights:
                util.add_summaries("weight",
                                   self.name,
                                   w,
                                   save_stddev=True,
                                   save_mean=True)
                util.add_summaries("bias",
                                   self.name,
                                   b,
                                   save_stddev=True,
                                   save_mean=True)

            if self.save_images and cnn_size > 1 and input_feature_num == 1:
                weight_transposed = tf.transpose(w, [3, 0, 1, 2])
                with tf.name_scope("image"):
                    tf.summary.image(self.name,
                                     weight_transposed,
                                     max_outputs=self.log_weight_image_num)

        return w, b, h
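Note that tf.nn.dropout in TF 1.x takes a *keep* probability, which is why the code skips the op when self.dropout == 1.0. A minimal sketch of the convention (shapes and values are illustrative):

import tensorflow as tf

h = tf.placeholder(tf.float32, shape=[None, 16, 16, 32], name="h")
keep_prob = tf.placeholder(tf.float32, shape=[], name="dropout_keep_rate")
h_drop = tf.nn.dropout(h, keep_prob)
# feed {keep_prob: 0.8} during training and {keep_prob: 1.0} at evaluation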
Example #6
	def build_conv(self, name, input_tensor, cnn_size, input_feature_num, output_feature_num, use_bias=False,
	               activator=None, use_batch_norm=False, dropout_rate=1.0):

		with tf.variable_scope(name):
			w = util.weight([cnn_size, cnn_size, input_feature_num, output_feature_num],
			                stddev=self.weight_dev, name="conv_W", initializer=self.initializer)

			b = util.bias([output_feature_num], name="conv_B") if use_bias else None
			h = self.conv2d(input_tensor, w, self.cnn_stride, bias=b, use_batch_norm=use_batch_norm, name=name)

			if activator is not None:
				h = self.build_activator(h, output_feature_num, activator, base_name=name)

			if dropout_rate < 1.0:
				h = tf.nn.dropout(h, self.dropout, name="dropout")

			self.H.append(h)

			if self.save_weights:
				util.add_summaries("weight", self.name, w, save_stddev=True, save_mean=True)
				util.add_summaries("output", self.name, h, save_stddev=True, save_mean=True)
				if use_bias:
					util.add_summaries("bias", self.name, b, save_stddev=True, save_mean=True)

			# todo check
			if self.save_images and cnn_size > 1 and input_feature_num == 1:
				weight_transposed = tf.transpose(w, [3, 0, 1, 2])

				with tf.name_scope("image"):
					tf.summary.image(self.name, weight_transposed, max_outputs=self.log_weight_image_num)

		if self.receptive_fields == 0:
			self.receptive_fields = cnn_size
		else:
			self.receptive_fields += (cnn_size - 1)
		self.features += "%d " % output_feature_num

		self.Weights.append(w)
		if use_bias:
			self.Biases.append(b)

		return h
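The receptive-field bookkeeping at the end follows the usual rule for stride-1 stacks: the first k-by-k layer sees k pixels and each additional layer adds k - 1. A quick sketch:

def receptive_field(kernel_sizes):
    rf = 0
    for k in kernel_sizes:
        rf = k if rf == 0 else rf + (k - 1)
    return rf

print(receptive_field([3, 3, 3]))  # 7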
Example #7
    def build_graph(self):

        self.x = tf.placeholder(tf.float32,
                                shape=[None, None, None, self.channels],
                                name="x")
        self.y = tf.placeholder(tf.float32,
                                shape=[None, None, None, self.output_channels],
                                name="y")
        self.x2 = tf.placeholder(
            tf.float32,
            shape=[None, None, None, self.output_channels],
            name="x2")
        self.dropout = tf.placeholder(tf.float32,
                                      shape=[],
                                      name="dropout_keep_rate")
        self.is_training = tf.placeholder(tf.bool, name="is_training")

        # building feature extraction layers

        output_feature_num = self.filters
        total_output_feature_num = 0
        input_feature_num = self.channels
        input_tensor = self.x

        if self.save_weights:
            with tf.name_scope("X"):
                util.add_summaries("output",
                                   self.name,
                                   self.x,
                                   save_stddev=True,
                                   save_mean=True)

        for i in range(self.layers):
            if self.min_filters != 0 and i > 0:
                x1 = i / float(self.layers - 1)
                y1 = pow(x1, 1.0 / self.filters_decay_gamma)
                output_feature_num = int((self.filters - self.min_filters) *
                                         (1 - y1) + self.min_filters)

            self.build_conv("CNN%d" % (i + 1),
                            input_tensor,
                            self.cnn_size,
                            input_feature_num,
                            output_feature_num,
                            use_bias=True,
                            activator=self.activator,
                            use_batch_norm=self.batch_norm,
                            dropout_rate=self.dropout_rate)
            input_feature_num = output_feature_num
            input_tensor = self.H[-1]
            total_output_feature_num += output_feature_num

        with tf.variable_scope("Concat"):
            self.H_concat = tf.concat(self.H, 3, name="H_concat")
        self.features += " Total: (%d)" % total_output_feature_num

        # building reconstruction layers ---

        if self.use_nin:
            self.build_conv("A1",
                            self.H_concat,
                            1,
                            total_output_feature_num,
                            self.nin_filters,
                            dropout_rate=self.dropout_rate,
                            use_bias=True,
                            activator=self.activator)
            self.receptive_fields -= (self.cnn_size - 1)

            self.build_conv("B1",
                            self.H_concat,
                            1,
                            total_output_feature_num,
                            self.nin_filters2,
                            dropout_rate=self.dropout_rate,
                            use_bias=True,
                            activator=self.activator)

            self.build_conv("B2",
                            self.H[-1],
                            3,
                            self.nin_filters2,
                            self.nin_filters2,
                            dropout_rate=self.dropout_rate,
                            use_bias=True,
                            activator=self.activator)

            self.H.append(
                tf.concat([self.H[-1], self.H[-3]], 3, name="Concat2"))
            input_channels = self.nin_filters + self.nin_filters2
        else:
            self.H.append(self.H_concat)
            input_channels = total_output_feature_num

        # building upsampling layer
        if self.pixel_shuffler:
            if self.pixel_shuffler_filters != 0:
                output_channels = self.pixel_shuffler_filters
            else:
                output_channels = input_channels
            if self.scale == 4:
                self.build_pixel_shuffler_layer("Up-PS", self.H[-1], 2,
                                                input_channels, input_channels)
                self.build_pixel_shuffler_layer("Up-PS2", self.H[-1], 2,
                                                input_channels,
                                                output_channels)
            else:
                self.build_pixel_shuffler_layer("Up-PS", self.H[-1],
                                                self.scale, input_channels,
                                                output_channels)
            input_channels = output_channels
        else:
            self.build_transposed_conv("Up-TCNN", self.H[-1], self.scale,
                                       input_channels)

        for i in range(self.reconstruct_layers - 1):
            self.build_conv("R-CNN%d" % (i + 1),
                            self.H[-1],
                            self.cnn_size,
                            input_channels,
                            self.reconstruct_filters,
                            dropout_rate=self.dropout_rate,
                            use_bias=True,
                            activator=self.activator)
            input_channels = self.reconstruct_filters

        self.build_conv("R-CNN%d" % self.reconstruct_layers, self.H[-1],
                        self.cnn_size, input_channels, self.output_channels)

        self.y_ = tf.add(self.H[-1], self.x2, name="output")

        if self.save_weights:
            with tf.name_scope("Y_"):
                util.add_summaries("output",
                                   self.name,
                                   self.y_,
                                   save_stddev=True,
                                   save_mean=True)

        logging.info("Feature:%s Complexity:%s Receptive Fields:%d" %
                     (self.features, "{:,}".format(
                         self.complexity), self.receptive_fields))
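The per-layer filter counts in the extraction loop follow a decay curve: layer i of L is interpolated from `filters` down to `min_filters` along (i / (L - 1)) ** (1 / filters_decay_gamma). A standalone sketch (the parameter values are assumptions for illustration):

def layer_filters(i, layers, filters, min_filters, gamma):
    if min_filters == 0 or i == 0:
        return filters
    x1 = i / float(layers - 1)
    y1 = x1 ** (1.0 / gamma)
    return int((filters - min_filters) * (1 - y1) + min_filters)

print([layer_filters(i, 7, 196, 48, 1.5) for i in range(7)])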
Example #8
    def build_graph(self):
        inference = False  # set True to pin the batch dimension to 1
        # The first dimension is the batch size (fixed to 1 at inference).
        # The second is image height, the third image width, and the fourth
        # is the number of color channels.
        if self.input_image_height > 0 and self.input_image_width > 0:
            if inference:
                self.x = tf.placeholder(tf.float32,
                                        shape=[
                                            1, self.input_image_height,
                                            self.input_image_width,
                                            self.channels
                                        ],
                                        name="x")
                self.y = tf.placeholder(
                    tf.float32,
                    shape=[
                        1, self.input_image_height * self.scale,
                        self.input_image_width * self.scale,
                        self.output_channels
                    ],
                    name="y")
                self.x2 = tf.placeholder(
                    tf.float32,
                    shape=[
                        1, self.input_image_height * self.scale,
                        self.input_image_width * self.scale,
                        self.output_channels
                    ],
                    name="x2")
            else:
                self.x = tf.placeholder(tf.float32,
                                        shape=[
                                            None, self.input_image_height,
                                            self.input_image_width,
                                            self.channels
                                        ],
                                        name="x")
                self.y = tf.placeholder(
                    tf.float32,
                    shape=[
                        None, self.input_image_height * self.scale,
                        self.input_image_width * self.scale,
                        self.output_channels
                    ],
                    name="y")
                self.x2 = tf.placeholder(
                    tf.float32,
                    shape=[
                        None, self.input_image_height * self.scale,
                        self.input_image_width * self.scale,
                        self.output_channels
                    ],
                    name="x2")
        else:
            self.x = tf.placeholder(tf.float32,
                                    shape=[None, None, None, self.channels],
                                    name="x")
            self.y = tf.placeholder(
                tf.float32,
                shape=[None, None, None, self.output_channels],
                name="y")
            self.x2 = tf.placeholder(
                tf.float32,
                shape=[None, None, None, self.output_channels],
                name="x2")

        self.dropout = tf.placeholder(tf.float32,
                                      shape=[],
                                      name="dropout_keep_rate")
        self.is_training = tf.placeholder(tf.bool, name="is_training")

        # building feature extraction layers
        output_feature_num = self.filters
        total_output_feature_num = 0
        input_feature_num = self.channels
        input_tensor = self.x

        if self.save_weights:
            with tf.name_scope("X"):
                util.add_summaries("output",
                                   self.name,
                                   self.x,
                                   save_stddev=True,
                                   save_mean=True)

        if self.depthwise_seperable:
            for i in range(self.layers):
                if i > 0:
                    x1 = i / float(self.layers - 1)
                    y1 = pow(x1, 1.0 / self.filters_decay_gamma)
                    output_feature_num = int(
                        (self.filters - self.min_filters) * (1 - y1) +
                        self.min_filters)

                    # self.build_conv("Bottleneck%d" % (i + 1), input_tensor, 1, input_feature_num,
                    #                 1, use_bias=True, activator=None,
                    #                 use_batch_norm=self.batch_norm, dropout_rate=self.dropout_rate)
                    # input_tensor = self.H[-1]
                    # self.build_depthwise_seperable_conv("CNN%d" % (i + 1), input_tensor, self.cnn_size, 1,
                    #                 output_feature_num, use_bias=True, activator=self.activator,
                    #                 use_batch_norm=self.batch_norm, dropout_rate=self.dropout_rate)
                    self.build_depthwise_seperable_conv(
                        "CNN%d" % (i + 1),
                        input_tensor,
                        self.cnn_size,
                        input_feature_num,
                        output_feature_num,
                        use_bias=True,
                        activator=self.activator,
                        use_batch_norm=self.batch_norm,
                        dropout_rate=self.dropout_rate)
                else:
                    self.build_conv("CNN%d" % (i + 1),
                                    input_tensor,
                                    self.cnn_size,
                                    input_feature_num,
                                    output_feature_num,
                                    use_bias=True,
                                    activator=self.activator,
                                    use_batch_norm=self.batch_norm,
                                    dropout_rate=self.dropout_rate)
                input_feature_num = output_feature_num
                input_tensor = self.H[-1]
                total_output_feature_num += output_feature_num
        else:
            # original version
            for i in range(self.layers):
                if self.min_filters != 0 and i > 0:
                    x1 = i / float(self.layers - 1)
                    y1 = pow(x1, 1.0 / self.filters_decay_gamma)
                    output_feature_num = int(
                        (self.filters - self.min_filters) * (1 - y1) +
                        self.min_filters)

                self.build_conv("CNN%d" % (i + 1),
                                input_tensor,
                                self.cnn_size,
                                input_feature_num,
                                output_feature_num,
                                use_bias=True,
                                activator=self.activator,
                                use_batch_norm=self.batch_norm,
                                dropout_rate=self.dropout_rate)
                input_feature_num = output_feature_num
                input_tensor = self.H[-1]
                total_output_feature_num += output_feature_num

        with tf.variable_scope("Concat"):
            self.H_concat = tf.concat(self.H, 3, name="H_concat")
        self.features += " Total: (%d)" % total_output_feature_num

        # building reconstruction layers ---

        if self.use_nin:
            self.build_conv("A1",
                            self.H_concat,
                            1,
                            total_output_feature_num,
                            self.nin_filters,
                            dropout_rate=self.dropout_rate,
                            use_bias=True,
                            activator=self.activator)
            self.receptive_fields -= (self.cnn_size - 1)

            self.build_conv("B1",
                            self.H_concat,
                            1,
                            total_output_feature_num,
                            self.nin_filters2,
                            dropout_rate=self.dropout_rate,
                            use_bias=True,
                            activator=self.activator)

            if self.depthwise_seperable:
                self.build_depthwise_seperable_conv(
                    "B2",
                    self.H[-1],
                    3,
                    self.nin_filters2,
                    self.nin_filters2,
                    use_bias=True,
                    activator=self.activator,
                    dropout_rate=self.dropout_rate)
            else:
                self.build_conv("B2",
                                self.H[-1],
                                3,
                                self.nin_filters2,
                                self.nin_filters2,
                                dropout_rate=self.dropout_rate,
                                use_bias=True,
                                activator=self.activator)

            self.H.append(
                tf.concat([self.H[-1], self.H[-3]], 3, name="Concat2"))
            input_channels = self.nin_filters + self.nin_filters2
        else:
            if self.depthwise_seperable:
                self.build_depthwise_seperable_conv(
                    "C",
                    self.H_concat,
                    1,
                    total_output_feature_num,
                    self.filters,
                    dropout_rate=self.dropout_rate,
                    use_bias=True,
                    activator=self.activator)
            else:
                self.build_conv("C",
                                self.H_concat,
                                1,
                                total_output_feature_num,
                                self.filters,
                                dropout_rate=self.dropout_rate,
                                use_bias=True,
                                activator=self.activator)
            input_channels = self.filters

        # building upsampling layer
        if self.pixel_shuffler:
            if self.pixel_shuffler_filters != 0:
                output_channels = self.pixel_shuffler_filters
            else:
                output_channels = input_channels
            if self.scale == 4:
                if self.bottleneck:
                    self.build_conv("Up-Bottleneck",
                                    self.H[-1],
                                    1,
                                    input_channels,
                                    output_channels,
                                    dropout_rate=self.dropout_rate,
                                    use_bias=True,
                                    activator=self.activator)
                    self.build_pixel_shuffler_layer(
                        "Up-PS",
                        self.H[-1],
                        2,
                        output_channels,
                        input_channels,
                        depthwise_seperable=self.depthwise_seperable)
                    self.build_conv("Up-Bottleneck2",
                                    self.H[-1],
                                    1,
                                    input_channels,
                                    output_channels,
                                    dropout_rate=self.dropout_rate,
                                    use_bias=True,
                                    activator=self.activator)
                    self.build_pixel_shuffler_layer(
                        "Up-PS2",
                        self.H[-1],
                        2,
                        output_channels,
                        output_channels,
                        depthwise_seperable=self.depthwise_seperable)
                else:
                    self.build_pixel_shuffler_layer(
                        "Up-PS",
                        self.H[-1],
                        2,
                        input_channels,
                        input_channels,
                        depthwise_seperable=self.depthwise_seperable)
                    self.build_pixel_shuffler_layer(
                        "Up-PS2",
                        self.H[-1],
                        2,
                        input_channels,
                        output_channels,
                        depthwise_seperable=self.depthwise_seperable)
            else:
                self.build_pixel_shuffler_layer(
                    "Up-PS",
                    self.H[-1],
                    self.scale,
                    input_channels,
                    output_channels,
                    depthwise_seperable=self.depthwise_seperable)
            input_channels = output_channels
        else:
            self.build_transposed_conv("Up-TCNN", self.H[-1], self.scale,
                                       input_channels)

        for i in range(self.reconstruct_layers - 1):
            self.build_conv("R-CNN%d" % (i + 1),
                            self.H[-1],
                            self.cnn_size,
                            input_channels,
                            self.reconstruct_filters,
                            dropout_rate=self.dropout_rate,
                            use_bias=True,
                            activator=self.activator)
            input_channels = self.reconstruct_filters

        if self.depthwise_seperable:
            self.build_depthwise_seperable_conv(
                "R-CNN%d" % self.reconstruct_layers, self.H[-1], self.cnn_size,
                input_channels, self.output_channels)
        else:
            self.build_conv("R-CNN%d" % self.reconstruct_layers, self.H[-1],
                            self.cnn_size, input_channels,
                            self.output_channels)

        self.y_ = tf.add(self.H[-1], self.x2, name="output")

        if self.save_weights:
            with tf.name_scope("Y_"):
                util.add_summaries("output",
                                   self.name,
                                   self.y_,
                                   save_stddev=True,
                                   save_mean=True)

        logging.info("Feature:%s Complexity:%s Receptive Fields:%d" %
                     (self.features, "{:,}".format(
                         self.complexity), self.receptive_fields))
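build_pixel_shuffler_layer is not shown in these examples, but the pixel-shuffler upsampling it refers to is conventionally the depth-to-space rearrangement: a tensor of shape [n, h, w, c * s * s] becomes [n, h * s, w * s, c]. A minimal TF 1.x sketch (the shapes are illustrative):

import tensorflow as tf

s = 2
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 64 * s * s])
up = tf.depth_to_space(x, block_size=s)  # -> [None, 64, 64, 64]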