import tensorflow as tf  # TF1.x-style API (tf.placeholder, tf.name_scope, Dimension.value)

# tf_helper and tf_ext are project-local layer helpers (add_conv2d, add_pool, add_fc, ...)
# referenced below; their import lines are not part of this excerpt.


def conv2d_input(x, filter=32, format="NHWC", name="conv_input", batch_normalization=True,
                 layer_list=None):
    """

    :param x:
    :param filter:
    :param format:
    :param name:
    :param batch_normalization:
    :param layer_list:
    :return:
    """
    with tf.name_scope(name) as scope:
        x = tf.convert_to_tensor(x)
        input_shape = x.get_shape()
        N, H, W, C = (0, 0, 0, 0)
        if format == "NHWC":
            N, H, W, C = input_shape[0].value, input_shape[1].value, input_shape[2].value, input_shape[3].value
        elif format == "NCHW":
            N, C, H, W = input_shape[0].value, input_shape[1].value, input_shape[2].value, input_shape[3].value

        filter_1 = filter if filter is not None else C
        y = tf_helper.add_conv2d(x, filter_1, h_kernel=3, w_kernel=3, name=scope, h_stride=1,
                                 w_stride=1, format=format, batch_normalization=batch_normalization,
                                 activation="leaky_relu", leaky_relu_alpha=0.1, padding="SAME")
        print("conv_input : ", y.shape)
        if layer_list is not None:
            layer_list.append(y)
        return y
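
# A minimal usage sketch (assumes tf_helper is importable; the 416x416x3 input
# shape is illustrative, a common YOLO resolution, not stated in the source):
#
#     inputs = tf.placeholder(tf.float32, [None, 416, 416, 3], name="inputs")
#     layers = []
#     net = conv2d_input(inputs, filter=32, layer_list=layers)
#     # SAME padding with stride 1 keeps H and W: net.shape -> (?, 416, 416, 32)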


def feature_out(x, classes_num, format="NHWC", name="feature_out", batch_normalization=True,
                layer_list=None):
    """
    Adds the 1x1 linear detection head producing (classes_num + 5) * 3 channels.

    :param x: input tensor
    :param classes_num: number of object classes; determines the head's channel count
    :param format: "NHWC" for channel last and "NCHW" for channel first. default is 'NHWC'
    :param name: name scope of the layer
    :param batch_normalization: whether to apply batch normalization after the convolution
    :param layer_list: if given, the created layer is appended to this list
    :return: output feature tensor
    """

    with tf.name_scope(name) as scope:
        x = tf.convert_to_tensor(x)
        input_shape = x.get_shape()
        N, H, W, C = (0, 0, 0, 0)
        if format == "NHWC":
            N, H, W, C = input_shape[0].value, input_shape[1].value, input_shape[2].value, input_shape[3].value
        elif format == "NCHW":
            N, C, H, W = input_shape[0].value, input_shape[1].value, input_shape[2].value, input_shape[3].value

        filter_1 = (classes_num + 5) * 3  # 3 anchors x (4 box offsets + 1 objectness + classes_num scores)

        feature = tf_helper.add_conv2d(x, filter_1, h_kernel=1, w_kernel=1, name=scope, h_stride=1,
                                       w_stride=1, format=format, batch_normalization=batch_normalization,
                                       activation="linear", leaky_relu_alpha=0.1, padding="SAME")
        print("conv_feature : ", feature.shape)
        if layer_list is not None:
            layer_list.append(feature)
        return feature
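
# Channel math for the head above: with 3 anchor boxes per grid cell and
# (classes_num + 5) values per box (4 offsets + 1 objectness + class scores),
# an 80-class setup gives (80 + 5) * 3 = 255 output channels:
#
#     feature = feature_out(net, classes_num=80)   # (?, H, W, 255) for NHWC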


def residual_block(x, filter=None, format="NHWC", name="residual_block", batch_normalization=True,
                   layer_list=None):
    """
    Adds a two-layer (1x1 then 3x3) residual block with a shortcut connection.

    :param x: input tensor
    :param filter: first conv2d layer filter size. if None, it will be half of the input tensor channel size.
    :param format: "NHWC" for channel last and "NCHW" for channel first. default is 'NHWC'
    :param name: name scope of the block
    :param batch_normalization: whether to apply batch normalization after each convolution
    :param layer_list: if given, the created conv layers are appended to this list
    :return: output tensor of the shortcut addition
    """
    with tf.name_scope(name) as scope:
        x = tf.convert_to_tensor(x)
        shortcut = x
        input_shape = x.get_shape()
        N, H, W, C = (0, 0, 0, 0)
        if format == "NHWC":
            N, H, W, C = input_shape[0].value, input_shape[1].value, input_shape[2].value, input_shape[3].value
        elif format == "NCHW":
            N, C, H, W = input_shape[0].value, input_shape[1].value, input_shape[2].value, input_shape[3].value

        filter_1 = filter if filter is not None else int(C / 2)
        filter_2 = C

        block_conv_1 = tf_helper.add_conv2d(x, filter_1, h_kernel=1, w_kernel=1, name="layer_1", h_stride=1,
                                            w_stride=1, format=format, batch_normalization=batch_normalization,
                                            activation="leaky_relu", leaky_relu_alpha=0.1, padding="SAME")
        print("conv_residual : ", block_conv_1.shape)
        block_conv_2 = tf_helper.add_conv2d(block_conv_1, filter_2, h_kernel=3, w_kernel=3, name="layer_2",
                                            h_stride=1,
                                            w_stride=1, format=format, batch_normalization=batch_normalization,
                                            activation="leaky_relu", leaky_relu_alpha=0.1, padding="SAME")
        print("conv_residual : ", block_conv_2.shape)
        y = tf_helper.add_shortcut(block_conv_2, shortcut, name=scope)
        print("shortcut : ", y.shape)
        if layer_list is not None:
            layer_list.append(block_conv_1)
            layer_list.append(block_conv_2)
        return y
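
# Because filter_2 equals the input channel count C, the block's output shape
# matches its input, so blocks can be stacked directly (repeat count illustrative):
#
#     for i in range(2):
#         net = residual_block(net, name="residual_block_%d" % i, layer_list=layers)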


class CONV(object):
    # NOTE: the base class is not shown in this excerpt; `object` is substituted
    # here so the snippet parses. The original calls super(CONV, self).__init__().
    def __init__(self,
                 input_size=784,
                 mid_units=100,
                 output_size=10,
                 lr=1e-4,
                 keep_prob=0.5,
                 output_op_name="output"):
        super(CONV, self).__init__()
        self.input_size = input_size
        self.mid_units = mid_units
        self.output_size = output_size
        self.lr = lr
        self.output_node_name = output_op_name

        mlp_graph = tf.Graph()
        with mlp_graph.as_default():
            x = tf.placeholder(tf.float32, [None, self.input_size],
                               name="input")
            y_t = tf.placeholder(tf.float32, [None, self.output_size],
                                 name="valid")
            x_image = tf.reshape(x, [-1, 28, 28, 1], name="reshape")
            conv1 = tf_ext.add_conv2d(x_image,
                                      output_size=32,
                                      h_kernel=5,
                                      w_kernel=5,
                                      activation="relu",
                                      name="conv1")
            pool1 = tf_ext.add_pool(conv1, name="pool1")
            conv2 = tf_ext.add_conv2d(pool1,
                                      output_size=64,
                                      h_kernel=5,
                                      w_kernel=5,
                                      activation="relu",
                                      name="conv2")
            pool2 = tf_ext.add_pool(conv2, name="pool2")

            flat = tf_ext.add_flatten(pool2, name="flatten")
            fc1 = tf_ext.add_fc(flat,
                                output_size=1024,
                                activation="relu",
                                name="fc1")
            drop = tf_ext.add_dropout(fc1,
                                      keep_prob=keep_prob,
                                      name="dropout1")
            fc2 = tf_ext.add_fc(drop,
                                self.output_size,
                                activation="relu",  # kept as in the source; a linear output layer is more usual before softmax
                                name="fc2")
            y = tf.nn.softmax(fc2, name=self.output_node_name)

            with tf.name_scope("train"):
                cross_entropy = -tf.reduce_sum(y_t * tf.log(y))
                train_op = tf.train.AdamOptimizer(
                    learning_rate=self.lr).minimize(cross_entropy)

            with tf.name_scope("predict"):
                correct_prediction = tf.equal(tf.argmax(y, 1),
                                              tf.argmax(y_t, 1))
                predict_op = tf.reduce_mean(
                    tf.cast(correct_prediction, tf.float32))

            # Saving with write_version V2 raises an error when creating a new
            # checkpoint file, so V1 is used here.
            # max_to_keep: number of output ckpt files to retain.

            saver = tf.train.Saver(write_version=tf.train.SaverDef.V1,
                                   max_to_keep=1)
            # The tf.train module itself is stored; presumably used later as
            # self.pb_saver.write_graph(...) when exporting the graph to .pb.
            pb_saver = tf.train
            init = tf.global_variables_initializer()

        self.graph = mlp_graph
        self.train_op = train_op
        self.predict_op = predict_op
        self.loss_op = cross_entropy
        self.x = x
        self.y = y
        self.y_t = y_t

        self.init = init
        self.saver = saver
        self.pb_saver = pb_saver
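
# A hedged usage sketch for the class above; `batch_x` / `batch_y` stand in for
# whatever data source the caller uses (illustrative names, not from the source):
#
#     model = CONV(lr=1e-4, keep_prob=0.5)
#     with tf.Session(graph=model.graph) as sess:
#         sess.run(model.init)
#         sess.run(model.train_op, feed_dict={model.x: batch_x, model.y_t: batch_y})
#         accuracy = sess.run(model.predict_op, feed_dict={model.x: batch_x, model.y_t: batch_y})
#         model.saver.save(sess, "./model.ckpt")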


def conv2d_5l_block(x, filter=None, format="NHWC", name="conv_5l", batch_normalization=True,
                    layer_list=None):
    """
    Adds five alternating 1x1 / 3x3 convolutions with filters (f, 2f, f, 2f, f).

    :param x: input tensor
    :param filter: first conv2d layer filter size. if None, it will be half of the input tensor channel size.
    :param format: "NHWC" for channel last and "NCHW" for channel first. default is 'NHWC'
    :param name: name scope of the block
    :param batch_normalization: whether to apply batch normalization after each convolution
    :param layer_list: if given, the created conv layers are appended to this list
    :return: output tensor of the fifth convolution
    """
    with tf.name_scope(name) as scope:
        x = tf.convert_to_tensor(x)
        input_shape = x.get_shape()
        N, H, W, C = (0, 0, 0, 0)
        if format == "NHWC":
            N, H, W, C = input_shape[0].value, input_shape[1].value, input_shape[2].value, input_shape[3].value
        elif format == "NCHW":
            N, C, H, W = input_shape[0].value, input_shape[1].value, input_shape[2].value, input_shape[3].value

        filter_1 = filter if filter is not None else int(C / 2)
        filter_2 = int(filter_1 * 2)
        filter_3 = filter_1
        filter_4 = int(filter_1 * 2)
        filter_5 = filter_1

        block_conv_1 = tf_helper.add_conv2d(x, filter_1, h_kernel=1, w_kernel=1, name="layer_1", h_stride=1,
                                            w_stride=1, format=format, batch_normalization=batch_normalization,
                                            activation="leaky_relu", leaky_relu_alpha=0.1, padding="SAME")
        print("conv5l : ", block_conv_1.shape)

        block_conv_2 = tf_helper.add_conv2d(block_conv_1, filter_2, h_kernel=3, w_kernel=3, name="layer_2",
                                            h_stride=1,
                                            w_stride=1, format=format, batch_normalization=batch_normalization,
                                            activation="leaky_relu", leaky_relu_alpha=0.1, padding="SAME")
        print("conv5l : ", block_conv_2.shape)

        block_conv_3 = tf_helper.add_conv2d(block_conv_2, filter_3, h_kernel=1, w_kernel=1, name="layer_3",
                                            h_stride=1,
                                            w_stride=1, format=format, batch_normalization=batch_normalization,
                                            activation="leaky_relu", leaky_relu_alpha=0.1, padding="SAME")
        print("conv5l : ", block_conv_3.shape)

        block_conv_4 = tf_helper.add_conv2d(block_conv_3, filter_4, h_kernel=3, w_kernel=3, name="layer_4",
                                            h_stride=1,
                                            w_stride=1, format=format, batch_normalization=batch_normalization,
                                            activation="leaky_relu", leaky_relu_alpha=0.1, padding="SAME")
        print("conv5l : ", block_conv_4.shape)

        block_conv_5 = tf_helper.add_conv2d(block_conv_4, filter_5, h_kernel=1, w_kernel=1, name="layer_5",
                                            h_stride=1,
                                            w_stride=1, format=format, batch_normalization=batch_normalization,
                                            activation="leaky_relu", leaky_relu_alpha=0.1, padding="SAME")
        print("conv5l : ", block_conv_5.shape)
        y = block_conv_5

        if layer_list is not None:
            layer_list.append(block_conv_1)
            layer_list.append(block_conv_2)
            layer_list.append(block_conv_3)
            layer_list.append(block_conv_4)
            layer_list.append(block_conv_5)
        return y
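
# The five layers alternate 1x1 bottlenecks and 3x3 convolutions over filters
# (f, 2f, f, 2f, f), the YOLOv3-style detection-block pattern. A hedged usage
# sketch, feeding the head defined earlier:
#
#     net = conv2d_5l_block(net, filter=512, layer_list=layers)
#     feature = feature_out(net, classes_num=80, layer_list=layers)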