Example #1
def parse_conv_params(params):
    # params holds ["nkernels", "stride", "num_outputs"] plus an optional
    # trailing nonlinearity name; 'relu' is the default when it is omitted.
    nonlinearity = 'relu'
    if len(params) == 4:
        params, nonlinearity = params[:-1], params[-1]
    nkernels, stride, num_outputs = [parse_math(p) for p in params]

    return nkernels, stride, num_outputs, nonlinearity
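A quick usage sketch (parse_math comes from the surrounding repository and is assumed here to evaluate simple integer expressions; the argument values are illustrative):

parse_conv_params(["4", "2", "64"])          # -> (4, 2, 64, 'relu')
parse_conv_params(["4", "2", "64", "tanh"])  # -> (4, 2, 64, 'tanh')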
Example #2
def run_network(inpt,
                string,
                is_training,
                debug=False,
                strip_batchnorm_from_last_layer=False):
    maybe_fc_batch_norm = layers.batch_norm
    maybe_conv_batch_norm = conv_batch_norm

    if debug:
        print("%s architecture" % (tf.get_variable_scope().name, ))

    layer_idx = 0

    out = inpt
    layer_strs = string.split(",")
    for i, layer in enumerate(layer_strs):
        if i + 1 == len(layer_strs) and strip_batchnorm_from_last_layer:
            maybe_fc_batch_norm = None
            maybe_conv_batch_norm = None

        if layer.startswith("conv:"):
            nkernels, stride, num_outputs, nonlinearity_str = parse_conv_params(
                layer[len("conv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.convolution2d(
                out,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                normalizer_params={"is_training": is_training},
                normalizer_fn=maybe_conv_batch_norm,
                activation_fn=nonlinearity,
                scope='layer_%d' % (layer_idx, ))
            layer_idx += 1

            if debug:
                print(
                    "Convolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s"
                    % (nkernels, stride, num_outputs, nonlinearity_str))

        elif layer.startswith("deconv:"):
            nkernels, stride, num_outputs, nonlinearity_str = parse_conv_params(
                layer[len("deconv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.convolution2d_transpose(
                out,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                activation_fn=nonlinearity,
                normalizer_fn=maybe_conv_batch_norm,
                normalizer_params={"is_training": is_training},
                scope='layer_%d' % (layer_idx, ))
            layer_idx += 1
            if debug:
                print(
                    "Deconvolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s"
                    % (nkernels, stride, num_outputs, nonlinearity_str))
        elif layer.startswith("fc:"):
            params = layer[len("fc:"):].split(":")
            nonlinearity_str = 'relu'
            if len(params) == 2:
                params, nonlinearity_str = params[:-1], params[-1]
            num_outputs = parse_math(params[0])
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.fully_connected(out,
                                         num_outputs=num_outputs,
                                         activation_fn=nonlinearity,
                                         normalizer_fn=maybe_fc_batch_norm,
                                         normalizer_params={
                                             "is_training": is_training,
                                             "updates_collections": None
                                         },
                                         scope='layer_%d' % (layer_idx, ))
            layer_idx += 1
            if debug:
                print("Fully connected with num_outputs=%d followed by %s" %
                      (num_outputs, nonlinearity_str))
        elif layer.startswith("reshape:"):
            params = layer[len("reshape:"):].split(":")
            dims = [parse_math(dim) for dim in params]
            out = tf.reshape(out, [-1] + dims)
            if debug:
                print("Reshape to %r" % (dims, ))
        else:
            raise ValueError("Could not parse layer description: %r" %
                             (layer, ))
    if debug:
        print("")
    return out
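A hedged usage sketch for this function (the layer-string grammar is inferred from the parsing above; conv_batch_norm, parse_math, and NONLINEARITY_NAME_TO_F are assumed to come from the surrounding repository, and the discriminator-style layer string is illustrative):

images = tf.placeholder(tf.float32, [None, 64, 64, 3])
with tf.variable_scope("discriminator"):
    logits = run_network(
        images,
        "conv:4:2:64,conv:4:2:128,reshape:16*16*128,fc:1:sigmoid",
        is_training=True,
        debug=True,
        strip_batchnorm_from_last_layer=True)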
Example #3
def run_network(inpt, string, is_training, debug=False, strip_batchnorm_from_last_layer=False):
    maybe_fc_batch_norm   = layers.batch_norm
    maybe_conv_batch_norm = conv_batch_norm

    if debug:
        print ("%s architecture" % (tf.get_variable_scope().name,))

    layer_idx = 0

    out = inpt
    layer_strs = string.split(",")
    for i, layer in enumerate(layer_strs):
        if i + 1 == len(layer_strs) and strip_batchnorm_from_last_layer:
            maybe_fc_batch_norm   = None
            maybe_conv_batch_norm = None

        if layer.startswith("conv:"):
            nkernels, stride, num_outputs, nonlinearity_str = parse_conv_params(layer[len("conv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.convolution2d(
                out,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                normalizer_params={"is_training": is_training},
                normalizer_fn=maybe_conv_batch_norm,
                activation_fn=nonlinearity,
                scope='layer_%d' % (layer_idx,)
            )
            layer_idx += 1

            if debug:
                print ("Convolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s" %
                        (nkernels, stride, num_outputs, nonlinearity_str))

        elif layer.startswith("deconv:"):
            nkernels, stride, num_outputs, nonlinearity_str = parse_conv_params(layer[len("deconv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.convolution2d_transpose(
                out,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                activation_fn=nonlinearity,
                normalizer_fn=maybe_conv_batch_norm,
                normalizer_params={"is_training": is_training},
                scope='layer_%d' % (layer_idx,)
            )
            layer_idx += 1
            if debug:
                print ("Deconvolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s" %
                        (nkernels, stride, num_outputs, nonlinearity_str))
        elif layer.startswith("fc:"):
            params = layer[len("fc:"):].split(":")
            nonlinearity_str = 'relu'
            if len(params) == 2:
                params, nonlinearity_str = params[:-1], params[-1]
            num_outputs = parse_math(params[0])
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.fully_connected(
                out,
                num_outputs=num_outputs,
                activation_fn=nonlinearity,
                normalizer_fn=maybe_fc_batch_norm,
                normalizer_params={"is_training": is_training, "updates_collections": None},
                scope='layer_%d' % (layer_idx,)
            )
            layer_idx += 1
            if debug:
                print ("Fully connected with num_outputs=%d followed by %s" %
                        (num_outputs, nonlinearity_str))
        elif layer.startswith("reshape:"):
            params = layer[len("reshape:"):].split(":")
            dims = [parse_math(dim) for dim in params]
            out = tf.reshape(out, [-1] + dims)
            if debug:
                print("Reshape to %r" % (dims,))
        else:
            raise ValueError("Could not parse layer description: %r" % (layer,))
    if debug:
        print("")
    return out
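Both run_network variants above look up activations in NONLINEARITY_NAME_TO_F, which is defined elsewhere in the repository. A minimal sketch of such a mapping, assuming standard TensorFlow activations (the exact keys in the original are not shown here):

NONLINEARITY_NAME_TO_F = {
    'relu': tf.nn.relu,        # the default in parse_conv_params
    'tanh': tf.nn.tanh,
    'sigmoid': tf.nn.sigmoid,
}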
Example #4
def run_network(inpt,
                string,
                is_training,
                use_batch_norm,
                debug=False,
                strip_batchnorm_from_last_layer=False):
    # When use_batch_norm is set, both of the following take effect:
    # layers.batch_norm batch-normalizes the fully connected layers' outputs,
    # while conv_batch_norm batch-normalizes the convolutional layers'.
    maybe_fc_batch_norm = layers.batch_norm if use_batch_norm else None
    maybe_conv_batch_norm = conv_batch_norm if use_batch_norm else None

    if debug:
        print("%s architecture" % (tf.get_variable_scope().name, ))

    layer_idx = 0

    out = inpt
    layer_strs = string.split(",")
    for i, layer in enumerate(layer_strs):
        # Skip batch norm for the last layer when requested.
        if i + 1 == len(layer_strs) and strip_batchnorm_from_last_layer:
            maybe_fc_batch_norm = None
            maybe_conv_batch_norm = None

        # Convolutional layer.
        if layer.startswith("conv:"):
            nkernels, stride, num_outputs, nonlinearity_str = parse_conv_params(
                layer[len("conv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.convolution2d(
                out,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                normalizer_params={"is_training": is_training},
                normalizer_fn=maybe_conv_batch_norm,
                activation_fn=nonlinearity,
                scope='layer_%d' % (layer_idx, ))
            layer_idx += 1

            if debug:
                print(
                    "Convolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s"
                    % (nkernels, stride, num_outputs, nonlinearity_str))
        # Transposed-convolution ("deconvolution") layer.
        # For a detailed walkthrough of the operation, see
        # http://blog.csdn.net/fate_fjh/article/details/52882134 (in Chinese),
        # which explains it very well. Roughly: for an N1*N1 input, an N2*N2
        # kernel, and stride K, the output is (N1-1)*K+N2 (with VALID padding).
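        # (Hypothetical worked instance of that formula: a 4x4 input with a
        # 3x3 kernel and stride 2 gives (4-1)*2+3 = 11, i.e. an 11x11 output.)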
        elif layer.startswith("deconv:"):
            nkernels, stride, num_outputs, nonlinearity_str = parse_conv_params(
                layer[len("deconv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.convolution2d_transpose(
                out,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                activation_fn=nonlinearity,
                normalizer_fn=maybe_conv_batch_norm,
                normalizer_params={"is_training": is_training},
                scope='layer_%d' % (layer_idx, ))
            layer_idx += 1
            if debug:
                print(
                    "Deconvolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s"
                    % (nkernels, stride, num_outputs, nonlinearity_str))
        # Fully connected layer, via TensorFlow's built-in fully_connected.
        # Per its documentation: when a batch-norm normalizer_fn is supplied
        # the layer has no bias, and the default activation function is relu.
        elif layer.startswith("fc:"):
            params = layer[len("fc:"):].split(":")
            nonlinearity_str = 'relu'
            if len(params) == 2:
                params, nonlinearity_str = params[:-1], params[-1]
            num_outputs = parse_math(params[0])
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.fully_connected(out,
                                         num_outputs=num_outputs,
                                         activation_fn=nonlinearity,
                                         normalizer_fn=maybe_fc_batch_norm,
                                         normalizer_params={
                                             "is_training": is_training,
                                             "updates_collections": None
                                         },
                                         scope='layer_%d' % (layer_idx, ))
            layer_idx += 1
            if debug:
                print("Fully connected with num_outputs=%d followed by %s" %
                      (num_outputs, nonlinearity_str))
        # Reshape layer.
        elif layer.startswith("reshape:"):
            params = layer[len("reshape:"):].split(":")
            dims = [parse_math(dim) for dim in params]
            out = tf.reshape(out, [-1] + dims)
            if debug:
                print("Reshape to %r" % (dims, ))
        else:
            raise ValueError("Could not parse layer description: %r" %
                             (layer, ))
    if debug:
        print("")
    return out
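A similar hedged sketch for this variant, which adds the use_batch_norm switch (the generator-style layer string is illustrative, not from the original):

z = tf.placeholder(tf.float32, [None, 100])
with tf.variable_scope("generator"):
    img = run_network(
        z,
        "fc:8*8*128,reshape:8:8:128,deconv:4:2:64,deconv:4:2:3:tanh",
        is_training=True,
        use_batch_norm=True,
        strip_batchnorm_from_last_layer=True)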