예제 #1
0
    def build_net(self):
        """Assemble the AlexNet graph and return its readout (logits) layer."""
        with tf.name_scope('Alexnet_model'):
            with Builder(**self.build_params) as alexnet_builder:
                # Wire the dropout and train/eval-state placeholders into the builder
                alexnet_builder.control_params(Dropout_control=self.dropout_placeholder, State=self.state_placeholder)

                # --- Convolutional feature extractor ---
                net = alexnet_builder.Conv2d_layer(self.input_placeholder, stride=[1, 4, 4, 1], k_size=[11, 11], filters=96, padding='VALID', Batch_norm=True)
                net = alexnet_builder.Pool_layer(net, k_size=[1, 3, 3, 1], padding='VALID')

                # Symmetric padding before the 5x5 stage compensates for VALID conv
                net = alexnet_builder.Pad_layer(net, p_type='SYMMETRIC')
                net = alexnet_builder.Conv2d_layer(net, k_size=[5, 5], filters=256, padding='VALID', Batch_norm=True)
                net = alexnet_builder.Pool_layer(net, k_size=[1, 3, 3, 1], padding='VALID')

                # Three consecutive conv layers (384, 384, 256 filters)
                net = alexnet_builder.Conv2d_layer(net, filters=384, Batch_norm=True)
                net = alexnet_builder.Conv2d_layer(net, filters=384, Batch_norm=True)
                net = alexnet_builder.Conv2d_layer(net, filters=256, Batch_norm=True)
                net = alexnet_builder.Pool_layer(net, k_size=[1, 3, 3, 1])

                # --- Densely connected classifier head ---
                net = alexnet_builder.FC_layer(net, filters=4096)
                net = alexnet_builder.Dropout_layer(net)
                net = alexnet_builder.FC_layer(net, filters=4096)
                net = alexnet_builder.Dropout_layer(net)

                # Readout layer sized to the configured number of classes
                return alexnet_builder.FC_layer(net, filters=self.build_params['Classes'], readout=True)
예제 #2
0
    def build_net(self):
        """Assemble the VGG-19 graph and return its readout (logits) layer."""
        with tf.name_scope('Vgg19_model'):
            with Builder(**self.build_params) as vgg19_builder:
                # Hook the dropout placeholder into the builder
                vgg19_builder.control_params(
                    Dropout_control=self.dropout_placeholder)

                # Feature extractor: five conv stacks, each followed by a
                # pooling layer.  (repeats, filters) follows the VGG-19 layout:
                # 2x64, 2x128, 4x256, 4x512, 4x512.
                net = self.input_placeholder
                for repeats, n_filters in ((2, 64), (2, 128), (4, 256),
                                           (4, 512), (4, 512)):
                    for _ in range(repeats):
                        net = vgg19_builder.Conv2d_layer(net, filters=n_filters)
                    net = vgg19_builder.Pool_layer(net)

                # Classifier head: two dropout-regularized 4096-unit FC layers
                net = vgg19_builder.FC_layer(net, filters=4096)
                net = vgg19_builder.Dropout_layer(net)
                net = vgg19_builder.FC_layer(net, filters=4096)
                net = vgg19_builder.Dropout_layer(net)

                # Readout layer sized to the configured number of classes
                return vgg19_builder.FC_layer(
                    net, filters=self.build_params['Classes'], readout=True)
예제 #3
0
    def build_net(self):
        """Build the LeNet graph and return the readout (logits) layer.

        Returns:
            The tensor produced by the final FC layer, sized to
            ``self.build_params['Classes']``.
        """
        with tf.name_scope('LeNet_Model'):
            with Builder(**self.build_params) as lenet_builder:
                # Reshape the flat input placeholder into an image tensor
                input_reshape = lenet_builder.Reshape_input(self.input_placeholder)

                # Two conv+pool stages for feature extraction
                conv1 = lenet_builder.Conv2d_layer(input_reshape, k_size=[5, 5])
                pool1 = lenet_builder.Pool_layer(conv1)

                conv2 = lenet_builder.Conv2d_layer(pool1, k_size=[5, 5], filters=64)
                pool2 = lenet_builder.Pool_layer(conv2)

                # Densely connected head (stray trailing semicolon removed)
                fc1 = lenet_builder.FC_layer(pool2)
                output = lenet_builder.FC_layer(fc1, filters=self.build_params['Classes'], readout=True)

                return output
예제 #4
0
파일: run.py 프로젝트: NuAge-Solutions/OJ
def run(src_path):
    """Parse command-line arguments and dispatch an Objective-JS action.

    Supported actions: add, build, compile, dist, remove, setup, install.

    Args:
        src_path: Root of the source tree; also used to derive the default
            build destination (``<src_path>/builds``).

    Raises:
        Exception: If the "build" action is requested without naming at
            least one package.
    """
    # setup script arguments
    parser = argparse.ArgumentParser(description="Compile Objective-JS Package(s).")

    parser.add_argument(
        "action", choices=["add", "build", "compile", "dist", "remove", "setup", "install"], default="compile",
        help="The action to take when running the script."
    )

    parser.add_argument(
        "packages", type=str, nargs="*", default=ALL,
        help="Action \"add\": The classes you want to add. At least one class must be specified.\n" +
             "Action \"build\": The packages you want to build. Will build all packages if nothing is specified.\n" +
             "Action \"compile\": The packages you want to compile. Will compile all packages if nothing is specified.\n" +
             "Action \"dist\": The packages you want to distribute.\n" +
             "Action \"remove\": The classes you want to remove. At least one class must be specified.\n" +
             "Action \"setup\": The first argument is the package name and the second is the destination.\n" +
             "***Note: Package names 'android', 'ios', 'linux', 'osx', & 'windows' are reserved."
    )

    parser.add_argument(
        "-v", action="store_true", default=False,
        help="Show verbose output."
    )

    parser.add_argument(
        "--mode", choices=[DEV, PROD], default="prod",
        help="The mode to compile in. Note that prod will automatically update dev as well."
    )

    parser.add_argument(
        "--types", nargs="*", choices=[ALL, CSS, JS, TEMPLATE, THEME], default=ALL,
        help="The file types to compile. Will compile all file types if nothing is specified."
    )

    parser.add_argument(
        "--destination", type=str, default=os.path.join(src_path, "builds"),
        help="The path to the build directory."
    )

    parser.add_argument(
        "--verbose", type=int, default=0,
        help="Show verbose output."
    )

    # process the script args
    args = parser.parse_args()

    kwargs = {
        "types": args.types
    }

    # "all" expands to the full list of compilable types
    if ALL in args.types:
        kwargs["types"] = TYPES

    # setup verbose setting; -v acts as a shorthand for --verbose 1
    utils.VERBOSE = args.verbose

    if args.v and utils.VERBOSE == 0:
        utils.VERBOSE = 1

    destination, mode, packages = _process_args(args)

    # figure out what action to take
    if args.action == "add":
        # setup compiler instance
        from utils.manage import add

        kwargs["classes"] = packages

        add(src_path, **kwargs)

    elif args.action == "build":
        # check that at least one package was specified
        if args.packages == ALL:
            raise Exception("When building at least one package must be specified. All/empty is unsupported.")

        # setup compiler instance
        from utils.builder import Builder

        builder = Builder("manifest.json")

        # build the compile kwargs
        kwargs["mode"] = mode

        # the builder API does not accept "types"
        kwargs.pop("types")

        # run the compiler
        builder.run(destination, packages, **kwargs)

    elif args.action == "compile":
        # setup compiler instance
        from utils.compiler import Compiler

        compiler = Compiler("manifest.json")

        # build the compile kwargs
        kwargs["mode"] = mode
        kwargs["packages"] = packages

        # run the compiler
        compiler.run(destination, **kwargs)

    elif args.action == "dist":
        from zipfile import ZipFile

        for package in packages:
            zip_name = "{}{}.zip".format(
                package,
                "-dev" if mode == DEV else ""
            )

            # "archive" instead of "zip": avoid shadowing the builtin zip()
            with ZipFile(os.path.join("builds", "dist", zip_name)) as archive:
                archive.extractall(destination)

    elif args.action == "install":
        # install the node-based minification toolchain globally
        sh.npm.install("-g", "uglify-es")
        sh.npm.install("-g", "clean-css")
        sh.npm.install("-g", "clean-css-cli")

    elif args.action == "remove":
        # setup compiler instance
        from utils.manage import remove

        kwargs["classes"] = packages

        remove(src_path, **kwargs)

    elif args.action == "setup":
        from utils.manage import setup

        setup(src_path, *packages)
예제 #5
0
    def build_net(self):
        """Build the FRRN-A (Full-Resolution Residual Network, variant A) graph.

        The network maintains two parallel streams: a full-resolution residual
        stream and a repeatedly pooled "pooling" stream, fused at every FRRU.
        Returns a single-channel output tensor (Activation=False on the final
        conv, so callers apply their own activation/loss).
        """
        with tf.name_scope('FRRN_A'):
            with Builder(**self.build_params) as frnn_a_builder:

                #Setting control params
                frnn_a_builder.control_params(Dropout_control=self.dropout_placeholder, State=self.state_placeholder)

                #Construct functional building blocks
                def RU(input, filters):
                    """Residual Unit: two convs + a 1x1 linear projection,
                    added back onto the input via a residual connection."""
                    with tf.name_scope('Residual_Unit'):
                        Conv1 = frnn_a_builder.Conv2d_layer(input, stride=[1, 1, 1, 1], filters=filters, Batch_norm=True)
                        Conv2 = frnn_a_builder.Conv2d_layer(Conv1, stride=[1, 1, 1, 1], filters=filters, Batch_norm=True)
                        # 1x1, no activation: linear projection before the add
                        Conv3 = frnn_a_builder.Conv2d_layer(Conv2, k_size=[1, 1], stride=[1, 1, 1, 1], filters=filters, Activation=False)

                        return frnn_a_builder.Residual_connect([input, Conv3])

                def FRRU(Residual_stream, Pooling_stream, scale_factor, filters, res_filters=32):
                    """Full-Resolution Residual Unit.

                    Max-pools the residual stream down by `scale_factor`,
                    concatenates it with the pooling stream, applies two convs,
                    projects back to `res_filters` channels, and unpools (using
                    the recorded max indices) to update the residual stream.
                    Returns (new_residual_stream, new_pooling_stream) where the
                    pooling-stream output is the pre-projection Conv2.
                    """
                    with tf.name_scope('Full_Resolution_Unit'):
                        scale_dims = [1, scale_factor, scale_factor, 1]
                        # MAXIND pooling also returns the argmax indices needed for unpooling
                        Pool, Ind = frnn_a_builder.Pool_layer(Residual_stream, k_size=scale_dims, stride=scale_dims, pooling_type='MAXIND')

                        Concat = frnn_a_builder.Concat([Pool, Pooling_stream])

                        #Conv0 = frnn_a_builder.Conv2d_layer(Concat, stride=[1,1,1,1], k_size=[1,1], filters=filters, Batch_norm=True)
                        Conv1 = frnn_a_builder.Conv2d_layer(Concat, stride=[1, 1, 1, 1], filters=filters, Batch_norm=True)
                        Conv2 = frnn_a_builder.Conv2d_layer(Conv1, stride=[1, 1, 1, 1], filters=filters, Batch_norm=True)

                        #Res_connect = frnn_a_builder.Residual_connect([Conv0, Conv2])
                        # 1x1 linear projection back to the residual stream's channel count
                        Conv3 = frnn_a_builder.Conv2d_layer(Conv2, k_size=[1, 1], stride=[1, 1, 1, 1], filters=res_filters, Activation=False)

                        Unpool = frnn_a_builder.Unpool_layer(Conv3, Ind, k_size = scale_dims)
                    Residual_stream_out = frnn_a_builder.Residual_connect([Residual_stream, Unpool])
                    Pooling_stream_out = Conv2

                    return Residual_stream_out, Pooling_stream_out
                    #return Conv2

                #Model Construction
                # Stem: 5x5 conv + residual unit at full resolution
                Stem = frnn_a_builder.Conv2d_layer(self.input_placeholder, stride=[1, 1, 1, 1], k_size=[5, 5], filters=48, Batch_norm=True)
                Stem = RU(Stem, 48)
                Stem_pool = frnn_a_builder.Pool_layer(Stem)

                Stem_pool = RU(Stem_pool, 48)
                Stem_pool = RU(Stem_pool, 48)

                # Split into the two streams: 1x1 conv feeds the residual stream,
                # MAXIND pool (indices kept for the decoder) feeds the pooling stream
                Residual_stream = frnn_a_builder.Conv2d_layer(Stem_pool, stride=[1, 1, 1, 1], k_size=[1, 1], filters=32, Batch_norm=True)
                Pooling_stream, ind1 = frnn_a_builder.Pool_layer(Stem_pool, pooling_type='MAXIND')

                #Encoder
                scale_factor = 2
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=96)
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=96)
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=96)

                Pooling_stream, ind2 = frnn_a_builder.Pool_layer(Pooling_stream, pooling_type='MAXIND')

                scale_factor = 4
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=192)
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=192)
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=192)
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=192)

                Pooling_stream, ind3 = frnn_a_builder.Pool_layer(Pooling_stream, pooling_type='MAXIND')

                scale_factor=8
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=384)
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=384)

                Pooling_stream, ind4 = frnn_a_builder.Pool_layer(Pooling_stream, pooling_type='MAXIND')

                scale_factor=16
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=384)
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=384)


                #Decoder
                # Unpool in reverse order using the indices saved in the encoder
                Pooling_stream = frnn_a_builder.Unpool_layer(Pooling_stream, ind4)
                scale_factor = 8
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=192)
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=192)


                Pooling_stream = frnn_a_builder.Unpool_layer(Pooling_stream, ind3)

                scale_factor = 4
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=192)
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=192)

                # 1x1 conv shrinks channels before each unpool stage
                Pooling_stream = frnn_a_builder.Conv2d_layer(Pooling_stream, stride=[1, 1, 1, 1], k_size=[1, 1], filters=96, Batch_norm=True)
                Pooling_stream = frnn_a_builder.Unpool_layer(Pooling_stream, ind2)

                scale_factor = 2
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=96)
                Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=96)

                Pooling_stream = frnn_a_builder.Conv2d_layer(Pooling_stream, stride=[1, 1, 1, 1], k_size=[1, 1], filters=48, Batch_norm=True)
                Pooling_stream = frnn_a_builder.Unpool_layer(Pooling_stream, ind1)

                # Merge the two streams and refine with residual units
                RP_stream_merge = frnn_a_builder.Concat([Pooling_stream, Residual_stream])
                Conv3 = frnn_a_builder.Conv2d_layer(RP_stream_merge, stride=[1, 1, 1, 1], k_size=[1, 1], filters=48, Batch_norm=True)

                Conv3 = RU(Conv3, 48)
                Conv3 = RU(Conv3, 48)



                # Upsample back to the configured input size and fuse with the stem
                Upconv = frnn_a_builder.Upconv_layer(Conv3, stride=[1, 2, 2, 1], filters=48, Batch_norm=True, output_shape=[self.build_params['Image_width'], self.build_params['Image_height']])
                Res_connect = frnn_a_builder.Residual_connect([Stem, Upconv])
                Res_connect = RU(Res_connect, 48)
                # Single-channel linear output (no activation)
                output = frnn_a_builder.Conv2d_layer(Res_connect, filters=1, stride=[1, 1, 1, 1], k_size=[1, 1], Batch_norm=True, Activation=False)
                return output
예제 #6
0
    def build_net(self):
        """Build the Inception-ResNet-v2 graph and return the readout layer.

        Block layout follows the paper: stem, 5x Inception-ResNet-A,
        Reduction-A, 10x Inception-ResNet-B, Reduction-B,
        5x Inception-ResNet-C, average pooling, dropout, FC readout.
        Intermediate block outputs are exposed via ``self.Endpoints``.
        """
        with tf.name_scope('Inception_Resnet_v2_model'):
            with Builder(**self.build_params) as inceprv2_builder:
                #Setting control params
                inceprv2_builder.control_params(
                    Dropout_control=self.dropout_placeholder,
                    State=self.state_placeholder)

                #Construct functional building blocks
                def stem(input):
                    """Stem block: initial strided convs plus two
                    conv/pool splits joined by concatenation."""
                    with tf.name_scope('Stem') as scope:
                        conv1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 2, 2, 1],
                            filters=32,
                            padding='VALID',
                            Batch_norm=True)
                        conv2 = inceprv2_builder.Conv2d_layer(
                            conv1,
                            stride=[1, 1, 1, 1],
                            filters=32,
                            padding='VALID',
                            Batch_norm=True)
                        conv3 = inceprv2_builder.Conv2d_layer(
                            conv2,
                            stride=[1, 1, 1, 1],
                            filters=64,
                            Batch_norm=True)

                        # Split 1: strided conv branch vs. max-pool branch
                        conv1a_split1 = inceprv2_builder.Conv2d_layer(
                            conv3,
                            stride=[1, 2, 2, 1],
                            filters=96,
                            padding='VALID',
                            Batch_norm=True)
                        pool1b_split1 = inceprv2_builder.Pool_layer(
                            conv3, padding='VALID')

                        concat1 = inceprv2_builder.Concat(
                            [conv1a_split1, pool1b_split1])

                        # Split 2, branch a: 1x1 -> 7x1 -> 1x7 -> 3x3(VALID)
                        conv1a_split2 = inceprv2_builder.Conv2d_layer(
                            concat1,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=64,
                            Batch_norm=True)
                        conv2a_split2 = inceprv2_builder.Conv2d_layer(
                            conv1a_split2,
                            stride=[1, 1, 1, 1],
                            k_size=[7, 1],
                            filters=64,
                            Batch_norm=True)
                        conv3a_split2 = inceprv2_builder.Conv2d_layer(
                            conv2a_split2,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 7],
                            filters=64,
                            Batch_norm=True)
                        conv4a_split2 = inceprv2_builder.Conv2d_layer(
                            conv3a_split2,
                            stride=[1, 1, 1, 1],
                            filters=96,
                            padding='VALID',
                            Batch_norm=True)

                        # Split 2, branch b: 1x1 -> 3x3(VALID)
                        conv1b_split2 = inceprv2_builder.Conv2d_layer(
                            concat1,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=64,
                            Batch_norm=True)
                        conv2b_split2 = inceprv2_builder.Conv2d_layer(
                            conv1b_split2,
                            stride=[1, 1, 1, 1],
                            filters=96,
                            padding='VALID',
                            Batch_norm=True)

                        concat2 = inceprv2_builder.Concat(
                            [conv4a_split2, conv2b_split2])

                        # Split 3: max-pool branch vs. strided conv branch
                        pool1a_split3 = inceprv2_builder.Pool_layer(
                            concat2, padding="VALID")
                        conv1b_split3 = inceprv2_builder.Conv2d_layer(
                            concat2,
                            stride=[1, 2, 2, 1],
                            filters=192,
                            padding='VALID',
                            Batch_norm=True)

                        concat3 = inceprv2_builder.Concat(
                            [pool1a_split3, conv1b_split3])
                        return concat3

                def inception_resnet_A(input):
                    """Inception-ResNet-A block: three parallel branches,
                    concatenated, 1x1-projected (linear), scaled, and added
                    back onto the input."""
                    with tf.name_scope('Inception_Resnet_A') as scope:
                        # Branch a: 1x1 -> 3x3 -> 3x3
                        conv1a_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=32,
                            Batch_norm=True)
                        conv2a_split1 = inceprv2_builder.Conv2d_layer(
                            conv1a_split1,
                            stride=[1, 1, 1, 1],
                            filters=48,
                            Batch_norm=True)
                        conv3a_split1 = inceprv2_builder.Conv2d_layer(
                            conv2a_split1,
                            stride=[1, 1, 1, 1],
                            filters=64,
                            Batch_norm=True)

                        # Branch b: 1x1 -> 3x3
                        conv1b_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=32,
                            Batch_norm=True)
                        conv2b_split1 = inceprv2_builder.Conv2d_layer(
                            conv1b_split1,
                            stride=[1, 1, 1, 1],
                            filters=32,
                            Batch_norm=True)

                        # Branch c: plain 1x1
                        conv1c_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=32,
                            Batch_norm=True)

                        concat1 = inceprv2_builder.Concat(
                            [conv3a_split1, conv2b_split1, conv1c_split1])

                        # Linear 1x1 projection back to the residual width
                        conv2 = inceprv2_builder.Conv2d_layer(
                            concat1,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=384,
                            Batch_norm=True,
                            Activation=False)

                        conv2_scale = inceprv2_builder.Scale_activations(
                            conv2, scaling_factor=1)
                        residual_out = inceprv2_builder.Residual_connect(
                            [input, conv2_scale])

                        return residual_out

                def reduction_A(input):
                    """Reduction-A block: halves spatial resolution via a
                    conv chain, a strided conv, and a pool, concatenated."""
                    with tf.name_scope('Reduction_A') as scope:
                        '''
                        k=256, l=256, m=384, n=384
                        '''
                        conv1a_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=256,
                            Batch_norm=True)
                        conv2a_split1 = inceprv2_builder.Conv2d_layer(
                            conv1a_split1,
                            stride=[1, 1, 1, 1],
                            filters=256,
                            Batch_norm=True)
                        conv3a_split1 = inceprv2_builder.Conv2d_layer(
                            conv2a_split1,
                            stride=[1, 2, 2, 1],
                            filters=384,
                            padding='VALID',
                            Batch_norm=True)

                        conv1b_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 2, 2, 1],
                            filters=384,
                            padding='VALID',
                            Batch_norm=True)

                        pool1c_split1 = inceprv2_builder.Pool_layer(
                            input, padding='VALID')

                        concat = inceprv2_builder.Concat(
                            [conv3a_split1, conv1b_split1, pool1c_split1])

                        return concat

                def inception_resnet_B(input):
                    """Inception-ResNet-B block: factorized 1x7/7x1 branch
                    plus a 1x1 branch, projected and residually added."""
                    with tf.name_scope('Inception_Resnet_B') as scope:
                        # Branch a: 1x1 -> 1x7 -> 7x1 (factorized 7x7)
                        conv1a_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=128,
                            Batch_norm=True)
                        conv2a_split1 = inceprv2_builder.Conv2d_layer(
                            conv1a_split1,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 7],
                            filters=160,
                            Batch_norm=True)
                        conv3a_split1 = inceprv2_builder.Conv2d_layer(
                            conv2a_split1,
                            stride=[1, 1, 1, 1],
                            k_size=[7, 1],
                            filters=192,
                            Batch_norm=True)

                        # Branch b: plain 1x1
                        conv1b_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=192,
                            Batch_norm=True)

                        concat1 = inceprv2_builder.Concat(
                            [conv3a_split1, conv1b_split1])

                        # Linear 1x1 projection back to the residual width
                        conv2 = inceprv2_builder.Conv2d_layer(
                            concat1,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=1152,
                            Batch_norm=True,
                            Activation=False)  #paper discrepancy filter = 1154
                        conv2_scale = inceprv2_builder.Scale_activations(
                            conv2, scaling_factor=0.4)

                        residual_out = inceprv2_builder.Residual_connect(
                            [input, conv2_scale])

                        return residual_out

                def reduction_B(input):
                    """Reduction-B block: three strided conv branches and a
                    pooling branch, concatenated to halve resolution."""
                    with tf.name_scope('Reduction_B') as scope:
                        conv1a_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=256,
                            Batch_norm=True)
                        conv2a_split1 = inceprv2_builder.Conv2d_layer(
                            conv1a_split1,
                            stride=[1, 1, 1, 1],
                            filters=288,
                            Batch_norm=True)
                        conv3a_split1 = inceprv2_builder.Conv2d_layer(
                            conv2a_split1,
                            stride=[1, 2, 2, 1],
                            filters=384,
                            padding='VALID',
                            Batch_norm=True)

                        conv1b_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=256,
                            Batch_norm=True)
                        conv2b_split1 = inceprv2_builder.Conv2d_layer(
                            conv1b_split1,
                            stride=[1, 2, 2, 1],
                            filters=256,
                            padding='VALID',
                            Batch_norm=True)

                        conv1c_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=256,
                            Batch_norm=True)
                        conv2c_split1 = inceprv2_builder.Conv2d_layer(
                            conv1c_split1,
                            stride=[1, 2, 2, 1],
                            filters=256,
                            padding='VALID',
                            Batch_norm=True)

                        pool1d_split1 = inceprv2_builder.Pool_layer(
                            input, padding='VALID')

                        concat = inceprv2_builder.Concat([
                            conv3a_split1, conv2b_split1, conv2c_split1,
                            pool1d_split1
                        ])
                        return concat

                def inception_resnet_C(input):
                    """Inception-ResNet-C block: factorized 1x3/3x1 branch
                    plus a 1x1 branch, projected and residually added."""
                    with tf.name_scope('Inception_Resnet_C') as scope:
                        # Branch a: 1x1 -> 1x3 -> 3x1 (factorized 3x3)
                        conv1a_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=192,
                            Batch_norm=True)
                        conv2a_split1 = inceprv2_builder.Conv2d_layer(
                            conv1a_split1,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 3],
                            filters=224,
                            Batch_norm=True)
                        conv3a_split1 = inceprv2_builder.Conv2d_layer(
                            conv2a_split1,
                            stride=[1, 1, 1, 1],
                            k_size=[3, 1],
                            filters=256,
                            Batch_norm=True)

                        # Branch b: plain 1x1
                        conv1b_split1 = inceprv2_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=192,
                            Batch_norm=True)

                        concat1 = inceprv2_builder.Concat(
                            [conv3a_split1, conv1b_split1])

                        # Linear 1x1 projection back to the residual width
                        conv2 = inceprv2_builder.Conv2d_layer(
                            concat1,
                            stride=[1, 1, 1, 1],
                            k_size=[1, 1],
                            filters=2048,
                            Batch_norm=True,
                            Activation=False)
                        # Uses the builder's default scaling factor
                        conv2_scale = inceprv2_builder.Scale_activations(conv2)

                        residual_out = inceprv2_builder.Residual_connect(
                            [input, conv2_scale])

                        return residual_out

                # Model Construction

                # Stem
                model_stem = stem(self.input_placeholder)
                # 5x Inception Resnet A
                inception_A1 = inception_resnet_A(model_stem)
                inception_A2 = inception_resnet_A(inception_A1)
                inception_A3 = inception_resnet_A(inception_A2)
                inception_A4 = inception_resnet_A(inception_A3)
                inception_A5 = inception_resnet_A(inception_A4)
                self.Endpoints['Block_A'] = inception_A5
                # Reduction A
                model_reduction_A = reduction_A(inception_A5)
                # 10X Inception Resnet B
                inception_B1 = inception_resnet_B(
                    model_reduction_A
                )  # Don't know if i'm missing something or now, but reduction A's output for inception resnetv2 is a tensor of depth 1152
                inception_B2 = inception_resnet_B(inception_B1)
                inception_B3 = inception_resnet_B(inception_B2)
                inception_B4 = inception_resnet_B(inception_B3)
                inception_B5 = inception_resnet_B(inception_B4)
                inception_B6 = inception_resnet_B(inception_B5)
                inception_B7 = inception_resnet_B(inception_B6)
                inception_B8 = inception_resnet_B(inception_B7)
                inception_B9 = inception_resnet_B(inception_B8)
                inception_B10 = inception_resnet_B(inception_B9)
                self.Endpoints['Block_B'] = inception_B10
                # Reduction B
                model_reduction_B = reduction_B(inception_B10)
                # 5X Inception Resnet C
                inception_C1 = inception_resnet_C(model_reduction_B)
                inception_C2 = inception_resnet_C(inception_C1)
                inception_C3 = inception_resnet_C(inception_C2)
                inception_C4 = inception_resnet_C(inception_C3)
                inception_C5 = inception_resnet_C(inception_C4)
                self.Endpoints['Block_C'] = inception_C5
                # Average Pooling
                # 8x8 AVG pool assumes the feature map is 8x8 at this point
                # for the canonical 299x299 input -- TODO confirm for other sizes
                average_pooling = inceprv2_builder.Pool_layer(
                    inception_C5,
                    k_size=[1, 8, 8, 1],
                    stride=[1, 8, 8, 1],
                    padding='SAME',
                    pooling_type='AVG')
                # Dropout
                drop1 = inceprv2_builder.Dropout_layer(average_pooling)
                # Output
                output = inceprv2_builder.FC_layer(
                    drop1, filters=self.build_params['Classes'], readout=True)

                return output
# Example #7 (score: 0)
    def build_net(self):
        """Construct a U-Net-style segmentation graph for 1024x1024 inputs.

        Builds a 7-stage encoder, a densely dilated bottleneck
        (``Center_pool``), and a 7-stage decoder using the layer factory
        provided by ``Builder``. Returns the final 1-channel logits tensor
        (no activation); no loss or training ops are created here.
        """
        with tf.name_scope('Unet1024'):
            with Builder(**self.build_params) as unet_res_builder:

                #Setting control params
                # Wire the dropout-probability and train/test-state
                # placeholders into every layer this builder creates.
                unet_res_builder.control_params(
                    Dropout_control=self.dropout_placeholder,
                    State=self.state_placeholder)

                def stack_encoder(input, out_filters):
                    # One encoder stage: ReLU followed by two 3x3
                    # batch-normalized convolutions at `out_filters` depth.
                    # Spatial size is unchanged; downsampling is done by the
                    # Pool_layer calls at the call sites below.
                    with tf.name_scope('Encoder'):
                        input = unet_res_builder.Relu(input)

                        #conv1a_split1 = unet_res_builder.Conv2d_layer(input, stride=[1, 1, 1, 1], k_size=[1, 1], filters=out_filters, Activation=False, Batch_norm=True)

                        conv1 = unet_res_builder.Conv2d_layer(
                            input,
                            stride=[1, 1, 1, 1],
                            k_size=[3, 3],
                            filters=out_filters,
                            Batch_norm=True)
                        conv2 = unet_res_builder.Conv2d_layer(
                            conv1,
                            stride=[1, 1, 1, 1],
                            k_size=[3, 3],
                            filters=out_filters,
                            Batch_norm=True)

                        #res_connect = unet_res_builder.Residual_connect([conv1a_split1, conv2b_split1])

                        return conv2

                def stack_decoder(input,
                                  encoder_connect,
                                  out_filters,
                                  output_shape,
                                  infilter=None):
                    # One decoder stage: 2x learned upsample of `input`
                    # followed by two 3x3 batch-normalized convolutions.
                    with tf.name_scope('Decoder'):
                        # Static shape of the skip tensor, batch dim dropped;
                        # after pop(2) this holds [H, W] and res_filters holds
                        # the channel count.
                        encoder_connect_shape = encoder_connect.get_shape(
                        ).as_list()
                        del encoder_connect_shape[0]
                        res_filters = encoder_connect_shape.pop(2)

                        if infilter is not None:
                            res_filters = infilter
                        # NOTE(review): res_filters is computed (and optionally
                        # overridden by infilter) but never used by the active
                        # code path below — only by the commented-out
                        # Upconv/Conv_Resize variants. Confirm whether the
                        # infilter parameter is still meaningful.
                        #upscale_input = unet_res_builder.Upconv_layer(input, stride=[1, 2, 2, 1], filters=res_filters, Batch_norm=True, output_shape=output_shape) #change_filters to match encoder_connect filters
                        #upscale_input = unet_res_builder.Conv_Resize_layer(input, stride=[1,2,2,1], filters=res_filters, Batch_norm=True)
                        upscale_input = unet_res_builder.Conv_Resize_layer(
                            input,
                            k_size=[3, 3],
                            output_scale=2,
                            Batch_norm=True,
                            filters=out_filters)
                        uconnect = unet_res_builder.Concat(
                            [encoder_connect, upscale_input])
                        # NOTE(review): `uconnect` (the skip concatenation) is
                        # built but conv2 below consumes `upscale_input`
                        # instead, so the encoder skip connection is currently
                        # dead — verify whether conv2 should take `uconnect`.
                        #conv1 = unet_res_builder.Conv2d_layer(uconnect, stride=[1, 1, 1, 1], k_size=[3, 3], filters=out_filters, Batch_norm=True)
                        conv2 = unet_res_builder.Conv2d_layer(
                            upscale_input,
                            stride=[1, 1, 1, 1],
                            k_size=[3, 3],
                            filters=out_filters,
                            Batch_norm=True)
                        conv3 = unet_res_builder.Conv2d_layer(
                            conv2,
                            stride=[1, 1, 1, 1],
                            k_size=[3, 3],
                            filters=out_filters,
                            Batch_norm=True)
                        return conv3

                def Center_pool(input, filters=768):
                    ''' Dense dialations '''
                    # Bottleneck: six dilated convolutions with rates
                    # 1, 2, 4, 8, 16, 32. Each conv sees the concatenation of
                    # the block input and ALL previous dilated outputs
                    # (dense connectivity), and the final concatenation is
                    # scaled by 0.9 before being returned.
                    with tf.name_scope('Dense_Dialated_Center'):
                        Dconv1 = unet_res_builder.DConv_layer(input,
                                                              filters=filters,
                                                              Batch_norm=True,
                                                              D_rate=1,
                                                              Activation=False)
                        Dense_connect1 = unet_res_builder.Concat(
                            [input, Dconv1])

                        Dconv2 = unet_res_builder.DConv_layer(Dense_connect1,
                                                              filters=filters,
                                                              Batch_norm=True,
                                                              D_rate=2,
                                                              Activation=False)
                        Dense_connect2 = unet_res_builder.Concat(
                            [input, Dconv1, Dconv2])

                        Dconv4 = unet_res_builder.DConv_layer(Dense_connect2,
                                                              filters=filters,
                                                              Batch_norm=True,
                                                              D_rate=4,
                                                              Activation=False)
                        Dense_connect3 = unet_res_builder.Concat(
                            [input, Dconv1, Dconv2, Dconv4])

                        Dconv8 = unet_res_builder.DConv_layer(Dense_connect3,
                                                              filters=filters,
                                                              Batch_norm=True,
                                                              D_rate=8,
                                                              Activation=False)
                        Dense_connect4 = unet_res_builder.Concat(
                            [input, Dconv1, Dconv2, Dconv4, Dconv8])

                        Dconv16 = unet_res_builder.DConv_layer(
                            Dense_connect4,
                            filters=filters,
                            Batch_norm=True,
                            D_rate=16,
                            Activation=False)
                        Dense_connect5 = unet_res_builder.Concat(
                            [input, Dconv1, Dconv2, Dconv4, Dconv8, Dconv16])

                        Dconv32 = unet_res_builder.DConv_layer(
                            Dense_connect5,
                            filters=filters,
                            Batch_norm=True,
                            D_rate=32,
                            Activation=False)
                        Dense_connect6 = unet_res_builder.Concat([
                            input, Dconv1, Dconv2, Dconv4, Dconv8, Dconv16,
                            Dconv32
                        ])

                        Scale_output = unet_res_builder.Scale_activations(
                            Dense_connect6, scaling_factor=0.9)

                        return Scale_output

                #Build Encoder
                # Each Pool_layer halves the spatial size; the trailing
                # comments give the resulting resolution (assumes 1024x1024
                # input — TODO confirm against self.input_placeholder).

                Encoder1 = stack_encoder(self.input_placeholder, 24)
                Pool1 = unet_res_builder.Pool_layer(Encoder1)  #512

                Encoder2 = stack_encoder(Pool1, 64)
                Pool2 = unet_res_builder.Pool_layer(Encoder2)  #256

                Encoder3 = stack_encoder(Pool2, 128)
                Pool3 = unet_res_builder.Pool_layer(Encoder3)  #128

                Encoder4 = stack_encoder(Pool3, 256)
                Pool4 = unet_res_builder.Pool_layer(Encoder4)  #64

                Encoder5 = stack_encoder(Pool4, 512)
                Pool5 = unet_res_builder.Pool_layer(Encoder5)  #32

                Encoder6 = stack_encoder(Pool5, 768)
                Pool6 = unet_res_builder.Pool_layer(Encoder6)  #16

                Encoder7 = stack_encoder(Pool6, 768)
                Pool7 = unet_res_builder.Pool_layer(Encoder7)  #8

                #Center
                #Conv_center = unet_res_builder.Conv2d_layer(Pool7, stride=[1, 1, 1, 1], filters=768, Batch_norm=True, padding='SAME')
                Conv_center = Center_pool(Pool7)
                #Pool_center = unet_res_builder.Pool_layer(Conv_center) #8
                #Build Decoder
                # Decoder mirrors the encoder; each stage doubles resolution
                # back up to the full input size.
                Decode1 = stack_decoder(Conv_center,
                                        Encoder7,
                                        out_filters=768,
                                        output_shape=[16, 16])
                Decode2 = stack_decoder(Decode1,
                                        Encoder6,
                                        out_filters=768,
                                        output_shape=[32, 32])
                Decode3 = stack_decoder(Decode2,
                                        Encoder5,
                                        out_filters=512,
                                        output_shape=[64, 64],
                                        infilter=768)
                Decode4 = stack_decoder(Decode3,
                                        Encoder4,
                                        out_filters=256,
                                        output_shape=[128, 128],
                                        infilter=512)
                Decode5 = stack_decoder(Decode4,
                                        Encoder3,
                                        out_filters=128,
                                        output_shape=[256, 256],
                                        infilter=256)
                Decode6 = stack_decoder(Decode5,
                                        Encoder2,
                                        out_filters=64,
                                        output_shape=[512, 512],
                                        infilter=128)
                Decode7 = stack_decoder(Decode6,
                                        Encoder1,
                                        out_filters=24,
                                        output_shape=[1024, 1024],
                                        infilter=64)
                # Final 1x1 projection to a single logit channel; no
                # activation so a sigmoid/loss can be applied downstream.
                output = unet_res_builder.Conv2d_layer(
                    Decode7,
                    stride=[1, 1, 1, 1],
                    filters=1,
                    Batch_norm=True,
                    k_size=[1, 1],
                    Activation=False)  #output
                return output
# Example #8 (score: 0)
def Build_Fnet(kwargs):
    """Build the F-Net segmentation graph (full-resolution residual network).

    Creates all placeholders, a two-stream (residual + pooling) FRRN-style
    body, weighted-BCE / Dice / Huber losses, and exports every handle via
    ``tf.add_to_collection`` under names prefixed with
    ``kwargs['Model_name']``. Returns the string ``'Segmentation'`` as the
    model-type tag; the graph itself is retrieved through the collections.
    """
    with tf.name_scope('F_Net'):
        with Builder(**kwargs) as frnn_c_builder:
            frnn_c_builder.BNscope = 50
            # Flattened image input, ground-truth mask and per-pixel weight
            # placeholders; the mask/weight are single-channel (H*W).
            input_placeholder = tf.placeholder(tf.float32, \
                shape=[None, kwargs['Image_width']*kwargs['Image_height']*kwargs['Image_cspace']], name='Input')
            output_placeholder = tf.placeholder(tf.float32, \
                shape=[None, kwargs['Image_width']*kwargs['Image_height']], name='Mask')
            weight_placeholder = tf.placeholder(tf.float32, \
                shape=[None, kwargs['Image_width']*kwargs['Image_height']], name='Weight')
            dropout_prob_placeholder = tf.placeholder(tf.float32,
                                                      name='Dropout')
            state_placeholder = tf.placeholder(tf.string, name="State")
            input_reshape = frnn_c_builder.Reshape_input(input_placeholder, \
                width=kwargs['Image_width'], height=kwargs['Image_height'], colorspace= kwargs['Image_cspace'])
            #Build P-Net output + Input
            # A prior image is loaded from disk at feed time, resized to the
            # network resolution, heavily attenuated (x1e-4) and added to the
            # input as a weak spatial prior.
            prior_image_path = tf.placeholder(tf.string)
            prior_image = tf.image.decode_image(tf.read_file(prior_image_path))
            prior_image.set_shape([900, 900, 1])
            prior_image = tf.image.convert_image_dtype(
                tf.image.resize_images(
                    prior_image,
                    size=[kwargs['Image_height'], kwargs['Image_width']]),
                tf.float32)
            # Replicate the single channel 3x, then squeeze/expand back to a
            # batched NHWC tensor. NOTE(review): this assumes
            # kwargs['Image_cspace'] == 3 so the addition below broadcasts —
            # confirm for other colorspaces.
            prior_image = tf.stack([prior_image, prior_image, prior_image],
                                   axis=2)
            prior_image = tf.multiply(prior_image, 0.0001)
            prior_image = tf.expand_dims(tf.squeeze(prior_image), 0)
            input_reshape = input_reshape + prior_image

            # Wire dropout and train/test state into all builder layers.
            frnn_c_builder.control_params(
                Dropout_control=dropout_prob_placeholder,
                State=state_placeholder)

            #Construct functional building blocks
            def RU(input, filters):
                # Residual Unit: two 3x3 convs (second without activation)
                # plus an identity skip connection.
                with tf.name_scope('Residual_Unit'):
                    Conv1 = frnn_c_builder.Conv2d_layer(input,
                                                        stride=[1, 1, 1, 1],
                                                        filters=filters,
                                                        Batch_norm=True)
                    Conv2 = frnn_c_builder.Conv2d_layer(Conv1,
                                                        stride=[1, 1, 1, 1],
                                                        filters=filters,
                                                        Batch_norm=True,
                                                        Activation=False)

                    return frnn_c_builder.Residual_connect([input, Conv2])

            def FRRU(Residual_stream,
                     Pooling_stream,
                     scale_factor,
                     filters,
                     res_filters=32,
                     D_rate=1,
                     k_size=[3, 3]):
                # Full-Resolution Residual Unit: downsample the residual
                # stream by `scale_factor`, fuse it with the pooling stream
                # through two dilated convs, then project (1x1) and upsample
                # back to add onto the residual stream.
                with tf.name_scope('Full_Resolution_Unit'):
                    scale_dims = [1, scale_factor, scale_factor, 1]
                    Pool = frnn_c_builder.Pool_layer(Residual_stream,
                                                     k_size=scale_dims,
                                                     stride=scale_dims)

                    Concat = frnn_c_builder.Concat([Pool, Pooling_stream])
                    Conv1 = frnn_c_builder.DConv_layer(Concat,
                                                       filters=filters,
                                                       Batch_norm=True,
                                                       D_rate=D_rate)
                    Conv2 = frnn_c_builder.DConv_layer(Conv1,
                                                       filters=filters,
                                                       Batch_norm=True,
                                                       D_rate=D_rate)
                    # 1x1 bottleneck down to the residual stream's depth
                    # before upsampling.
                    Conv3 = frnn_c_builder.Conv2d_layer(Conv2,
                                                        k_size=[1, 1],
                                                        stride=[1, 1, 1, 1],
                                                        filters=res_filters,
                                                        Activation=False,
                                                        Batch_norm=True)

                    Unpool = frnn_c_builder.Conv_Resize_layer(
                        Conv3,
                        k_size=k_size,
                        output_scale=scale_factor,
                        Batch_norm=False)

                Residual_stream_out = frnn_c_builder.Residual_connect(
                    [Residual_stream, Unpool], Activation=False)
                # The pooling stream continues from Conv2 (pre-projection).
                Pooling_stream_out = Conv2
                return Residual_stream_out, Pooling_stream_out

            #Model Construction
            with tf.name_scope('F-Net'):
                # Stem: 3x3 conv + one residual unit at full resolution.
                Stem = frnn_c_builder.Conv2d_layer(input_reshape,
                                                   stride=[1, 1, 1, 1],
                                                   k_size=[3, 3],
                                                   filters=64,
                                                   Batch_norm=True)
                Stem = RU(Stem, 64)

                # Residual stream stays at full resolution with 32 channels;
                # pooling stream starts two pools (4x) below it.
                Residual_stream = frnn_c_builder.Conv2d_layer(
                    Stem,
                    stride=[1, 1, 1, 1],
                    k_size=[1, 1],
                    filters=32,
                    Batch_norm=True)
                Stem_pool = frnn_c_builder.Pool_layer(Stem)
                Stem_pool = RU(Stem_pool, 64)
                Pooling_stream = frnn_c_builder.Pool_layer(Stem_pool)

                #Encoder
                # Seven FRRUs at a fixed 4x scale gap, with growing dilation
                # rates (1, 2, 6, 12, 18, 24, 32) and widths (64 -> 256).
                scale_factor = 4
                Residual_stream, Pooling_stream = FRRU(
                    Residual_stream=Residual_stream,
                    Pooling_stream=Pooling_stream,
                    scale_factor=scale_factor,
                    filters=64)
                Residual_stream, Pooling_stream = FRRU(
                    Residual_stream=Residual_stream,
                    Pooling_stream=Pooling_stream,
                    scale_factor=scale_factor,
                    filters=64,
                    D_rate=2)
                Residual_stream, Pooling_stream = FRRU(
                    Residual_stream=Residual_stream,
                    Pooling_stream=Pooling_stream,
                    scale_factor=scale_factor,
                    filters=64,
                    D_rate=6)
                Residual_stream, Pooling_stream = FRRU(
                    Residual_stream=Residual_stream,
                    Pooling_stream=Pooling_stream,
                    scale_factor=scale_factor,
                    filters=128,
                    D_rate=12)
                Residual_stream, Pooling_stream = FRRU(
                    Residual_stream=Residual_stream,
                    Pooling_stream=Pooling_stream,
                    scale_factor=scale_factor,
                    filters=128,
                    D_rate=18)
                Residual_stream, Pooling_stream = FRRU(
                    Residual_stream=Residual_stream,
                    Pooling_stream=Pooling_stream,
                    scale_factor=scale_factor,
                    filters=256,
                    D_rate=24)
                Residual_stream, Pooling_stream = FRRU(
                    Residual_stream=Residual_stream,
                    Pooling_stream=Pooling_stream,
                    scale_factor=scale_factor,
                    filters=256,
                    D_rate=32)
                # Upsample the pooling stream back to full resolution (two
                # 2x resizes) and merge it with the residual stream.
                Pooling_stream = frnn_c_builder.Conv_Resize_layer(
                    Pooling_stream, k_size=[3, 3], Batch_norm=True, filters=64)
                Pooling_stream = RU(Pooling_stream, 64)
                Pooling_stream = frnn_c_builder.Conv_Resize_layer(
                    Pooling_stream, k_size=[3, 3], Batch_norm=True)
                RP_stream_merge = frnn_c_builder.Concat(
                    [Pooling_stream, Residual_stream])
                Conv3 = frnn_c_builder.Conv2d_layer(RP_stream_merge,
                                                    stride=[1, 1, 1, 1],
                                                    k_size=[1, 1],
                                                    filters=64,
                                                    Batch_norm=True)
                Res_connect = frnn_c_builder.Residual_connect([Stem, Conv3])
                Res_connect = RU(Res_connect, 64)

            # Final 1x1 projection to a single logit channel (no activation).
            output = frnn_c_builder.Conv2d_layer(Res_connect,
                                                 filters=1,
                                                 stride=[1, 1, 1, 1],
                                                 k_size=[1, 1],
                                                 Batch_norm=False,
                                                 Activation=False)
            weights = tf.reshape(
                weight_placeholder,
                shape=[-1, kwargs['Image_width'] * kwargs['Image_height']])
            logits = tf.reshape(
                output,
                shape=[-1, kwargs['Image_width'] * kwargs['Image_height']])

            #Add loss and debug
            '''
                with tf.name_scope('Focal_Loss'):
                    
                    P = tf.minimum(tf.nn.sigmoid(logits)+1e-4,1.0) #safe for log sigmoid
                    F1= -output_placeholder*tf.pow(1-P,5)*tf.log(P) -(1-output_placeholder)*tf.pow(P,5)*tf.log(1-P+1e-4)
                    tf.summary.image('FOCAL Loss', tf.reshape(F1,[1, 1024, 1024, 1]))
                    F1_count = tf.count_nonzero(tf.maximum(F1-0.05,0))
                    #final_focal_loss = tf.multiply(tf.reduce_mean(F1),1)
                    final_focal_loss = tf.multiply(tf.reduce_sum(F1)/ tf.to_float(tf.maximum(F1_count, 1024*5)), 1)
                    tf.summary.scalar('Count focal loss', F1_count)
                    tf.summary.scalar('Focal losssum ', tf.reduce_sum(F1))
                    #focal_loss = tf.multiply(tf.multiply(Y, tf.square(1 - P)),L) + tf.multiply(tf.multiply(1-Y, tf.square(P)),max_x+L)
                    #final_focal_loss = tf.reduce_mean(focal_loss)
                    #eps = tf.constant(value=1e-5)
                    #sigmoid = tf.nn.sigmoid(logits) + eps
                '''

            with tf.name_scope('BCE_Loss'):
                # Hard-example-weighted binary cross-entropy: positive pixels
                # whose predicted probability is below `Threshold` get weight
                # 1 via the floor() masks, confident positives get 0.
                offset = 1e-5
                Threshold = 0.8
                Probs = tf.nn.sigmoid(logits)
                Wmask = 1 - Probs + Threshold
                # NOTE(review): Wmask_con (hard-negative mask) is computed but
                # never used — W_I below weights negatives with
                # (1 - output_placeholder) instead. Confirm whether Wmask_con
                # was meant to replace it.
                Wmask_con = tf.floor(1 - Wmask) * (1 - output_placeholder)
                Wmask = tf.floor(Wmask) * output_placeholder
                Probs_processed = tf.clip_by_value(Probs, offset, 1.0)
                Con_Probs_processed = tf.clip_by_value(1 - Probs, offset, 1.0)
                W_I = (-Wmask * tf.log(Probs_processed) -
                       (1 - output_placeholder) * tf.log(Con_Probs_processed))
                # +100 in the denominator keeps the loss finite when no
                # positive pixel is selected by the mask.
                Weighted_BCE_loss = tf.reduce_sum(W_I) / (
                    tf.reduce_sum(Wmask) + 100)
                EU_loss = tf.losses.huber_loss(output_placeholder, Probs)

            #Dice Loss
            with tf.name_scope('Dice_Loss'):

                # Soft Dice: 2*|A∩B| / (|A|+|B|), computed per sample then
                # averaged; eps/1e-5 guard against empty masks.
                eps = tf.constant(value=1e-5, name='eps')
                sigmoid = tf.nn.sigmoid(output, name='sigmoid') + eps
                sigmoid = tf.reshape(
                    sigmoid,
                    shape=[-1, kwargs['Image_width'] * kwargs['Image_height']])
                intersection = sigmoid * output_placeholder
                union = tf.reduce_sum(intersection, 1) / (tf.reduce_sum(
                    sigmoid, 1, name='reduce_sigmoid') + tf.reduce_sum(
                        output_placeholder, 1, name='reduce_mask') + 1e-5)
                Dice_loss = 2. * (union)
                Dice_loss = 1 - tf.reduce_mean(Dice_loss, name='diceloss')

            #Graph Exports
            # All graph handles are published through named collections so
            # training/inference code can retrieve them without Python refs.
            tf.add_to_collection(kwargs['Model_name'] + '_Input_ph',
                                 input_placeholder)
            tf.add_to_collection(kwargs['Model_name'] + '_Input_reshape',
                                 input_reshape)
            tf.add_to_collection(kwargs['Model_name'] + '_Weight_ph',
                                 weight_placeholder)
            tf.add_to_collection(kwargs['Model_name'] + '_Output_ph',
                                 output_placeholder)
            tf.add_to_collection(kwargs['Model_name'] + '_Output', output)
            tf.add_to_collection(kwargs['Model_name'] + '_Dropout_prob_ph',
                                 dropout_prob_placeholder)
            tf.add_to_collection(kwargs['Model_name'] + '_State',
                                 state_placeholder)
            # Three losses share the '_Loss' collection, in this order:
            # weighted BCE, Dice, Huber.
            tf.add_to_collection(kwargs['Model_name'] + '_Loss',
                                 Weighted_BCE_loss)
            tf.add_to_collection(kwargs['Model_name'] + '_Loss', Dice_loss)
            tf.add_to_collection(kwargs['Model_name'] + '_Loss', EU_loss)
            tf.add_to_collection(kwargs['Model_name'] + '_Prior_path',
                                 prior_image_path)

            #Graph Summaries
            if kwargs['Summary']:
                tf.summary.image('PI', prior_image)
                #frnn_c_builder.variable_summaries(sigmoid, name='logits')
                #tf.add_to_collection(kwargs['Model_name'] + '_Loss', final_focal_loss)
                tf.summary.scalar('WBCE loss', Weighted_BCE_loss)
                tf.summary.image(
                    'WCBE',
                    tf.reshape(
                        W_I,
                        [1, kwargs['Image_width'], kwargs['Image_height'], 1]))
                tf.summary.image(
                    'mask',
                    tf.reshape(
                        Wmask,
                        [1, kwargs['Image_width'], kwargs['Image_height'], 1]))
                #tf.summary.scalar('Count WCBE loss', W_I_count)
                #tf.summary.scalar('WCBE losssum ', tf.reduce_sum(W_I))
                #tf.summary.scalar('Dice loss', Dice_loss)
                #tf.summary.scalar('Focal loss', final_focal_loss)
            # Model-type tag consumed by the caller; the graph itself lives
            # in the collections above.
            return 'Segmentation'
# Example #9 (score: 0)
def Build_Im2txt(kwargs):
    '''Build the IM2TXT (Show-and-Tell style) image-captioning graph.

    Attaches an Inception-Resnet-v2a feature extractor, embeds its output
    and the input word sequence, runs them through a layer-normalized LSTM
    and, in 'Train' state, adds a masked softmax cross-entropy sequence
    loss. Placeholders and key tensors are registered in tf collections
    keyed by kwargs['Model_name'] so callers can look them up by name.

    Args:
        kwargs: build-parameter dict. Uses 'State' ('Train' or 'Test'),
            'Padded_length', 'Model_name', plus whatever Builder and
            Build_Inception_Resnet_v2a consume.

    Returns:
        The string 'Sequence' (network-type tag used by the framework).
    '''
    with tf.name_scope('IM2TXT'):
        with Builder(**kwargs) as im2txt_builder:
            # Sequence placeholders: whole padded sequences for training,
            # one token per step for test-time decoding.
            # NOTE: comparisons use '==' — the original used 'is', which
            # compares identity and only works by interning accident.
            if kwargs['State'] == 'Train':
                input_seq_placeholder = tf.placeholder(tf.int32, shape=[None, kwargs['Padded_length']], name='Input_Seq')
                target_seq_placeholder = tf.placeholder(tf.int32, shape=[None, kwargs['Padded_length']], name='Target_Seq')
            elif kwargs['State'] == 'Test':
                input_seq_placeholder = tf.placeholder(tf.int32, shape=[None, 1], name='Input_Seq')
                target_seq_placeholder = tf.placeholder(tf.int32, shape=[None, 1], name='Target_Seq')

            mask_placeholder = tf.placeholder(tf.int32, shape=[None, kwargs['Padded_length']], name='Seq_Mask')
            # Currently unused; kept so the constructed graph is unchanged.
            Lstm_state_placeholder = tf.placeholder(tf.float32, shape=[])

            # Show-and-Tell initializer range (+-0.08 uniform).
            initalizer = tf.random_uniform_initializer(minval=-0.08, maxval=0.08)

            # Build the feature extractor; it registers its tensors in
            # collections which we pull out below.
            Build_Inception_Resnet_v2a(kwargs)

            with tf.name_scope('Feature_Extractor'):
                inception_output = tf.get_collection(kwargs['Model_name'] + '_Incepout')[0]
                inception_state = tf.get_collection(kwargs['Model_name'] + '_State')[0]
                inception_dropout = tf.get_collection(kwargs['Model_name'] + '_Dropout_prob_ph')[0]

            # Share dropout/state control with the feature extractor.
            im2txt_builder.control_params(Dropout_control=inception_dropout, State=inception_state)

            # Image and word embeddings.
            with tf.name_scope('Lstm_Embeddings'):
                image_embeddings = im2txt_builder.FC_layer(inception_output, filters=512)
                image_embeddings_size = tf.shape(image_embeddings)
                # Vocab of 40 tokens mapped to 512-d embeddings.
                # TODO(review): vocab size is hard-coded here and in the
                # readout FC below — consider parameterizing via kwargs.
                embeddings_map = tf.get_variable(name='Map', shape=[40, 512], initializer=initalizer)
                seq_embeddings = tf.nn.embedding_lookup(embeddings_map, input_seq_placeholder)

                lstm_cell = im2txt_builder.Lstm_cell_LayerNorm()

            with tf.variable_scope("lstm") as lstm_scope:
                # Prime the LSTM with the image embedding to obtain the
                # initial state; subsequent steps reuse the variables.
                zero_state = lstm_cell.zero_state(batch_size=image_embeddings_size[0], dtype=tf.float32)
                _, initial_state = lstm_cell(image_embeddings, zero_state)

                lstm_scope.reuse_variables()
                if kwargs['State'] == 'Test':
                    # Single-step decode: LSTM state is fed and fetched
                    # through placeholders between session runs.
                    state_feed = tf.placeholder(dtype=tf.float32, shape=[None, sum(lstm_cell.state_size)], name='State_feed')
                    state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
                    lstm_outputs, state_tuple = lstm_cell(inputs=tf.squeeze(seq_embeddings, axis=[1]), state=state_tuple)
                    concat_input = tf.concat(values=initial_state, axis=1)
                    concat_state = tf.concat(values=state_tuple, axis=1)

                elif kwargs['State'] == 'Train':
                    # Number of valid (unpadded) tokens per sequence.
                    sequence_length = tf.reduce_sum(mask_placeholder, 1)
                    lstm_outputs, _ = tf.nn.dynamic_rnn(cell=lstm_cell, inputs=seq_embeddings, sequence_length=sequence_length, initial_state=initial_state, dtype=tf.float32, scope=lstm_scope)

                with tf.name_scope('Lstm_output'):
                    # Flatten time dimension so a single FC produces
                    # per-token logits.
                    lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size])
                    logits = im2txt_builder.FC_layer(lstm_outputs, filters=40, readout=True)

                with tf.name_scope('Lstm_loss'):
                    if kwargs['State'] == 'Train':
                        targets = tf.reshape(target_seq_placeholder, [-1])  #flattening target seqs
                        weights = tf.to_float(tf.reshape(mask_placeholder, [-1]))

                        with tf.name_scope('Softmax_CE_loss'):
                            seq_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=logits)
                            # Masked mean over valid tokens; max(sum, 1)
                            # guards against an all-zero mask.
                            batch_loss = tf.div(tf.reduce_sum(tf.multiply(seq_loss, weights)), tf.maximum(tf.reduce_sum(weights), 1))

                # Register graph handles for retrieval by name.
                tf.add_to_collection(kwargs['Model_name'] + '_Input_seq_ph', input_seq_placeholder)
                tf.add_to_collection(kwargs['Model_name'] + '_Output_ph', target_seq_placeholder)
                tf.add_to_collection(kwargs['Model_name'] + '_Mask_ph', mask_placeholder)
                tf.add_to_collection(kwargs['Model_name'] + '_Output', logits)

                if kwargs['State'] == 'Test':
                    tf.add_to_collection(kwargs['Model_name'] + '_Initial_state', concat_input)
                    tf.add_to_collection(kwargs['Model_name'] + '_Lstm_state_feed', state_feed)
                    tf.add_to_collection(kwargs['Model_name'] + '_Lstm_state', concat_state)

                elif kwargs['State'] == 'Train':
                    tf.add_to_collection(kwargs['Model_name'] + '_Loss', batch_loss)

                return 'Sequence'
예제 #10
0
    def build_net(self):
        '''Build the Pnet prior-map generator graph.

        A small ASPP-style network: a strided stem followed by four
        parallel, densely connected dilated-conv branches (dilation rates
        4, 8, 16 and 32). Each branch layer consumes the concatenation of
        all previous layers in its branch plus the stem output. Branch
        outputs are concatenated, fused with a 1x1 conv, upsampled, and
        projected to a single-channel prior map.

        Returns:
            The 1-channel output tensor (no activation applied).
        '''
        with tf.name_scope('Pnet'):
            with Builder(**self.build_params) as Pnet_builder:
                #Setting control params
                Pnet_builder.control_params(
                    Dropout_control=self.dropout_placeholder,
                    State=self.state_placeholder)

                #Stem: 2x spatial downsample, then a rate-2 dilated conv
                #whose output (conv3) is shared by every ASPP branch below
                conv1 = Pnet_builder.Conv2d_layer(self.input_placeholder,
                                                  stride=[1, 2, 2, 1],
                                                  filters=32,
                                                  Batch_norm=True)  #512
                conv3 = Pnet_builder.DConv_layer(conv1,
                                                 filters=32,
                                                 Batch_norm=True,
                                                 D_rate=2)

                #ASPP
                #Branch 1: dilation rate 4, dense skip connections
                conv1_rate4 = Pnet_builder.DConv_layer(conv3,
                                                       filters=32,
                                                       Batch_norm=True,
                                                       D_rate=4)
                R_conn = Pnet_builder.Concat([conv1_rate4, conv3])
                conv1_rate4a = Pnet_builder.DConv_layer(R_conn,
                                                        filters=32,
                                                        Batch_norm=True,
                                                        D_rate=4)
                R_conn1 = Pnet_builder.Concat(
                    [conv1_rate4, conv1_rate4a, conv3])
                conv1_rate4b = Pnet_builder.DConv_layer(
                    R_conn1, filters=32, Batch_norm=True,
                    D_rate=4)  #Embedd layers into final net #248
                R_conn2 = Pnet_builder.Concat(
                    [conv1_rate4, conv1_rate4a, conv1_rate4b, conv3])
                conv2_rate4 = Pnet_builder.DConv_layer(
                    R_conn2,
                    filters=32,
                    Batch_norm=True,
                    D_rate=4,
                    name='Embedd4')  #Embedd layers into final net #248
                conv2_rate4 = Pnet_builder.Concat([
                    conv1_rate4, conv1_rate4a, conv1_rate4b, conv2_rate4, conv3
                ])

                #Branch 2: dilation rate 8, same dense pattern
                conv1_rate8 = Pnet_builder.DConv_layer(conv3,
                                                       filters=32,
                                                       Batch_norm=True,
                                                       D_rate=8)
                R_conn18 = Pnet_builder.Concat([conv1_rate8, conv3])
                conv1_rate8a = Pnet_builder.DConv_layer(R_conn18,
                                                        filters=32,
                                                        Batch_norm=True,
                                                        D_rate=8)
                R_conn28 = Pnet_builder.Concat(
                    [conv1_rate8, conv1_rate8a, conv3])
                conv1_rate8b = Pnet_builder.DConv_layer(R_conn28,
                                                        filters=32,
                                                        Batch_norm=True,
                                                        D_rate=8)
                R_conn38 = Pnet_builder.Concat(
                    [conv1_rate8, conv1_rate8a, conv1_rate8b, conv3])
                conv2_rate8 = Pnet_builder.DConv_layer(
                    R_conn38,
                    filters=32,
                    Batch_norm=True,
                    D_rate=8,
                    name='Embedd8')  #Embedd layers into final net #64
                conv2_rate8 = Pnet_builder.Concat([
                    conv1_rate8, conv1_rate8a, conv1_rate8b, conv2_rate8, conv3
                ])

                #Branch 3: dilation rate 16, wider (64 filters)
                conv1_rate16 = Pnet_builder.DConv_layer(conv3,
                                                        filters=64,
                                                        Batch_norm=True,
                                                        D_rate=16)
                R_conn116 = Pnet_builder.Concat([conv1_rate16, conv3])
                conv1_rate16a = Pnet_builder.DConv_layer(
                    R_conn116, filters=64, Batch_norm=True,
                    D_rate=16)  #Embedd layers into final net #32
                R_conn216 = Pnet_builder.Concat(
                    [conv1_rate16, conv1_rate16a, conv3])
                conv1_rate16b = Pnet_builder.DConv_layer(R_conn216,
                                                         filters=64,
                                                         Batch_norm=True,
                                                         D_rate=16)
                R_conn316 = Pnet_builder.Concat(
                    [conv1_rate16, conv1_rate16a, conv1_rate16b, conv3])
                conv2_rate16 = Pnet_builder.DConv_layer(
                    R_conn316, filters=64, Batch_norm=True,
                    D_rate=16)  #Embedd layers into final net #32
                conv2_rate16 = Pnet_builder.Concat([
                    conv1_rate16, conv1_rate16a, conv1_rate16b, conv2_rate16,
                    conv3
                ])

                #Branch 4: dilation rate 32, widest (128 filters)
                conv1_rate32 = Pnet_builder.DConv_layer(conv3,
                                                        filters=128,
                                                        Batch_norm=True,
                                                        D_rate=32)
                R_conn132 = Pnet_builder.Concat([conv1_rate32, conv3])
                conv1_rate32a = Pnet_builder.DConv_layer(
                    R_conn132, filters=128, Batch_norm=True,
                    D_rate=32)  #Embedd layers into final net #32
                R_conn232 = Pnet_builder.Concat(
                    [conv1_rate32, conv1_rate32a, conv3])
                conv1_rate32b = Pnet_builder.DConv_layer(R_conn232,
                                                         filters=128,
                                                         Batch_norm=True,
                                                         D_rate=32)
                R_conn332 = Pnet_builder.Concat(
                    [conv1_rate32, conv1_rate32a, conv1_rate32b, conv3])
                conv2_rate32 = Pnet_builder.DConv_layer(
                    R_conn332, filters=128, Batch_norm=True,
                    D_rate=32)  #Embedd layers into final net #32
                conv2_rate32 = Pnet_builder.Concat([
                    conv1_rate32, conv1_rate32a, conv1_rate32b, conv2_rate32,
                    conv3
                ])

                #Fuse all four branches, upsample, and project to a
                #single-channel prior map (raw logits, no activation)
                concat = Pnet_builder.Concat(
                    [conv2_rate16, conv2_rate4, conv2_rate8, conv2_rate32])
                conv_l = Pnet_builder.Conv2d_layer(concat,
                                                   filters=256,
                                                   k_size=[1, 1])
                unpool = Pnet_builder.Conv_Resize_layer(conv_l)
                conv5 = Pnet_builder.Conv2d_layer(unpool,
                                                  filters=256,
                                                  Batch_norm=True)  #512
                output = Pnet_builder.Conv2d_layer(conv5,
                                                   filters=1,
                                                   Activation=False,
                                                   name='Output',
                                                   Batch_norm=False)
                #logits = tf.reshape(output, shape= [-1, kwargs['Image_width']*kwargs['Image_height']])
                return output
예제 #11
0
    def build_net(self):
        '''Build the Inception-Resnet-v2a graph and return readout logits.

        Topology: stem -> 10x incep_block35 (35x35 grid, scale 0.17) ->
        ReductionA -> 20x incep_block17 (17x17 grid, scale 0.1) ->
        ReductionB -> 9x incep_block8 (8x8 grid, scale 0.2) plus one final
        incep_block8 without activation -> 1x1 conv (1024) -> global
        average pool -> dropout -> FC readout over
        self.build_params['Classes'] classes.

        Side effects: keeps the Builder on self.builder and stashes
        intermediate tensors in self.Endpoints under 'Block_35',
        'Block_17', 'Block_8' and 'Model_conv'.

        Returns:
            The readout (logits) tensor.
        '''
        with tf.name_scope('Inception_Resnet_v2a_model'):
            with Builder(**self.build_params) as inceprv2a_builder:
                self.builder = inceprv2a_builder
                #Setting control params
                inceprv2a_builder.control_params(
                    Dropout_control=self.dropout_placeholder,
                    State=self.state_placeholder,
                    Renorm=self.build_params['Renorm'],
                    Share_var=True)

                #Construct functional building blocks. Parameters are named
                #'inputs' (not 'input') to avoid shadowing the builtin.
                def stem(inputs):
                    #Downsampling stem ending in a 4-branch inception concat.
                    with tf.variable_scope('Stem'):
                        conv1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 2, 2, 1], filters=32,
                            Batch_norm=True)
                        conv2 = inceprv2a_builder.Conv2d_layer(
                            conv1, stride=[1, 1, 1, 1], k_size=[3, 3],
                            filters=32, Batch_norm=True, padding='VALID')
                        conv3 = inceprv2a_builder.Conv2d_layer(
                            conv2, stride=[1, 1, 1, 1], k_size=[3, 3],
                            filters=64, Batch_norm=True)
                        pool1 = inceprv2a_builder.Pool_layer(
                            conv3, stride=[1, 2, 2, 1], k_size=[1, 3, 3, 1],
                            padding='VALID')

                        conv4 = inceprv2a_builder.Conv2d_layer(
                            pool1, stride=[1, 1, 1, 1], filters=80,
                            Batch_norm=True)
                        conv5 = inceprv2a_builder.Conv2d_layer(
                            conv4, stride=[1, 1, 1, 1], k_size=[3, 3],
                            filters=192, Batch_norm=True, padding='VALID')

                        pool2 = inceprv2a_builder.Pool_layer(
                            conv5, stride=[1, 2, 2, 1], k_size=[1, 3, 3, 1],
                            padding='VALID')

                        #Branch a: 1x1 conv
                        conv1a_split1 = inceprv2a_builder.Conv2d_layer(
                            pool2, stride=[1, 1, 1, 1], filters=96,
                            Batch_norm=True)

                        #Branch b: 1x1 -> 5x5
                        conv1b_split1 = inceprv2a_builder.Conv2d_layer(
                            pool2, stride=[1, 1, 1, 1], filters=48,
                            Batch_norm=True)
                        conv2b_split1 = inceprv2a_builder.Conv2d_layer(
                            conv1b_split1, stride=[1, 1, 1, 1], k_size=[5, 5],
                            filters=64, Batch_norm=True)

                        #Branch c: 1x1 -> 3x3 -> 3x3
                        conv1c_split1 = inceprv2a_builder.Conv2d_layer(
                            pool2, stride=[1, 1, 1, 1], filters=64,
                            Batch_norm=True)
                        conv2c_split1 = inceprv2a_builder.Conv2d_layer(
                            conv1c_split1, stride=[1, 1, 1, 1], k_size=[3, 3],
                            filters=96, Batch_norm=True)
                        conv3c_split1 = inceprv2a_builder.Conv2d_layer(
                            conv2c_split1, stride=[1, 1, 1, 1], k_size=[3, 3],
                            filters=96, Batch_norm=True)

                        #Branch d: avg pool -> 1x1
                        avgpool1d_split1 = inceprv2a_builder.Pool_layer(
                            pool2, k_size=[1, 3, 3, 1], stride=[1, 1, 1, 1],
                            pooling_type='AVG')
                        conv1d_split1 = inceprv2a_builder.Conv2d_layer(
                            avgpool1d_split1, k_size=[1, 1], filters=64,
                            Batch_norm=True)

                        concat = inceprv2a_builder.Concat([
                            conv1a_split1, conv2b_split1, conv3c_split1,
                            conv1d_split1
                        ])

                        return concat

                def incep_block35(inputs, Activation=True, scale=1.0):
                    #Residual inception block for the 35x35 grid.
                    with tf.variable_scope('Block35'):
                        conv1a_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=32, Batch_norm=True)

                        conv1b_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=32, Batch_norm=True)
                        conv2b_split1 = inceprv2a_builder.Conv2d_layer(
                            conv1b_split1, stride=[1, 1, 1, 1], filters=32,
                            Batch_norm=True)

                        conv1c_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=32, Batch_norm=True)
                        conv2c_split1 = inceprv2a_builder.Conv2d_layer(
                            conv1c_split1, stride=[1, 1, 1, 1], filters=48,
                            Batch_norm=True)
                        conv3c_split1 = inceprv2a_builder.Conv2d_layer(
                            conv2c_split1, stride=[1, 1, 1, 1], filters=64,
                            Batch_norm=True)

                        concat = inceprv2a_builder.Concat(
                            [conv1a_split1, conv2b_split1, conv3c_split1])

                        #1x1 projection back to the input depth, no BN/act,
                        #then scaled residual add.
                        conv2 = inceprv2a_builder.Conv2d_layer(
                            concat, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=inputs.get_shape()[3], Batch_norm=False,
                            Activation=False)
                        conv2_scale = inceprv2a_builder.Scale_activations(
                            conv2, scaling_factor=scale)
                        residual_out = inceprv2a_builder.Residual_connect(
                            [inputs, conv2_scale], Activation=Activation)

                        return residual_out

                def incep_block17(inputs, Activation=True, scale=1.0):
                    #Residual inception block for the 17x17 grid
                    #(factorized 7x7 as 1x7 then 7x1).
                    with tf.variable_scope('Block17'):
                        conv1a_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=192, Batch_norm=True)

                        conv1b_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=128, Batch_norm=True)
                        conv2b_split1 = inceprv2a_builder.Conv2d_layer(
                            conv1b_split1, stride=[1, 1, 1, 1], k_size=[1, 7],
                            filters=160, Batch_norm=True)
                        conv3b_split1 = inceprv2a_builder.Conv2d_layer(
                            conv2b_split1, stride=[1, 1, 1, 1], k_size=[7, 1],
                            filters=192, Batch_norm=True)

                        concat = inceprv2a_builder.Concat(
                            [conv1a_split1, conv3b_split1])

                        conv2 = inceprv2a_builder.Conv2d_layer(
                            concat, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=inputs.get_shape()[3], Batch_norm=False,
                            Activation=False)
                        conv2_scale = inceprv2a_builder.Scale_activations(
                            conv2, scaling_factor=scale)
                        residual_out = inceprv2a_builder.Residual_connect(
                            [inputs, conv2_scale], Activation=Activation)

                        return residual_out

                def incep_block8(inputs, Activation=True, scale=1.0):
                    #Residual inception block for the 8x8 grid
                    #(factorized 3x3 as 1x3 then 3x1).
                    with tf.variable_scope('Block8'):
                        conv1a_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=192, Batch_norm=True)

                        conv1b_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=192, Batch_norm=True)
                        conv2b_split1 = inceprv2a_builder.Conv2d_layer(
                            conv1b_split1, stride=[1, 1, 1, 1], k_size=[1, 3],
                            filters=224, Batch_norm=True)
                        conv3b_split1 = inceprv2a_builder.Conv2d_layer(
                            conv2b_split1, stride=[1, 1, 1, 1], k_size=[3, 1],
                            filters=256, Batch_norm=True)

                        concat = inceprv2a_builder.Concat(
                            [conv1a_split1, conv3b_split1])

                        conv2 = inceprv2a_builder.Conv2d_layer(
                            concat, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=inputs.get_shape()[3], Batch_norm=False,
                            Activation=False)
                        #Last layer has no activations, recheck with implementation
                        conv2_scale = inceprv2a_builder.Scale_activations(
                            conv2, scaling_factor=scale)
                        residual_out = inceprv2a_builder.Residual_connect(
                            [inputs, conv2_scale], Activation=Activation)

                        return residual_out

                def ReductionA(inputs):
                    #Grid reduction 35x35 -> 17x17 (strided convs + pool).
                    with tf.variable_scope('Reduction_35x17'):
                        conv1a_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 2, 2, 1], k_size=[3, 3],
                            filters=384, Batch_norm=True, padding='VALID')

                        conv1b_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=256, Batch_norm=True)
                        conv2b_split1 = inceprv2a_builder.Conv2d_layer(
                            conv1b_split1, stride=[1, 1, 1, 1], k_size=[3, 3],
                            filters=256, Batch_norm=True)
                        conv3b_split1 = inceprv2a_builder.Conv2d_layer(
                            conv2b_split1, stride=[1, 2, 2, 1], k_size=[3, 3],
                            filters=384, Batch_norm=True, padding='VALID')

                        pool1c_split1 = inceprv2a_builder.Pool_layer(
                            inputs, stride=[1, 2, 2, 1], k_size=[1, 3, 3, 1],
                            padding='VALID')

                        concat = inceprv2a_builder.Concat(
                            [conv1a_split1, conv3b_split1, pool1c_split1])

                        return concat

                def ReductionB(inputs):
                    #Grid reduction 17x17 -> 8x8 (three conv branches + pool).
                    with tf.variable_scope('Reduction_17x8'):
                        conv1a_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=256, Batch_norm=True)
                        conv2a_split1 = inceprv2a_builder.Conv2d_layer(
                            conv1a_split1, stride=[1, 2, 2, 1], k_size=[3, 3],
                            filters=384, Batch_norm=True, padding='VALID')

                        conv1b_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=256, Batch_norm=True)
                        conv2b_split1 = inceprv2a_builder.Conv2d_layer(
                            conv1b_split1, stride=[1, 2, 2, 1], k_size=[3, 3],
                            filters=288, Batch_norm=True, padding='VALID')

                        conv1c_split1 = inceprv2a_builder.Conv2d_layer(
                            inputs, stride=[1, 1, 1, 1], k_size=[1, 1],
                            filters=256, Batch_norm=True)
                        conv2c_split1 = inceprv2a_builder.Conv2d_layer(
                            conv1c_split1, stride=[1, 1, 1, 1], k_size=[3, 3],
                            filters=288, Batch_norm=True)
                        conv3c_split1 = inceprv2a_builder.Conv2d_layer(
                            conv2c_split1, stride=[1, 2, 2, 1], k_size=[3, 3],
                            filters=320, Batch_norm=True, padding='VALID')

                        pool1d_split1 = inceprv2a_builder.Pool_layer(
                            inputs, stride=[1, 2, 2, 1], k_size=[1, 3, 3, 1],
                            padding='VALID')

                        concat = inceprv2a_builder.Concat([
                            conv2a_split1, conv2b_split1, conv3c_split1,
                            pool1d_split1
                        ])
                        return concat

                #Model Construction

                #Stem
                Block_35 = stem(self.input_placeholder)
                #Inception 35x35
                for index in range(10):
                    Block_35 = incep_block35(Block_35, scale=0.17)
                self.Endpoints['Block_35'] = Block_35

                #Reduction 35->17
                Block_17 = ReductionA(Block_35)
                #Inception 17x17
                for index in range(20):
                    Block_17 = incep_block17(Block_17, scale=0.1)
                self.Endpoints['Block_17'] = Block_17

                #Reduction 17->8
                Block_8 = ReductionB(Block_17)
                for index in range(9):
                    Block_8 = incep_block8(Block_8, scale=0.2)
                #Final block applies no activation after the residual add.
                Block_8 = incep_block8(Block_8, False)
                self.Endpoints['Block_8'] = Block_8

                #Normal Logits
                with tf.variable_scope('Logits'):
                    model_conv = inceprv2a_builder.Conv2d_layer(
                        Block_8, stride=[1, 1, 1, 1], k_size=[1, 1],
                        filters=1024, Batch_norm=True)  #1536
                    self.Endpoints['Model_conv'] = model_conv
                    #Global average pool: kernel/stride span the whole
                    #remaining spatial extent.
                    model_conv_shape = model_conv.get_shape().as_list()
                    model_avg_pool = inceprv2a_builder.Pool_layer(
                        model_conv,
                        k_size=[
                            1, model_conv_shape[1], model_conv_shape[2], 1
                        ],
                        stride=[
                            1, model_conv_shape[1], model_conv_shape[2], 1
                        ],
                        padding='SAME',
                        pooling_type='AVG')
                    drop1 = inceprv2a_builder.Dropout_layer(model_avg_pool)
                    output = inceprv2a_builder.FC_layer(
                        drop1,
                        filters=self.build_params['Classes'],
                        readout=True)
                    return output
예제 #12
0
def Build_Attn_lstm(kwargs):
    """Build a show-attend-tell style captioning network: an Inception-ResNet-v2a
    feature extractor feeding a 2-layer layer-norm LSTM with Bahdanau attention.

    Args:
        kwargs: dict of build parameters. Keys read here: 'State' ('Train' or
            'Test'), 'Padded_length' (max caption length), 'Model_name'
            (prefix for tf collections). The rest is forwarded to Builder and
            to the feature extractor.

    Returns:
        The string 'Sequence' (model-type tag used by the caller). All useful
        tensors (placeholders, logits, loss) are exported via tf collections
        keyed on kwargs['Model_name'].

    Raises:
        ValueError: if kwargs['State'] is neither 'Train' nor 'Test'.
    """
    with tf.name_scope('Attn_lstm'):
        with Builder(**kwargs) as Attn_lstm_builder:
            # Sequence placeholders: full padded sequences in training,
            # single-step token feeds at test time.
            # BUG FIX: the original compared strings with `is`, which tests
            # object identity and only "works" via CPython interning; `==` is
            # the correct value comparison.
            if kwargs['State'] == 'Train':
                input_seq_placeholder = tf.placeholder(
                    tf.int32, shape=[None, kwargs['Padded_length']], name='Input_Seq')
                target_seq_placeholder = tf.placeholder(
                    tf.int32, shape=[None, kwargs['Padded_length']], name='Target_Seq')
            elif kwargs['State'] == 'Test':
                input_seq_placeholder = tf.placeholder(
                    tf.int32, shape=[None, 1], name='Input_Seq')
                target_seq_placeholder = tf.placeholder(
                    tf.int32, shape=[None, 1], name='Target_Seq')
            else:
                # Previously an unknown state fell through silently and caused
                # a NameError much later; fail fast instead.
                raise ValueError(
                    "kwargs['State'] must be 'Train' or 'Test', got %r"
                    % (kwargs['State'],))

            mask_placeholder = tf.placeholder(
                tf.int32, shape=[None, kwargs['Padded_length']], name='Seq_Mask')
            # NOTE(review): currently unused downstream; kept so the graph
            # interface stays unchanged for existing feed dicts.
            Lstm_state_placeholder = tf.placeholder(tf.float32, shape=[])

            # Show-and-tell style uniform initializer for the word embeddings.
            initalizer = tf.random_uniform_initializer(minval=-0.08, maxval=0.08)

            # Build the CNN feature extractor; it publishes its tensors via
            # tf collections which we pick up below.
            Build_Inception_Resnet_v2a(kwargs)

            with tf.name_scope('Feature_Extractor'):
                inception_output_init_state = tf.get_collection(kwargs['Model_name'] + '_Incepout')[0]
                inception_output = tf.get_collection(kwargs['Model_name'] + '_Incepout_attn')[0]
                inception_state = tf.get_collection(kwargs['Model_name'] + '_State')[0]
                inception_dropout = tf.get_collection(kwargs['Model_name'] + '_Dropout_prob_ph')[0]

            # Share dropout/state controls with the feature extractor.
            Attn_lstm_builder.control_params(
                Dropout_control=inception_dropout, State=inception_state)

            with tf.name_scope('Lstm_Embeddings'):
                # Spatial CNN features serve as the attention memory; a pooled
                # feature vector primes the LSTM state.
                image_embeddings = inception_output
                image_embeddings_size = tf.shape(image_embeddings)
                inital_image_embeddings = Attn_lstm_builder.FC_layer(
                    inception_output_init_state, filters=1024)

                # Word-embedding table: vocab of 40 tokens, 1024-dim vectors.
                # NOTE(review): 40/36/37 (vocab size, start, end tokens) are
                # hard-coded here and in the decoder below — presumably fixed
                # by the dataset; confirm before reuse.
                embeddings_map = tf.get_variable(
                    name='Map', shape=[40, 512 * 2], initializer=initalizer)
                seq_embeddings = tf.nn.embedding_lookup(
                    embeddings_map, input_seq_placeholder)

                # Bottom cell wrapped with Bahdanau attention over the image
                # features; a second plain layer-norm cell stacked on top.
                lstm_cell = Attn_lstm_builder.Lstm_cell_LayerNorm(1024)
                Bah_atten_mech = tf.contrib.seq2seq.BahdanauAttention(
                    512 * 2, normalize=True, memory=image_embeddings)
                lstm_cell = tf.contrib.seq2seq.AttentionWrapper(
                    lstm_cell, Bah_atten_mech,
                    output_attention=False, attention_layer_size=512)
                top_cell = Attn_lstm_builder.Lstm_cell_LayerNorm(1024)
                lstm_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell, top_cell])

            # Prime the recurrent state with the image embedding (fixes the
            # original `initial_stae` typo).
            zero_state = lstm_cell.zero_state(
                batch_size=image_embeddings_size[0], dtype=tf.float32)
            _, initial_state = lstm_cell(inital_image_embeddings, zero_state)

            if kwargs['State'] == 'Test':
                # Placeholders retained for a future step-by-step decode path.
                state_feed = tf.placeholder(
                    dtype=tf.float32, shape=[None, 1024], name='State_feed')
                attn_feed = tf.placeholder(
                    dtype=tf.float32, shape=[None, 1024], name='Attn_feed')
                # Greedy decoding from the start token (36) to the end token (37).
                helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
                    embedding=embeddings_map,
                    start_tokens=tf.tile([36], [image_embeddings_size[0]]),
                    end_token=37)
            else:  # 'Train' — guaranteed by the early validation above.
                sequence_length = tf.reduce_sum(mask_placeholder, 1)
                # NOTE(review): batch size 10 is hard-coded in the tile; the
                # mask-derived sequence_length above is computed but unused —
                # presumably the intended argument. Verify against the reader.
                helper = tf.contrib.seq2seq.TrainingHelper(
                    inputs=seq_embeddings,
                    sequence_length=tf.tile([kwargs['Padded_length']], [10]))

            # Project LSTM outputs to vocabulary logits and run the decoder.
            output_layer = Dense(units=40, use_bias=False, name='output_proj')
            decoder = tf.contrib.seq2seq.BasicDecoder(
                cell=lstm_cell, helper=helper,
                initial_state=initial_state, output_layer=output_layer)
            lstm_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder=decoder, impute_finished=True,
                maximum_iterations=15, output_time_major=False)
            lstm_outputs = lstm_outputs.rnn_output

            with tf.name_scope('Lstm_output'):
                # Flatten [batch, time, vocab] -> [batch*time, vocab].
                logits = tf.reshape(lstm_outputs, [-1, 40])

            with tf.name_scope('Lstm_loss'):
                if kwargs['State'] == 'Train':
                    targets = tf.reshape(target_seq_placeholder, [-1])  # flatten target seqs
                    weights = tf.to_float(tf.reshape(mask_placeholder, [-1]))
                    with tf.name_scope('Softmax_CE_loss'):
                        # Masked mean cross-entropy over non-padding positions;
                        # tf.maximum guards against an all-zero mask.
                        seq_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                            labels=targets, logits=logits)
                        batch_loss = tf.div(
                            tf.reduce_sum(tf.multiply(seq_loss, weights)),
                            tf.maximum(tf.reduce_sum(weights), 1))

            # Export the public tensors for trainers/evaluators.
            tf.add_to_collection(kwargs['Model_name'] + '_Input_seq_ph', input_seq_placeholder)
            tf.add_to_collection(kwargs['Model_name'] + '_Output_ph', target_seq_placeholder)
            tf.add_to_collection(kwargs['Model_name'] + '_Mask_ph', mask_placeholder)
            tf.add_to_collection(kwargs['Model_name'] + '_Output', logits)
            if kwargs['State'] == 'Train':
                tf.add_to_collection(kwargs['Model_name'] + '_Loss', batch_loss)

            return 'Sequence'
Example #13
0
    def build_net(self):
        '''
        F-Net, based off FRRN: encode-decode block replaced with Atrous
        (dilated) convolutions, reduced network size, FRRU sequence enclosed
        within a pool-upscale block.

        Returns:
            A single-channel, un-activated output tensor (logits) at the
            input resolution.
        '''
        with tf.name_scope('FRRN_C'):
            with Builder(**self.build_params) as frnn_c_builder:

                # Setting control params
                frnn_c_builder.control_params(Dropout_control=self.dropout_placeholder, State=self.state_placeholder)

                # -- Functional building blocks -------------------------------
                def RU(input, filters):
                    # Residual Unit: two activated convs + one 5x5 linear conv,
                    # added back onto the input.
                    with tf.name_scope('Residual_Unit'):
                        Conv1 = frnn_c_builder.Conv2d_layer(input, stride=[1, 1, 1, 1], filters=filters, Batch_norm=True)
                        Conv2 = frnn_c_builder.Conv2d_layer(Conv1, stride=[1, 1, 1, 1], filters=filters, Batch_norm=True)
                        Conv3 = frnn_c_builder.Conv2d_layer(Conv2, k_size=[5, 5], stride=[1, 1, 1, 1], filters=filters, Activation=False)

                        return frnn_c_builder.Residual_connect([input, Conv3])

                def FRRU(Residual_stream, Pooling_stream, scale_factor, filters, res_filters=32, D_rate=1, k_size=[3, 3]):
                    # Full-Resolution Residual Unit: pools the residual stream
                    # down to the pooling stream's scale, fuses them with
                    # dilated convs, then feeds a resized projection back into
                    # the full-resolution residual stream.
                    with tf.name_scope('Full_Resolution_Unit'):
                        scale_dims = [1, scale_factor, scale_factor, 1]
                        Pool = frnn_c_builder.Pool_layer(Residual_stream, k_size=scale_dims, stride=scale_dims)

                        Concat = frnn_c_builder.Concat([Pool, Pooling_stream])

                        Conv0 = frnn_c_builder.Conv2d_layer(Concat, stride=[1, 1, 1, 1], k_size=[1, 1], filters=filters, Batch_norm=True)
                        Conv1 = frnn_c_builder.DConv_layer(Conv0, filters=filters, Batch_norm=True, D_rate=D_rate)
                        Conv2 = frnn_c_builder.DConv_layer(Conv1, filters=filters, Batch_norm=True, D_rate=D_rate)

                        Res_connect = frnn_c_builder.Residual_connect([Conv0, Conv2])

                        # 1x1 projection back to the residual stream's width.
                        Conv3 = frnn_c_builder.Conv2d_layer(Res_connect, k_size=[1, 1], stride=[1, 1, 1, 1], filters=res_filters, Activation=False, Batch_norm=True)

                        Unpool = frnn_c_builder.Conv_Resize_layer(Conv3, k_size=k_size, output_scale=scale_factor, Batch_norm=False)
                    Residual_stream_out = frnn_c_builder.Concat([Residual_stream, Unpool])
                    Pooling_stream_out = Res_connect
                    return Residual_stream_out, Pooling_stream_out

                # -- Model construction ---------------------------------------
                with tf.name_scope('First_half'):
                    # Stem: 3x3 conv then 2x downsample.
                    Stem = frnn_c_builder.Conv2d_layer(self.input_placeholder, stride=[1, 1, 1, 1], k_size=[3, 3], filters=64, Batch_norm=True)
                    Stem_pool = frnn_c_builder.Pool_layer(Stem)

                    Stem_pool = RU(Stem_pool, 64)
                    Stem_pool = RU(Stem_pool, 64)

                    # Split into a full-resolution residual stream (32ch) and a
                    # further-downsampled pooling stream.
                    Residual_stream = frnn_c_builder.Conv2d_layer(Stem_pool, stride=[1, 1, 1, 1], k_size=[1, 1], filters=32, Batch_norm=True)
                    Pooling_stream = frnn_c_builder.Pool_layer(Stem_pool)

                    # Encoder: FRRUs with growing dilation rates in place of
                    # further spatial downsampling.
                    scale_factor = 2
                    Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=96)
                    Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=32, D_rate=2)
                    Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=32, D_rate=4)
                    Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=64, D_rate=8)
                    Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=64, D_rate=16)
                    Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=256, D_rate=32)
                    Residual_stream, Pooling_stream = FRRU(Residual_stream=Residual_stream, Pooling_stream=Pooling_stream, scale_factor=scale_factor, filters=256, D_rate=32, k_size=[5, 5])

                    # Merge the two streams back at full (post-stem) resolution.
                    # (Fixed a harmless duplicated assignment in the original.)
                    Pooling_stream = frnn_c_builder.Conv_Resize_layer(Pooling_stream, k_size=[3, 3], Batch_norm=True, output_scale=2)
                    RP_stream_merge = frnn_c_builder.Concat([Pooling_stream, Residual_stream])
                    Conv3 = frnn_c_builder.Conv2d_layer(RP_stream_merge, stride=[1, 1, 1, 1], k_size=[1, 1], filters=64, Batch_norm=True)
                    Upconv = frnn_c_builder.Conv_Resize_layer(Conv3, stride=[1, 1, 1, 1], Batch_norm=True, Activation=False, k_size=[3, 3], filters=64)
                    Res_connect = frnn_c_builder.Residual_connect([Stem, Upconv])
                    Res_connect = RU(Res_connect, 64)
                # Single-channel linear readout (logits, no activation).
                output = frnn_c_builder.Conv2d_layer(Res_connect, filters=1, stride=[1, 1, 1, 1], k_size=[1, 1], Batch_norm=False, Activation=False)
                return output