Example #1
    def __init__(self, width, height, channels, classes):
        super(LeNet, self).__init__()

        # activation_function = "tanh"
        activation_function = "relu"

        self._number_of_classes = classes

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")

        self._inputs = x

        with tf.variable_scope("reshape1"):
            x_image = tf.reshape(x, [-1, width, height, channels],
                                 name="input_reshape")

        with tf.variable_scope("conv1"):
            out = conv(x_image, 5, 5, 20, activation=activation_function)
            out = max_pool_2x2(out)

        with tf.variable_scope("conv2"):
            out = conv(out, 5, 5, 50, activation=activation_function)
            out = max_pool_2x2(out)

        dims = out.get_shape().as_list()
        flatten_size = 1
        for d in dims[1:]:
            flatten_size *= d

        with tf.variable_scope("reshape2"):
            flatten = tf.reshape(out, [-1, int(flatten_size)])

        with tf.variable_scope("fc1"):
            out = fc(flatten, 500)
            if activation_function == "relu":
                out = tf.nn.relu(out)
            elif activation_function == "tanh":
                out = tf.nn.tanh(out)

        with tf.variable_scope("fc2"):
            self._logits = fc(out, classes)

        if classes > 1:
            self._predictions = tf.nn.softmax(self._logits)
        else:
            self._predictions = self._logits
Example #2
    def __init__(self, width=28, height=28, channels=1, classes=10,
                 hidden_layers=[], dropout=[], activation_fn=nn.relu):
        super(MultiLayerPerceptron, self).__init__()

        self._number_of_classes = classes
        size = width * height * channels
        x = tf.placeholder(tf.float32, [None, size], name="x")

        # Hidden dropout
        dropout_default = tf.zeros([2], dtype=tf.float32)
        self._hidden_dropout = tf.placeholder_with_default(dropout_default,
                                                           [None],
                                                           name="hidden_dropout")

        # Input dropout
        self._input_dropout = tf.placeholder_with_default(tf.constant(0.0, dtype=tf.float32),
                                                [],
                                                name="input_dropout")

        # Activations
        self._activations = tf.placeholder_with_default([0, 0],
                                                [None],
                                                name="activations")

        self._inputs = x
        #out = tf.nn.dropout(x, keep_prob=tf.constant(1.0, dtype=tf.float32) - self._input_dropout)
        out = x

        for idx, h in enumerate(hidden_layers):
            with tf.variable_scope("fc%d" % idx):
                if 1 == self._activations[idx]:
                    activation_fn = nn.tanh
                else:
                    activation_fn = nn.relu
                out = fc(out, h, activation_fn=activation_fn)

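            # NOTE: self._hidden_dropout has static shape [None], so this check
            # compares an unknown Dimension at graph-construction time; the unused
            # `dropout` constructor argument was presumably meant to drive it.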
            if self._hidden_dropout.get_shape()[0] > idx:
                with tf.variable_scope("dropout%d" % idx):
                    out = tf.nn.dropout(out, keep_prob=1.0 - self._hidden_dropout[idx])

        with tf.variable_scope("fc%d" % len(hidden_layers)):
            out = fc(out, classes)

        self._logits = out
        if classes > 1:
            self._predictions = tf.nn.softmax(out)
        else:
            self._predictions = self._logits
Example #3
    def __init__(self, width, height, channels, classes, number_of_gpus=4):
        super(LeNet, self).__init__()

        # activation_function = "tanh"
        activation_function = "relu"

        self._number_of_classes = classes

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")

        self._inputs = x

        with tf.variable_scope("reshape1"):
            x_image = tf.reshape(x, [-1, width, height, channels],
                                 name="input_reshape")

        if number_of_gpus <= 1:
            gpus = 1
        else:
            gpus = number_of_gpus

        if gpus > 1:
            x_image_arr = tf.split(x_image, gpus, axis=0)
        else:
            x_image_arr = [x_image]
        classes_arr = [classes] * gpus

        logits_arr = [0.0] * gpus

        for gpu in range(gpus):
            with tf.device('/gpu:' + str(gpu)):

                with tf.variable_scope("conv1_" + str(gpu)):
                    out = conv(x_image_arr[gpu], 5, 5, 20, activation=activation_function)
                    out = max_pool_2x2(out)

                with tf.variable_scope("conv2_" + str(gpu)):
                    out = conv(out, 5, 5, 50, activation=activation_function)
                    out = max_pool_2x2(out)

                dims = out.get_shape().as_list()
                flatten_size = 1
                for d in dims[1:]:
                    flatten_size *= d

                with tf.variable_scope("reshape2_" + str(gpu)):
                    flatten = tf.reshape(out, [-1, int(flatten_size)])

                with tf.variable_scope("fc1_" + str(gpu)):
                    out = fc(flatten, 500)
                    if activation_function == "relu":
                        out = tf.nn.relu(out)
                    elif activation_function == "tanh":
                        out = tf.nn.tanh(out)

                with tf.variable_scope("fc2_" + str(gpu)):
                    logits_arr[gpu] = fc(out, classes_arr[gpu])

        self._logits = tf.concat(logits_arr, 0)

        # print(self._logits.graph.as_graph_def())

        if classes > 1:
            self._predictions = tf.nn.softmax(self._logits)
        else:
            self._predictions = self._logits
Example #4
    def __init__(self, width=299, height=299, channels=3, classes=10):
        super(InceptionV4, self).__init__()

        assert width == height, "width and height must be the same"

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")
        self._inputs = x

        with tf.variable_scope("reshape1"):
            x = tf.reshape(x, [-1, width, height, channels])

        self._number_of_classes = classes

        max_w = 299
        min_w = 299 // 3

        with tf.variable_scope("resize1"):
            if width < min_w:
                x = tf.image.resize_images(x, [min_w, min_w])
            elif width == 299:
                pass  # do nothing
            else:
                x = tf.image.resize_images(x, [max_w, max_w])

        with tf.variable_scope("stem"):
            out = stem(x)

        for i in range(4):
            with tf.variable_scope("incA" + str(i)):
                out = inceptionA(out)
        with tf.variable_scope("redA"):
            out = reductionA(out, k=192, l=224, m=256, n=384)

        for i in range(7):
            with tf.variable_scope("incB" + str(i)):
                out = inceptionB(out)
        with tf.variable_scope("redB"):
            out = reductionB(out)

        for i in range(3):
            with tf.variable_scope("incC" + str(i)):
                out = inceptionC(out)

        a, b = out.get_shape().as_list()[1:3]
        out = tf.nn.avg_pool(out,
                             ksize=[1, a, b, 1],
                             strides=[1, 1, 1, 1],
                             padding="VALID")

        dims = out.get_shape().as_list()
        flatten_size = 1
        for d in dims[1:]:
            flatten_size *= d

        with tf.variable_scope("reshape2"):
            out = tf.reshape(out, [-1, int(flatten_size)])

        with tf.variable_scope("fc1"):
            out = fc(out, classes)

        self._logits = out

        if classes > 1:
            self._predictions = tf.nn.softmax(self._logits)
        else:
            self._predictions = self._logits
Example #5
    def __init__(self, width=299, height=299, channels=3, classes=10):
        super(InceptionV3, self).__init__()

        self._number_of_classes = classes

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")
        self._inputs = x

        with tf.variable_scope("reshape1"):
            x = tf.reshape(x, [-1, width, height, channels])

        max_w = 299
        min_w = 299 // 3

        with tf.variable_scope("resize1"):
            if width < min_w:
                x = tf.image.resize_images(x, [min_w, min_w])
            elif width > 299:
                x = tf.image.resize_images(x, [max_w, max_w])

        # Stage 1
        out = conv3x3(x,
                      32,
                      stride=2,
                      padding="VALID",
                      batch_norm=use_batch_norm)
        out = conv3x3(out, 32, padding="VALID", batch_norm=use_batch_norm)
        out = conv3x3(out, 64, batch_norm=use_batch_norm)
        out = max_pool_3x3(out, padding="VALID")

        # Stage 2
        out = conv3x3(out, 80, padding="VALID", batch_norm=use_batch_norm)
        out = conv3x3(out, 192, padding="VALID", batch_norm=use_batch_norm)
        out = max_pool_3x3(out, padding="VALID")

        # Stage 3
        out = inception7A(out, 64, 64, 96, 96, 48, 64, 32)
        out = inception7A(out, 64, 64, 96, 96, 48, 64, 64)
        out = inception7A(out, 64, 64, 96, 96, 48, 64, 64)
        out = inception7B(out, 384, 64, 96, 96)

        # Stage 4
        out = inception7C(out, 192, 128, 128, 192, 128, 128, 128, 128, 192,
                          192)
        out = inception7C(out, 192, 160, 160, 192, 160, 160, 160, 160, 192,
                          192)
        out = inception7C(out, 192, 160, 160, 192, 160, 160, 160, 160, 192,
                          192)
        out = inception7C(out, 192, 192, 192, 192, 192, 192, 192, 192, 192,
                          192)
        out = inception7D(out, 192, 320, 192, 192, 192, 192)

        # Stage 5
        out = inception7E(out, 320, 384, 384, 384, 448, 384, 384, 384, 192,
                          avg_pool_3x3)
        out = inception7E(out, 320, 384, 384, 384, 448, 384, 384, 384, 192,
                          max_pool_3x3)

        out = tf.nn.avg_pool(out,
                             ksize=[1, 8, 8, 1],
                             strides=[1, 1, 1, 1],
                             padding='SAME')

        dims = out.get_shape().as_list()
        flatten_size = 1
        for d in dims[1:]:
            flatten_size *= d

        with tf.variable_scope("reshape2"):
            out = tf.reshape(out, [-1, int(flatten_size)])

        with tf.variable_scope("fc1"):
            out = fc(out, classes)

        self._logits = out

        if classes > 1:
            self._predictions = tf.nn.softmax(self._logits)
        else:
            self._predictions = self._logits
Example #6
    def __init__(self, width, height, channels, classes):
        super(AlexNet, self).__init__()

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")
        self._inputs = x

        with tf.variable_scope("reshape1"):
            x = tf.reshape(self._inputs, [-1, width, height, channels])

        self._number_of_classes = classes

        with tf.variable_scope("resize1"):
            if width < 224:
                x = tf.image.resize_images(x, [48, 48])
            else:
                x = tf.image.resize_images(x, [224, 224])

        with tf.variable_scope("conv1"):
            conv1 = conv11x11(x, 96, stride=4, padding='VALID')
            pool1 = max_pool_3x3(conv1, padding='VALID')
            lrn1 = tf.nn.lrn(pool1, 5, bias=1.0, alpha=0.0001, beta=0.75)

        with tf.variable_scope("conv2"):
            conv2 = conv5x5(lrn1, 256)
            pool2 = max_pool_3x3(conv2, padding='VALID')
            lrn2 = tf.nn.lrn(pool2, 5, bias=1.0, alpha=0.0001, beta=0.75)

        with tf.variable_scope("conv3"):
            conv3 = conv3x3(lrn2, 384)
        with tf.variable_scope("conv4"):
            conv4 = conv3x3(conv3, 384)
        with tf.variable_scope("conv5"):
            conv5 = conv3x3(conv4, 256)
            pool3 = max_pool_3x3(conv5)
            lrn3 = tf.nn.lrn(pool3, 5, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        dims = lrn3.get_shape().as_list()
        flatten_size = 1
        for d in dims[1:]:
            flatten_size *= d

        with tf.variable_scope("reshape2"):
            flatten = tf.reshape(lrn3, [-1, int(flatten_size)])

        with tf.variable_scope("fc1"):
            fc1 = fc(flatten, 4096)
            relu1 = tf.nn.relu(fc1)

        with tf.variable_scope("fc2"):
            fc2 = fc(relu1, 4096)
            relu2 = tf.nn.relu(fc2)

        with tf.variable_scope("fc3"):
            y = fc(relu2, classes)

        self._logits = y

        if classes > 1:
            self._predictions = tf.nn.softmax(y)
        else:
            self._predictions = self._logits
Example #7
    def __init__(self, width, height, channels, classes):
        super(AlexNet, self).__init__()

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")
        self._inputs = x

        with tf.variable_scope("reshape1"):
            x = tf.reshape(self._inputs, [-1, width, height, channels])

        self._number_of_classes = classes

        with tf.variable_scope("resize1"):
            if width < 224:
                x = tf.image.resize_images(x, [48, 48])
            else:
                x = tf.image.resize_images(x, [224, 224])

        with tf.variable_scope("conv1"):
            conv1 = conv11x11(x, 96, stride=4, padding='VALID')
            pool1 = max_pool_3x3(conv1, padding='VALID')
            lrn1 = tf.nn.lrn(pool1, 5, bias=1.0, alpha=0.0001, beta=0.75)

        with tf.variable_scope("conv2"):
            conv2 = conv5x5(lrn1, 256)
            pool2 = max_pool_3x3(conv2, padding='VALID')
            lrn2 = tf.nn.lrn(pool2, 5, bias=1.0, alpha=0.0001, beta=0.75)

        with tf.variable_scope("conv3"):
            conv3 = conv3x3(lrn2, 384)
        with tf.variable_scope("conv4"):
            conv4 = conv3x3(conv3, 384)
        with tf.variable_scope("conv5"):
            conv5 = conv3x3(conv4, 256)
            pool3 = max_pool_3x3(conv5)
            lrn3 = tf.nn.lrn(pool3, 5, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        dims = lrn3.get_shape().as_list()
        flatten_size = 1
        for d in dims[1:]:
            flatten_size *= d

        with tf.variable_scope("reshape2"):
            flatten = tf.reshape(lrn3, [-1, int(flatten_size)])

        with tf.variable_scope("fc1"):
            fc1 = fc(flatten, 4096)
            relu1 = tf.nn.relu(fc1)

        with tf.variable_scope("fc2"):
            fc2 = fc(relu1, 4096)
            relu2 = tf.nn.relu(fc2)

        with tf.variable_scope("fc3"):
            y = fc(relu2, classes)

        self._logits = y

        if classes > 1:
            self._predictions = tf.nn.softmax(y)
        else:
            self._predictions = self._logits
Example #8
    def __init__(self, width=299, height=299, channels=3, classes=10):
        super(ResNet, self).__init__()

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")
        self._inputs = x

        x = tf.reshape(x, [-1, width, height, channels])

        self._number_of_classes = classes

        # normalizer_params = { 'is_training': is_training() }

        # adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/learn/resnet.py

        # Configurations for each bottleneck group.

        # activation = tf.nn.relu
        BottleneckGroup = namedtuple('BottleneckGroup',
                                     ['num_blocks', 'num_filters', 'bottleneck_size'])
        groups = [
            BottleneckGroup(3, 128, 32), BottleneckGroup(3, 256, 64),
            # BottleneckGroup(3, 512, 128), BottleneckGroup(3, 1024, 256)
        ]

        # First convolution expands to 64 channels
        with tf.variable_scope('conv_layer1'):
            # net = convolution2d(
            #     x, 64, 7, normalizer_fn=batch_norm, normalizer_params=normalizer_params, activation_fn=activation)
            net = conv(x, 7, 7, 64)

        # Max pool
        net = tf.nn.max_pool(net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

        # First chain of resnets
        with tf.variable_scope('conv_layer2'):
            # net = convolution2d(net, groups[0].num_filters, 1, padding='VALID')
            net = conv(net, 1, 1, groups[0].num_filters, padding='VALID')

        # Create the bottleneck groups, each of which contains `num_blocks`
        # bottleneck groups.
        for group_i, group in enumerate(groups):
            for block_i in range(group.num_blocks):
                name = 'group_%d/block_%d' % (group_i, block_i)

                # 1x1 convolution responsible for reducing dimension
                with tf.variable_scope(name + '/conv_in'):
                    # conv = convolution2d(
                    #     net,
                    #     group.bottleneck_size,
                    #     1,
                    #     padding='VALID',
                    #     activation_fn=activation,
                    #     normalizer_fn=batch_norm,
                    #     normalizer_params=normalizer_params)
                    convLayer = conv(net, 1, 1, group.bottleneck_size, padding='VALID')

                with tf.variable_scope(name + '/conv_bottleneck'):
                    # conv = convolution2d(
                    #     conv,
                    #     group.bottleneck_size,
                    #     3,
                    #     padding='SAME',
                    #     activation_fn=activation,
                    #     normalizer_fn=batch_norm,
                    #     normalizer_params=normalizer_params)
                    convLayer = conv(convLayer, 3, 3, group.bottleneck_size)

                # 1x1 convolution responsible for restoring dimension
                with tf.variable_scope(name + '/conv_out'):
                    input_dim = net.get_shape()[-1].value
                    # conv = convolution2d(
                    #     conv,
                    #     input_dim,
                    #     1,
                    #     padding='VALID',
                    #     activation_fn=activation,
                    #     normalizer_fn=batch_norm,
                    #     normalizer_params=normalizer_params)
                    convLayer = conv(convLayer, 1, 1, input_dim, padding='VALID')

                # shortcut connections that turn the network into its counterpart
                # residual function (identity shortcut)
                net = convLayer + net

            try:
                # upscale to the next group size
                next_group = groups[group_i + 1]
                with tf.variable_scope('block_%d/conv_upscale' % group_i):
                    # net = convolution2d(
                    #     net,
                    #     next_group.num_filters,
                    #     1,
                    #     activation_fn=None,
                    #     biases_initializer=None,
                    #     padding='SAME')
                    net = conv(net, 1, 1, next_group.num_filters)
            except IndexError:
                pass

        net_shape = net.get_shape().as_list()
        net = tf.nn.avg_pool(
            net,
            ksize=[1, net_shape[1], net_shape[2], 1],
            strides=[1, 1, 1, 1],
            padding='VALID')

        out = net

        dims = out.get_shape().as_list()
        flatten_size = 1
        for d in dims[1:]:
            flatten_size *= d

        out = tf.reshape(out, [-1, int(flatten_size)])

        out = fc(out, classes)

        self._logits = out

        if classes > 1:
            self._predictions = tf.nn.softmax(self._logits)
        else:
            self._predictions = self._logits
Example #9
    def __init__(self, width=299, height=299, channels=3, classes=10):
        super(InceptionV3, self).__init__()

        self._number_of_classes = classes

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")
        self._inputs = x

        with tf.variable_scope("reshape1"):
            x = tf.reshape(x, [-1, width, height, channels])

        max_w = 299
        min_w = 299 // 3

        with tf.variable_scope("resize1"):
            if width < min_w:
                x = tf.image.resize_images(x, [min_w, min_w])
            elif width > 299:
                x = tf.image.resize_images(x, [max_w, max_w])

        # Stage 1
        out = conv3x3(x, 32, stride=2, padding="VALID", batch_norm=use_batch_norm)
        out = conv3x3(out, 32, padding="VALID", batch_norm=use_batch_norm)
        out = conv3x3(out, 64, batch_norm=use_batch_norm)
        out = max_pool_3x3(out, padding="VALID")

        # Stage 2
        out = conv3x3(out, 80, padding="VALID", batch_norm=use_batch_norm)
        out = conv3x3(out, 192, padding="VALID", batch_norm=use_batch_norm)
        out = max_pool_3x3(out, padding="VALID")

        # Stage 3
        out = inception7A(out, 64, 64, 96, 96, 48, 64, 32)
        out = inception7A(out, 64, 64, 96, 96, 48, 64, 64)
        out = inception7A(out, 64, 64, 96, 96, 48, 64, 64)
        out = inception7B(out, 384, 64, 96, 96)

        # Stage 4
        out = inception7C(out, 192, 128, 128, 192, 128, 128, 128, 128, 192, 192)
        out = inception7C(out, 192, 160, 160, 192, 160, 160, 160, 160, 192, 192)
        out = inception7C(out, 192, 160, 160, 192, 160, 160, 160, 160, 192, 192)
        out = inception7C(out, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192)
        out = inception7D(out, 192, 320, 192, 192, 192, 192)

        # Stage 5
        out = inception7E(out, 320, 384, 384, 384, 448, 384, 384, 384, 192, avg_pool_3x3)
        out = inception7E(out, 320, 384, 384, 384, 448, 384, 384, 384, 192, max_pool_3x3)

        out = tf.nn.avg_pool(out, ksize=[1, 8, 8, 1], strides=[1, 1, 1, 1], padding='SAME')

        dims = out.get_shape().as_list()
        flatten_size = 1
        for d in dims[1:]:
            flatten_size *= d

        with tf.variable_scope("reshape2"):
            out = tf.reshape(out, [-1, int(flatten_size)])

        with tf.variable_scope("fc1"):
            out = fc(out, classes)

        self._logits = out

        if classes > 1:
            self._predictions = tf.nn.softmax(self._logits)
        else:
            self._predictions = self._logits
Example #10
    def __init__(self, width=299, height=299, channels=3, classes=10):
        super(InceptionV4, self).__init__()

        assert width == height, "width and height must be the same"

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")
        self._inputs = x

        with tf.variable_scope("reshape1"):
            x = tf.reshape(x, [-1, width, height, channels])

        self._number_of_classes = classes

        max_w = 299
        min_w = 299 // 3

        with tf.variable_scope("resize1"):
            if width < min_w:
                x = tf.image.resize_images(x, [min_w, min_w])
            elif width == 299:
                pass  # do nothing
            else:
                x = tf.image.resize_images(x, [max_w, max_w])

        with tf.variable_scope("stem"):
            out = stem(x)

        for i in range(4):
            with tf.variable_scope("incA" + str(i)):
                out = inceptionA(out)
        with tf.variable_scope("redA"):
            out = reductionA(out, k=192, l=224, m=256, n=384)

        for i in range(7):
            with tf.variable_scope("incB" + str(i)):
                out = inceptionB(out)
        with tf.variable_scope("redB"):
            out = reductionB(out)

        for i in range(3):
            with tf.variable_scope("incC" + str(i)):
                out = inceptionC(out)

        a, b = out.get_shape().as_list()[1:3]
        out = tf.nn.avg_pool(out, ksize=[1, a, b, 1],
                             strides=[1, 1, 1, 1], padding="VALID")

        dims = out.get_shape().as_list()
        flatten_size = 1
        for d in dims[1:]:
            flatten_size *= d

        with tf.variable_scope("reshape2"):
            out = tf.reshape(out, [-1, int(flatten_size)])

        with tf.variable_scope("fc1"):
            out = fc(out, classes)

        self._logits = out

        if classes > 1:
            self._predictions = tf.nn.softmax(self._logits)
        else:
            self._predictions = self._logits
Example #11
    def __init__(self, width, height, channels, classes, number_of_gpus=4):
        super(LeNet, self).__init__()

        # activation_function = "tanh"
        activation_function = "relu"

        self._number_of_classes = classes

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")

        self._inputs = x

        with tf.variable_scope("reshape1"):
            x_image = tf.reshape(x, [-1, width, height, channels],
                                 name="input_reshape")

        if number_of_gpus <= 1:
            gpus = 1
        else:
            gpus = number_of_gpus

        if gpus > 1:
            x_image_arr = tf.split(x_image, gpus, axis=0)
        else:
            x_image_arr = [x_image]
        classes_arr = [classes] * gpus

        logits_arr = [0.0] * gpus

        for gpu in range(gpus):
            with tf.device('/gpu:' + str(gpu)):
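                # NOTE: unlike Example #3, the variable scopes below are not suffixed
                # with the GPU index; if conv/fc create their variables through
                # tf.get_variable, the second GPU iteration will fail unless reuse
                # is enabled.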

                with tf.variable_scope("conv1"):
                    out = conv(x_image_arr[gpu],
                               5,
                               5,
                               20,
                               activation=activation_function)
                    out = max_pool_2x2(out)

                with tf.variable_scope("conv2"):
                    out = conv(out, 5, 5, 50, activation=activation_function)
                    out = max_pool_2x2(out)

                dims = out.get_shape().as_list()
                flatten_size = 1
                for d in dims[1:]:
                    flatten_size *= d

                with tf.variable_scope("reshape2"):
                    flatten = tf.reshape(out, [-1, int(flatten_size)])

                with tf.variable_scope("fc1"):
                    out = fc(flatten, 500)
                    if activation_function == "relu":
                        out = tf.nn.relu(out)
                    elif activation_function == "tanh":
                        out = tf.nn.tanh(out)

                with tf.variable_scope("fc2"):
                    logits_arr[gpu] = fc(out, classes_arr[gpu])

        self._logits = tf.concat(logits_arr, 0)

        # print(self._logits.graph.as_graph_def())

        if classes > 1:
            self._predictions = tf.nn.softmax(self._logits)
        else:
            self._predictions = self._logits
Example #12
    def __init__(self, width=28, height=28, channels=1, classes=10):
        super(VGG16, self).__init__()
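        # NOTE: despite the class name, this network stacks only 8 convolutional
        # layers and 3 fully connected layers, a reduced VGG-style configuration.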

        assert width == height, "width and height must be the same"

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")
        self._inputs = x

        with tf.variable_scope("reshape1"):
            x = tf.reshape(x, [-1, width, height, channels])

        self._number_of_classes = classes

        with tf.variable_scope("resize1"):
            if width < 224:
                x = tf.image.resize_images(x, [48, 48])
            elif width > 224:
                x = tf.image.resize_images(x, [224, 224])

        with tf.variable_scope("conv1"):
            out = conv3x3(x, 64, stride=1)
            out = max_pool_2x2(out, stride=2)

        with tf.variable_scope("conv2"):
            out = conv3x3(out, 128, stride=1)
            out = max_pool_2x2(out, stride=2)

        with tf.variable_scope("conv3"):
            out = conv3x3(out, 256, stride=1)
        with tf.variable_scope("conv4"):
            out = conv3x3(out, 256, stride=1)
            out = max_pool_2x2(out, stride=2)

        with tf.variable_scope("conv5"):
            out = conv3x3(out, 512, stride=1)
        with tf.variable_scope("conv6"):
            out = conv3x3(out, 512, stride=1)
            out = max_pool_2x2(out, stride=2)

        with tf.variable_scope("conv7"):
            out = conv3x3(out, 512, stride=1)
        with tf.variable_scope("conv8"):
            out = conv3x3(out, 512, stride=1)
            out = max_pool_2x2(out, stride=2)

        dims = out.get_shape().as_list()
        flatten_size = 1
        for d in dims[1:]:
            flatten_size *= d

        with tf.variable_scope("reshape2"):
            out = tf.reshape(out, [-1, int(flatten_size)])

        # fully connected
        with tf.variable_scope("fc1"):
            out = fc(out, 4096)
            out = tf.nn.relu(out)
        with tf.variable_scope("fc2"):
            out = fc(out, 4096)
            out = tf.nn.relu(out)
        with tf.variable_scope("fc3"):
            y = fc(out, classes)

        self._logits = y

        if classes > 1:
            self._predictions = tf.nn.softmax(y)
        else:
            self._predictions = self._logits
Example #13
    def __init__(self, width=28, height=28, channels=1, classes=10):
        super(VGG16, self).__init__()
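        # NOTE: despite the class name, this network stacks only 8 convolutional
        # layers and 3 fully connected layers, a reduced VGG-style configuration.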

        assert width == height, "width and height must be the same"

        size = width * height * channels

        x = tf.placeholder(tf.float32, [None, size], name="x")
        self._inputs = x

        with tf.variable_scope("reshape1"):
            x = tf.reshape(x, [-1, width, height, channels])

        self._number_of_classes = classes

        with tf.variable_scope("resize1"):
            if width < 224:
                x = tf.image.resize_images(x, [48, 48])
            elif width > 224:
                x = tf.image.resize_images(x, [224, 224])

        with tf.variable_scope("conv1"):
            out = conv3x3(x, 64, stride=1)
            out = max_pool_2x2(out, stride=2)

        with tf.variable_scope("conv2"):
            out = conv3x3(out, 128, stride=1)
            out = max_pool_2x2(out, stride=2)

        with tf.variable_scope("conv3"):
            out = conv3x3(out, 256, stride=1)
        with tf.variable_scope("conv4"):
            out = conv3x3(out, 256, stride=1)
            out = max_pool_2x2(out, stride=2)

        with tf.variable_scope("conv5"):
            out = conv3x3(out, 512, stride=1)
        with tf.variable_scope("conv6"):
            out = conv3x3(out, 512, stride=1)
            out = max_pool_2x2(out, stride=2)

        with tf.variable_scope("conv7"):
            out = conv3x3(out, 512, stride=1)
        with tf.variable_scope("conv8"):
            out = conv3x3(out, 512, stride=1)
            out = max_pool_2x2(out, stride=2)

        dims = out.get_shape().as_list()
        flatten_size = 1
        for d in dims[1:]:
            flatten_size *= d

        with tf.variable_scope("reshape2"):
            out = tf.reshape(out, [-1, int(flatten_size)])

        # fully connected
        with tf.variable_scope("fc1"):
            out = fc(out, 4096)
            out = tf.nn.relu(out)
        with tf.variable_scope("fc2"):
            out = fc(out, 4096)
            out = tf.nn.relu(out)
        with tf.variable_scope("fc3"):
            y = fc(out, classes)

        self._logits = y

        if classes > 1:
            self._predictions = tf.nn.softmax(y)
        else:
            self._predictions = self._logits