Example #1
def bottleneck(input, dim_out, name, size=3, stride=1):
    c1 = layers.conv_layer(input, dim_out, size, stride, name + '_c1')
    c2 = layers.conv_layer(c1, dim_out, size, stride, name + '_c2')

    # print(c2)

    return c2
Example #2
    def build_model(self):
        # Weights for fully connected layers
        self.w_alice = init_weights("alice_w", [2 * self.N, 2 * self.N])
        self.w_bob = init_weights("bob_w", [2 * self.N, 2 * self.N])
        self.w_eve1 = init_weights("eve_w1", [self.N, 2 * self.N])
        self.w_eve2 = init_weights("eve_w2", [2 * self.N, 2 * self.N])

        # Placeholder variables for Message and Key
        self.msg = tf.placeholder("float", [None, self.msg_len])
        self.key = tf.placeholder("float", [None, self.key_len])

        # Alice's network
        self.alice_input = tf.concat(axis=1, values=[self.msg, self.key])
        self.alice_hidden = tf.nn.sigmoid(
            tf.matmul(self.alice_input, self.w_alice))
        self.alice_hidden = tf.expand_dims(self.alice_hidden, 2)
        self.alice_output = tf.squeeze(conv_layer(self.alice_hidden, "alice"))

        # Bob's network
        self.bob_input = tf.concat(axis=1,
                                   values=[self.alice_output, self.key])
        self.bob_hidden = tf.nn.sigmoid(tf.matmul(self.bob_input, self.w_bob))
        self.bob_hidden = tf.expand_dims(self.bob_hidden, 2)
        self.bob_output = tf.squeeze(conv_layer(self.bob_hidden, "bob"))

        # Eve's network
        self.eve_input = self.alice_output
        self.eve_hidden1 = tf.nn.sigmoid(tf.matmul(self.eve_input,
                                                   self.w_eve1))
        self.eve_hidden2 = tf.nn.sigmoid(
            tf.matmul(self.eve_hidden1, self.w_eve2))
        self.eve_hidden2 = tf.expand_dims(self.eve_hidden2, 2)
        self.eve_output = tf.squeeze(conv_layer(self.eve_hidden2, "eve"))
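The method above only builds the Alice/Bob/Eve graphs. As a rough, hypothetical sketch (not part of the source), one plausible next step in this adversarial setup is to define per-bit reconstruction losses for Bob and Eve, plus an Alice/Bob objective that also rewards keeping Eve close to random guessing:

        # Hypothetical loss sketch (assumption, not from the source):
        # per-bit L1 reconstruction errors for Bob and Eve, and an Alice/Bob
        # objective that is lowest when Bob decodes well and Eve does not.
        self.bob_loss = tf.reduce_mean(tf.abs(self.msg - self.bob_output))
        self.eve_loss = tf.reduce_mean(tf.abs(self.msg - self.eve_output))
        self.alice_bob_loss = self.bob_loss + tf.square(1.0 - self.eve_loss)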
Example #3
    def __call__(self, inp, training):
        a = time.time()

        with tf.variable_scope("StackedSRM"):  # define variable scope
            inter = inp  # intermediate input
            outputs = []
            for i in range(self.nb_stacks):
                with tf.name_scope("stack"):
                    conv = layers.conv_layer(inter,
                                             out_channels=64,
                                             filter_size=(4, 4),
                                             strides=(2, 2),
                                             padding=(1, 1),
                                             pad_values=0,
                                             use_bias=False)
                    relu = layers.relu_layer(conv)
                    res_module = layers.residual_module_srm(relu,
                                                            training,
                                                            out_channels=64,
                                                            nb_blocks=6,
                                                            pad_values=0,
                                                            use_bias=False)

                    h, w = tf.shape(res_module)[2], tf.shape(res_module)[3]
                    up_sample1 = layers.resize_layer(
                        res_module,
                        new_size=[2 * h, 2 * w],
                        resize_method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
                    )  # nearest neighbor up sampling
                    conv1 = layers.conv_layer(up_sample1,
                                              out_channels=128,
                                              filter_size=(3, 3),
                                              strides=(1, 1),
                                              padding=(1, 1),
                                              pad_values=0,
                                              use_bias=False)

                    h, w = tf.shape(conv1)[2], tf.shape(conv1)[3]
                    up_sample2 = layers.resize_layer(
                        conv1,
                        new_size=[2 * h, 2 * w],
                        resize_method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
                    )  # nearest neighbor up sampling
                    conv2 = layers.conv_layer(up_sample2,
                                              out_channels=1,
                                              filter_size=(3, 3),
                                              strides=(1, 1),
                                              padding=(1, 1),
                                              pad_values=0,
                                              use_bias=False)

                    # apply tanh and rescale to [0, 1] so the output can be
                    # fed to the next stack
                    inter = (layers.tanh_layer(conv2) + 1) / 2.0

                outputs.append(inter)

        print("SRM Model built in {} s".format(time.time() - a))
        return outputs
Example #4
def down(input, dim_out, name, size=3, stride=1):
    c1 = layers.conv_layer(input, dim_out, size, stride, name + '_c1')
    c2 = layers.conv_layer(c1, dim_out, size, stride, name + '_c2')
    mp = layers.avgpool_layer(c2, name)

    # print(c2, mp)

    return c2, mp
def run_simple_net():
    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[5, 5, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])
    conv3_pool = max_pool_2x2(conv3)
    conv3_flat = tf.reshape(conv3_pool, [-1, 4 * 4 * 128])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full_1 = tf.nn.relu(full_layer(conv3_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        print("Accuracy: {:.4}%".format(acc * 100))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            sess.run(train_step,
                     feed_dict={
                         x: batch[0],
                         y_: batch[1],
                         keep_prob: 0.5
                     })

            if i % 500 == 0:
                test(sess)

        test(sess)
Example #6
def residual_block(x,
                   out_channels,
                   projection=False,
                   name='residual',
                   block_activation_function='relu',
                   block_is_batch_normalization=True):
    """Create a Residual Block with two conv layers"""

    # Get the input channels
    input_channels = int(x.get_shape()[-1])

    conv1 = conv_layer(x,
                       3,
                       3,
                       out_channels,
                       stride=1,
                       name='{}_conv1'.format(name),
                       activation_function=block_activation_function,
                       is_batch_normalization=block_is_batch_normalization)
    conv2 = conv_layer(conv1,
                       3,
                       3,
                       out_channels,
                       stride=1,
                       name='{}_conv2'.format(name),
                       activation_function=block_activation_function,
                       is_batch_normalization=block_is_batch_normalization)

    # What type of shortcut connection to use
    if input_channels != out_channels:
        if projection:
            # Option B: Projection Shortcut
            # This introduces extra parameters.
            shortcut = conv_layer(
                x,
                1,
                1,
                out_channels,
                stride=1,
                name='{}_shortcut'.format(name),
                activation_function=block_activation_function,
                is_batch_normalization=block_is_batch_normalization)
        else:
            # Option A: Identity mapping with Zero-Padding
            # This method doesn't introduce any extra parameters.
            shortcut = tf.pad(
                x,
                [[0, 0], [0, 0], [0, 0], [0, out_channels - input_channels]])
    else:
        # Identity mapping.
        shortcut = x

    # Element wise addition.
    out = conv2 + shortcut

    return out
Example #7
def run_second_net():

    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    rate = tf.placeholder(tf.float32)

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, rate=rate)

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, rate=rate)

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
    conv3_pool = tf.nn.max_pool(conv3_3, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, rate=rate)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, rate=rate)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_conv,
                                                                              labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], rate: 0.0})
                       for i in range(10)])
        print(f'Accuracy {acc * 100}')

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], rate: 0.5})

            if i % 50 == 0:
                test(sess)

        test(sess)
def atrous_spatial_pyramid_pooling_block(x, is_train, depth=256, name='aspp'):
    in_shape = x.get_shape()
    input_size = tf.shape(x)[1:3]
    filters = [1, 4, 3, 3, 1, 1]
    atrous_rates = [1, 6, 12, 18, 1, 1]
    with tf.variable_scope(name) as scope:
        print('\tBuilding aspp unit: %s' % scope.name)
        # Branch 0: 1x1 conv
        branch0 = conv_layer(x, [filters[0], filters[0], in_shape[3], depth],
                             name="branch0")
        branch0 = mygn(branch0, name='gn_0')
        # Branch 1: 3x3 atrous_conv (rate = 6)
        branch1 = atrous_conv_layer(
            x, [filters[1], filters[1], in_shape[-1], depth],
            depth,
            atrous_rates[1],
            name='branch1')
        branch1 = mygn(branch1, name='gn_1')
        # Branch 2: 3x3 atrous_conv (rate = 12)
        branch2 = atrous_conv_layer(
            x, [filters[2], filters[2], in_shape[-1], depth],
            depth,
            atrous_rates[2],
            name='branch2')
        branch2 = mygn(branch2, name='gn_2')
        # Branch 3: 3x3 atrous_conv (rate = 18)
        branch3 = atrous_conv_layer(
            x, [filters[3], filters[3], in_shape[-1], depth],
            depth,
            atrous_rates[3],
            name='branch3')
        branch3 = mygn(branch3, name='gn_3')
        # Branch 4: image pooling
        # 4.1 global average pooling
        branch4 = tf.reduce_mean(x, [1, 2],
                                 name='global_average_pooling',
                                 keepdims=True)
        # 4.2 1x1 convolution with 256 filters and group normalization
        branch4 = conv_layer(branch4, [filters[4], filters[4], in_shape[3], depth],
                             name="branch4")
        branch4 = mygn(branch4, name='gn_4')
        # 4.3 bilinearly upsample features
        branch4 = tf.image.resize_bilinear(branch4,
                                           input_size,
                                           name='branch4_upsample')
        # Output
        out = tf.concat([branch0, branch1, branch2, branch3, branch4],
                        axis=3,
                        name='aspp_concat')
        out = myrelu(out, name='relu_out')
        in_shape = out.get_shape()
        out = conv_layer(out, [filters[5], filters[5], in_shape[3], depth],
                         name="aspp_out",
                         relu='no')
        return out
Example #9
def run_simple_net(bs, lr):
    # load the data
    cifar = CifarDataManager()
    # init variables
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)
    # 2 conv and 1 max pooling
    conv1 = conv_layer(x, shape=[3, 3, 3, 64])
    conv2 = conv_layer(conv1, shape=[3, 3, 64, 64])
    conv2_pool = max_pool_2x2(conv2)
    # 2 conv and 1 max pooling
    conv3 = conv_layer(conv2_pool, shape=[3, 3, 64, 128])
    conv4 = conv_layer(conv3, shape=[3, 3, 128, 128])
    conv4_pool = max_pool_2x2(conv4)
    # flatten and drop to prevent overfitting
    conv4_flat = tf.reshape(conv4_pool, [-1, 8 * 8 * 128])
    conv4_drop = tf.nn.dropout(conv4_flat, keep_prob=keep_prob)
    # fully connected nn using relu as activation function
    full_0 = tf.nn.relu(full_layer(conv4_drop, 512))
    full0_drop = tf.nn.dropout(full_0, keep_prob=keep_prob)
    full_1 = tf.nn.relu(full_layer(full0_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)
    # loss function
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    # lr is passed in so different learning rates can be compared for the results table
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})
                       for i in range(10)])
        print("Accuracy: {:.4}%".format(acc * 100))
        return acc*100

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(bs)
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

            '''if i % 500 == 0:
                # print(i//500)
                test(sess)'''
        result = test(sess)
    return result
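Since run_simple_net here takes the batch size and learning rate as arguments and returns the final test accuracy, a small driver can sweep both to fill in a results table. A minimal sketch (the value grid below is only an assumption, not from the source):

def sweep_hyperparameters():
    # Hypothetical driver: rebuild the graph per run and record the final
    # test accuracy (already scaled to percent) for each (bs, lr) pair.
    results = {}
    for bs in (64, 128, 256):
        for lr in (1e-3, 1e-4):
            tf.reset_default_graph()
            results[(bs, lr)] = run_simple_net(bs, lr)
    for (bs, lr), acc in sorted(results.items()):
        print("bs={:<4d} lr={:<8g} accuracy={:.2f}%".format(bs, lr, acc))
    return results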
Example #10
def res_down(input, dim_out, name, size=3, stride=1):
    c1 = layers.conv_layer(input, dim_out, size, stride, name + '_c1')
    c2 = layers.conv_layer(c1, dim_out, size, stride, name + '_c2')

    shortcut = layers.conv1x1_layer(input, dim_out, name + '_shortcut')
    add = shortcut + c2

    mp = layers.avgpool_layer(add, name)

    # print(c2, mp)

    return c2, mp
Example #11
def get_y_predict(x, keep_prob):
    conv1 = conv_layer(x, shape=[3, 3, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[3, 3, 32, 64])
    conv2_pool = max_pool_2x2(conv2)
    conv2_flat = tf.reshape(conv2_pool, [-1, 8 * 8 * 64])

    full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_predict = full_layer(full1_drop, 10)
    return y_predict
Example #12
def run_simple_net():
    cifar = CifarDataManager()

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[5, 5, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    conv3 = conv_layer(conv2_pool, shape=[5, 5, 64, 128])
    conv3_pool = max_pool_2x2(conv3)
    conv3_flat = tf.reshape(conv3_pool, [-1, 4 * 4 * 128])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full_1 = tf.nn.relu(full_layer(conv3_drop, 512))
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def test(sess):
        X = cifar.test.images.reshape(10, 1000, 32, 32, 3)
        Y = cifar.test.labels.reshape(10, 1000, 10)
        acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})
                       for i in range(10)])
        print("Accuracy: {:.4}%".format(acc * 100))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(STEPS):
            batch = cifar.train.next_batch(BATCH_SIZE)
            sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

            if i % 500 == 0:
                test(sess)

        test(sess)
Example #13
def BKStart(x, reuse):
    with tf.variable_scope('BKS', reuse=reuse):

        n = "BKStart_"
        x = conv_layer(x, 1, 32, 5, n + "conv_1", 1, pad='SAME')
        x = pool(x, 3, 2, name=n + "max_pool_1", pad='SAME', pool='max')
        x = conv_layer(x, 32, 32, 4, n + "conv_2", 1, pad='SAME')
        x = pool(x, 3, 2, n + "avg_pool_1", pool='avg')
        x = conv_layer(x, 32, 64, 5, n + "conv_3", 1, pad='SAME')
        x = pool(x, 3, 2, n + "avg_pool_2", pool='avg')
        flattened_shape = np.prod([s.value for s in x.get_shape()[1:]])
        x = tf.reshape(x, [-1, flattened_shape], name=n + 'flatten')
        x = fc_layer(x, 2048, activation='Relu', name=n + 'FC_1')
        #x=dropout_layer(x,keep_prob)
        logits = fc_layer(x, 7, activation='None', name=n + 'FC_2')
    return logits
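The reuse flag suggests BKStart is meant to be instantiated more than once over the same variables, for example with separate training and evaluation inputs. A minimal usage sketch; the placeholder shapes are assumptions, not from the source:

# Hypothetical usage sketch: build the graph once, then reuse the same
# 'BKS' variables for a second input pipeline.
train_images = tf.placeholder(tf.float32, [None, 48, 48, 1])  # assumed input shape
eval_images = tf.placeholder(tf.float32, [None, 48, 48, 1])
train_logits = BKStart(train_images, reuse=False)
eval_logits = BKStart(eval_images, reuse=True)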
Example #14
    def __init__(self, n_users, n_items, n_features, n_track_features,
                 layers=None, mlp_layer=fc_layer, with_bias=False, with_audio_feats=False):
        super(ConvNCF, self).__init__()
        self.name = "convncf"
        self.n_users = n_users
        self.n_items = n_items
        self.n_features = n_features
        self.n_track_features = n_track_features
        self.with_bias = with_bias
        self.user_embeddings = torch.nn.Embedding(num_embeddings=self.n_users, embedding_dim=self.n_features)
        self.item_embeddings = torch.nn.Embedding(num_embeddings=self.n_items, embedding_dim=self.n_features)
        n_layers = int(np.log2(self.n_features))
        if layers is None:
            self.layers = [32] * n_layers
        else:
            self.layers = layers
        conv_layers = []
        for inp_dim, out_dim in zip([1] + self.layers[:-1], self.layers):
            conv_layers.append(conv_layer(inp_dim, out_dim, k=2, stride=2))
        self.cnn = nn.Sequential(*conv_layers)
        self.linear = nn.Linear(self.layers[-1], 1)
        if with_bias:
            self.user_biases = torch.nn.Embedding(num_embeddings=self.n_users, embedding_dim=1)
            self.item_biases = torch.nn.Embedding(num_embeddings=self.n_items, embedding_dim=1)
            self.user_biases.weight.data.normal_(0, 0.01)
            self.item_biases.weight.data.normal_(0, 0.01)
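A hypothetical forward pass to go with this constructor (not from the source): ConvNCF-style models typically form the outer product of the user and item embeddings as a 2-D interaction map, reduce it with the stacked stride-2 convolutions defined above, and score it with the final linear layer.

    def forward(self, user_ids, item_ids):
        # Hypothetical sketch under the assumptions stated above.
        u = self.user_embeddings(user_ids)                        # (B, F)
        v = self.item_embeddings(item_ids)                        # (B, F)
        interaction = torch.bmm(u.unsqueeze(2), v.unsqueeze(1))   # (B, F, F) outer product
        h = self.cnn(interaction.unsqueeze(1))                    # (B, C, 1, 1) after log2(F) stride-2 convs
        score = self.linear(h.view(h.size(0), -1)).squeeze(-1)    # (B,)
        if self.with_bias:
            score = score + self.user_biases(user_ids).squeeze(-1)
            score = score + self.item_biases(item_ids).squeeze(-1)
        return score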
Example #15
def get_siamnese(X):
    """
    Creates the siamese stem of the neural network. This is the part both images are sent through sequentially.
    :param X: the input tensor
    :return: the flattened feature tensor
    """
    with tf.variable_scope('Layer1'):
        conv1 = conv_layer(X, 16, 5, 5, activation_function=tf.nn.relu)

    with tf.variable_scope('Layer2'):
        conv2 = conv_layer(conv1, 32, 3, 3, activation_function=tf.nn.relu)

    with tf.variable_scope('Layer4'):
        conv3 = conv_layer(conv2, 128, 2, 2, activation_function=tf.nn.relu)

    flat = tf.layers.flatten(conv3)
    return flat
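get_siamnese only builds the stem once; to use it as a true siamese network, both images have to pass through the same variables. A minimal usage sketch (placeholder shapes and the outer scope are assumptions, not from the source), relying on tf.AUTO_REUSE so the second call shares the weights created by the first:

# Hypothetical usage sketch.
img_a = tf.placeholder(tf.float32, [None, 64, 64, 1])
img_b = tf.placeholder(tf.float32, [None, 64, 64, 1])

with tf.variable_scope('siamese', reuse=tf.AUTO_REUSE):
    feat_a = get_siamnese(img_a)
with tf.variable_scope('siamese', reuse=tf.AUTO_REUSE):
    feat_b = get_siamnese(img_b)  # reuses the weights of the first call

distance = tf.reduce_sum(tf.abs(feat_a - feat_b), axis=1)  # L1 distance between embeddings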
Example #16
def up(input,
       skip,
       dim_out,
       name,
       up_size=2,
       up_stride=2,
       conv_size=3,
       conv_stride=1):
    up = layers.up_layer(input, dim_out, up_size, up_stride, name)
    concat = layers.concat_layer(up, skip, name)
    c1 = layers.conv_layer(concat, dim_out, conv_size, conv_stride,
                           name + '_c1')
    c2 = layers.conv_layer(c1, dim_out, conv_size, conv_stride, name + '_c2')

    # print(c2)

    return c2
    def build_model(self):
        # Weights for fully connected layers
        self.w_alice = init_weights("alice_w", [self.msg_len + self.secret_len + self.key_len, 2*self.msg_len])
        self.w_bob = init_weights("bob_w", [self.msg_len + self.key_len, 2 * self.secret_len])
        self.w_keygen = init_weights("keygen_w",[self.random_seed_len, 2*self.key_len])
        self.w_eve1 = init_weights("eve_w1", [self.msg_len, 2 * self.msg_len])
        self.w_eve2 = init_weights("eve_w2", [2 * self.msg_len, 2 * self.secret_len])

        # Placeholder variables for Message and Key
        self.msg = tf.placeholder("float", [None, self.msg_len])
        self.secret = tf.placeholder("float", [None, self.secret_len])
        self.seed = tf.placeholder("float", [None, self.random_seed_len])


        # KeyGen's network
        # self.keygen_input = self.seed
        # self.keygen_hidden = tf.nn.tanh(tf.matmul(self.keygen_input,self.w_keygen))
        # self.keygen_hidden = tf.expand_dims(self.keygen_hidden, 2)
        # self.key = tf.sigmoid(tf.squeeze(conv_layer(self.keygen_hidden,"keygen")));
        self.key = self.seed


        # Alice's network
        # FC layer -> Conv Layer (4 1-D convolutions)
        self.alice_input = tf.concat(axis=1, values=[self.msg, self.secret, self.key])
        self.alice_hidden = tf.nn.tanh(tf.matmul(self.alice_input, self.w_alice))
        self.alice_hidden = tf.expand_dims(self.alice_hidden, 2)
        self.alice_output = tf.squeeze(conv_layer(self.alice_hidden, "alice"))
        #self.alice_output = encrypt(self.msg,self.key)

        # Bob's network
        # FC layer -> Conv Layer (4 1-D convolutions)
        self.bob_input = tf.concat(axis=1, values=[self.alice_output, self.key])
        self.bob_hidden = tf.nn.tanh(tf.matmul(self.bob_input, self.w_bob))
        self.bob_hidden = tf.expand_dims(self.bob_hidden, 2)
        self.bob_output = tf.squeeze(conv_layer(self.bob_hidden, "bob"))
        #self.bob_output = decrypt(self.alice_output,self.key)

        # Eve's network
        # FC layer -> FC layer -> Conv Layer (4 1-D convolutions)
        self.eve_input = self.alice_output
        self.eve_hidden1 = tf.nn.tanh(tf.matmul(self.eve_input, self.w_eve1))   #Sigmoid Earlier
        self.eve_hidden2 = tf.nn.tanh(tf.matmul(self.eve_hidden1, self.w_eve2)) #Sigmoid Earlier
        self.eve_hidden2 = tf.expand_dims(self.eve_hidden2, 2)
        self.eve_output = tf.squeeze(conv_layer(self.eve_hidden2, "eve"))
def largeFOV(x, c_prime):

    batch_size = tf.shape(x)[0]

    # NHWC TO NCHW *****************************************************************************************************

    x = tf.transpose(x, [0, 3, 1, 2])

    # DEFINE MODEL *****************************************************************************************************

    # Convolution 1
    x = layers.conv_layer(x, [3, 3, c_prime, 96], name = "d_conv1", data_format='NCHW')
    # Convolution 2
    x = layers.conv_layer(x, [3, 3, 96, 128], name="d_conv2", data_format='NCHW')
    # Convolution 3
    x = layers.conv_layer(x, [3, 3, 128, 128], name="d_conv3", data_format='NCHW')
    # Max-Pooling 1
    x = layers.max_pool_layer(x, padding='SAME', data_format='NCHW')
    # Convolution 4
    x = layers.conv_layer(x, [3, 3, 128, 256], name="d_conv4", data_format='NCHW')
    # Convolution 5
    x = layers.conv_layer(x, [3, 3, 256, 256], name="d_conv5", data_format='NCHW')
    # Max-Pooling 2
    x = layers.max_pool_layer(x, padding='SAME', data_format='NCHW')
    # Convolution 6
    x = layers.conv_layer(x, [3, 3, 256, 512], name="d_conv6", data_format='NCHW')
    # Convolution 7
    x = layers.conv_layer(x, [3, 3, 512, 2], name="d_conv7", data_format='NCHW', relu='no')

    # NCHW TO NHWC *****************************************************************************************************

    x = tf.transpose(x, [0, 2, 3, 1])

    return x
Example #19
def BKVGG8(x, keep_prob):

    n = "BKVGG8_"
    x = conv_layer(x, 1, 32, 3, n + "conv_1", 1, pad='SAME')
    x = pool(x, 2, 2, name=n + "max_pool_1", pad='SAME', pool='max')
    x = conv_layer(x, 32, 64, 3, n + "conv_2", 1, pad='SAME')
    x = pool(x, 2, 2, n + "max_pool_2", pool='max')
    x = conv_layer(x, 64, 128, 3, n + "conv_3", 1, pad='SAME')
    x = pool(x, 2, 2, n + "max_pool_3", pool='max')
    x = conv_layer(x, 128, 256, 3, n + "conv_4", 1, pad='SAME')
    x = conv_layer(x, 256, 256, 3, n + "conv_5", 1, pad='SAME')
    flattened_shape = np.prod([s.value for s in x.get_shape()[1:]])
    x = tf.reshape(x, [-1, flattened_shape], name=n + 'flatten')
    x = fc_layer(x, 256, activation='Relu', name=n + 'FC_1')
    x = dropout_layer(x, keep_prob)
    x = fc_layer(x, 256, activation='Relu', name=n + 'FC_2')
    x = dropout_layer(x, keep_prob)
    logits = fc_layer(x, 7, activation='None', name=n + 'FC_3')
    return logits
Example #20
def res_up(input,
           skip,
           dim_out,
           name,
           up_size=2,
           up_stride=2,
           conv_size=3,
           conv_stride=1):
    up = layers.up_layer(input, dim_out, up_size, up_stride, name)
    concat = layers.concat_layer(up, skip, name)
    c1 = layers.conv_layer(concat, dim_out, conv_size, conv_stride,
                           name + '_c1')
    c2 = layers.conv_layer(c1, dim_out, conv_size, conv_stride, name + '_c2')

    shortcut = layers.conv1x1_layer(concat, dim_out, name + '_shortcut')
    add = shortcut + c2

    # print(c2)

    return add
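The down/up and res_down/res_up helpers above are encoder/decoder blocks. A hypothetical sketch of how they compose into a small U-Net-style network, assuming they live in the same module; channel widths and names are illustrative only:

def tiny_unet(x):
    # Hypothetical composition sketch using the helpers defined above.
    skip1, pool1 = res_down(x, 64, 'enc1')        # skip features + pooled map
    skip2, pool2 = res_down(pool1, 128, 'enc2')
    bneck = bottleneck(pool2, 256, 'bneck')
    dec2 = res_up(bneck, skip2, 128, 'dec2')      # upsample, concat skip, conv twice
    dec1 = res_up(dec2, skip1, 64, 'dec1')
    return dec1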
Example #21
    def create_graph(self):
        # TODO: try using get_Variable?

        x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])

        conv1 = conv_layer(x, shape=[5, 5, 1, 32])
        conv1_pool = max_pool_2x2(conv1)

        conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
        conv2_pool = max_pool_2x2(conv2)

        conv2_flat = tf.reshape(conv2_pool, [-1, 7 * 7 * 64])
        full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

        keep_prob = tf.placeholder(tf.float32)
        full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

        y_conv = full_layer(full1_drop, 10)

        return x, keep_prob, y_conv
Example #23
def stanford_bd_model(image, segmentation, c_prime):

    # NHWC TO NCHW *****************************************************************************************************

    image = tf.transpose(image, [0, 3, 1, 2])
    segmentation = tf.transpose(segmentation, [0, 3, 1, 2])

    # DEFINE MODEL *****************************************************************************************************

    # Branch 1
    # Convolution 1
    b1 = layers.conv_layer(segmentation, [5, 5, c_prime, 64],
                           name="b1_conv1",
                           data_format='NCHW')

    # Branch 2
    # Convolution 1
    b2 = layers.conv_layer(image, [5, 5, 3, 16],
                           name="b2_conv1",
                           data_format='NCHW')
    # Convolution 2
    b2 = layers.conv_layer(b2, [5, 5, 16, 64],
                           name="b2_conv2",
                           data_format='NCHW')

    # Feature concatenation
    feat_concat = tf.concat([b1, b2], axis=1)

    # Merged branch
    # Convolution 1
    x = layers.conv_layer(feat_concat, [3, 3, 128, 128],
                          name="m_conv1",
                          data_format='NCHW')
    # Max-Pooling 1
    x = layers.max_pool_layer(x, padding='SAME', data_format='NCHW')
    # Convolution 2
    x = layers.conv_layer(x, [3, 3, 128, 256],
                          name="m_conv2",
                          data_format='NCHW')
    # Max-Pooling 2
    x = layers.max_pool_layer(x, padding='SAME', data_format='NCHW')
    # Convolution 3
    x = layers.conv_layer(x, [3, 3, 256, 512],
                          name="m_conv3",
                          data_format='NCHW')
    # Convolution 4
    x = layers.conv_layer(x, [3, 3, 512, 2],
                          name="m_conv4",
                          data_format='NCHW',
                          relu='no')
    # Average-Pooling
    x = tf.transpose(x, [0, 2, 3, 1])
    #x = tf.reduce_mean(x, axis = [1,2])
    # Reshape
    #x = tf.reshape(x, (1, 2))
    return x
def first_residual_block(x,
                         kernel,
                         out_channel,
                         strides,
                         is_train,
                         name="unit"):
    input_channels = x.get_shape().as_list()[-1]
    with tf.variable_scope(name) as scope:
        print('\tBuilding residual unit: %s' % scope.name)
        # Shortcut connection
        if input_channels == out_channel:
            if strides == 1:
                shortcut = tf.identity(x)  # tensor with the same shape and contents as x
            else:
                shortcut = tf.nn.max_pool(x, [1, strides, strides, 1],
                                          [1, strides, strides, 1], 'VALID')
        else:
            in_shape = x.get_shape()
            shortcut = conv_layer(
                x, [1, 1, in_shape[3], out_channel],
                strides=strides,
                name="shortcut")  # 1x1 conv to obtain out_channel maps
        # Residual
        in_shape = x.get_shape()
        x = conv_layer(x, [kernel, kernel, in_shape[3], out_channel],
                       strides=strides,
                       name="conv_1")
        x = mygn(x, name='gn_1')
        x = myrelu(x, name='relu_1')
        in_shape = x.get_shape()
        x = conv_layer(x, [kernel, kernel, in_shape[3], out_channel],
                       strides=1,
                       name="conv_2")
        x = mygn(x, name='gn_2')
        # Merge
        x = x + shortcut
        x = myrelu(x, name='relu_2')
    return x
def residual_block(x, kernel, is_train, name="unit"):
    num_channel = x.get_shape().as_list()[-1]
    with tf.variable_scope(name) as scope:
        print('\tBuilding residual unit: %s' % scope.name)
        # Shortcut connection
        shortcut = x
        # Residual
        in_shape = x.get_shape()
        x = conv_layer(x, [kernel, kernel, in_shape[3], num_channel],
                       strides=1,
                       name="conv_1")
        x = mygn(x, name='gn_1')
        x = myrelu(x, name='relu_1')
        in_shape = x.get_shape()
        x = conv_layer(x, [kernel, kernel, in_shape[3], num_channel],
                       strides=1,
                       name="conv_2")
        x = mygn(x, name='gn_2')
        # Merge
        x = x + shortcut
        x = myrelu(x, name='relu_2')
    return x
def build_second_net():
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, keep_prob=keep_prob)

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, keep_prob=keep_prob)

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
    conv3_pool = tf.nn.max_pool(conv3_3,
                                ksize=[1, 8, 8, 1],
                                strides=[1, 8, 8, 1],
                                padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)  # noqa

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # noqa
    def build_model(self):
        # Weights for fully connected layers
        self.w_alice = init_weights("alice_w", [2 * self.N, 2 * self.N])

        # Placeholder variables for Message and Key
        self.msg = tf.placeholder("float", [None, self.msg_len])
        self.key = tf.placeholder("float", [None, self.key_len])

        # Alice's network
        # FC layer -> Conv Layer (4 1-D convolutions)
        self.alice_input = tf.concat([self.msg, self.key], 1)
        self.alice_hidden = tf.nn.sigmoid(
            tf.matmul(self.alice_input, self.w_alice))
        self.alice_hidden = tf.expand_dims(self.alice_hidden, 2)
        self.alice_output = tf.squeeze(conv_layer(self.alice_hidden, "alice"))
Example #28
def Yolov3_Tiny(inputs, cfg_file="../cfg/yolov3-depth-tiny.cfg"):
    blocks = parse_cfg(cfg_file)
    # print(blocks)
    x, layers, outputs = inputs, [], []
    weights_ptr = 0
    config = {}
    input_dims = 416

    for i, block in enumerate(blocks):
        block_type = block["type"]
        # print("{} {}".format(i-1, block_type))
        # print("Input shape: ", x.shape)
        if block_type == "convolutional":
            x, layers, weights_ptr = conv_layer(x, block, layers, weights_ptr)

        elif block_type == "maxpool":
            x, layers = maxpool_layer(x, block, layers)

        elif block_type == "upsample":
            x, layers = upsample_layer(x, block, layers)

        elif block_type == "route":
            x, layers = route_layer(x, block, layers)

        elif block_type == "yolo":
            x, layers, outputs = yolo_layer(x, block, layers, outputs,
                                            input_dims)

        elif block_type == "convolutional_transpose":
            x, layers = conv_transpose_layer(x, block, layers)

        elif block_type == "residual":
            x, layers = residual_layer(x, block, layers)

        # print("Output shape: ", x.shape)
        # print("")
    # output_layers = [layers[i - 1] for i in range(len(layers)) if layers[i] is None]

    outputs = tf.keras.layers.Concatenate(axis=1)(outputs)
    # x = tf.sigmoid(x)

    # print(x.shape)

    # Run NMS
    # outputs = non_maximum_suppression(outputs, confidence=0.5, num_classes=80, nms_threshold=0.5)

    return (outputs, x)  # outputs: yolo bounding box, x: depth
	def create(self, dropout_keep_prob, is_training=True):
		# First conv layer: the two 3s below are filter_height and filter_width; stride defaults to 1.
		# conv_layer(x, filter_height, filter_width, num_filters, name, stride=1, padding='SAME')
		conv1 = conv_layer(self.X, 3, 3, 16, name='conv1', activation_function=self.activation_function, is_batch_normalization=self.is_batch_normalization)
		self.out = conv1
		""" All residual blocks use zero-padding for shortcut connections """
		# However deep the network is, it is organized into a few large blocks.
		# Each large block consists of NUM_CONV residual units (Block1_1, Block1_2, Block1_3, ...),
		# and each residual unit has 2 conv layers, so one for-loop adds 6 conv layers.
		# (residual_block would be more accurately named residual_unit.)
		for i in range(self.NUM_CONV):  # i = 0, 1, 2
			resBlock2 = residual_block(self.out, 16, name = 'resBlock2_{}'.format(i + 1), block_activation_function=self.activation_function,block_is_batch_normalization=self.is_batch_normalization)
			self.out = resBlock2
		# 1 max_pool layer
		pool2 = max_pool(self.out, name = 'pool2')
		self.out = pool2
		# This differs from the original paper, which has no pooling between the middle blocks.
		# As before: each block stacks NUM_CONV residual units, each with 2 conv layers.
		for i in range(self.NUM_CONV):
			resBlock3 = residual_block(self.out, 32, name = 'resBlock3_{}'.format(i + 1),block_activation_function=self.activation_function,block_is_batch_normalization=self.is_batch_normalization)
			self.out = resBlock3
		# 1 max_pool layer
		pool3 = max_pool(self.out, name = 'pool3')
		self.out = pool3
		# As above: NUM_CONV residual units per block, each with 2 conv layers.
		for i in range(self.NUM_CONV):
			resBlock4 = residual_block(self.out, 64, name = 'resBlock4_{}'.format(i + 1),block_activation_function=self.activation_function,block_is_batch_normalization=self.is_batch_normalization)
			self.out = resBlock4
		# 1 global pool layer
		# Perform global average pooling to make spatial dimensions as 1x1
		global_pool = global_average(self.out, name = 'gap')
		self.out = global_pool
		# flatten is not a layer (it has no parameters)
		flatten = tf.contrib.layers.flatten(self.out)
		# 1 fully connected layer.
		# @Hazard
		# dropout_keep_prob: float, the fraction to keep before final layer.
		dpot_net = slim.dropout(flatten, dropout_keep_prob, is_training=is_training, scope='Dropout')
		fc5 = fc_layer(dpot_net, input_size=64, output_size=self.NUM_CLASSES, relu=False, name='fc5')
		self.out = fc5
Example #30
def build_second_net():
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)

    C1, C2, C3 = 32, 64, 128
    F1 = 600

    conv1_1 = conv_layer(x, shape=[3, 3, 3, C1])
    conv1_2 = conv_layer(conv1_1, shape=[3, 3, C1, C1])
    conv1_3 = conv_layer(conv1_2, shape=[3, 3, C1, C1])
    conv1_pool = max_pool_2x2(conv1_3)
    conv1_drop = tf.nn.dropout(conv1_pool, keep_prob=keep_prob)

    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C1, C2])
    conv2_2 = conv_layer(conv2_1, shape=[3, 3, C2, C2])
    conv2_3 = conv_layer(conv2_2, shape=[3, 3, C2, C2])
    conv2_pool = max_pool_2x2(conv2_3)
    conv2_drop = tf.nn.dropout(conv2_pool, keep_prob=keep_prob)

    conv3_1 = conv_layer(conv2_drop, shape=[3, 3, C2, C3])
    conv3_2 = conv_layer(conv3_1, shape=[3, 3, C3, C3])
    conv3_3 = conv_layer(conv3_2, shape=[3, 3, C3, C3])
    conv3_pool = tf.nn.max_pool(conv3_3, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
    conv3_flat = tf.reshape(conv3_pool, [-1, C3])
    conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)

    full1 = tf.nn.relu(full_layer(conv3_drop, F1))
    full1_drop = tf.nn.dropout(full1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 10)

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                                           labels=y_))
    train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)  # noqa

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # noqa
Example #31
    def create(self):

        conv1 = conv_layer(self.X, 3, 3, 16, name='conv1')
        self.out = conv1
        """ All residual blocks use zero-padding for shortcut connections """

        for i in range(self.NUM_CONV):
            resBlock2 = residual_block(self.out,
                                       16,
                                       name='resBlock2_{}'.format(i + 1))
            self.out = resBlock2

        pool2 = max_pool(self.out, name='pool2')
        self.out = pool2

        for i in range(self.NUM_CONV):
            resBlock3 = residual_block(self.out,
                                       32,
                                       name='resBlock3_{}'.format(i + 1))
            self.out = resBlock3

        pool3 = max_pool(self.out, name='pool3')
        self.out = pool3

        for i in range(self.NUM_CONV):
            resBlock4 = residual_block(self.out,
                                       64,
                                       name='resBlock4_{}'.format(i + 1))
            self.out = resBlock4

        # Perform global average pooling to make spatial dimensions as 1x1
        global_pool = global_average(self.out, name='gap')
        self.out = global_pool

        flatten = tf.contrib.layers.flatten(self.out)
        fc5 = fc_layer(flatten,
                       input_size=64,
                       output_size=self.NUM_CLASSES,
                       relu=False,
                       name='fc5')

        self.out = fc5
def smallFOV(x, c_prime):

    # NHWC TO NCHW *****************************************************************************************************

    x = tf.transpose(x, [0, 3, 1, 2])

    # DEFINE MODEL *****************************************************************************************************

    # Convolution 1
    x = layers.conv_layer(x, [3, 3, c_prime, 96],
                          name="d_conv1",
                          data_format='NCHW')
    # Convolution 2
    x = layers.conv_layer(x, [1, 1, 96, 128],
                          name="d_conv2",
                          data_format='NCHW')
    # Average-Pooling 1
    x = layers.avg_pool_layer(x, padding='SAME', data_format='NCHW')
    # Convolution 3
    x = layers.conv_layer(x, [3, 3, 128, 256],
                          name="d_conv3",
                          data_format='NCHW')
    # Convolution 4
    x = layers.conv_layer(x, [1, 1, 256, 256],
                          name="d_conv4",
                          data_format='NCHW')
    # Average-Pooling 2
    x = layers.avg_pool_layer(x, padding='SAME', data_format='NCHW')
    # Convolution 5
    x = layers.conv_layer(x, [3, 3, 256, 512],
                          name="d_conv5",
                          data_format='NCHW')
    # Convolution 6
    x = layers.conv_layer(x, [1, 1, 512, 2],
                          name="d_conv6",
                          data_format='NCHW',
                          relu='no')

    # NCHW TO NHWC *****************************************************************************************************

    x = tf.transpose(x, [0, 2, 3, 1])

    return x
Example #33
def run_simple_net():
    dataset = DeepSatData()

    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 4])
    y_ = tf.placeholder(tf.float32, shape=[None, 6])
    keep_prob = tf.placeholder(tf.float32)

    conv1 = conv_layer(x, shape=[3, 3, 4, 16], pad='SAME')
    conv1_pool = avg_pool_2x2(conv1, 2, 2)  #28x28x4->14x14x16

    conv2 = conv_layer(conv1_pool, shape=[3, 3, 16, 32], pad='SAME')
    conv2_pool = avg_pool_2x2(conv2, 2, 2)  #14x14x16->7x7x32

    conv3 = conv_layer(conv2_pool, shape=[3, 3, 32, 64], pad='SAME')
    # conv3_pool = max_pool_2x2(conv3) # 7x7x32 ->7x7x64

    conv4 = conv_layer(conv3, shape=[3, 3, 64, 96], pad='SAME')
    # conv4_pool = max_pool_2x2(conv4) # 7x7x64 -> 7x7x96

    conv5 = conv_layer(conv4, shape=[3, 3, 96, 64], pad='SAME')
    conv5_pool = avg_pool_2x2(conv5, 2, 2)  # 7x7x96 -> 3x3x64

    _flat = tf.reshape(conv5_pool, [-1, 3 * 3 * 64])
    _drop1 = tf.nn.dropout(_flat, keep_prob=keep_prob)

    # full_1 = tf.nn.relu(full_layer(_drop1, 200))
    full_1 = tf.nn.relu(full_layer(_drop1, 512))
    # -- until here
    # classifier:add(nn.Threshold(0, 1e-6))
    _drop2 = tf.nn.dropout(full_1, keep_prob=keep_prob)
    full_2 = tf.nn.relu(full_layer(_drop2, 256))
    # classifier:add(nn.Threshold(0, 1e-6))
    full_3 = full_layer(full_2, 6)

    predict = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=full_3, labels=y_))

    #train_step = tf.train.RMSPropOptimizer(lr, decay, momentum).minimize(predict)
    train_step = tf.train.AdamOptimizer(lr).minimize(predict)

    correct_prediction = tf.equal(tf.argmax(full_3, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # TENSORBOARD
    tf.summary.scalar('loss', predict)
    tf.summary.scalar('accuracy', accuracy)

    merged_sum = tf.summary.merge_all()

    def test(sess):
        X = dataset.test.images.reshape(10, NUM_TEST_SAMPLES, 28, 28, 4)
        Y = dataset.test.labels.reshape(10, NUM_TEST_SAMPLES, 6)
        acc = np.mean([
            sess.run(accuracy, feed_dict={
                x: X[i],
                y_: Y[i],
                keep_prob: 1.0
            }) for i in range(10)
        ])
        return acc

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sum_writer = tf.summary.FileWriter('logs/' + 'v4_sat6')
        sum_writer.add_graph(sess.graph)

        for i in range(STEPS):
            batch = dataset.train.random_batch(BATCH_SIZE)
            #batch = dataset.train.next_batch(BATCH_SIZE)
            batch_x = batch[0]
            batch_y = batch[1]

            _, summ = sess.run([train_step, merged_sum],
                               feed_dict={
                                   x: batch_x,
                                   y_: batch_y,
                                   keep_prob: 0.5
                               })
            sum_writer.add_summary(summ, i)

            sess.run(train_step,
                     feed_dict={
                         x: batch_x,
                         y_: batch_y,
                         keep_prob: dropoutProb
                     })

            if i % ONE_EPOCH == 0:
                print("\n*****************EPOCH: %d" % (i / ONE_EPOCH))
            if i % TEST_INTERVAL == 0:
                acc = test(sess)
                loss = sess.run(predict,
                                feed_dict={
                                    x: batch_x,
                                    y_: batch_y,
                                    keep_prob: dropoutProb
                                })
                print("EPOCH:%d" % (i / ONE_EPOCH) + " Step:" + str(i) +
                      "|| Minibatch Loss= " + "{:.4f}".format(loss) +
                      " Accuracy: {:.4}%".format(acc * 100))

        test(sess)
        sum_writer.close()
Example #34
import numpy as np
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data
from layers import conv_layer, max_pool_2x2, full_layer

DATA_DIR = '/tmp/data'
MINIBATCH_SIZE = 50
STEPS = 5000


mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

x_image = tf.reshape(x, [-1, 28, 28, 1])
conv1 = conv_layer(x_image, shape=[5, 5, 1, 32])
conv1_pool = max_pool_2x2(conv1)

conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
conv2_pool = max_pool_2x2(conv2)

conv2_flat = tf.reshape(conv2_pool, [-1, 7*7*64])
full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

keep_prob = tf.placeholder(tf.float32)
full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

y_conv = full_layer(full1_drop, 10)

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
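The snippet ends after defining the optimizer; below is a minimal sketch of the usual continuation (accuracy metric plus the training/eval loop), kept close to the style of the other examples above. It is an assumption, not part of the source snippet.

# Sketch of a typical continuation (assumption, not from the source).
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for i in range(STEPS):
        batch = mnist.train.next_batch(MINIBATCH_SIZE)
        sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

    # evaluate in chunks of 1000 test images to keep memory use modest
    X = mnist.test.images.reshape(10, 1000, 784)
    Y = mnist.test.labels.reshape(10, 1000, 10)
    test_accuracy = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0})
                             for i in range(10)])
    print("test accuracy: {:.4}%".format(test_accuracy * 100))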