Example #1
def body2(i, x, out):
    # Convolution Layer
    inputx = x.read(index=i)
    conv1 = func.conv2d(inputx, weights['wc1'], biases['bc1'])
    # Pooling (down-sampling)
    p1 = func.extract_patches(conv1, 'SAME', 2, 2)
    f1 = func.majority_frequency(p1)
    # majority pooling
    pool1 = func.majority_pool(p=p1, f=f1)

    # Convolution Layer
    conv2 = func.conv2d(pool1, weights['wc2'], biases['bc2'])
    # Pooling (down-sampling)
    p2 = func.extract_patches(conv2, 'SAME', 2, 2)
    f2 = func.majority_frequency(p2)
    # majority pooling
    pool2 = func.majority_pool(p=p2, f=f2)

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc = tf.reshape(pool2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)
    # Output, class prediction
    out = out.write(index=i,
                    value=tf.add(tf.matmul(fc1, weights['out']),
                                 biases['out']))
    i += 1
    return i, x, out
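
Since body2 reads from and writes to TensorArrays and increments i, it is evidently meant to be the body of a tf.while_loop. A minimal usage sketch, assuming x is a TensorArray of per-example inputs and that batch_size is defined in the surrounding code:

import tensorflow as tf

i0 = tf.constant(0)
out0 = tf.TensorArray(dtype=tf.float32, size=batch_size)

def cond(i, x, out):
    return i < batch_size

# Run body2 once per example; out_final collects the per-example logits.
_, _, out_final = tf.while_loop(cond, body2, loop_vars=[i0, x, out0])
predictions = out_final.stack()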
Example #2
def unet(X, classes):
    # X is an input placeholder
    Conv1, conv = conv_conv_pool(input=X, num_filters=64,
                                 filter_size=3, pool_size=2, block_nos=1)

    Conv2, conv = conv_conv_pool(conv, 128, 3, 2, 2)

    Conv3, conv = conv_conv_pool(conv, 256, 3, 2, 3)

    Conv4, conv = conv_conv_pool(conv, 512, 3, 2, 4)

    conv = conv2d(input=conv, num_filters=1024, filter_size=3, block_nos=5)

    conv = conv_upconv(input=conv, num_filters=1024,
                       conv_filter_size=3, upconv_scale=2, block_nos=6)

    conv = concat_conv(prev_conved=Conv4, upconved=conv,
                       num_filters=512, filter_size=3, block_nos=7)

    conv = conv_upconv(conv, 512, 3, 2, 8)

    conv = concat_conv(Conv3, conv, 256, 3, 9)

    conv = conv_upconv(conv, 256, 3, 2, 10)

    conv = concat_conv(Conv2, conv, 128, 3, 11)

    conv = conv_upconv(conv, 128, 3, 2, 12)

    conv = concat_conv(Conv1, conv, 64, 3, 13)

    conv = conv2d(conv, 64, 3, 14)

    conv = conv2d(conv, num_filters=classes,
                  filter_size=1, block_nos=15, last_flag=True)

    return conv
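
The helpers conv_conv_pool, conv_upconv, concat_conv and conv2d are not shown in this example. As a rough sketch, conv_conv_pool could look like the following (the weight initialisation and the use of block_nos for variable scoping are assumptions); it returns the pre-pool feature map first because the skip connections (Conv1..Conv4) reuse it:

import tensorflow as tf

def conv_conv_pool(input, num_filters, filter_size, pool_size, block_nos):
    with tf.variable_scope('block%d' % block_nos):
        conv = input
        for j in range(2):  # two successive convolutions, as in the original U-Net
            conv = tf.layers.conv2d(conv, num_filters, filter_size,
                                    padding='same', activation=tf.nn.relu,
                                    name='conv%d' % j)
        pooled = tf.layers.max_pooling2d(conv, pool_size, pool_size)
    # The pre-pool feature map is returned for the decoder's skip connections.
    return conv, pooled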
Example #3
def body(i, x, y, grads):
    # Convolution Layer
    inputx = x.read(index=i)
    conv1 = func.conv2d(inputx, weights['wc1'], biases['bc1'])
    # Pooling (down-sampling)
    p1 = func.extract_patches(conv1, 'SAME', 2, 2)
    f1 = func.majority_frequency(p1)
    # PCA pooling (also returns a mask for unpooling in the backward pass)
    pool1, mask1 = func.pca_pool_with_mask(temp=p1)

    # Convolution Layer
    conv2 = func.conv2d(pool1, weights['wc2'], biases['bc2'])
    # Pooling (down-sampling)
    p2 = func.extract_patches(conv2, 'SAME', 2, 2)
    f2 = func.majority_frequency(p2)
    # PCA pooling (also returns a mask for unpooling in the backward pass)
    pool2, mask2 = func.pca_pool_with_mask(temp=p2)

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc = tf.reshape(pool2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction
    yi = y.read(index=i)
    temp_pred = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    grads[8] = grads[8].write(index=i, value=temp_pred)

    # ----- end of graph definition -----

    # ----- manual gradient computation (backpropagation) -----

    # the last fc
    e = tf.nn.softmax(temp_pred) - yi
    grads[3] = grads[3].write(index=i, value=tf.transpose(fc1) @ e)
    grads[7] = grads[7].write(index=i, value=tf.reduce_sum(e, axis=0))

    # the second-to-last fc
    # dropout is applied at this layer, so only the active nodes should be updated
    e = tf.multiply(e @ tf.transpose(weights['out']), tf.cast(tf.greater(fc1, 0), dtype=tf.float32)) / dropout
    grads[2] = grads[2].write(index=i, value=tf.transpose(fc) @ e)
    grads[6] = grads[6].write(index=i, value=tf.reduce_sum(e, axis=0))

    # the last pooling layer
    e = e @ tf.transpose(weights['wd1'])
    e = tf.reshape(e, pool2.get_shape().as_list())

    # the last conv layer
    # unpooling get error from pooling layer
    e = func.error_pooling2conv(e, mask2)

    # multiply by the derivative of the activation function on the conv layer
    # (this step is also important: it is part of the unpooling/upsampling path)
    e = tf.multiply(e, tf.cast(tf.greater(conv2, 0), dtype=tf.float32))
    temp1, temp2 = func.filter_gradient(e, pool1, conv2)
    grads[1] = grads[1].write(index=i, value=temp1)
    grads[5] = grads[5].write(index=i, value=temp2)

    # conv to pool
    e = func.error_conv2pooling(e, weights['wc2'])

    # pool to the first conv
    e = func.error_pooling2conv(e, mask1)
    e = tf.multiply(e, tf.cast(tf.greater(conv1, 0), dtype=tf.float32))
    temp1, temp2 = func.filter_gradient(e, inputx, conv1)
    grads[0] = grads[0].write(index=i, value=temp1)
    grads[4] = grads[4].write(index=i, value=temp2)
    i += 1
    return i, x, y, grads
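
After the loop finishes, each grads[k] is a TensorArray holding one gradient per example. A hypothetical training step might average and apply them as below; the parameter ordering mirrors the indices used inside body, but the learning rate and the plain SGD update are assumptions:

import tensorflow as tf

param_list = [weights['wc1'], weights['wc2'], weights['wd1'], weights['out'],
              biases['bc1'], biases['bc2'], biases['bd1'], biases['out']]
learning_rate = 0.01  # illustrative value
train_ops = []
for k, param in enumerate(param_list):
    # Average the per-example gradients and take one SGD step.
    mean_grad = tf.reduce_mean(grads[k].stack(), axis=0)
    train_ops.append(tf.assign_sub(param, learning_rate * mean_grad))
train_step = tf.group(*train_ops)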
Example #4
def model(image):
    w1_1 = fun.weight_variable([3, 3, 1, 64])
    b1_1 = fun.bias_variable([64])
    conv1_1 = fun.conv2d(image, w1_1, b1_1)
    relu1_1 = fun.relu(conv1_1)
    pool1 = fun.avg_pool(relu1_1)

    w2_1 = fun.weight_variable([3, 3, 64, 128])
    b2_1 = fun.bias_variable([128])
    conv2_1 = fun.conv2d(pool1, w2_1, b2_1)
    relu2_1 = fun.relu(conv2_1)

    w2_2 = fun.weight_variable([3, 3, 128, 128])
    b2_2 = fun.bias_variable([128])
    conv2_2 = fun.conv2d(relu2_1, w2_2, b2_2)
    relu2_2 = fun.relu(conv2_2)
    pool2 = fun.avg_pool(relu2_2)

    w3_1 = fun.weight_variable([3, 3, 128, 64])
    b3_1 = fun.bias_variable([64])
    conv3_1 = fun.conv2d(pool2, w3_1, b3_1)
    relu3_1 = fun.relu(conv3_1)

    w3_2 = fun.weight_variable([3, 3, 64, 64])
    b3_2 = fun.bias_variable([64])
    conv3_2 = fun.conv2d(relu3_1, w3_2, b3_2)
    relu3_2 = fun.relu(conv3_2)

    w3_3 = fun.weight_variable([3, 3, 64, 64])
    b3_3 = fun.bias_variable([64])
    conv3_3 = fun.conv2d(relu3_2, w3_3, b3_3)
    relu3_3 = fun.relu(conv3_3)

    w3_4 = fun.weight_variable([3, 3, 64, 32])
    b3_4 = fun.bias_variable([32])
    conv3_4 = fun.conv2d(relu3_3, w3_4, b3_4)
    relu3_4 = fun.relu(conv3_4)
    pool3 = fun.avg_pool(relu3_4)

    w4_1 = fun.weight_variable([3, 3, 32, 32])
    b4_1 = fun.bias_variable([32])
    conv4_1 = fun.conv2d(pool3, w4_1, b4_1)
    relu4_1 = fun.relu(conv4_1)

    w4_2 = fun.weight_variable([3, 3, 32, 32])
    b4_2 = fun.bias_variable([32])
    conv4_2 = fun.conv2d(relu4_1, w4_2, b4_2)
    relu4_2 = fun.relu(conv4_2)

    w4_3 = fun.weight_variable([3, 3, 32, 32])
    b4_3 = fun.bias_variable([32])
    conv4_3 = fun.conv2d(relu4_2, w4_3, b4_3)
    relu4_3 = fun.relu(conv4_3)

    w4_4 = fun.weight_variable([3, 3, 32, 32])
    b4_4 = fun.bias_variable([32])
    conv4_4 = fun.conv2d(relu4_3, w4_4, b4_4)
    relu4_4 = fun.relu(conv4_4)
    pool4 = fun.avg_pool(relu4_4)

    w5_1 = fun.weight_variable([3, 3, 32, 64])
    b5_1 = fun.bias_variable([64])
    conv5_1 = fun.conv2d(pool4, w5_1, b5_1)
    relu5_1 = fun.relu(conv5_1)

    w5_2 = fun.weight_variable([3, 3, 64, 64])
    b5_2 = fun.bias_variable([64])
    conv5_2 = fun.conv2d(relu5_1, w5_2, b5_2)
    relu5_2 = fun.relu(conv5_2)
    # The source snippet is truncated here; returning the last activation is
    # an assumed completion.
    return relu5_2
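
The fun module is not shown in this example. A plausible sketch of the helpers it would need, in the classic TensorFlow tutorial style (the initialisation constants are assumptions):

import tensorflow as tf

def weight_variable(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, w, b):
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME') + b

def relu(x):
    return tf.nn.relu(x)

def avg_pool(x):
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')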
Example #5
def forward(self, input):
    return conv2d(input, self.weight, self.bias, self.stride, self.padding)
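
This forward applies the functional convolution with the module's own parameters. A minimal sketch of a class it could belong to, assuming PyTorch (the class name, parameter shapes and initialisation are illustrative):

import torch
import torch.nn as nn
from torch.nn.functional import conv2d

class MyConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0):
        super().__init__()
        self.stride = stride
        self.padding = padding
        # Hypothetical initialisation; real code would use a proper scheme.
        self.weight = nn.Parameter(
            torch.randn(out_channels, in_channels, kernel_size, kernel_size))
        self.bias = nn.Parameter(torch.zeros(out_channels))

    def forward(self, input):
        return conv2d(input, self.weight, self.bias, self.stride, self.padding)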
Example #6
# We are going to model our IRIS data as a 2 x 2 x 1 image,
# since we have 4 features. The first two dimensions are the patch
# size, the third is the number of input channels, and the last
# dimension is how many output channels we have. We also initialize
# a bias vector for each output channel.
W_conv1 = weight_variable([1, 1, 1, 32])
b_conv1 = bias_variable([32])

# We reshape x to a 4D tensor. The second and third dimensions are the
# image height and width, and the fourth is the number of color channels.
x_image = tf.reshape(x, [-1, 2, 2, 1])

# Convolve x_image with the weight tensor, apply the ReLU function,
# and then max-pool. Note that in this specific case we do not max-pool,
# because max-pooling works over 2x2 blocks and our entire "image" is one 2x2 block.
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = h_conv1

# SECOND CONVOLUTIONAL LAYER
W_conv2 = weight_variable([1, 1, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = h_conv2

# DENSELY CONNECTED LAYER
# We add a fully-connected layer with 1024 neurons to allow processing
# on the entire image. We reshape the tensor from the pooling layer into 
# a batch of vectors, multiply by a weight matrix, add a bias, and apply a 
# ReLU.
W_fc1 = weight_variable([2 * 2 * 64, 1024])
b_fc1 = bias_variable([1024])
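
The snippet ends right after defining the dense-layer variables. In the standard tutorial pattern the layer would then be applied roughly like this (a hedged continuation, not part of the source):

# Flatten the conv output and apply the fully connected layer with ReLU.
h_pool2_flat = tf.reshape(h_pool2, [-1, 2 * 2 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)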