def CNNLight(X, Training, Scope):
    """Seven-layer CNN feature extractor; the (2, 1) pools halve height only,
    preserving width as the sequence axis."""

    with tf.variable_scope(Scope):

        ConvLayer1 = ConvLayer(X, 1, 64, Training, 'ConvLayer1')

        MPool1 = max_pool(ConvLayer1, ksize=(2, 2), stride=(2, 2))

        ConvLayer2 = ConvLayer(MPool1, 64, 128, Training, 'ConvLayer2')

        MPool2 = max_pool(ConvLayer2, ksize=(2, 2), stride=(2, 2))

        ConvLayer3 = ConvLayer(MPool2, 128, 256, Training, 'ConvLayer3')

        ConvLayer4 = ConvLayer(ConvLayer3, 256, 256, Training, 'ConvLayer4')

        MPool4 = max_pool(ConvLayer4, ksize=(2, 1), stride=(2, 1))

        ConvLayer5 = ConvLayer(MPool4, 256, 512, Training, 'ConvLayer5')

        ConvLayer6 = ConvLayer(ConvLayer5, 512, 512, Training, 'ConvLayer6')

        MPool6 = max_pool(ConvLayer6, ksize=(2, 1), stride=(2, 1))

        ConvLayer7 = ConvLayer(MPool6, 512, 512, Training, 'ConvLayer7')

        MPool7 = max_pool(ConvLayer7, ksize=(2, 1), stride=(2, 1))

        # Swap height and width so width becomes the leading spatial axis,
        # then flatten to [batch * FV, NFeatures] for the sequence model.
        MPool7_T = tf.transpose(MPool7, perm=[0, 2, 1, 3])

        MPool7_T_RSH = tf.reshape(MPool7_T, [-1, FV, LastFilters])

        return tf.reshape(MPool7_T_RSH, [-1, NFeatures])
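The ConvLayer and max_pool helpers above are defined elsewhere in this project. A minimal sketch of plausible definitions, assuming a 3x3 'SAME' convolution with batch normalization (driven by the Training flag) and ReLU; the kernel size, initializer, and batch-norm choice are assumptions, not the original code:

import tensorflow as tf

def ConvLayer(X, InFilters, OutFilters, Training, Scope):
    # Assumed: 3x3 'SAME' convolution + batch norm + ReLU.
    with tf.variable_scope(Scope):
        W = tf.get_variable('W', [3, 3, InFilters, OutFilters],
                            initializer=tf.glorot_uniform_initializer())
        Conv = tf.nn.conv2d(X, W, strides=[1, 1, 1, 1], padding='SAME')
        BN = tf.layers.batch_normalization(Conv, training=Training)
        return tf.nn.relu(BN)

def max_pool(X, ksize, stride):
    # ksize and stride are (height, width) pairs; 'SAME' padding assumed.
    return tf.nn.max_pool(X, ksize=[1, ksize[0], ksize[1], 1],
                          strides=[1, stride[0], stride[1], 1],
                          padding='SAME')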
Example #2
 def net_real(self, x, y, training=True, get_activ=False):
     ##
     ## mean subtract the image inputs
     use_eval_mean = (not training and self.eval_mean)
     mean_to_subtract = (self.dataset_synth.val_image_channel_mean if use_eval_mean
                         else self.dataset_real.train_image_channel_mean)
     x = tf.subtract(x, mean_to_subtract)
     # VGG16 expects BGR channel order, so reverse the RGB channels
     conv1_1 = conv2d_relu2(x[..., ::-1], name='conv1_1', training=False)
     conv1_2 = conv2d_relu2(conv1_1, name='conv1_2', training=False)
     pool1 = max_pool(conv1_2, 'pool1')
     ##
     conv2_1 = conv2d_relu2(pool1, name='conv2_1', training=False)
     conv2_2 = conv2d_relu2(conv2_1, name='conv2_2', training=False)
     ##
     pool2 = max_pool(conv2_2, 'pool2')
     ##
     conv3_1 = conv2d_relu2(pool2, name='conv3_1', training=False)
     conv3_2 = conv2d_relu2(conv3_1, name='conv3_2', training=False)
     conv3_3 = conv2d_relu2(conv3_2, name='conv3_3', training=False)
     pool3 = max_pool(conv3_3, 'pool3')
     ####
     #if get_activ:
     #    return conv1_1, conv1_2, conv2_1, conv2_2, conv3_1, conv3_2, conv3_3
     #    ##
     conv4_1 = conv2d_relu2(pool3, name='conv4_1', training=False)
     conv4_2 = conv2d_relu2(conv4_1, name='conv4_2', training=False)
     conv4_3 = conv2d_relu2(conv4_2, name='conv4_3', training=False)
     #pool4 = max_pool(conv4_3, 'pool4')
     ###
     if get_activ:
         return conv1_1, conv1_2, conv2_1, conv2_2, conv3_1, conv3_2, conv3_3, conv4_1, conv4_2, conv4_3
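conv2d_relu2 is a project helper that is not shown; the fixed VGG16 layer names and training=False suggest it builds each convolution from pretrained, frozen VGG16 weights. A hedged sketch under that assumption (the vgg_weights dict of pretrained numpy kernel/bias pairs is hypothetical):

import tensorflow as tf

def conv2d_relu2(x, name, training):
    # Hypothetical lookup of pretrained VGG16 parameters by layer name;
    # trainable=training freezes the layer when training=False.
    kernel, bias = vgg_weights[name]
    with tf.variable_scope(name):
        W = tf.get_variable('W', initializer=tf.constant(kernel),
                            trainable=training)
        b = tf.get_variable('b', initializer=tf.constant(bias),
                            trainable=training)
        return tf.nn.relu(
            tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') + b)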
Example #3
def classifier(x):
    #parameters for convolution kernels
    IMG_DEPTH = 1
    C1_KERNEL_SIZE, C2_KERNEL_SIZE, C3_KERNEL_SIZE = 5, 5, 5
    C1_OUT_CHANNELS, C2_OUT_CHANNELS, C3_OUT_CHANNELS = 6, 16, 120
    C1_STRIDES, C2_STRIDES, C3_STRIDES = 1, 1, 1

    P1_SIZE, P2_SIZE = 2, 2
    P1_STRIDE, P2_STRIDE = 2, 2

    F4_SIZE, F5_SIZE = 84, 10

    C1_kernel = util.weights(
        [C1_KERNEL_SIZE, C1_KERNEL_SIZE, IMG_DEPTH, C1_OUT_CHANNELS], 0.1,
        'C1_kernel')
    C2_kernel = util.weights(
        [C2_KERNEL_SIZE, C2_KERNEL_SIZE, C1_OUT_CHANNELS, C2_OUT_CHANNELS],
        0.1, 'C2_kernel')
    C3_kernel = util.weights(
        [C3_KERNEL_SIZE, C3_KERNEL_SIZE, C2_OUT_CHANNELS, C3_OUT_CHANNELS],
        0.1, 'C3_kernel')

    C1_bias = util.bias([C1_OUT_CHANNELS], 'C1_bias')
    C2_bias = util.bias([C2_OUT_CHANNELS], 'C2_bias')
    C3_bias = util.bias([C3_OUT_CHANNELS], 'C3_bias')

    #LeNet-5 structure
    C1 = util.convLayer(x, C1_kernel, C1_STRIDES, 'SAME')
    ReLU1 = tf.nn.relu(C1 + C1_bias)
    P1 = util.max_pool(ReLU1, P1_SIZE, P1_STRIDE)

    C2 = util.convLayer(P1, C2_kernel, C2_STRIDES, 'SAME')
    ReLU2 = tf.nn.relu(C2 + C2_bias)
    P2 = util.max_pool(ReLU2, P2_SIZE, P2_STRIDE)

    C3 = util.convLayer(P2, C3_kernel, C3_STRIDES, 'SAME')
    ReLU3 = tf.nn.relu(C3 + C3_bias)

    num_F4_in = int(ReLU3.shape[1] * ReLU3.shape[2] * ReLU3.shape[3])
    F4_in = tf.reshape(ReLU3, [-1, num_F4_in])

    F4_weights = util.weights([num_F4_in, F4_SIZE], 0.1, 'F4_weights')
    F4_bias = util.bias([F4_SIZE], 'F4_bias')
    F4 = tf.matmul(F4_in, F4_weights)
    ReLU4 = tf.nn.relu(F4 + F4_bias)

    F5_weights = util.weights([F4_SIZE, F5_SIZE], 0.1, 'F5_weights')
    F5_bias = util.bias([F5_SIZE], 'F5_bias')
    F5 = tf.matmul(ReLU4, F5_weights) + F5_bias

    return F5
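The util helpers used here (weights, bias, convLayer, max_pool) are not shown. A minimal sketch of definitions consistent with the call sites, assuming truncated-normal weight initialization at the given stddev and a small constant bias; the bodies are assumptions:

import tensorflow as tf

def weights(shape, stddev, name):
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)

def bias(shape, name):
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)

def convLayer(x, kernel, stride, padding):
    return tf.nn.conv2d(x, kernel, strides=[1, stride, stride, 1],
                        padding=padding)

def max_pool(x, size, stride):
    return tf.nn.max_pool(x, ksize=[1, size, size, 1],
                          strides=[1, stride, stride, 1], padding='SAME')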
Example #4
def CNN(X, Training, Scope):
	"""Thirteen-layer, VGG-style CNN feature extractor; the (2, 1) pools
	halve height only, preserving width as the sequence axis."""

	with tf.variable_scope(Scope):

		ConvLayer1 = ConvLayer(X, 1, 64, Training, 'ConvLayer1')

		ConvLayer2 = ConvLayer(ConvLayer1, 64, 64, Training, 'ConvLayer2')

		MPool2 = max_pool(ConvLayer2, ksize=(2, 2), stride=(2, 2))

		ConvLayer3 = ConvLayer(MPool2, 64, 128, Training, 'ConvLayer3')

		ConvLayer4 = ConvLayer(ConvLayer3, 128, 128, Training, 'ConvLayer4')

		MPool4 = max_pool(ConvLayer4, ksize=(2, 2), stride=(2, 2))

		ConvLayer5 = ConvLayer(MPool4, 128, 256, Training, 'ConvLayer5')

		ConvLayer6 = ConvLayer(ConvLayer5, 256, 256, Training, 'ConvLayer6')

		ConvLayer7 = ConvLayer(ConvLayer6, 256, 256, Training, 'ConvLayer7')

		MPool7 = max_pool(ConvLayer7, ksize=(2, 1), stride=(2, 1))

		ConvLayer8 = ConvLayer(MPool7, 256, 512, Training, 'ConvLayer8')

		ConvLayer9 = ConvLayer(ConvLayer8, 512, 512, Training, 'ConvLayer9')

		ConvLayer10 = ConvLayer(ConvLayer9, 512, 512, Training, 'ConvLayer10')

		MPool10 = max_pool(ConvLayer10, ksize=(2, 1), stride=(2, 1))

		ConvLayer11 = ConvLayer(MPool10, 512, 512, Training, 'ConvLayer11')

		ConvLayer12 = ConvLayer(ConvLayer11, 512, 512, Training, 'ConvLayer12')

		ConvLayer13 = ConvLayer(ConvLayer12, 512, LastFilters, Training, 'ConvLayer13')

		MPool13 = max_pool(ConvLayer13, ksize=(2, 1), stride=(2, 1))

		# Swap height and width so width becomes the leading spatial axis,
		# then flatten to [batch * FV, NFeatures] for the sequence model.
		MPool13_T = tf.transpose(MPool13, perm=[0, 2, 1, 3])

		MPool13_T_RSH = tf.reshape(MPool13_T, [-1, FV, LastFilters])

		cnn_res = tf.reshape(MPool13_T_RSH, [-1, NFeatures])

		return cnn_res
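A minimal usage sketch for CNN. The module globals are not shown in the original; assuming 32x128 grayscale inputs, the two (2, 2) pools and three (2, 1) pools reduce height by 32x and width by 4x, which makes LastFilters = 512, FV = 32 (the output width), and NFeatures = 512 self-consistent:

import tensorflow as tf

# Assumed module globals (not shown in the original):
LastFilters = 512
NFeatures = 512  # feature size per time step
FV = 32          # output width: 128 / (2 * 2)

X = tf.placeholder(tf.float32, [None, 32, 128, 1])  # assumed input size
Training = tf.placeholder(tf.bool)
Features = CNN(X, Training, 'CNN')  # shape: [batch * FV, NFeatures]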
Example #5
def classifier(x):
    #parameters for convolution kernels
    IMG_DEPTH = 1
    C1_KERNEL_SIZE, C2_KERNEL_SIZE, C3_KERNEL_SIZE, C4_KERNEL_SIZE, C5_KERNEL_SIZE = 11, 5, 3, 3, 3
    C1_OUT_CHANNELS, C2_OUT_CHANNELS, C3_OUT_CHANNELS, C4_OUT_CHANNELS, C5_OUT_CHANNELS = 96, 256, 384, 384, 256
    #C1_OUT_CHANNELS,C2_OUT_CHANNELS,C3_OUT_CHANNELS,C4_OUT_CHANNELS,C5_OUT_CHANNELS=48,128,197,197,128
    C1_STRIDES, C2_STRIDES, C3_STRIDES, C4_STRIDES, C5_STRIDES = 1, 1, 1, 1, 1
    P1_SIZE, P2_SIZE, P5_SIZE = 3, 3, 3
    P1_STRIDE, P2_STRIDE, P5_STRIDE = 2, 2, 2
    F6_SIZE, F7_SIZE = 4096, 4096
    F8_SIZE = 10

    #convolution kernels and bias
    C1_kernel = util.weights(
        [C1_KERNEL_SIZE, C1_KERNEL_SIZE, IMG_DEPTH, C1_OUT_CHANNELS], 0.01,
        'C1_kernel')
    C2_kernel = util.weights(
        [C2_KERNEL_SIZE, C2_KERNEL_SIZE, C1_OUT_CHANNELS, C2_OUT_CHANNELS],
        0.01, 'C2_kernel')
    C3_kernel = util.weights(
        [C3_KERNEL_SIZE, C3_KERNEL_SIZE, C2_OUT_CHANNELS, C3_OUT_CHANNELS],
        0.01, 'C3_kernel')
    C4_kernel = util.weights(
        [C4_KERNEL_SIZE, C4_KERNEL_SIZE, C3_OUT_CHANNELS, C4_OUT_CHANNELS],
        0.01, 'C4_kernel')
    C5_kernel = util.weights(
        [C5_KERNEL_SIZE, C5_KERNEL_SIZE, C4_OUT_CHANNELS, C5_OUT_CHANNELS],
        0.01, 'C5_kernel')

    C1_bias = util.bias([C1_OUT_CHANNELS], 'C1_bias')
    C2_bias = util.bias([C2_OUT_CHANNELS], 'C2_bias')
    C3_bias = util.bias([C3_OUT_CHANNELS], 'C3_bias')
    C4_bias = util.bias([C4_OUT_CHANNELS], 'C4_bias')
    C5_bias = util.bias([C5_OUT_CHANNELS], 'C5_bias')

    #AlexNet network

    #Conv layer 1
    C1 = util.convLayer(x, C1_kernel, C1_STRIDES, 'SAME')
    ReLU1 = tf.nn.relu(C1 + C1_bias)
    P1 = util.max_pool(ReLU1, P1_SIZE, P1_STRIDE)
    NORM1 = tf.nn.local_response_normalization(P1)

    #Conv layer 2
    C2 = util.convLayer(NORM1, C2_kernel, C2_STRIDES, 'SAME')
    ReLU2 = tf.nn.relu(C2 + C2_bias)
    P2 = util.max_pool(ReLU2, P2_SIZE, P2_STRIDE)
    NORM2 = tf.nn.local_response_normalization(P2)

    #Conv layer 3
    C3 = util.convLayer(NORM2, C3_kernel, C3_STRIDES, 'SAME')
    ReLU3 = tf.nn.relu(C3 + C3_bias)

    #Conv layer 4
    C4 = util.convLayer(ReLU3, C4_kernel, C4_STRIDES, 'SAME')
    ReLU4 = tf.nn.relu(C4 + C4_bias)

    #Conv layer 5
    C5 = util.convLayer(ReLU4, C5_kernel, C5_STRIDES, 'SAME')
    ReLU5 = tf.nn.relu(C5 + C5_bias)
    P5_pre = util.max_pool(ReLU5, P5_SIZE, P5_STRIDE)

    num_P5_out = int(P5_pre.shape[1] * P5_pre.shape[2] * P5_pre.shape[3])
    P5 = tf.reshape(P5_pre, [-1, num_P5_out])

    #Fully connected layer 6
    F6_weights = util.weights([num_P5_out, F6_SIZE], 0.01, 'F6_weights')
    F6_bias = util.bias([F6_SIZE], 'F6_bias')
    F6 = tf.matmul(P5, F6_weights)
    ReLU6 = tf.nn.relu(F6 + F6_bias)
    DROP6 = tf.nn.dropout(ReLU6, 0.5)

    #Fully connected layer 7
    F7_weights = util.weights([F6_SIZE, F7_SIZE], 0.01, 'F7_weights')
    F7_bias = util.bias([F7_SIZE], 'F7_bias')
    F7 = tf.matmul(DROP6, F7_weights)
    ReLU7 = tf.nn.relu(F7 + F7_bias)
    DROP7 = tf.nn.dropout(ReLU7, 0.5)

    #Fully connected layer 8
    F8_weights = util.weights([F7_SIZE, F8_SIZE], 0.01, 'F8_weights')
    F8_bias = util.bias([F8_SIZE], 'F8_bias')
    logits = tf.matmul(DROP7, F8_weights) + F8_bias

    return logits
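One caveat worth noting: tf.nn.dropout(..., 0.5) above passes a constant keep probability, so dropout also fires at evaluation time. A common TF1 remedy, sketched with an illustrative keep_prob placeholder (not part of the original code):

import tensorflow as tf

keep_prob = tf.placeholder_with_default(1.0, shape=[], name='keep_prob')

# Inside classifier(), the constants would become:
#   DROP6 = tf.nn.dropout(ReLU6, keep_prob)
#   DROP7 = tf.nn.dropout(ReLU7, keep_prob)
# and training steps would feed keep_prob=0.5:
#   sess.run(train_op, feed_dict={x: batch_x, keep_prob: 0.5})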
Example #6
file_writer = tf.summary.FileWriter('LOGS', sess.graph)

# Create placeholders for independent and dependent variables once a batch has been selected.
with tf.name_scope('Input_Image'):
	x = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='Image')  # Independent variables.
	# Reshape to amenable shape.
	# x_image = tf.reshape(x, [-1, windowSize[0], windowSize[1], 1])
with tf.name_scope('Input_Synapse'):
	y_syn = tf.placeholder(tf.float32, shape=[None, 2])  # Target values.

with tf.name_scope('First_Layer'):
	# Create first convolutional layer. (No pooling.)
	W_conv1 = util.weight_variable(firstLayerDimensions, "w_conv_1")  # Weights in first layer.
	b_conv1 = util.bias_variable([firstLayerDimensions[3]], "b_conv_1")  # Biases in first layer.
	h_conv1 = tf.nn.relu(util.conv2d(x, W_conv1, valid=True, stride=1) + b_conv1)  # Convolve ('VALID' padding) and apply ReLU.
	h_pool1 = util.max_pool(h_conv1, 1) #, kernelWidth=2)

with tf.name_scope('Second_Layer'):
	# Create second convolutional layer.
	W_conv2 = util.weight_variable(secondLayerDimensions, "w_conv_2")  # Weights in second layer.
	b_conv2 = util.bias_variable([secondLayerDimensions[3]], "b_conv_2")  # Biases in second layer.
	h_conv2 = tf.nn.relu(util.atrous_conv2d(h_pool1, W_conv2, valid=True, rate=2) + b_conv2)  # Perform atrous convolution ('VALID' padding) and apply ReLU.
	h_pool2 = util.atrous_max_pool(h_conv2, mask_size=2, rate=2)

with tf.name_scope('Third_Layer'):
	# Create third convolutional layer.
	W_conv3 = util.weight_variable(thirdLayerDimensions, "w_conv_3")  # Weights in third layer.
	b_conv3 = util.bias_variable([thirdLayerDimensions[3]], "b_conv_3")  # Biases in third layer.
	h_conv3 = tf.nn.relu(util.atrous_conv2d(h_pool2, W_conv3, valid=True, rate=4) + b_conv3)  # Perform atrous convolution ('VALID' padding) and apply ReLU.
	h_pool3 = util.atrous_max_pool(h_conv3, mask_size=2, rate=4)
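util.atrous_max_pool is not shown; dilated (atrous) max pooling can be built on tf.nn.pool, which takes a dilation_rate argument. A minimal sketch of a plausible implementation; the 'VALID' padding choice is an assumption:

import tensorflow as tf

def atrous_max_pool(x, mask_size, rate):
    # Max-pool over a dilated window; tf.nn.pool defaults to stride 1,
    # which it requires whenever dilation_rate > 1.
    return tf.nn.pool(x,
                      window_shape=[mask_size, mask_size],
                      pooling_type='MAX',
                      padding='VALID',
                      dilation_rate=[rate, rate])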