def convolve(self, image, training, keep_prob):
    result = image
    result = layer.batch_normalization(result, training)
    result = layer.conv(result, 1, 16, width=5, stride=2, padding="VALID")
    result = tf.nn.tanh(result)
    result = layer.conv(result, 16, 16, width=3, stride=2, padding="VALID")
    result = tf.nn.tanh(result)
    result = layer.conv(result, 16, 32, width=3, padding="VALID")
    result = tf.nn.tanh(result)
    result = layer.conv(result, 32, 32, width=3, padding="VALID")
    result = tf.nn.tanh(result)
    result = tf.nn.dropout(result, keep_prob)
    result = layer.conv_relu(result, 32, 10, width=1, padding="VALID")
    return result
def buildResNet(self, inputs, n, is_training):
    filters = [16, 32, 64]
    inputs = tf.reshape(inputs, shape=(-1, image_width, image_height, image_depth))

    # Conv1_x
    x = layer.conv(inputs, [3, 3, 3, 16], strides=1, name="Conv1")

    # Conv2_x
    #x = layer.maxpool(x, win_size=3, strides=2, name="Conv2_Pool1")
    for i in range(n):
        x = layer.res_block(x, 16, is_training=is_training, name="Conv2_Res" + str(i))

    # Conv3_x
    for i in range(n):
        x = layer.res_block(x, 32, is_training=is_training, name="Conv3_Res" + str(i))

    # Conv4_x
    for i in range(n):
        x = layer.res_block(x, 64, is_training=is_training, name="Conv4_Res" + str(i))

    #x = layer.avgpool(x, win_size=7, strides=7, name="Global_avgpool")
    x = layer.avgpool(x, win_size=8, strides=8, name="Global_avgpool")

    reshaped_x = tf.reshape(x, [-1, filters[2]])
    x = layer.fc(reshaped_x, output_dim=num_classes, name="FC")
    return x
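# Hedged usage sketch for buildResNet above, assuming it lives on a model object
# `net`, that image_width/image_height/image_depth/num_classes are module-level
# globals set for CIFAR-10 (32, 32, 3, 10), and that each res_block holds two
# 3x3 convs. Under those assumptions depth = 6n + 2, so n = 3 gives a 20-layer
# network. The placeholder names here are illustrative only.
images = tf.placeholder(tf.float32, shape=(None, 32 * 32 * 3), name="images")
is_training = tf.placeholder(tf.bool, name="is_training")
logits = net.buildResNet(images, n=3, is_training=is_training)  # -> (batch, 10)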
def decode(image, layers_in, layers_out=0, width=5, reuse=True):
    with tf.variable_scope("deconv" + str(layers_in), reuse=reuse):
        # Default to halving the channel count (integer division keeps it an int).
        layers_out = layers_in // 2 if layers_out == 0 else layers_out
        image = layer.upscaleFlat(image, scale=2)
        image = layer.conv(image, layers_in, layers_out, width=width, name="stage1")
        logits = image
        image = tf.tanh(image)
    return image, logits
def convolve(self, image, training, keep_prob):
    result = layer.batch_normalization(image, training)
    result = layer.conv_relu(result, 1, 18, width=5, padding="VALID")
    result = layer.max_pool(result)  # 12
    result = layer.conv_relu(result, 18, 24, width=5, padding="VALID")
    result = layer.max_pool(result)  # 4
    result = tf.nn.dropout(result, keep_prob)
    return layer.conv(result, 24, 10, width=4, padding="VALID")
def convolve(self, image, training, keep_prob):
    result = layer.conv_relu(image, 1, 18, width=5, stride=2, padding="VALID")
    return layer.conv(result, 18, 10, width=12, padding="VALID")
def convolve(self, image, training, keep_prob):
    result = layer.batch_normalization(image, training)
    result = layer.conv_relu(result, 1, 18, width=5)
    result = layer.max_pool(result)  # 14
    result = tf.nn.relu(drop_conv(keep_prob, result, 18, 24, width=5))
    result = layer.max_pool(result)  # 7
    result = tf.nn.relu(drop_conv(keep_prob, result, 24, 32, width=5, padding="VALID"))
    return layer.conv(result, 32, 10, width=3, padding="VALID")
def buildNet(iSize, layerDescription, seedNum=None):
    # in:
    #   tuple iSize = (ix, iy, iz): input size
    #   list layerDescription = [(type, arg)]: config of layers
    #   uint seedNum: init np.random
    # out:
    #   list layerList = [layer.layer]: network layers

    # init random seed
    if seedNum is not None:
        tools.setNpSeed(seedNum)

    # build layers
    tx, ty, tz = iSize
    layerList = []
    for layerType, layerArg in layerDescription:
        if layerType == 'c':
            kernelNum, padSize, stepLen, kernelSize = layerArg
            ox = (tx - kernelSize[0] + 2 * padSize) // stepLen + 1
            oy = (ty - kernelSize[1] + 2 * padSize) // stepLen + 1
            oz = kernelNum
            layerList.append(
                layer.conv((tx, ty, tz), (ox, oy, oz), kernelNum, padSize,
                           stepLen, kernelSize))
        elif layerType == 'a':
            func, = layerArg
            ox, oy, oz = tx, ty, tz
            layerList.append(layer.active((tx, ty, tz), (ox, oy, oz), func))
        elif layerType == 'p':
            stepLen, func = layerArg
            ox = tx // stepLen
            oy = ty // stepLen
            oz = tz
            layerList.append(
                layer.pool((tx, ty, tz), (ox, oy, oz), stepLen, func))
        elif layerType == 'f':
            kernelNum, = layerArg
            ox, oy, oz = 1, 1, kernelNum
            layerList.append(layer.fullC((tx, ty, tz), (ox, oy, oz), kernelNum))
        else:
            print('E: network.buildNet: wrong layerType')
        tx, ty, tz = ox, oy, oz
    return layerList
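# A hypothetical layerDescription for buildNet above, following the tuple layouts
# it unpacks: ('c', (kernelNum, padSize, stepLen, kernelSize)), ('a', (func,)),
# ('p', (stepLen, func)), ('f', (kernelNum,)). The 'relu' and 'max' identifiers
# are assumptions about what layer.active/layer.pool accept; the spatial sizes in
# the comments follow the output formulas used in buildNet.
lenetLike = [
    ('c', (6, 2, 1, (5, 5))),    # 28x28x1 -> 28x28x6
    ('a', ('relu',)),
    ('p', (2, 'max')),           # -> 14x14x6
    ('c', (16, 0, 1, (5, 5))),   # -> 10x10x16
    ('a', ('relu',)),
    ('p', (2, 'max')),           # -> 5x5x16
    ('f', (10,)),                # -> 1x1x10
]
layerList = buildNet((28, 28, 1), lenetLike, seedNum=42)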
def convolve(self, image, training, keep_prob):
    result = layer.batch_normalization(image, training)
    result = layer.conv_relu(result, 1, 18, width=5)
    result = layer.resnet_block(result, 18, 3, training, momentum=0.99)
    result = layer.max_pool(result)  # 14
    result = layer.resnet_block(result, 18, 3, training, momentum=0.99)
    result = layer.conv_relu(result, 18, 24, width=5)
    result = layer.resnet_block(result, 24, 3, training, momentum=0.99)
    result = layer.max_pool(result)  # 7
    result = layer.resnet_block(result, 24, 3, training, momentum=0.99)
    result = layer.conv_relu(result, 24, 32, width=5, padding="VALID")
    result = layer.resnet_block(result, 32, 3, training, momentum=0.99)
    result = tf.nn.dropout(result, keep_prob)
    return layer.conv(result, 32, 10, width=3, padding="VALID")
def build(input, input_chanel, num_output):
    # num_output is currently unused; the concat always emits 64+128+32+32 = 256 channels.
    # 1x1 branch
    section1 = layer.conv(input, input_chanel=input_chanel, num_output=64,
                          kernel_size=1, stride=1, padding='SAME')
    # 1x1 -> 3x3 branch
    section2 = layer.conv(input, input_chanel=input_chanel, num_output=96,
                          kernel_size=1, stride=1, padding='SAME')
    section2 = layer.conv(section2, input_chanel=96, num_output=128,
                          kernel_size=3, stride=1, padding='SAME')
    # 1x1 -> 5x5 branch
    section3 = layer.conv(input, input_chanel=input_chanel, num_output=16,
                          kernel_size=1, stride=1, padding='SAME')
    section3 = layer.conv(section3, input_chanel=16, num_output=32,
                          kernel_size=5, stride=1, padding='SAME')
    # pool -> 1x1 branch
    section4 = layer.max_pooling(input)
    section4 = layer.conv(section4, input_chanel=input_chanel, num_output=32,
                          kernel_size=1, stride=1, padding='SAME')
    return tf.concat([section1, section2, section3, section4], 3)
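# Hedged example of stacking two of the Inception-style modules defined above.
# The channel-wise concat of the four branches yields 64+128+32+32 = 256
# channels, so the next call's input_chanel must be 256. `feature_map` and the
# initial input_chanel of 192 are illustrative assumptions.
x = build(feature_map, input_chanel=192, num_output=256)  # -> H x W x 256
x = build(x, input_chanel=256, num_output=256)            # -> H x W x 256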
def convolve(self, image, training, keep_prob):
    result = layer.conv_relu(image, 1, 24, width=28, padding="VALID")
    result = tf.nn.dropout(result, keep_prob)
    return layer.conv(result, 24, 10, width=1, padding="VALID")
def encode(image, layers_in, layers_out=0, width=5, reuse=True):
    with tf.variable_scope("conv" + str(layers_in), reuse=reuse):
        layers_out = layers_in * 2 if layers_out == 0 else layers_out
        image = layer.conv(image, layers_in, layers_out, stride=2, width=width, name="stage1")
        image = tf.tanh(image)
    return image
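# Minimal autoencoder-style sketch chaining encode (above) with the matching
# decode defined earlier; assumes a single-channel input tensor `x` and passes
# reuse=False so the per-channel-count scopes ("conv1", "conv2", "deconv4",
# "deconv2") are created on the first pass. Each encode halves the spatial size
# and doubles the channels; each decode upscales by 2 and halves the channels.
h = encode(x, 1, reuse=False)               # -> 2 channels, half resolution
h = encode(h, 2, reuse=False)               # -> 4 channels, quarter resolution
h, _ = decode(h, 4, reuse=False)            # -> 2 channels, half resolution
recon, logits = decode(h, 2, reuse=False)   # -> 1 channel, input resolution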
def convolve(self, image, training, keep_prob):
    return layer.conv(image, 1, 10, width=28, padding="VALID")