def darknet53():
    # Darknet-53 backbone: a 3x3 stem conv followed by five residual stages
    # with 1, 2, 8, 8 and 4 darknet units respectively; each stage doubles the
    # channel count and halves the spatial resolution.
    nn.conv(1, 32, 3, 1, 1, 'leaky')
    darknet_block(32 * ratio, 1)
    darknet_block(64 * ratio, 2)
    darknet_block(128 * ratio, 8)
    darknet_block(256 * ratio, 8)
    darknet_block(512 * ratio, 4)
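# Downsampling arithmetic (a standalone sketch, not from the source; assumes
# the usual 416x416 YOLOv3 input and ratio == 1): the stem conv above is
# stride 1, and each of the five darknet_block calls opens with a stride-2
# conv, so the feature grid shrinks by a factor of 2**5 = 32.
def backbone_grid(input_size=416, num_stages=5):
    for _ in range(num_stages):  # one stride-2 conv per darknet_block
        input_size //= 2
    return input_size

assert backbone_grid() == 13  # the coarsest YOLOv3 detection grid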
def inference(images, keep_probability, phase_train=True, weight_decay=0.0):
    """Define an inference network for face recognition based on inception
    modules using batch normalization.

    Args:
      images: The images to run inference on, dimensions batch_size x height x width x channels.
      keep_probability: Keep probability for the dropout applied to the prelogits.
      phase_train: True if batch normalization should operate in training mode.
      weight_decay: L2 weight decay applied to the convolution kernels.

    Returns:
      A tuple of the prelogits tensor and a dict of named endpoint tensors.
    """
    images = tf.reshape(images, [-1, IMAGE_SIZE, IMAGE_SIZE, 3])
    endpoints = {}
    net = network.conv(images, 3, 64, 7, 7, 2, 2, 'SAME', 'conv1_7x7',
                       phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['conv1'] = net
    net = network.mpool(net, 3, 3, 2, 2, 'SAME', 'pool1')
    endpoints['pool1'] = net
    net = network.conv(net, 64, 64, 1, 1, 1, 1, 'SAME', 'conv2_1x1',
                       phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['conv2_1x1'] = net
    net = network.conv(net, 64, 192, 3, 3, 1, 1, 'SAME', 'conv3_3x3',
                       phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['conv3_3x3'] = net
    net = network.mpool(net, 3, 3, 2, 2, 'SAME', 'pool3')
    endpoints['pool3'] = net
    net = network.inception(net, 192, 1, 64, 96, 128, 16, 32, 3, 32, 1, 'MAX', 'incept3a',
                            phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['incept3a'] = net
    net = network.inception(net, 256, 1, 64, 96, 128, 32, 64, 3, 64, 1, 'MAX', 'incept3b',
                            phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['incept3b'] = net
    net = network.inception(net, 320, 2, 0, 128, 256, 32, 64, 3, 0, 2, 'MAX', 'incept3c',
                            phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['incept3c'] = net
    net = network.inception(net, 640, 1, 256, 96, 192, 32, 64, 3, 128, 1, 'MAX', 'incept4a',
                            phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['incept4a'] = net
    net = network.inception(net, 640, 1, 224, 112, 224, 32, 64, 3, 128, 1, 'MAX', 'incept4b',
                            phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['incept4b'] = net
    net = network.inception(net, 640, 1, 192, 128, 256, 32, 64, 3, 128, 1, 'MAX', 'incept4c',
                            phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['incept4c'] = net
    net = network.inception(net, 640, 1, 160, 144, 288, 32, 64, 3, 128, 1, 'MAX', 'incept4d',
                            phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['incept4d'] = net
    # weight_decay added here for consistency with the other inception layers.
    net = network.inception(net, 640, 2, 0, 160, 256, 64, 128, 3, 0, 2, 'MAX', 'incept4e',
                            phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['incept4e'] = net
    net = network.inception(net, 1024, 1, 384, 192, 384, 0, 0, 3, 128, 1, 'MAX', 'incept5a',
                            phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['incept5a'] = net
    net = network.inception(net, 896, 1, 384, 192, 384, 0, 0, 3, 128, 1, 'MAX', 'incept5b',
                            phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
    endpoints['incept5b'] = net
    net = network.apool(net, 3, 3, 1, 1, 'VALID', 'pool6')
    endpoints['pool6'] = net
    net = tf.reshape(net, [-1, 896])
    endpoints['prelogits'] = net
    net = tf.nn.dropout(net, keep_probability)
    endpoints['dropout'] = net
    return net, endpoints
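# A hypothetical usage sketch for inference() above, assuming TF1-style graph
# mode and that the repo's `network` helper module is importable; the
# placeholder, the keep probability of 0.8, and the L2-normalization step are
# illustrative additions in the FaceNet style, not taken from the source.
import tensorflow as tf

def build_embedding_graph():
    images_ph = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3], name='input')
    prelogits, endpoints = inference(images_ph, keep_probability=0.8,
                                     phase_train=True, weight_decay=1e-4)
    # Unit-length embeddings are what FaceNet-style losses compare.
    embeddings = tf.nn.l2_normalize(prelogits, axis=1, name='embeddings')
    return images_ph, embeddings, endpoints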
def cnn_text(net_input, sequence_length, vocab_size, embedding_size, stride_h,
             filter_sizes, num_filters, num_classes, keep_prob, l2_lambda,
             is_bn, bn_training):
    # Kim-style text CNN: embed the tokens, run parallel convolutions with
    # several filter heights, max-pool each branch over time, concatenate the
    # pooled features, and classify with a fully connected layer.
    embedding_layer = embedding(net_input, vocab_size, embedding_size, "embedding")
    pools = list()
    for i, filter_size in enumerate(filter_sizes):
        convi = conv(embedding_layer, filter_size, embedding_size, 1, num_filters,
                     stride_h, 1, "conv" + str(i))
        if is_bn:
            convi = batch_norm(convi, bn_training, "bn" + str(i))
        # Max-over-time pooling: the window spans the full conv output height,
        # leaving a single value per filter.
        pooli = max_pool(convi, (sequence_length - filter_size) // stride_h + 1,
                         1, 1, 1, "pool" + str(i))
        pools.append(pooli)
    num_filters_total = num_filters * len(filter_sizes)
    h_pool = concat(pools, 3, "concat")
    h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
    h_drop = dropout(h_pool_flat, keep_prob)
    h_fc = fc(h_drop, num_filters_total, num_classes, l2_lambda, "fc")
    return h_fc
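# Pool-window arithmetic check (standalone sketch, illustrative numbers): with
# VALID padding, a conv of height filter_size and stride stride_h over
# sequence_length rows yields (sequence_length - filter_size)//stride_h + 1
# rows, so the max_pool above spans exactly the conv output and performs
# max-over-time pooling, one value per filter per branch.
def conv_out_rows(sequence_length, filter_size, stride_h=1):
    return (sequence_length - filter_size) // stride_h + 1

assert conv_out_rows(100, 3) == 98
assert conv_out_rows(100, 5) == 96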
def __init__(self):
    super(Discriminator, self).__init__()
    options = {'leaky': True, 'bn': True, 'wn': False, 'pixel': False, 'gdrop': True}
    self.from_feature = layer.linear(feature_size, 64 * 64,
                                     leaky=options['leaky'], wn=options['wn'])
    self.feature_conv = layer.conv(1, 64, 4, 2, 1, **options)  # 64 x 64
    self.from_rgb = layer.conv(nc, 64, 4, 2, 1, leaky=True, bn=False, gdrop=True)  # 32 x 32
    self.conv1 = layer.conv(128, 128, 4, 2, 1, **options)  # 16 x 16
    self.conv2 = layer.conv(128, 256, 4, 2, 1, **options)  # 8 x 8
    self.conv3 = layer.conv(256, 512, 4, 2, 1, **options)  # 4 x 4
    self.conv4 = nn.Sequential(
        # Append a minibatch-stddev statistic channel (512 -> 513) before the
        # final 4x4 conv collapses the map to a single score.
        minibatch_std_concat_layer(),
        layer.conv(513, 1, 4, 1, 0, gdrop=options['gdrop'], only=True))  # 1 x 1
    # self.linear = layer.linear(512, 1, sig=False, wn=options['wn'])
    self.convs = [self.conv1, self.conv2, self.conv3, self.conv4]
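# A minimal PyTorch sketch of minibatch standard-deviation concatenation, the
# reason conv4's input is 513 = 512 + 1 channels. This is an illustrative
# re-implementation in the spirit of Progressive GAN, not the repo's
# minibatch_std_concat_layer itself.
import torch
import torch.nn as nn

class MinibatchStdConcat(nn.Module):
    def forward(self, x):
        # Std of each feature across the batch, averaged to a single scalar...
        std = x.std(dim=0, unbiased=False).mean()
        # ...then broadcast as one extra constant feature map per sample.
        stat = std.expand(x.size(0), 1, x.size(2), x.size(3))
        return torch.cat([x, stat], dim=1)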
def darknet_block(filters, n):
    # One Darknet stage: a stride-2 3x3 conv halves the resolution and doubles
    # the channels, followed by n residual darknet units.
    nn.conv(1, filters * 2, 3, 2, 1, 'leaky')
    for _ in range(n):
        darknet_unit(filters)
def darknet_unit(filters):
    # Residual unit: a 1x1 bottleneck down to `filters`, a 3x3 expansion back
    # to 2*filters, and a linear shortcut to the input three layers back.
    nn.conv(1, filters, 1, 1, 1, 'leaky')
    nn.conv(1, filters * 2, 3, 1, 1, 'leaky')
    nn.shortcut(-3, 'linear')
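# A standalone PyTorch sketch of one darknet unit, assuming the builder calls
# above map to Conv2d + BatchNorm2d + LeakyReLU and that nn.shortcut(-3,
# 'linear') adds the unit's input back unchanged (an identity skip); the
# class and helper names below are illustrative.
import torch
import torch.nn as nn

def conv_bn_leaky(in_ch, out_ch, k):
    return nn.Sequential(
        nn.Conv2d(in_ch, out_ch, k, stride=1, padding=k // 2, bias=False),
        nn.BatchNorm2d(out_ch),
        nn.LeakyReLU(0.1, inplace=True))

class DarknetUnit(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.squeeze = conv_bn_leaky(channels, channels // 2, 1)  # 1x1 bottleneck
        self.expand = conv_bn_leaky(channels // 2, channels, 3)   # 3x3 restore
    def forward(self, x):
        return x + self.expand(self.squeeze(x))  # linear shortcut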
def Fuse(filters, layer_id):
    # Feature fusion for the next detection scale: route back four layers,
    # squeeze with a 1x1 conv, upsample 2x, then concatenate with an earlier
    # backbone layer identified by layer_id.
    nn.route(-4)
    nn.conv(1, filters, 1, 1, 1, 'leaky')
    # nn.upsample(2)
    nn.deconv('null', filters, 2, 2, 0, 'linear')  # stride-2 deconv in place of a 2x upsample
    nn.route('-1, ' + str(layer_id))
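# A standalone sketch of the fuse step, assuming nn.route concatenates along
# the channel axis and the stride-2 deconv is equivalent to a 2x
# nearest-neighbor upsample (which the commented-out nn.upsample(2) suggests);
# the tensor names are illustrative.
import torch
import torch.nn.functional as F

def fuse(deep_feat, skip_feat):
    up = F.interpolate(deep_feat, scale_factor=2, mode='nearest')  # 2x upsample
    return torch.cat([up, skip_feat], dim=1)  # route('-1, layer_id')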
def detection_module(filters):
    # Detection head: three 1x1/3x3 conv pairs, then a linear 1x1 conv that
    # emits (anchors per detection layer) * (num_class + 5) prediction channels.
    nn.conv(1, filters, 1, 1, 1, 'leaky')
    nn.conv(1, filters * 2, 3, 1, 1, 'leaky')
    nn.conv(1, filters, 1, 1, 1, 'leaky')
    nn.conv(1, filters * 2, 3, 1, 1, 'leaky')
    nn.conv(1, filters, 1, 1, 1, 'leaky')
    nn.conv(1, filters * 2, 3, 1, 1, 'leaky')
    # Integer division: the channel count must be an int, not a float.
    nn.conv('null', config.num // config.layer_num * (config.num_class + 5), 1, 1, 1, 'linear')
    return config.net_id
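# Output-channel arithmetic for the final linear conv (illustrative COCO-style
# numbers, assuming config.num = 9 anchors split across config.layer_num = 3
# detection layers and config.num_class = 80):
anchors_per_layer = 9 // 3
out_channels = anchors_per_layer * (80 + 5)  # 4 box coords + 1 objectness + classes
assert out_channels == 255  # matches YOLOv3's per-scale prediction depth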