def FireNet_generic(FireNet_module_func, choose_num_output_func, batch_size, pool_after, s):
    """Build a FireNet-style Caffe NetSpec using pluggable module/sizing functions.

    @param FireNet_module_func: callable(n, curr_bottom, firenet_dict, layer_idx)
        -> new bottom name; appends one Fire-style module to the net.
    @param choose_num_output_func: callable(module_idx, s) -> dict of filter
        counts for that module (passed through to FireNet_module_func).
    @param batch_size: batch size handed to FireNet_data_layer.
    @param pool_after: dict keyed by layer/top name; when the current bottom is
        a key, FireNet_pooling_layer is inserted with the corresponding value.
    @param s: experiment/config dict forwarded to choose_num_output_func.
    @return: the network serialized via NetSpec.to_proto().
    """
    print(s)
    n = NetSpec()
    FireNet_data_layer(n, batch_size)  # add data layer to the net
    layer_idx = 1  # e.g. conv1, fire2, etc.

    n.conv1 = L.Convolution(n.data, kernel_size=7, num_output=96, stride=2,
                            weight_filler=dict(type='xavier'))
    curr_bottom = 'conv1'
    n.tops['relu_conv1'] = L.ReLU(n.tops[curr_bottom], in_place=True)
    # idiomatic membership test (was `in pool_after.keys()`)
    if curr_bottom in pool_after:
        curr_bottom = FireNet_pooling_layer(n, curr_bottom, pool_after[curr_bottom], layer_idx)

    # Fire modules fire2 .. fire9 (8 modules, hard-coded).
    for layer_idx in xrange(2, 10):
        firenet_dict = choose_num_output_func(layer_idx - 2, s)
        print(firenet_dict)
        curr_bottom = FireNet_module_func(n, curr_bottom, firenet_dict, layer_idx)
        if curr_bottom in pool_after:
            curr_bottom = FireNet_pooling_layer(n, curr_bottom, pool_after[curr_bottom], layer_idx)

    # layer_idx holds the last module's index after the loop; the dropout top
    # name (e.g. 'drop9') deliberately reflects that, as in FireNet().
    n.tops['drop' + str(layer_idx)] = L.Dropout(n.tops[curr_bottom], dropout_ratio=0.5, in_place=True)
    n.tops['conv_final'] = L.Convolution(n.tops[curr_bottom], kernel_size=1, num_output=1000,
                                         weight_filler=dict(type='gaussian', std=0.01, mean=0.0))
    n.tops['relu_conv_final'] = L.ReLU(n.tops['conv_final'], in_place=True)
    n.tops['pool_final'] = L.Pooling(n.tops['conv_final'], global_pooling=1, pool=P.Pooling.AVE)

    if phase == 'trainval':
        # `phase` is presumably a module-level global — confirm against the caller.
        n.loss = L.SoftmaxWithLoss(n.tops['pool_final'], n.label,
                                   include=dict(phase=caffe_pb2.TRAIN))
        n.accuracy = L.Accuracy(n.tops['pool_final'], n.label,
                                include=dict(phase=caffe_pb2.TEST))
        n.accuracy_top5 = L.Accuracy(n.tops['pool_final'], n.label,
                                     include=dict(phase=caffe_pb2.TEST), top_k=5)
    return n.to_proto()
def FireNet(batch_size, pool_after, s, c1):
    """Build the FireNet Caffe NetSpec.

    @param batch_size: batch size handed to FireNet_data_layer.
    @param pool_after: collection of layer indices; a 3x3/stride-2 max-pool is
        appended after each layer whose index is a member.
    @param s: config dict; s['n_layers'] gives the number of Fire modules.
    @param c1: conv1 config: c1['dim'] = kernel size, c1['nfilt'] = num_output.
    @return: the network serialized via NetSpec.to_proto().
    """
    print(s)
    n = NetSpec()
    FireNet_data_layer(n, batch_size)  # add data layer to the net

    def _max_pool(bottom, idx):
        # Append a 3x3/stride-2 max-pool named 'pool<idx>'; return the new bottom name.
        top = 'pool%d' % idx
        n.tops[top] = L.Pooling(n.tops[bottom], kernel_size=3, stride=2, pool=P.Pooling.MAX)
        return top

    layer_idx = 1  # e.g. conv1, fire2, etc.
    n.conv1 = L.Convolution(n.data, kernel_size=c1['dim'], num_output=c1['nfilt'],
                            stride=2, weight_filler=dict(type='xavier'))
    curr_bottom = 'conv1'
    n.tops['relu_conv1'] = L.ReLU(n.tops[curr_bottom], in_place=True)
    if layer_idx in pool_after:
        curr_bottom = _max_pool(curr_bottom, layer_idx)

    for layer_idx in xrange(2, s['n_layers'] + 2):
        firenet_dict = choose_num_output(layer_idx - 2, s)
        print(firenet_dict)
        curr_bottom = FireNet_module(n, curr_bottom, firenet_dict, layer_idx)
        if layer_idx in pool_after:
            curr_bottom = _max_pool(curr_bottom, layer_idx)

    # layer_idx holds the last Fire module's index here; the dropout top name
    # (e.g. 'drop9') deliberately reflects that.
    n.tops['drop' + str(layer_idx)] = L.Dropout(n.tops[curr_bottom], dropout_ratio=0.5, in_place=True)
    n.tops['conv_final'] = L.Convolution(n.tops[curr_bottom], kernel_size=1, num_output=1000,
                                         weight_filler=dict(type='gaussian', std=0.01, mean=0.0))
    n.tops['relu_conv_final'] = L.ReLU(n.tops['conv_final'], in_place=True)
    n.tops['pool_final'] = L.Pooling(n.tops['conv_final'], global_pooling=1, pool=P.Pooling.AVE)

    if phase == 'trainval':
        # `phase` is presumably a module-level global — confirm against the caller.
        n.loss = L.SoftmaxWithLoss(n.tops['pool_final'], n.label,
                                   include=dict(phase=caffe_pb2.TRAIN))
        n.accuracy = L.Accuracy(n.tops['pool_final'], n.label,
                                include=dict(phase=caffe_pb2.TEST))
        n.accuracy_top5 = L.Accuracy(n.tops['pool_final'], n.label,
                                     include=dict(phase=caffe_pb2.TEST), top_k=5)
    return n.to_proto()
def lenet(lmdbData, lmdbLabel, batch_size):
    """Build a small LeNet-like net with a Python-layer Euclidean loss.

    Data and labels come from two separate LMDBs (lmdbData / lmdbLabel),
    both scaled by 1/255. Returns the NetSpec serialized to a proto.
    """
    net = NetSpec()

    # Input streams: images and regression targets from their own LMDBs.
    common = dict(batch_size=batch_size, backend=P.Data.LMDB,
                  transform_param=dict(scale=1. / 255), ntop=1)
    net.data = L.Data(source=lmdbData, **common)
    net.label = L.Data(source=lmdbLabel, **common)

    # Two conv + max-pool stages.
    net.conv1 = L.Convolution(net.data, kernel_size=4, num_output=200,
                              weight_filler=dict(type='xavier'))
    net.pool1 = L.Pooling(net.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    net.conv2 = L.Convolution(net.pool1, kernel_size=3, num_output=50,
                              weight_filler=dict(type='xavier'))
    net.pool2 = L.Pooling(net.conv2, kernel_size=2, stride=1, pool=P.Pooling.MAX)

    # Fully-connected head and Euclidean loss implemented as a Python layer.
    net.fc1 = L.InnerProduct(net.pool2, num_output=200, weight_filler=dict(type='xavier'))
    net.relu1 = L.ReLU(net.fc1, in_place=True)
    net.score = L.InnerProduct(net.relu1, num_output=1200, weight_filler=dict(type='xavier'))
    net.loss = L.Python(net.score, net.label, module='pyloss', layer='EuclideanLossLayer')
    return net.to_proto()