from __future__ import print_function

import caffe
from caffe import layers as L
from pygreentea import pygreentea as pygt
from pygreentea.pygreentea import metalayers as ML

net = caffe.NetSpec()
net.data = L.MemoryData(dim=[1, 1], ntop=1)
net.label = L.MemoryData(dim=[1, 1], ntop=1, include=[dict(phase=0)])
net.sknet = ML.SKNet(net.data,
                     ip_depth=0,
                     dropout=0,
                     fmap_inc_rule=lambda fmaps: 80,
                     fmap_dec_rule=lambda fmaps: 80,
                     fmap_bridge_rule=lambda fmaps: 3,
                     fmap_start=80,
                     conv=[[3,3,3],[3,3,3],[3,3,3],[3,3,3],[3,3,3],[3,3,3]],
                     pool=[[2,2,2],[2,2,2],[2,2,2],[1,1,1]],
                     padding=[85,85,85])
# Prediction output, only included during testing (phase = 1)
net.prob = L.Softmax(net.sknet, ntop=1, in_place=False, include=[dict(phase=1)])
# Loss output, only included during training (phase = 0)
net.loss = L.SoftmaxWithLoss(net.sknet, net.label, ntop=0, loss_weight=1.0,
                             include=[dict(phase=0)])
# Fix the spatial input dimensions (Z is free; Y and X are tied to Z)
pygt.fix_input_dims(net,
                    [net.data, net.label],
                    max_shapes=[[130,130,130],[130,130,130]],
                    shape_coupled=[-1, 0, 0])

protonet = net.to_proto()
protonet.name = 'net'

# Store the network as prototxt
with open(protonet.name + '.prototxt', 'w') as f:
    print(protonet, file=f)
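To verify what fix_input_dims settled on, the generated prototxt can be loaded back into pycaffe and the blob shapes printed. This is only a minimal sketch using standard pycaffe calls; it assumes the file 'net.prototxt' written above and a CPU run.

import caffe

caffe.set_mode_cpu()
# Instantiate the generated network in test phase and inspect the
# spatial input dimensions that fix_input_dims selected.
test_net = caffe.Net('net.prototxt', caffe.TEST)
for name, blob in test_net.blobs.items():
    print(name, blob.data.shape)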
from __future__ import print_function

import caffe
from caffe import layers as L
from pygreentea import pygreentea as pygt
from pygreentea.pygreentea import metalayers as ML

net = caffe.NetSpec()
net.data = L.MemoryData(dim=[1, 1], ntop=1)
net.label = L.MemoryData(dim=[1, 1], ntop=1, include=[dict(phase=0)])
# Anisotropic SK network: the Z axis uses smaller kernels and no pooling
net.sknet = ML.SKNet(net.data,
                     fmap_start=64,
                     conv=[[1,8,8],[2,6,6],[2,4,4]],
                     pool=[[1,2,2],[1,2,2],[1,2,2]],
                     padding=[8,88,88])
# 1x1x1 convolution mapping the SK features to 9 output classes
net.out = L.Convolution(net.sknet, kernel_size=[1,1,1], num_output=9,
                        weight_filler=dict(type='msra'),
                        bias_filler=dict(type='constant'))
net.prob = L.Softmax(net.out, ntop=1, in_place=False, include=[dict(phase=1)])
net.loss = L.SoftmaxWithLoss(net.out, net.label, ntop=0, loss_weight=1.0,
                             include=[dict(phase=0)])
# Fix the spatial input dimensions (Z and Y are free; X is tied to Y)
pygt.fix_input_dims(net,
                    [net.data, net.label],
                    max_shapes=[[9,250,250],[9,250,250]],
                    shape_coupled=[-1, -1, 1])

protonet = net.to_proto()
protonet.name = 'net'

# Store the network as prototxt
with open(protonet.name + '.prototxt', 'w') as f:
    print(protonet, file=f)
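The shape_coupled argument encodes how the spatial axes depend on each other: -1 marks an axis whose size is a free parameter, and a non-negative value k ties that axis to whatever size axis k receives (the comments in the last example below restate this). The helper here is not part of PyGreentea; it is a hypothetical stand-in written purely to illustrate the resolution rule.

def resolve_coupling(free_sizes, shape_coupled):
    # free_sizes: sizes chosen for the free (-1) axes, in order.
    # shape_coupled: -1 for a free axis, k >= 0 to copy the size of axis k.
    sizes = []
    it = iter(free_sizes)
    for c in shape_coupled:
        sizes.append(next(it) if c == -1 else sizes[c])
    return sizes

print(resolve_coupling([9, 250], [-1, -1, 1]))  # [9, 250, 250]: Z, Y free; X = Y
print(resolve_coupling([130], [-1, 0, 0]))      # [130, 130, 130]: Y and X tied to Z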
net.aff_pred = L.Sigmoid(net.aff_out, ntop=1, in_place=False)
net.smax_pred = L.Softmax(net.smax_out, ntop=1, in_place=False, include=[dict(phase=1)])

# Choose a loss function and hook up the data, label and scale inputs.
# Only include it during the training phase (phase = 0); the 'euclid' and
# 'malis' stages select between the two affinity losses.
net.euclid_loss = L.EuclideanLoss(net.aff_pred, net.aff_label, net.scale, ntop=0,
                                  loss_weight=10.0, include=[dict(phase=0, stage='euclid')])
net.malis_loss = L.MalisLoss(net.aff_pred, net.aff_label, net.comp_label, net.nhood,
                             ntop=0, loss_weight=5.0, include=[dict(phase=0, stage='malis')])
net.smax_loss = L.SoftmaxWithLoss(net.smax_out, net.smax_label, ntop=0, loss_weight=1.0,
                                  include=[dict(phase=0)])

# Fix the spatial input dimensions. Only the spatial dimensions get modified; the
# minibatch size and the channels/feature maps must be set correctly by the user,
# since this code cannot infer the user's intent. If the call does not seem to
# terminate, the most likely cause is a wrong number of feature maps / channels in
# either the MemoryData layers or the network output.
# The function takes as input:
# - the network,
# - a list of inputs to test (the nhood input is static and not spatially testable,
#   so it is excluded here),
# - a list of the maximal shapes for each input,
# - a list of spatial dependencies; here [-1, 0] means the Y axis is a free parameter
#   and the X axis should be identical to the Y axis.
pygt.fix_input_dims(net,
                    [net.data, net.aff_label, net.comp_label, net.smax_label, net.scale],
                    max_shapes=[[500,500],[500,500],[500,500],[500,500],[500,500]],
                    shape_coupled=[-1, 0])

protonet = net.to_proto()
protonet.name = 'net'

# Store the network as prototxt
with open(protonet.name + '.prototxt', 'w') as f:
    print(protonet, file=f)
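Because the two affinity losses are gated on NetState stages ('euclid' and 'malis'), a training run has to activate exactly one of them. The sketch below is hedged: it assumes a pycaffe build whose Net constructor accepts the stages keyword; on builds without it, the stage can instead be selected in the solver prototxt via train_state { stage: 'euclid' }.

import caffe

# Assumption: this pycaffe build exposes the 'stages' keyword on the Net
# constructor, which fills the NetState used to evaluate the include
# rules above. Otherwise, set the stage via the solver's train_state.
train_net = caffe.Net('net.prototxt', caffe.TRAIN, stages=['euclid'])
print(list(train_net.blobs.keys()))  # the MALIS branch should be absent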