def main():
    """Train an FCN seeded from VGG-16 ILSVRC weights.

    Expects the GPU id as ``sys.argv[1]`` and a ``solver.prototxt`` in the
    working directory. Transplants VGG-16 weights into the 'color'-suffixed
    layers, bilinearly initializes the upsampling layers, then trains for
    50 x 2000 iterations.
    """
    # Best-effort: name the process after the working directory so it is
    # identifiable in ps/top; proceed without it if the package is absent.
    try:
        import setproctitle
        setproctitle.setproctitle(os.path.basename(os.getcwd()))
    except ImportError:  # BUG FIX: was a bare `except:`, which hides real errors
        pass

    # init
    caffe.set_device(int(sys.argv[1]))
    caffe.set_mode_gpu()

    solver = caffe.SGDSolver('solver.prototxt')
    #solver.net.copy_from(weights)

    print("Transplanting weights:")
    weights = '../ilsvrc-nets/VGG_ILSVRC_16_layers.caffemodel'
    proto = "../ilsvrc-nets/VGG_ILSVRC_16_layers_deploy.prototxt"
    base_net = caffe.Net(proto, weights, caffe.TEST)
    surgery.transplant(solver.net, base_net, suffix='color')
    del base_net  # release the donor net once its weights are copied
    print("Transplanted weights!")

    # surgeries: deconvolution ('up') layers get bilinear interpolation kernels
    interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
    surgery.interp(solver.net, interp_layers)

    # scoring
    with open('/media/ssd500/autocity_dataset/image_train.txt', 'r') as fid:
        # NOTE(review): im_files is loaded but never used below — confirm intent.
        im_files = [f.strip() for f in fid.readlines()]

    for _ in range(50):
        solver.step(2000)
def load_pretrained_weights2(self, solver):
    """Seed both streams of the solver's net from the pretrained model.

    Transplants the pretrained weights into the "_1"- and "_2"-suffixed
    layers, bilinearly initializes the upsampling ('up') layers, and for
    the Scale8S architecture additionally copies dilated-conv weights.
    Returns the solver with its net initialized.
    """
    donor = caffe.Net(self.params.pretrained_deploy,
                      self.params.pretrained_model,
                      caffe.TEST)
    surgery.transplant(solver.net, donor, "_1")
    surgery.transplant(solver.net, donor, "_2")

    # surgeries: fixed bilinear kernels for every upsampling layer
    upsample_layers = [name for name in solver.net.params.keys() if 'up' in name]
    surgery.interp(solver.net, upsample_layers, self.params.interp_scale)

    if self.params.architect == "Scale8S":
        surgery.load_dil_weights(solver.net, donor)

    del donor  # free the donor net's memory
    return solver
def main():
    """Transplant parameters from a source net into a destination net.

    Usage: ``script <src_proto> <src_weights> <dst_proto> <dst_weights>``
    (paths are relative to ``./model``). Prints both architectures and
    parameter shapes, copies same-named parameters, bilinearly initializes
    the 'up' (Deconvolution) layers, and saves the destination weights.
    """
    rt_dir = './model'
    model_src = os.path.join(rt_dir, sys.argv[1])
    weights_src = os.path.join(rt_dir, sys.argv[2])
    model_dst = os.path.join(rt_dir, sys.argv[3])
    weights_dst = os.path.join(rt_dir, sys.argv[4])

    caffe.set_mode_cpu()
    net_src = caffe.Net(model_src, weights_src, caffe.TRAIN)
    net_dst = caffe.Net(model_dst, caffe.TRAIN)

    # net architecture
    print('======== source network architecture ========')
    # BUG FIX: dict.iteritems() is Python-2-only; .items() works on 2 and 3.
    for layer_name, blob in net_src.blobs.items():
        print(layer_name + '\t' + str(blob.data.shape))
    print('====== destination network architecture =====')
    for layer_name, blob in net_dst.blobs.items():
        print(layer_name + '\t' + str(blob.data.shape))

    # net parameters
    print('========= source network parameters =========')
    for layer_name, param in net_src.params.items():
        print(layer_name + '\t' + str(param[0].data.shape))  # , str(param[1].data.shape)
    print('======= destination network parameters ======')
    for layer_name, param in net_dst.params.items():
        print(layer_name + '\t' + str(param[0].data.shape))  # , str(param[1].data.shape)

    # transfer: copy parameters source net => destination net
    print('================= transfer ==================')
    transplant(net_dst, net_src)

    # initialize: use bilinear kernels to initialize Deconvolution layers
    print('================ initialize =================')
    interp_layers = [k for k in net_dst.params.keys() if 'up' in k]
    for k in interp_layers:
        print(k)
    interp(net_dst, interp_layers)
    print('=============================================')

    # save new weights
    net_dst.save(weights_dst)
# Name the process after the working directory for easy identification.
setproctitle.setproctitle(os.path.basename(os.getcwd()))

color_proto = '../nyud-rgb-32s/trainval.prototxt'
color_weights = '../nyud-rgb-32s/nyud-rgb-32s-28k.caffemodel'
hha_proto = '../nyud-hha-32s/trainval.prototxt'
hha_weights = '../nyud-hha-32s/nyud-hha-32s-60k.caffemodel'

# init
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')

# surgeries: seed the two-stream net from the separately trained RGB and HHA nets
color_net = caffe.Net(color_proto, color_weights, caffe.TEST)
surgery.transplant(solver.net, color_net, suffix='color')
del color_net

hha_net = caffe.Net(hha_proto, hha_weights, caffe.TEST)
surgery.transplant(solver.net, hha_net, suffix='hha')
del hha_net

interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
test = np.loadtxt('../data/nyud/test.txt', dtype=str)

for _ in range(50):
    solver.step(2000)
    # BUG FIX: the split list is loaded into `test`, but the undefined name
    # `val` was passed to seg_tests, raising NameError on first evaluation.
    score.seg_tests(solver, False, test, layer='score')
import numpy as np
import os

import setproctitle
setproctitle.setproctitle(os.path.basename(os.getcwd()))

weights = '../vgg16fc.caffemodel'
base_net = caffe.Net('../vgg16fc.prototxt', '../vgg16fc.caffemodel', caffe.TEST)

# init
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')
surgery.transplant(solver.net, base_net)

# surgeries: bilinear kernels for the upsampling layers
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# Seed the 4-channel (BGR+D) input conv from the 3-channel VGG conv1_1:
# color filters copied verbatim, depth channel initialized with the mean
# of the color filters, biases copied as-is.
solver.net.params['conv1_1_bgrd'][0].data[:, :3] = base_net.params['conv1_1'][0].data
solver.net.params['conv1_1_bgrd'][0].data[:, 3] = np.mean(base_net.params['conv1_1'][0].data, axis=1)
solver.net.params['conv1_1_bgrd'][1].data[...] = base_net.params['conv1_1'][1].data
del base_net

# scoring
test = np.loadtxt('../data/nyud/test.txt', dtype=str)

for _ in range(50):
    # BUG FIX: the loop body was missing (truncated source) and left a
    # SyntaxError; train in 2000-iteration chunks as sibling scripts do.
    # TODO(review): the original likely also scored each chunk — confirm.
    solver.step(2000)
import numpy as np
import os
import sys

sys.path.append("yourpathtocaffe/caffe/python")
sys.path.append("yourpathtocaffe/SG-FCN/caffe/caffe")

import caffe
import surgery  # BUG FIX: surgery is used below but was never imported

# Best-effort process naming; proceed without it if the package is absent.
try:
    import setproctitle
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except ImportError:  # BUG FIX: was a bare `except:`, which hides real errors
    pass

vgg_weights = 'vgg.caffemodel'
vgg_proto = 'vgg.prototxt'

# init
caffe.set_mode_gpu()
caffe.set_device(0)

solver = caffe.SGDSolver('solver.prototxt')

# Seed from VGG, then free the donor net.
vgg_net = caffe.Net(vgg_proto, vgg_weights, caffe.TRAIN)
surgery.transplant(solver.net, vgg_net)
del vgg_net

# surgeries: bilinear kernels for the upsampling layers
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

solver.solve()
import caffe
import surgery
import numpy as np

# Pretrained PASCAL-Context FCN-32s used to seed the 'part'-suffixed layers.
context_proto = '../../../pascalcontext-fcn32s/train.prototxt'
context_weights = '../../../pascalcontext-fcn32s/pascalcontext-fcn32s-heavy.caffemodel'

solver = caffe.SGDSolver('solver.prototxt')

# Transplant the donor weights, then release the donor net.
donor = caffe.Net(context_proto, context_weights, caffe.TEST)
surgery.transplant(solver.net, donor, suffix='part')
del donor
import surgery import os import argparse import setproctitle setproctitle.setproctitle(os.path.basename(os.getcwd())) # parse arguments parser = argparse.ArgumentParser() parser.add_argument('--sp', help='source prototxt') parser.add_argument('--tp', help='target prototxt') parser.add_argument('--sc', help='sorce caffemodel') parser.add_argument('--tc', help='target caffemodel') args = parser.parse_args() source_prototxt = args.sp target_prototxt = args.tp source_caffemodel = args.sc target_caffemodel = args.tc # Load the original network and extract the fully connected layers' parameters. base_net = caffe.Net(source_prototxt, source_caffemodel, caffe.TEST) print "Base net: {}".format(base_net.params) # n_cl = 21 new_net = caffe.Net(target_prototxt, caffe.TEST) print "New net: {}".format(new_net.params) # transplant surgery.transplant(new_net, base_net) new_net.save(target_caffemodel)
# Best-effort process naming; proceed without it if the package is absent.
try:
    import setproctitle
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except ImportError:  # BUG FIX: was a bare `except:`, which hides real errors
    pass

weights = 'fcn/ilsvrc-nets/vgg16-fcn.caffemodel'
base_net = caffe.Net('fcn/ilsvrc-nets/vgg16-fcn.prototxt',
                     'fcn/ilsvrc-nets/vgg16-fcn.caffemodel', caffe.TEST)

# init
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver(
    'cocostuff/config/cocostuff-fcn8s-atonce/solver.prototxt')

# Modified by Holger:
surgery.transplant(solver.net, base_net, suffix='cs')
#solver.net.copy_from(weights)

# surgeries: bilinear kernels for the upsampling layers
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
val = np.loadtxt('cocostuff/list/val.txt', dtype=str)

for _ in range(75):
    solver.step(4000)
    score.seg_tests(solver, False, val, layer='score')
prototxt_file = 'fcn32s.prototxt' # Pre-trained weights path of RGB model weights = 'fcn32s-heavy-pascal.caffemodel' # Initialize RGB model to copy 3 input filter weights (corresponding to RGB) base_net = caffe.Net(prototxt_file, weights, caffe.TRAIN) # Initialize SGD solver for the RGBD CNN solver = caffe.SGDSolver('solver.prototxt') # copy filter weights from the RGB model to the RGBD model # this will copy weights from the parameters with the same # name in the RGB and RGBD model. Since the input layer will # be 4-channel instead of 3-channel (RGBD instead of RGB), it # has a different name, so the weights will not be copied surgery.transplant(solver.net, base_net) # Resize blobs corresponding to deconvolutions interp_layers = [k for k in solver.net.params.keys() if 'up' in k] surgery.interp(solver.net, interp_layers) # Copy the filters of RGB input to the first 3 filters of the RGBD CNN input solver.net.params['conv1_1_bgrd'][0].data[:, :3] = base_net.params['conv1_1'][ 0].data # Initialize the depth channel filter weights with the average of the RGB weights solver.net.params['conv1_1_bgrd'][0].data[:, 3] = np.mean( base_net.params['conv1_1'][0].data, axis=1) # Copy the filter bias terms solver.net.params['conv1_1_bgrd'][1].data[ ...] = base_net.params['conv1_1'][1].data
import caffe
import surgery
import numpy as np
import os
import sys

# VGG-16 donor used to seed the PFN network's matching layers.
weights = './VGG_ILSVRC_16_layers.caffemodel'
proto = './PFN.prototxt'

# init (GPU 1, hard-coded)
caffe.set_device(1)
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')

# Transplant same-named parameters, then free the donor net.
donor = caffe.Net(proto, weights, caffe.TRAIN)
surgery.transplant(solver.net, donor)
del donor

# NOTE: bilinear init of the 'up' layers is disabled in this script:
#interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
#surgery.interp(solver.net, interp_layers)

# train
solver.step(60000)
# Best-effort process naming; proceed without it if the package is absent.
try:
    import setproctitle
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except ImportError:  # BUG FIX: was a bare `except:`, which hides real errors
    pass

weights = 'siftflow-fcn32s-heavy.caffemodel'
deploy_proto = 'deploy32.prototxt'

# init
caffe.set_mode_cpu()  ###

solver = caffe.SGDSolver('solver.prototxt')
#solver.net.copy_from(weights)
fcn_net = caffe.Net(deploy_proto, weights, caffe.TRAIN)
surgery.transplant(solver.net, fcn_net)
del fcn_net

# surgeries: bilinear kernels for the upsampling layers
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
test = np.loadtxt('E:/FCN32/sift-flow32s/sift-flow/test.txt', dtype=str)

for _ in range(50):
    solver.step(2000)
    # N.B. metrics on the semantic labels are off b.c. of missing classes;
    # score manually from the histogram instead for proper evaluation
    score.seg_tests(solver, False, test, layer='score_sem', gt='sem')
# --------------------------------------------------------
# Seg-FCN for Dragon
# Copyright (c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
"""Convert a fully-connected VGG16 caffemodel into its fully-convolutional form."""

import surgery
import dragon.vm.caffe as caffe

if __name__ == '__main__':
    # Donor: original VGG16 with fc layers; target: the FCN-style definition.
    fc_net = caffe.Net('net.prototxt', 'VGG16.v2.caffemodel', caffe.TEST)
    fcn_net = caffe.Net('new_net.prototxt', caffe.TEST)
    surgery.transplant(fcn_net, fc_net)
    fcn_net.save('VGG16.fcn.caffemodel')
import caffe  # BUG FIX: caffe is used throughout but was never imported
import surgery
import numpy as np
import os
import sys

# DenseNet-161 donor used to seed the DDN network's matching layers.
weights = './DenseNet_161.caffemodel'
proto = './DDN.prototxt'

# init (GPU 0, hard-coded)
caffe.set_device(0)
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')

# Transplant same-named parameters, then free the donor net.
densenet = caffe.Net(proto, weights, caffe.TRAIN)
surgery.transplant(solver.net, densenet)
del densenet

# surgeries (disabled in this script):
#interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
#surgery.interp(solver.net, interp_layers)

# train
solver.step(100000)
import caffe  # BUG FIX: caffe is used below but was never imported
import surgery, score
import numpy as np
import os

import setproctitle
setproctitle.setproctitle(os.path.basename(os.getcwd()))

# VGG-16 donor model
proto = 'path-to-VGG_ILSVRC_16_layers_deploy.prototxt'
weights = 'path-to-VGG_ILSVRC_16_layers.caffemodel'

caffe.set_device(0)
caffe.set_mode_gpu()

solver = caffe.SGDSolver('Dilated_FCN-2s_VGG16/solver.prototxt')

# surgeries: transplant same-named parameters, then free the donor net
custom_net = caffe.Net(proto, weights, caffe.TEST)
surgery.transplant(solver.net, custom_net)
del custom_net

# bilinear kernels for the upsampling layers
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
val = np.loadtxt('../seg12val.txt', dtype=str)

for _ in range(1):
    solver.step(1)
    score.seg_tests(solver, False, val, layer='score')
# --------------------------------------------------------
# Seg-FCN for Dragon
# Copyright (c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
"""
Transplant fully-connected caffemodel into fully-convolution ver.
"""

import surgery
import dragon.vm.caffe as caffe

if __name__ == '__main__':
    # Donor: original VGG16 with fc layers; target: the FCN-style definition.
    net = caffe.Net('net.prototxt', 'VGG16.v2.caffemodel', caffe.TEST)
    new_net = caffe.Net('new_net.prototxt', caffe.TEST)
    surgery.transplant(new_net, net)
    # BUG FIX: Net.save() takes only a file path; the spurious suffix=''
    # keyword (copy-pasted from transplant()) raised a TypeError.
    new_net.save('VGG16.fcn.caffemodel')
# Best-effort process naming; proceed without it if the package is absent.
try:
    import setproctitle
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except ImportError:  # BUG FIX: was a bare `except:`, which hides real errors
    pass

weights = 'fcn/ilsvrc-nets/vgg16-fcn.caffemodel'
base_net = caffe.Net('fcn/ilsvrc-nets/vgg16-fcn.prototxt',
                     'fcn/ilsvrc-nets/vgg16-fcn.caffemodel', caffe.TEST)

# init
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver('cocostuff/config/cocostuff-fcn8s-atonce/solver.prototxt')

# Modified by Holger:
surgery.transplant(solver.net, base_net, suffix='cs')
#solver.net.copy_from(weights)

# surgeries: bilinear kernels for the upsampling layers
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
val = np.loadtxt('cocostuff/list/val.txt', dtype=str)

for _ in range(75):
    solver.step(4000)
    score.seg_tests(solver, False, val, layer='score')
import sys
import os

sys.path.append('/home/arg_ws3/caffe/python')

import caffe  # requires pycaffe on the path appended above
from surgery import transplant

caffe.set_mode_gpu()

# Donor: pretrained reference CaffeNet (fully-connected classifier).
old_model_def = '/home/arg_ws3/caffe/models/bvlc_reference_caffenet/deploy.prototxt'
old_model_weights = '/home/arg_ws3/caffe/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
old_net = caffe.Net(old_model_def, old_model_weights, caffe.TEST)

# Target: the FCN definition to be initialized (TEST mode: no dropout).
new_model_def = '/home/arg_ws3/david_trainings/fcn.berkeleyvision.org/deploy.prototxt'
new_net = caffe.Net(new_model_def, caffe.TEST)

# Copy all same-named parameters, then save the initialized model.
transplant(new_net, old_net, suffix='')
new_net.save('initmodel.caffemodel')
def main(args):
    """Train, test, and refine an FCN-8s on one Prague texture image set.

    args.data selects the image set (files saved as ``<k>_1.jpg`` etc.);
    args.n_iter is the number of training iterations. Trains from the
    VOC FCN-8s weights, segments the test image, iteratively refines the
    labeling, and saves a 4-panel comparison figure plus the CO accuracy.
    """
    data = str(
        args.data
    ) + '_1'  # Images (and labels) saved as 1_1.jpg, 2_1.jpg, 3_1.jpg etc. in root-fcn/data/prague_normal
    n_iter = args.n_iter

    ###### Train ######
    print 'start train'

    # Link the correct train.txt and val.txt
    symlink_force('train' + data + '.txt',
                  '../data/prague_normal/ImageSets/Segmentation/train.txt')
    symlink_force('val' + data + '.txt',
                  '../data/prague_normal/ImageSets/Segmentation/val.txt')

    # Link the ground truth for training
    i = 0
    with open('../data/prague_normal/ImageSets/Segmentation/train.txt', 'r') as f:
        trainlist = f.read().splitlines()
    for line in trainlist:
        if 'grid4_t0' not in line:
            # grid4 is used only in set 14 and has a different shape.
            # We keep it linked to the same padded image.
            symlink_force(
                'gt' + str(i) + '.png',
                '../data/prague_normal/SegmentationClass/' + line + '.png')
        else:
            symlink_force(
                'gt_grid4_' + str(i) + '.png',
                '../data/prague_normal/SegmentationClass/' + line + '.png')
        i += 1

    # Import ground truth; the number of classes is the number of distinct
    # labels (also given by the number of train images, but easier this way).
    true_label = Image.open('../data/prague_normal/SegmentationClass/' + data + '.png')
    nb_class = len(np.unique(true_label))
    true_label = np.array(true_label, dtype=np.float32)

    # Create train.prototxt and val.prototxt
    net_fcnT.make_net(nb_class)

    # Load weights (pretrained VOC FCN-8s donor)
    weights = '../voc-fcn8s/fcn8s-heavy-pascal.caffemodel'
    base_net = caffe.Net('../voc-fcn8s/deploy.prototxt', weights, caffe.TEST)

    # init
    caffe.set_mode_gpu()
    solver = caffe.SGDSolver('solver.prototxt')
    solver.net.copy_from(weights)

    # surgeries
    surgery.transplant(solver.net, base_net)

    # Train for n_iter iterations and save the model
    solver.step(n_iter)
    if not os.path.exists('snapshot'):
        os.makedirs('snapshot')
    solver.net.save('snapshot/prague.caffemodel')

    # Copy the train.prototxt to deploy.prototxt (for testing):
    # keep the first 10 lines of the existing deploy file, then splice in
    # the body of train.prototxt (trimming its first 11 / last 11 lines).
    f = open('train.prototxt')
    l_train = f.readlines()
    f.close()
    f = open('deploy.prototxt', 'r')
    l_deploy = f.readlines()
    f.close()
    with open('deploy.prototxt', 'w') as f:
        for _, line in enumerate(l_deploy[:10]):
            f.write(line)
        for _, line in enumerate(l_train[11:-11]):
            f.write(line)

    ###### Test ######
    # Import and prepare test image: RGB->BGR, subtract the ILSVRC mean,
    # and move channels first (C x H x W) for Caffe.
    im = Image.open('../data/prague_normal/JPEGImages/' + data + '.jpg')
    in_ = np.array(im, dtype=np.float32)
    in_ = in_[:, :, ::-1]
    in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
    in_ = in_.transpose((2, 0, 1))

    # load trained net
    net = caffe.Net('deploy.prototxt', 'snapshot/prague.caffemodel', caffe.TEST)
    # shape for input (data blob is N x C x H x W), set data
    net.blobs['data'].reshape(1, *in_.shape)
    net.blobs['data'].data[...] = in_
    # run net and take argmax for prediction
    net.forward()
    outnopost = net.blobs['score'].data[0].argmax(axis=0)

    ###### Refinement method ######
    # Repeatedly relabel small blobs using the next-best class scores until
    # the labeling stabilizes and no isolated (255-marked) regions remain.
    current = outnopost.copy()
    past = np.zeros(current.shape, np.uint8)
    nb_clust = len(np.unique(current))
    while 1:
        for n in range(nb_clust):
            past = current.copy()
            current = relabelSmallBlobs(
                current,
                np.argsort(-net.blobs['score'].data[0], axis=0)[n])
            current = bigBlobs(current.astype(np.uint8), nb_clust)
            if not np.array_equal(current, past):
                break
        # 255 represents the small isolated regions not part of the biggest blobs.
        if 255 not in current:
            break
    out = current.copy()

    # Measure correct pixel assignment (CO): fraction of pixels matching GT
    w, h = out.shape
    accu = round(float(np.sum(true_label == out)) / float(w * h), 4)

    # Plot and save: input, ground truth, pre- and post-refinement results
    fig = plt.figure(figsize=(20, 20))
    fig.add_subplot(2, 2, 1)
    imgplot = plt.imshow(im)
    plt.axis('off')
    plt.title('input image')
    fig.add_subplot(2, 2, 2)
    imgplot = plt.imshow(true_label)
    plt.axis('off')
    plt.title('ground truth segmentation')
    fig.add_subplot(2, 2, 3)
    imgplot = plt.imshow(outnopost)
    plt.axis('off')
    plt.title('segmentation before refinement')
    fig.add_subplot(2, 2, 4)
    imgplot = plt.imshow(out)
    plt.axis('off')
    plt.title('segmentation after refinement (CO=' + str(accu) + ')')
    if not os.path.exists('results'):
        os.makedirs('results')
    plt.savefig('results/out_' + data + '.jpg', bbox_inches='tight')