w.close()
else:
    f = open('../models/' + MAC_model + '/solver_train_LF.prototxt', 'r+')
    w = open('../models/' + MAC_model + '/solver_train_LF_aug.prototxt', 'w+')
    s = f.read()
    f.seek(0, 0)
    if '#test_' not in s and 'test_' in s:
        s = s.replace('test_', '#test_')
    ind = s.rfind('model/')
    ind1 = s.rfind('train"')
    w.write(s[:ind] + 'model/' + str(k) + s[ind1:])
    f.close()
    w.close()

caffemodel = '../pretrain/pretrain.caffemodel'
solver = caffe.SGDSolver('../models/' + MAC_model +
                         '/solver_train_LF_aug.prototxt')
solver.net.copy_from(caffemodel)

max_iter = 160000

if not record_val_loss:

    solver.step(max_iter)
else:
    loss_PATH = '../models/' + MAC_model + '/loss/'

    train_loss = []
    val_loss = []
    test_iter = 128
    test_interval = 500
    start = time.time()
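    # The original example is truncated here. A minimal sketch (not the
    # original code) of a loop that records training and validation loss,
    # assuming numpy is imported as np and the solver prototxt used in this
    # branch keeps a test net whose loss blob is named 'loss':
    for it in range(max_iter):
        solver.step(1)
        train_loss.append(float(solver.net.blobs['loss'].data))
        if it % test_interval == 0:
            loss_sum = 0.0
            for _ in range(test_iter):
                solver.test_nets[0].forward()
                loss_sum += float(solver.test_nets[0].blobs['loss'].data)
            val_loss.append(loss_sum / test_iter)
            np.save(loss_PATH + 'train_loss.npy', np.array(train_loss))
            np.save(loss_PATH + 'val_loss.npy', np.array(val_loss))
    print('elapsed: {:.1f}s'.format(time.time() - start))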
Example #2
    # 			pre_solver.test_nets[0].forward()
    # 			correct += sum(pre_solver.test_nets[0].blobs['ip2'].data.argmax(1)
    # 							 == pre_solver.test_nets[0].blobs['label'].data)
    # 		test_acc[it // test_interval] = float(correct) / (test_iters * test_batchsz)
    # 		print 'Iteration', it, 'testing accuracy:',float(correct) / (test_iters *test_batchsz)

    niter = 1020
    semi_start = 240
    semi_ = 0
    test_interval = 50
    train_loss = zeros(niter)
    test_acc = zeros(int(np.ceil(niter / test_interval)) + 1)
    label = zeros(train_batchsz, dtype=int)
    prototxt = 'models/bvlc_alexnet/subset_solver.prototxt'
    solverstate = 'models/bvlc_alexnet/caffe_alexnet_train_iter_1500.solverstate'
    solver = caffe.SGDSolver(prototxt)
    caffemodel = 'bvlc_alexnet.caffemodel'
    caffemodel = 'models/bvlc_alexnet/solver_state/alex_cifar_sub_trained.caffemodel'
    # solver.restore(solverstate)
    solver.net.copy_from(caffemodel)
    # print solver.net.blobs['fc8'].shape[1]
    # print solver.net.layers[20].blobs.shape
    semi_layer = 14
    # net.layers[semi_layer].use_data.reshape(1,1,1,train_batchsz)
    for it in xrange(niter):
        solver.step_forward()
        # print "after step_forward",solver.net.blobs['label'].data[:5]

        # end_index=start_index + train_batchsz
        # if end_index>TOTAL_NUM:
        # 	end_index=TOTAL_NUM
Example #3
import numpy as np
import os
import sys

try:
    import setproctitle
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except:
    pass

weights = "/home/arg_ws3/caffe/examples/net_surgery/bvlc_caffenet_6_channels.caffemodel"

# init
#caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver_6_channel.prototxt')
#solver.net.copy_from(weights)

# surgeries
#interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
#surgery.interp(solver.net, interp_layers)

# scoring
#val = np.loadtxt('/home/peter/caffe/data/new_brand/full_mask_final/predict_mask/val.txt', dtype=str)

for i in range(30):
    solver.step(1000)
    print "===================== Round:", i+1, "====================="
    #score.seg_tests(solver, False, val, layer='score')
Example #4

# -*- coding: UTF-8 -*-
import sys

sys.path.append("/home/ljf/caffe-master/python")
sys.path.append("/home/ljf/caffe-master/python/caffe")
import caffe
from caffe import layers as L, params as P, to_proto
import tools

root_str = "/home/ljf/caffe-master/examples/ljftest_cifar10_DenseNet/"
solver_dir = root_str + 'solver.prototxt'

if __name__ == '__main__':
    # Run this block separately from the code above, unless the solver prototxt needs no changes
    caffe.set_device(0)  # set which GPU to use
    caffe.set_mode_gpu()  # run iterations in GPU-accelerated mode
    solver = caffe.SGDSolver(str(solver_dir))
    solver.restore(root_str +
                   "model_save/caffe_ljftest_train_iter_100000.solverstate")
    for _ in range(4000):
        solver.step(100)
Example #5
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1,
                           num_output=10,
                           weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    n.accu = L.Accuracy(n.ip2, n.label)  # accuracy
    return n.to_proto()


# Write proto file
with open('image_data_train.prototxt', 'w') as f:
    f.write(str(net('train00.imglist', 200, imgdata_mean)))
with open('image_data_test.prototxt', 'w') as f:
    f.write(str(net('test00.imglist', 50, imgdata_mean)))

solver = caffe.SGDSolver('auto_solver00_step.prototxt')

solver.net.forward()

niter = 301
plot_interval = 10
train_loss = zeros(niter)
test_acc = zeros(niter)
train_acc = zeros(niter)

# The main solver loop
for it in range(niter):
    solver.step(10)  # SGD by Caffe
    train_loss[it] = solver.net.blobs['loss'].data
    train_acc[it] = solver.net.blobs['accu'].data
    test_acc[it] = solver.test_nets[0].blobs['accu'].data
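# plot_interval is defined above but unused in the original snippet; a minimal
# sketch (assuming matplotlib is available; the output filename is arbitrary)
# of how the recorded curves could be plotted and saved:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(train_loss, label='train loss')
plt.plot(train_acc, label='train accuracy')
plt.plot(test_acc, label='test accuracy')
plt.xlabel('iteration (each point = 10 SGD steps)')
plt.legend()
plt.savefig('auto_solver00_curves.png')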
Example #6
def main():
  # Example usage:  python fine_tune.py <patch size> <resize>

  # # Fine-tuning a Pretrained Network for Style Recognition
  # 
  # In this example, we'll explore a common approach that is particularly useful in real-world applications: take a pre-trained Caffe network and fine-tune the parameters on your custom data.
  # 
  # The upside of such an approach is that, since pre-trained networks are learned on a large set of images, the intermediate layers capture the "semantics" of general visual appearance. Think of it as a very powerful feature extractor that you can treat as a black box. On top of that, only a few layers need to be retrained to obtain very good performance on the new data.

  # First, we will need to prepare the data. This involves the following parts:
  # (1) Get the ImageNet ILSVRC pretrained model with the provided shell scripts.
  # (2) Compile the downloaded OIRDS dataset into a database that Caffe can consume.

  import os
  os.chdir('/opt/caffe')
  import sys
  sys.path.append('/opt/caffe/python')

  import matplotlib
  matplotlib.use('Agg')
  import caffe
  import numpy as np
  from pylab import *
  import subprocess

  # This downloads the ilsvrc auxiliary data (mean file, etc),
  # and a subset of 2000 images for the style recognition task.
  # subprocess.call(['data/ilsvrc12/get_ilsvrc_aux.sh'])
  # subprocess.call(['scripts/download_model_binary.py',
  #                  'models/bvlc_reference_caffenet'
  #                  ])


  # For the record, if you want to train the network with the pure C++ tools, here is the command:
  # 
  # <code>
  # build/tools/caffe train \
  #     -solver models/finetune_flickr_style/solver.prototxt \
  #     -weights models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel \
  #     -gpu 0
  # </code>
  # 
  # However, we will train using Python in this example.

  niter = 200
  # losses will also be stored in the log
  train_loss = np.zeros(niter)
  scratch_train_loss = np.zeros(niter)

  caffe.set_device(0)
  caffe.set_mode_gpu()
  patch_size = sys.argv[1]
  subprocess.call(['tools/oirds/crop_finetune.py', patch_size])


  # Default to the 40-pixel prototxt files; regenerate them for other patch sizes.
  network = 'models/oirds/finetune_train_val40.prototxt'
  proto_solve = 'models/oirds/finetune_solver40.prototxt'
  if patch_size != '40':
    network = 'models/oirds/finetune_train_val'+patch_size+'.prototxt'
    with open('models/oirds/finetune_train_val40.prototxt', 'r') as f:
      with open(network, 'w+') as tv_file:
        for line in f:
          # Change the train.txt and val.txt filenames.
          if '40.txt' in line:
            tv_file.write(line.replace('40', patch_size))
          else:
            tv_file.write(line)

    proto_solve = 'models/oirds/finetune_solver'+patch_size+'.prototxt'
    with open('models/oirds/finetune_solver40.prototxt', 'r') as g:
      with open(proto_solve, 'w+') as solver_file:
        for line in g:
          # Change the train-val prototxt filename.
          if 'net:' in line:
            solver_file.write(line.replace('40', patch_size))
          # Change the snapshot prefix.
          elif 'snapshot_prefix:' in line:
            solver_file.write(line.replace('40', patch_size))
          else:
            solver_file.write(line)

  # We create a solver that fine-tunes from a previously trained network.
  solver = caffe.SGDSolver(proto_solve)
  solver.net.copy_from('git/caffe/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
  # For reference, we also create a solver that does no finetuning.
  scratch_solver = caffe.SGDSolver(proto_solve)

  # We run the solver for niter times, and record the training loss.
  for it in range(niter):
      solver.step(1)  # SGD by Caffe
      scratch_solver.step(1)
      # store the train loss
      train_loss[it] = solver.net.blobs['loss'].data
      scratch_train_loss[it] = scratch_solver.net.blobs['loss'].data
      if it % 10 == 0:
          print 'iter %d, finetune_loss=%f, scratch_loss=%f' % (it, train_loss[it], scratch_train_loss[it])
  print 'done'


  # Let's look at the training loss produced by the two training procedures respectively.

  # In[5]:

  plot(np.vstack([train_loss, scratch_train_loss]).T)


  # Notice how the fine-tuning procedure produces a smoother loss curve and ends up at a better loss. For a closer look at small values, clip the plot to avoid showing the excessively large losses from early training:

  # In[6]:

  plot(np.vstack([train_loss, scratch_train_loss]).clip(0, 4).T)


  # Let's take a look at the testing accuracy after running 200 iterations. Note that we are running a 5-class classification task, so chance accuracy is 20%. As we would reasonably expect, the fine-tuned result should be much better than the one trained from scratch. Let's see.

  # In[7]:

  test_iters = 200 # 10
  accuracy = 0
  scratch_accuracy = 0
  for it in arange(test_iters):
      solver.test_nets[0].forward()
      accuracy += solver.test_nets[0].blobs['accuracy'].data
      scratch_solver.test_nets[0].forward()
      scratch_accuracy += scratch_solver.test_nets[0].blobs['accuracy'].data
  accuracy /= test_iters
  scratch_accuracy /= test_iters
  
  print 'Accuracy for fine-tuning on '+patch_size+'x'+patch_size+' patches: '+str(accuracy)
  print 'Accuracy for training from scratch: '+str(scratch_accuracy)
  
  logfile = '/opt/caffe/tools/oirds/logfile-finetune.csv'

  if not os.path.isfile(logfile):
    with open(logfile, 'w') as h:
      # the header
      h.write('patch_size,accuracy,scratch_accuracy\n')

  with open(logfile, 'a') as log:
    log.write(patch_size+','+str(accuracy)+','+str(scratch_accuracy)+'\n')
Example #7
                             pascal_root=pascal_root)
    f.write(
        caffenet_multilabel(data_layer_params,
                            'PascalMultilabelDataLayerSync'))

# write validation net.
with open(osp.join(workdir, 'valnet.prototxt'), 'w') as f:
    data_layer_params = dict(batch_size=128,
                             im_shape=[227, 227],
                             split='val',
                             pascal_root=pascal_root)
    f.write(
        caffenet_multilabel(data_layer_params,
                            'PascalMultilabelDataLayerSync'))

solver = caffe.SGDSolver(osp.join(workdir, 'solver.prototxt'))
solver.net.copy_from(
    caffe_root +
    'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
solver.test_nets[0].share_with(solver.net)
solver.step(1)

transformer = tools.SimpleTransformer(
)  # This is simply to add back the bias, re-shuffle the color channels to RGB, and so on...
image_index = 0  # First image in the batch.
plt.figure()
plt.imshow(
    transformer.deprocess(copy(solver.net.blobs['data'].data[image_index,
                                                             ...])))
gtlist = solver.net.blobs['label'].data[image_index, ...].astype(np.int)
plt.title('GT: {}'.format(classes[np.where(gtlist)]))
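# A small sketch (not in the original): the net's prediction for the same image
# can be shown next to the ground truth. The output blob name 'score' is an
# assumption here; adjust it to match the network definition.
estlist = solver.net.blobs['score'].data[image_index, ...] > 0
plt.xlabel('EST: {}'.format(classes[np.where(estlist)]))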
Example #8
def train(initmodel,gpu):
    # n = caffe.NetSpec()
    # # n.data = L.Input(shape=[dict(dim=[1, 3, 224, 224])])
    # n.data, n.label = L.ImageLabelmapData(include={'phase': 0}, ## 0-TRAIN 1-TEST
    #                                       image_data_param={
    #                                           'source': "../../data/PIOD/Augmentation/train_pair_320x320.lst",
    #                                           'batch_size': 5,
    #                                           'shuffle': True,
    #                                           'new_height': 0,
    #                                           'new_width': 0,
    #                                           'root_folder': "",
    #                                           'data_type': "h5"},
    #                                       transform_param={
    #                                           'mirror': False,
    #                                           'crop_size': 320,
    #                                           'mean_value': [104.006988525,116.668769836,122.678916931]
    #                                        },
    #                                       ntop=2)
    #
    # n.data, n.label = L.ImageLabelmapData(include={'phase': 1},  ## 0-TRAIN 1-TEST
    #                                 image_data_param={
    #                                     'source': "../../data/PIOD/Augmentation/train_pair_320x320.lst",
    #                                     'batch_size': 5,
    #                                     'shuffle': True,
    #                                     'new_height': 0,
    #                                     'new_width': 0,
    #                                     'root_folder': "",
    #                                     'data_type': "h5"},
    #                                 transform_param={
    #                                     'mirror': False,
    #                                     'crop_size': 320,
    #                                     'mean_value': [104.006988525, 116.668769836, 122.678916931]
    #                                 },
    #                                 ntop=2)
    #
    # n.label_edge, n.label_ori = L.Slice(n.label, slice_param={'slice_point': 1}, ntop=2)
    #
    # ofnet(n, is_train=True)
    #
    # loss_bottoms = [n.unet1b_edge,n.label_edge]
    # n.edge_loss = L.ClassBalancedSigmoidCrossEntropyAttentionLoss(*loss_bottoms,
    #                                  loss_weight=1.0, attention_loss_param={'beta': 4.0,'gamma': 0.5})
    #
    # loss_bottoms = [n.unet1b_ori, n.label_ori, n.label_edge]
    # n.ori_loss = L.OrientationSmoothL1Loss(*loss_bottoms, loss_weight=0.5, smooth_l1_loss_param={'sigma': 3.0})
    #
    # with open('ofnet.prototxt', 'w') as f:
    #     f.write(str(n.to_proto())) ## write network

    # net = caffe.Net('ofnet.prototxt',
    #                 '../ofnet3/ResNet-50-model.caffemodel',
    #                 caffe.TRAIN)

    caffe.set_mode_gpu()
    caffe.set_device(gpu)
    # write_solver()
    solver = caffe.SGDSolver('solver.prototxt')
    if initmodel:
        solver.net.copy_from(initmodel)  ## why use solver.net.copy_from() to load the pretrained model?

    solver.step(solver.param.max_iter)
Example #9
import caffe
caffe.set_device(0)
caffe.set_mode_gpu()
solver = caffe.SGDSolver('./mnist_siamese_solver.prototxt')
solver.solve()

Example #10
                    help='target image path')
args = parser.parse_args()

base_dir = os.getcwd()
sys.path.append(base_dir)

if args.gpu < 0:
    caffe.set_mode_cpu()
else:
    caffe.set_device(args.gpu)
    caffe.set_mode_gpu()

if args.phase == "train":
    functions.misc.rewrite_data('models/WaveletCNN_4level.prototxt', args.dataset)
    Netsolver = os.path.join(base_dir, 'models/solver_WaveletCNN_4level.prototxt')
    solver = caffe.SGDSolver(Netsolver)
    solver.solve()
elif args.phase == "test":
    net = caffe.Net('models/WaveletCNN_4level_deploy.prototxt', args.initmodel, caffe.TEST)
    # load input and configure preprocessing
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_channel_swap('data', (2, 1, 0))
    transformer.set_raw_scale('data', 255.0)

    # load the image in the data layer
    image = caffe.io.load_image(args.target_image)
    min_length = min(image.shape[:2])
    crop_length = int(min_length * 0.6)  # crop image with 60% length of shorter edge
    cropped_imgs = functions.misc.random_crop(image, (crop_length, crop_length), 1)  # shape is N x H x W x C
    cropped_im = cropped_imgs[0]
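    # The example is truncated here. A plausible continuation (a sketch, not
    # the original code): preprocess the crop and run a forward pass. The
    # output blob name 'prob' is an assumption and depends on the deploy prototxt.
    net.blobs['data'].data[...] = transformer.preprocess('data', cropped_im)
    out = net.forward()
    pred = out['prob'][0].argmax()
    print('predicted class index: {}'.format(pred))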
Example #11
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 19 16:22:22 2016

@author: root
"""

import matplotlib.pyplot as plt
import numpy as np
import caffe

caffe.set_device(0)
caffe.set_mode_gpu()
# use SGDSolver, i.e. plain stochastic gradient descent
solver = caffe.SGDSolver('/home/xxx/mnist/solver.prototxt')

# equivalent to max_iter in the solver file, i.e. the maximum number of solver iterations
niter = 9380
# collect data every 100 iterations
display = 100

# each test pass runs 100 iterations: 10000 / 100
test_iter = 100
# run a test pass (100 test iterations) once per training epoch: 60000 / 64 ≈ 938
test_interval = 938

# initialization
train_loss = np.zeros(int(np.ceil(niter * 1.0 / display)))
test_loss = np.zeros(int(np.ceil(niter * 1.0 / test_interval)))
test_acc = np.zeros(int(np.ceil(niter * 1.0 / test_interval)))

# iteration 0 is not counted

Example #12
if __name__ == '__main__':
    par = CorrespondenceParams()
    corrNet = CorrespondenceNetwork(par)

    if par.solver_mode == "GPU":
        caffe.set_mode_gpu()
        caffe.set_device(par.gpu_id)
        #caffe.set_device([0,1])
    else:
        caffe.set_mode_cpu()

    if par.phase == "TRAIN":
        corrNet.generateSolverFile()
        corrNet_name = corrNet.createNetwork()
        # Initialize solver
        solver = caffe.SGDSolver(par.viewpoint_solver_prototxt_file)
        solver = corrNet.load_pretrained_weights2(solver)
        #solver = corrNet.load_pretrained_weights_8S(solver)

        loss_contr = np.zeros((par.max_iter + 1), dtype=float)
        utils.save_params(
            par)  # store the training parameters for this session
        while solver.iter < par.max_iter:
            solver.step(1)
            #print "upscore2 params", solver.net.params['upscore2_1'][0].data[0,0,:,:]
            # take snapshot
            if solver.iter % par.train_snapshot_iters == 0:
                filename = par.train_save_path + "corrNetwork_" + str(
                    solver.iter) + ".caffemodel"
                solver.net.save(filename)
            # store contrastive loss
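            # (truncated in the original; a plausible sketch, assuming the
            #  contrastive loss blob is named 'loss')
            loss_contr[solver.iter] = float(solver.net.blobs['loss'].data)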
Example #13
#coding=utf-8
import matplotlib.pyplot as plt 
import caffe
import numpy as np

solver = caffe.SGDSolver("/home/capstone/Intel_face_detection_origin/cnn_train_models/train_front/mult_solver.prototxt")
#solver.solve()

# equivalent to max_iter in the solver file, i.e. the maximum number of solver iterations
niter = 1086450
# collect data every 2000 iterations
display = 2000

# number of iterations per test pass
test_iter = 3712
# run a test pass every test_interval training iterations
test_interval = 12071
  

print '\ninitialize plot\n'
# initialization
train_loss = np.zeros(int(np.ceil(niter * 1.0 / display)))
test_loss = np.zeros(int(np.ceil(niter * 1.0 / test_interval)))
test_acc = np.zeros(int(np.ceil(niter * 1.0 / test_interval)))
  
# iteration 0 is not counted
solver.step(1)
print '\n calculating \n'
# helper variables
_train_loss = 0; _test_loss = 0; _accuracy = 0
# main solving loop
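# The original listing is truncated here. A minimal sketch of the usual
# accumulate-and-average loop (assuming the train net exposes a 'loss' blob
# and the test net 'loss' and 'accuracy' blobs; adjust names to the prototxt):
for it in range(niter):
    solver.step(1)
    _train_loss += solver.net.blobs['loss'].data
    if it % display == 0:
        train_loss[it // display] = _train_loss / display
        _train_loss = 0
    if it % test_interval == 0:
        for test_it in range(test_iter):
            solver.test_nets[0].forward()
            _test_loss += solver.test_nets[0].blobs['loss'].data
            _accuracy += solver.test_nets[0].blobs['accuracy'].data
        test_loss[it // test_interval] = _test_loss / test_iter
        test_acc[it // test_interval] = _accuracy / test_iter
        _test_loss = 0
        _accuracy = 0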
Example #14
import caffe
caffe.set_device(0)
caffe.set_mode_cpu()
import numpy as np

niter = 10000
solver = None
solver = caffe.SGDSolver('examples/mnist/lenet_solver.prototxt')

# Automatic SGD: TEST2
solver.step(niter)
# save the weights to compare later
w_solver_step = solver.net.layers[1].blobs[0].data.copy()
b_solver_step = solver.net.layers[1].blobs[1].data.copy()

# Manual SGD: TEST1
solver = None
solver = caffe.SGDSolver('examples/mnist/lenet_solver.prototxt')
base_lr = 0.01
momentum = 0.9
weight_decay = 0.0005
lr_w_mult = 1
lr_b_mult = 2
gamma = 0.1
stepsize = 5000

momentum_hist = {}
for layer in solver.net.params:
    m_w = np.zeros_like(solver.net.params[layer][0].data)
    m_b = np.zeros_like(solver.net.params[layer][1].data)
    momentum_hist[layer] = [m_w, m_b]
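# The manual-update loop is truncated in the original. A minimal sketch of what
# it would look like, mirroring Caffe's SGD update: momentum, L2 weight decay,
# per-blob lr multipliers, and the 'step' learning-rate policy.
for it in range(niter):
    solver.net.forward()
    solver.net.backward()
    rate = base_lr * gamma ** (it // stepsize)  # 'step' lr policy
    for layer in solver.net.params:
        w = solver.net.params[layer][0]
        b = solver.net.params[layer][1]
        m_w, m_b = momentum_hist[layer]
        m_w[...] = momentum * m_w - rate * lr_w_mult * (w.diff + weight_decay * w.data)
        m_b[...] = momentum * m_b - rate * lr_b_mult * (b.diff + weight_decay * b.data)
        w.data[...] += m_w
        b.data[...] += m_b
        w.diff[...] = 0
        b.diff[...] = 0
# The result can then be compared against the automatic solver.step() run saved
# above, e.g. np.allclose(w_solver_step, solver.net.layers[1].blobs[0].data).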
Example #15
print 'Processing: finetune_alexnet_{}_{}'.format(
    model_type_str, model_name), 'on GPU', gpu, ',', model_type_str

import caffe

niter = 10000
# Losses will also be stored in the log.
train_loss = np.zeros(niter)
train_accuracy = np.zeros(niter)
val_accuracy = {}

caffe.set_device(gpu)
caffe.set_mode_gpu()

solver = caffe.SGDSolver(result_root +
                         'model/finetune_alexnet_{}_{}/solver.prototxt'.format(
                             model_type_str, model_name))
solver.net.copy_from(result_root +
                     'model/bvlc_alexnet/bvlc_alexnet.caffemodel')

start_time = time.time()
# We run the solver for niter times, and record the training loss.
for it in range(niter):
    solver.step(1)  # SGD by Caffe
    train_loss[it] = solver.net.blobs['loss'].data
    train_accuracy[it] = solver.net.blobs['accuracy_train'].data
    second = int(time.time() - start_time)
    estimated = int(float(niter) / (it + 1) * second)
    estimated_day = estimated / 3600 / 24
    now_time = time.strftime("%H:%M:%S", time.gmtime(second))
    estimated_time = time.strftime("%H:%M:%S", time.gmtime(estimated))
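    # (truncated in the original; presumably a progress report follows, e.g.
    #  print 'iter', it, 'loss', train_loss[it], 'elapsed', now_time, 'ETA', estimated_time)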
Example #16
#this may switch to CPU, unsure
#caffe.set_device(0)
caffe.set_mode_gpu()
caffe_root = '/work/04035/dnelson8/maverick/caffe'
model_root = '/work/04035/dnelson8/maverick/vr_project/caffe_vid/models/f800w7'
model_prototxt = os.path.join(model_root,
                              'train_net{0}.prototxt'.format(sys.argv[1]))
model_solver = os.path.join(model_root,
                            'train_solver{0}.prototxt'.format(sys.argv[1]))

label_root = '/work/04035/dnelson8/maverick/vr_project/dataset/ucfTrainTestlist'
h5_root = '/work/04035/dnelson8/maverick/vr_project/dataset/UCF-101-extract'
split = int(sys.argv[1])

solver = caffe.SGDSolver(model_solver)

# manually load data for now b/c why not
counter = 0
'''train_list, test_list = get_caffe_data.get_train_test_lists(label_root, split)
print 'loading:'
TRUNCATE_FOR_TESTING = 300
train_data, train_label, test_data, test_label = get_caffe_data.get_train_test_data_labels(train_list[:9500], test_list[:3600], h5_root)

print 'loaded:'
# code to get train_data in the proper shape:
train_data = np.array(train_data, dtype=np.float32)
train_data = np.ascontiguousarray(train_data[:,np.newaxis,:,:])

train_label = np.ascontiguousarray(train_label, dtype=np.float32)
print 'train manipulated:'
Example #17
File: train_net.py  Project: iamwsg/caffe
folder = caffe_root + '/examples/siamese/'
import sys
import os
sys.path.insert(0, caffe_root + 'python')
import caffe

os.chdir(caffe_root)

MODEL_FILE = caffe_root + 'examples/siamese/mnist_siamese_train_test_sim.prototxt'
#PRETRAINED_FILE = 'My_mnist_siamese_0to2_feat3_iter_5000.caffemodel'

#net = caffe.Net(MODEL_FILE, caffe.TEST)
#net_train = caffe.Net(MODEL_FILE, caffe.TEST)
caffe.set_mode_cpu()
solver = None
solver = caffe.SGDSolver(folder + 'mnist_siamese_solver.prototxt')
[(k, v.data.shape) for k, v in solver.net.blobs.items()]

solver.net.forward()  # train net
solver.test_nets[0].forward()  # test net (there can be more than one)

#imshow(solver.net.blobs['pair_data'].data[0].transpose(1, 0, 2).reshape(28, 2*28), cmap='gray'); axis('off');show()
#print 'train labels:', solver.net.blobs['sim'].data[:8]
#print 'test labels:', solver.test_nets[0].blobs['sim'].data[:8]

niter = 100
test_interval = 50
# losses will also be stored in the log
train_loss = zeros(niter)
acc = zeros(int(np.ceil(niter / test_interval)))
output = zeros((niter, 100))
Example #18

    # solver.net.params['scale2_2'][0].data[...] = ratio * 4
    # solver.net.params['scale3_1'][0].data[...] = 1.0 / ratio
    # solver.net.params['scale3_2'][0].data[...] = ratio * 4
    # solver.net.params['scale4_1'][0].data[...] = 1.0 / ratio
    # solver.net.params['scale4_2'][0].data[...] = ratio * 4
    # solver.net.params['scale5_1'][0].data[...] = 1.0 / ratio
    # solver.net.params['scale5_2'][0].data[...] = ratio * 4
    solver.net.params['scale6_1'][0].data[...] = 1.0 / ratio
    solver.net.params['scale6_2'][0].data[...] = ratio * 4
    solver.net.params['scale7_1'][0].data[...] = 1.0 / ratio
    solver.net.params['scale7_2'][0].data[...] = ratio * 4


# load the solver and create train and test nets
solver = None  # ignore this workaround for lmdb data (can't instantiate two solvers on the same data)
solver = caffe.SGDSolver(solver_file)
# solver.net.copy_from(pretrained_model)
if relaxation:
    sigmoid_ratio = 1000
else:
    sigmoid_ratio = 0.5
set_ratio(sigmoid_ratio)

transformer = caffe.io.Transformer(
    {'data': solver.net.blobs['data'].data.shape})
data_mean = np.load('ilsvrc12/ilsvrc_2012_mean.npy').mean(1).mean(1)
transformer.set_transpose('data', (2, 0, 1))
transformer.set_mean(
    'data', data_mean -
    8)  # Subtract a bit more to avoid overflow, only for visualization
transformer.set_channel_swap('data', (2, 1, 0))
Example #19

# gen solver prototxt
solver = CaffeSolver(debug=cfgs.debug)
solver.sp = cfgs.sp.copy()
solver.write(cfgs.solver_pt)

debug = True

weights = cfgs.init

# init
caffe.set_device(3)
caffe.set_mode_gpu()
# caffe.set_mode_cpu()

solver = caffe.SGDSolver(cfgs.solver_pt)
if weights is not None:
    solver.net.copy_from(weights)

for iter in range(500 * 2000):
    if debug:
        if iter % 100 == 0 and iter != 0:
            nethelper = NetHelper(solver.net)
            # nethelper.hist('data')
            nethelper.hist('label')
            nethelper.hist('prob', filters=2, attr="blob")
            nethelper.hist('data', filters=2, attr="blob")

            if True:
                for i in range(nethelper.net.blobs['data'].data.shape[0]):
                    plt.subplot(221)
Example #20
from __future__ import division
import sys

caffe_root = '/home/debidatd/parsenet/'
sys.path.insert(0, caffe_root + 'python')

import caffe
import numpy as np

# init
caffe.set_mode_gpu()
caffe.set_device(0)

# caffe.set_mode_cpu()

solver = caffe.SGDSolver(sys.argv[1])
solver.net.copy_from(sys.argv[2])

niter = 50000 
test_iter = 100
test_interval = 500
train_loss = np.zeros(niter)
test_loss = np.zeros(niter)
f = open('train_log.txt', 'w')
g = open('test_log.txt', 'w')

for i in range(niter):
    solver.step(1)
    train_loss[i] = solver.net.blobs['loss'].data
    f.write('{} {}\n'.format(i, train_loss[i]))
    if (i + 1) % test_interval == 0:
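        # (the listing is truncated here; a plausible continuation, assuming
        #  the test net also exposes a 'loss' blob)
        acc_loss = 0.0
        for _ in range(test_iter):
            solver.test_nets[0].forward()
            acc_loss += float(solver.test_nets[0].blobs['loss'].data)
        test_loss[i] = acc_loss / test_iter
        g.write('{} {}\n'.format(i, test_loss[i]))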
Example #21
import numpy as np
import os
import sys
import caffe

weights = None
# weights = '/data/yonatan/yonatan_files/prepared_caffemodels/ResNet-152-model.caffemodel'  #in brainia container jr2

# init
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver(
    '/data/yonatan/yonatan_files/trendi/yonatan/resnet_152_kaggle_planet/solver_152.prototxt'
)

# for finetune
# solver.net.copy_from(weights)

# surgeries
#interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
#surgery.interp(solver.net, interp_layers)

#run net learning iterations
for _ in range(100000):
    solver.step(1)
    myfc17 = solver.net.blobs['myfc17'].data
    print('output of layer "myfc17" {}'.format(myfc17))
#    score.seg_tests(solver, False, val, layer='score')
#    jrinfer.seg_tests(solver, False, val, layer='score')
#    progress_plot.parse_solveoutput('net_output.txt')
Example #22
File: caffe_train.py  Project: jklhj222/bin
#!/usr/bin/env python3

import sys
import os.path as osp

caffe_root = '/home/hugh/pkg/local/caffe/'
sys.path.append(caffe_root + 'python')
sys.path.append(caffe_root + 'examples/pycaffe/')
sys.path.append(caffe_root + 'examples/pycaffe/layers')

import caffe

# initialize caffe for gpu mode
caffe.set_mode_gpu()
caffe.set_device(0)

solver = caffe.SGDSolver(osp.join('solver.prototxt'))
#solver.net.copy_from(caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
solver.test_nets[0].share_with(solver.net)

solver.step(100000)
Example #23
File: solve.py  Project: skhi/DLAnalyzer
import sys
sys.path.append('/disk2/Faliu/caffe/python')
import caffe
import numpy as np
import os
import matplotlib.pyplot as plt

# init
caffe.set_device(int(0))
caffe.set_mode_gpu()

solver = caffe.SGDSolver('../model/solver.prototxt')

niter = 2000
display = 50
test_iter = 250
test_interval = 50

_test_loss = 0

train_loss = np.zeros(niter // display)
test_loss = np.zeros(niter // test_interval)

for iter in range(niter):
    solver.step(1)
    if iter % display == 0:
        train_loss[iter // display] = solver.net.blobs['loss'].data
    if iter % test_interval == 0:
        for test_it in range(test_iter):
            solver.test_nets[0].forward()
            _test_loss += solver.test_nets[0].blobs['loss'].data
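        # (truncated in the original; presumably the accumulated loss is then
        #  averaged and reset, e.g.)
        # test_loss[iter // test_interval] = _test_loss / test_iter
        # _test_loss = 0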
Example #24
def _save_caffemodel(solver_file, blob_file):
    """ Generate .caffemodel file."""
    solver = caffe.SGDSolver(solver_file)
    solver.net.save(blob_file)
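
# Example usage (hypothetical paths): write out a .caffemodel containing the
# network's randomly initialized weights, as defined by the solver prototxt.
# _save_caffemodel('lenet_solver.prototxt', 'lenet_init.caffemodel')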
Example #25
solver_prototxt_path = conf.solver['_solver_prototxt_path']

#root = '/cs/vml3/mkhodaba/cvpr16/code/embedding_segmentation/model/'

#caffe.set_decive(2)
caffe.set_mode_gpu()
#caffe.set_device(2)

test_interval = conf.solver['test_interval']  #10000
niter = conf.solver['max_iter']  #500000
train_interval = conf.solver['_train_interval']  #1000
termination_threshold = conf.solver['_termination_threshold']
net = caffe.Net(model_prototxt_path, caffe.TRAIN)

#solver = caffe.SGDSolver(root+'solver.prototxt')
solver = caffe.SGDSolver(solver_prototxt_path)
# losses will also be stored in the log
#train_loss = zeros(niter)
train_loss = np.array([])
test_acc = zeros(int(np.ceil(niter / test_interval)))
#output = zeros((niter, 8, 10))
test_loss = 0
# the main solver loop
it = -1
prev_loss = 100000000
diff_loss = 100

while it < niter and abs(diff_loss) >= termination_threshold:
    it += 1
    #for it in xrange(niter):
    #print 'iter', it
Example #26
import math
import os
import sys

sys.path.append("..")
sys.path.append("../pylayers")
from pyutils import refine_util as rv

if __name__ == "__main__":
    caffe.set_mode_gpu()
    caffe.set_device(0)
    save_path = '../states/v3/'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    solverproto = '../models/solver_v3.prototxt'
    solver = caffe.SGDSolver(solverproto)
    # default 0, need to be set when restart
    solver.set_iter(0)
    # =============================
    Sov = rv.parse_solverproto(solverproto)
    max_iter = 60000
    save_iter = 100
    display_iter = 10
    _train_loss = 0
    tmpname = save_path + 'loss' + '.mat'
    cur_res_mat = save_path + 'infer_res.mat'
    cur_iter = save_path + 'iter.mat'

    if not os.path.exists(cur_iter):
        weights = '../models/pretrain/vgg16_20M.caffemodel'
        solver.net.copy_from(weights)
Example #27
import caffe
import surgery, score

import numpy as np
import os
import sys

try:
    import setproctitle
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except:
    pass
weights = '../mitos-fcn32s/fcn32s-heavy-pascal.caffemodel'

# init
caffe.set_device(1)
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)

# surgeries
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

solver.step(180000)
Example #28
def text_save(content, filename, mode='a'):
    # Save a list variable to a text file, appending one space-separated line.
    with open(filename, mode) as f:
        for item in content:
            f.write(str(item) + ' ')
        f.write('\n')


savefile = 'tpp_feat_flow.txt'
if os.path.isfile(savefile):
    os.remove(savefile)

solver = caffe.SGDSolver(
    relative_path +
    '/deeptemporal/models/ucf101/flow_feat_tpp_solver.prototxt')
solver.net.copy_from(
    relative_path +
    "/ucf101_split_1_rgb_flow_models/ucf101_split_1_tsn_flow_reference_bn_inception.caffemodel"
)

# savefile = 'end2end_tpp_feat_flow.txt'
# if os.path.isfile(savefile):
#     os.remove(savefile)
#
# solver = caffe.SGDSolver(relative_path + '/deeptemporal/models/ucf101/flow_feat_tpp_solver.prototxt')
# solver.net.copy_from("/home/lilin/my_code/ucf101_split_1_rgb_flow_models/ucf101_split_1_flow_tpp_delete_dropout_lr_0.00001_iter_1500.caffemodel")

# savefile = 'tpp_feat_rgb.txt'
# if os.path.isfile(savefile):
Example #29
import numpy as np
import time
import caffe

beg = time.time()
#traindata = np.load("face.1.train.npy")
#valdata = np.load("face.1.val.npy")
traindata = np.load("train.2.npy")
valdata = np.load("val.2.npy")
end = time.time()

with open(
        "/home/haichen/datasets/MSRBingFaces/labels/msr_cs_faces.n14.90.0.train.shuf.label",
        "r") as f:
    train_labels = map(lambda x: int(x.strip().split("\t")[1]), f.readlines())
with open(
        "/home/haichen/datasets/MSRBingFaces/labels/msr_cs_faces.n14.90.0.val.label",
        "r") as f:
    val_labels = map(lambda x: int(x.strip().split("\t")[1]), f.readlines())

#solver = caffe.SGDSolver("solver.prototxt")
solver = caffe.SGDSolver("solver2.prototxt")
caffe.set_mode_gpu()
solver.net.set_input_arrays(traindata[:3880],
                            np.array(train_labels[:3880], dtype='f'))
#print(traindata.shape)
#print(train_labels[:10])
for net in solver.test_nets:
    net.set_input_arrays(valdata[:765], np.array(val_labels[:765], dtype='f'))
solver.solve()
Example #30
import matplotlib
matplotlib.use('Agg')
import numpy
from pylab import *
import sys
import caffe
import os
import cv2

caffe.set_device(1)
caffe.set_mode_gpu()
solver = caffe.SGDSolver('../caffemodel-1/lenet_auto_solver.prototxt')

niter = 1000
test_interval = 100
train_loss = zeros(niter)
test_acc = zeros(int(np.ceil(niter / test_interval)))
output = zeros((niter, 8, 10))

# the main solver loop
# This could be done from the command line, but looping manually lets us perform other computations inside the loop
for it in range(niter):
    solver.step(1)  # 1 step of SGD by Caffe
    # store the train loss
    train_loss[it] = solver.net.blobs['loss'].data
    # store the output of the first test batch
    # (starting the forward pass at conv1 to avoid loading new data)
    solver.test_nets[0].forward(start='conv1')
    output[it] = solver.test_nets[0].blobs['score'].data[:8]
    # Run a full test every 100th iteration and compute test accuracy
    if it % test_interval == 0: