예제 #1
0
# Live training/validation curves on twin axes: ax1 (left) carries the two
# loss curves; ax2 (right) carries validation metrics on a fixed [0, 1] scale.
# NOTE(review): subplots / solver / score / val / num_intervals /
# size_intervals / it_axes and the result arrays are defined outside this
# excerpt.
_, ax1 = subplots()
ax2 = ax1.twinx()
ax1.set_xlabel('iteration')
ax1.set_ylabel('train loss (b) - val loss (r)')
ax2.set_ylabel('val accuracy (y) - val iu (g)')
ax2.set_autoscaley_on(False)
ax2.set_ylim([0, 1])

for it in range(num_intervals):

    # Train for one interval, then re-evaluate on the validation set.
    solver.step(size_intervals)
    # solver.net.forward()

    # Test with validation set every 'size_intervals' iterations
    [loss, acc, iu] = score.seg_tests(solver, False, val, layer='score')
    val_acc[it] = acc
    val_iu[it] = iu
    val_loss[it] = loss
    train_loss[it] = solver.net.blobs['loss_conv'].data

    # Plot results
    # Remove the curves drawn on the previous interval so each axis shows one
    # up-to-date line per series instead of stacking duplicates.
    # NOTE(review): two lines are popped from ax2 but only one ax2.plot() is
    # visible below — presumably a green val_iu plot follows in the full file.
    if it > 0:
        ax1.lines.pop(1)
        ax1.lines.pop(0)
        ax2.lines.pop(1)
        ax2.lines.pop(0)

    ax1.plot(it_axes[0:it+1], train_loss[0:it+1], 'b') #Training loss averaged last 20 iterations
    ax1.plot(it_axes[0:it+1], val_loss[0:it+1], 'r')    #Average validation loss
    ax2.plot(it_axes[0:it+1], val_acc[0:it+1], 'y') #Average validation accuracy (mean accuracy of text and background)
예제 #2
0
File: test.py — Project: neopenx/Dragon
# --------------------------------------------------------
# Seg-FCN for Dragon
# Copyright (c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------

""" Test a FCN-8s(PASCAL VOC) network """

import dragon.vm.caffe as caffe
import score
import numpy as np

weights = 'snapshot/train_iter_100000.caffemodel'


def main():
    """Score the trained FCN-8s snapshot on the PASCAL VOC val split."""
    # Select GPU mode and pin device 0 before constructing the solver.
    caffe.set_mode_gpu()
    caffe.set_device(0)

    # Build the solver and load the trained weights into its net.
    solver = caffe.SGDSolver('solver.prototxt')
    solver.net.copy_from(weights)

    # Evaluate on the validation image list; outputs are saved under 'D:/seg'.
    image_ids = np.loadtxt('../data/seg11valid.txt', dtype=str)
    score.seg_tests(solver, 'D:/seg', image_ids)


if __name__ == '__main__':
    main()

예제 #3
0
#caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)

# surgeries: seed the upsampling ("up") deconv layers with bilinear weights
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring: evaluate on the validation split every 10k iterations and append
# the derived metrics to result.txt.
val = np.loadtxt('/scratch/eecs542w17_fluxg/zzdai/valList.txt', dtype=str)

# `with` guarantees the log is closed; previously the handle was never
# closed or flushed, so metrics could be lost if the long job was killed.
with open('/scratch/eecs542w17_fluxg/zzdai/Result16_8/result.txt', 'w+') as f:
    for ix in range(100):
        solver.step(10000)
        print('start')
        # seg_tests returns the confusion histogram; derive metrics manually.
        hist = score.seg_tests(solver, False, val, layer='score', gt='label')
        f.write('\n' + str(ix) + '----------------------------------')
        # Overall pixel accuracy.
        overall_acc = np.diag(hist).sum() / hist.sum()
        f.write('\n overall accuracy:  ' + str(overall_acc))
        # Per-class accuracy, averaged while ignoring absent classes (NaNs).
        per_class_acc = np.diag(hist) / hist.sum(1)
        f.write('\n mean accuracy:  ' + str(np.nanmean(per_class_acc)))
        # Per-class intersection-over-union.
        iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
        f.write('\n mean IU:  ' + str(np.nanmean(iu)))
        # Frequency-weighted IU.
        freq = hist.sum(1) / hist.sum()
        f.write('\n fwavacc:  ' + str((freq[freq > 0] * iu[freq > 0]).sum()))
        f.flush()  # persist each interval's metrics immediately
        print(hist)

예제 #4
0
                # (fragment) tail of the `solverstates` list; its opening
                # bracket and earlier entries sit above this excerpt.
                'train_iter_52000.solverstate',
                'train_iter_56000.solverstate',
                'train_iter_60000.solverstate',
                'train_iter_64000.solverstate',
                'train_iter_68000.solverstate',
                'train_iter_72000.solverstate',
                'train_iter_76000.solverstate',
                'train_iter_80000.solverstate',
                'train_iter_84000.solverstate',
                'train_iter_88000.solverstate',
                'train_iter_92000.solverstate',
                'train_iter_96000.solverstate',
                'train_iter_100000.solverstate',
                'train_iter_104000.solverstate',
                'train_iter_108000.solverstate',
                'train_iter_112000.solverstate',
                'train_iter_116000.solverstate',
                'train_iter_120000.solverstate',
                ]
# Qualify each snapshot filename with the snapshot directory.
solverstates = [os.path.join(snapshot_prefix, s)
        for s in solverstates]

# scoring
#train = np.loadtxt('../data/pascal-obfuscated/VOC2011/ImageSets/Segmentation/train.txt', dtype=str)
val = np.loadtxt('../data/pascal-obfuscated/seg11valid.txt', dtype=str)

# Restore each saved solver state in turn and score it on the val split.
for ss in solverstates:
    solver.restore(ss)
    #score.seg_trains(solver, False, train, layer='score')
    score.seg_tests(solver, False, val, layer='score')
예제 #5
0
    # (fragment) tail of `layerkeys` — GoogLeNet inception layer names; the
    # list opens above this excerpt.
    'inception_4a/3x3_reduce', 'inception_4a/3x3', 'inception_4a/5x5_reduce',
    'inception_4a/5x5', 'inception_4a/pool_proj', 'inception_4b/1x1',
    'inception_4b/3x3_reduce', 'inception_4b/3x3', 'inception_4b/5x5_reduce',
    'inception_4b/5x5', 'inception_4b/pool_proj', 'inception_4c/1x1',
    'inception_4c/3x3_reduce', 'inception_4c/3x3', 'inception_4c/5x5_reduce',
    'inception_4c/5x5', 'inception_4c/pool_proj', 'inception_4d/1x1',
    'inception_4d/3x3_reduce', 'inception_4d/3x3', 'inception_4d/5x5_reduce',
    'inception_4d/5x5', 'inception_4d/pool_proj', 'inception_4e/1x1',
    'inception_4e/3x3_reduce', 'inception_4e/3x3', 'inception_4e/5x5_reduce',
    'inception_4e/5x5', 'inception_4e/pool_proj', 'inception_5a/1x1',
    'inception_5a/3x3_reduce', 'inception_5a/3x3', 'inception_5a/5x5_reduce',
    'inception_5a/5x5', 'inception_5a/pool_proj', 'inception_5b/1x1',
    'inception_5b/3x3_reduce', 'inception_5b/3x3', 'inception_5b/5x5_reduce',
    'inception_5b/5x5', 'inception_5b/pool_proj'
]
# Copy each listed layer's first parameter blob ([0]) from the pretrained
# `net` into the solver's net.
for key in layerkeys:
    solver.net.params[key][0].data[...] = net.params[key][0].data[...]

# scoring
test = np.loadtxt('../../data/douyu_2700/test.txt', dtype=str)

# solver.restore('./snapshot/douyu2700_train_without_bn_iter_130000.solverstate');

# Alternate 500 training iterations with a test-set evaluation, 2000 rounds.
for _ in range(2000):
    solver.step(500)
    # N.B. metrics on the semantic labels are off b.c. of missing classes;
    # score manually from the histogram instead for proper evaluation
    print '================== Accuracy in test dataset ======================'
    score.seg_tests(solver, False, test, layer='score', gt='sem')
    print '=================================================================='
예제 #6
0
import sys

# Give the process a recognizable name (best effort; skip if unavailable).
try:
    import setproctitle
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except Exception:
    # Was a bare `except:`; Exception keeps this best-effort but no longer
    # swallows SystemExit/KeyboardInterrupt.
    pass

# Fine-tune from the siftflow FCN-32s weights.
weights = '../siftflow-fcn32s/siftflow-fcn32s.caffemodel'

# init: the GPU id comes from the first command-line argument
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)

# surgeries: seed the upsampling ("up") deconv layers with bilinear weights
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
test = np.loadtxt('../data/sift-flow/test.txt', dtype=str)

# Alternate 2000 training iterations with a full test-set evaluation of both
# the semantic and geometric heads.
for _ in range(50):
    solver.step(2000)
    # N.B. metrics on the semantic labels are off b.c. of missing classes;
    # score manually from the histogram instead for proper evaluation
    score.seg_tests(solver, False, test, layer='score_sem', gt='sem')
    score.seg_tests(solver, False, test, layer='score_geo', gt='geo')
예제 #7
0
import caffe
import surgery, score

import numpy as np
import os
import sys  # was missing: sys.argv is used below and raised NameError

import setproctitle

setproctitle.setproctitle(os.path.basename(os.getcwd()))

weights = "../fcn16s-heavy-72k.caffemodel"

# init: the GPU id comes from the first command-line argument
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver("solver.prototxt")
solver.net.copy_from(weights)

# surgeries: seed the upsampling ("up") deconv layers with bilinear weights
interp_layers = [k for k in solver.net.params.keys() if "up" in k]
surgery.interp(solver.net, interp_layers)

# scoring: evaluate on the PASCAL VOC validation split every 4k iterations
val = np.loadtxt("../data/segvalid11.txt", dtype=str)

for _ in range(25):
    solver.step(4000)
    score.seg_tests(solver, False, val, layer="score")
예제 #8
0
# Live training/validation curve plotting: ax1 (left) carries losses, ax2
# (right) carries validation metrics on a fixed [0, 1] scale.
# NOTE(review): this excerpt is truncated mid-call on its final line.
_, ax1 = subplots()
ax2 = ax1.twinx()
ax1.set_xlabel('iteration')
ax1.set_ylabel('train loss (b) - val loss (r)')
ax2.set_ylabel('val accuracy (y) - val iu (g)')
ax2.set_autoscaley_on(False)
ax2.set_ylim([0, 1])

for it in range(num_intervals):

    # Train one interval, then score the validation set.
    solver.step(size_intervals)
    # solver.net.forward()

    # Test with validation set every 'size_intervals' iterations
    [loss, acc, iu] = score.seg_tests(solver, False, val, layer='score')
    val_acc[it] = acc
    val_iu[it] = iu
    val_loss[it] = loss
    train_loss[it] = solver.net.blobs['loss_conv'].data

    # Plot results
    # Drop the previous interval's curves before re-plotting the full series.
    if it > 0:
        ax1.lines.pop(1)
        ax1.lines.pop(0)
        ax2.lines.pop(1)
        ax2.lines.pop(0)

    ax1.plot(it_axes[0:it + 1], train_loss[0:it + 1],
             'b')  #Training loss averaged last 20 iterations
    ax1.plot(it_axes[0:it + 1], val_loss[0:it + 1],
예제 #9
0
    # (fragment) body of a try: opened above this excerpt — best-effort
    # process renaming via setproctitle.
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except:
    # NOTE(review): bare except swallows everything, including SystemExit
    # and KeyboardInterrupt; prefer `except Exception:` (left unchanged).
    pass

# Pretrained FCN-16s PASCAL weights; the commented path points at a local
# training snapshot instead.
weights = 'fcn16s-heavy-pascal.caffemodel'
#weights = 'snapshot/train_iter_1000.caffemodel'

# init
# Select GPU or CPU mode based on a '-gpu' command-line flag.
if '-gpu' in sys.argv:
    caffe.set_mode_gpu()
    caffe.set_device(0)
else:
    caffe.set_mode_cpu()

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)

print('Weights copied!')

# surgeries
# Seed the upsampling ("up") deconvolution layers with bilinear weights.
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
val = np.loadtxt('../../data/segvalid11.txt', dtype=str)

# Train in 4k-iteration chunks, scoring the val split after each chunk.
for _ in range(25):
    solver.step(4000)

    score.seg_tests(solver, False, val, layer='score_output2')
예제 #10
0
File: test.py — Project: zycanfly/Dragon
# --------------------------------------------------------
# Seg-FCN for Dragon
# Copyright (c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
""" Test a FCN-32s(PASCAL VOC) network """

import dragon.vm.caffe as caffe
import score
import numpy as np

weights = 'snapshot/train_iter_100000.caffemodel'


def main():
    """Score the trained FCN-32s snapshot on the PASCAL VOC val split."""
    # Select GPU mode and pin device 0 before constructing the solver.
    caffe.set_mode_gpu()
    caffe.set_device(0)

    # Build the solver and load the trained weights into its net.
    solver = caffe.SGDSolver('solver.prototxt')
    solver.net.copy_from(weights)

    # Evaluate on the validation image list; outputs are saved under 'seg'.
    image_ids = np.loadtxt('../data/seg11valid.txt', dtype=str)
    score.seg_tests(solver, 'seg', image_ids)


if __name__ == '__main__':
    main()
예제 #11
0
# (fragment) `parser` — presumably an argparse parser — is built above this
# excerpt; it supplies the weights path and solver prototxt.
args = parser.parse_args()

weights = args.weights

# init
#caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

model_name = args.prototxt

solver = caffe.SGDSolver(args.prototxt)
solver.net.copy_from(weights)
#solver.restore('/media/peter/Blue_Others/VGG_Dictnet_model/snapshot/train_iter_200000.solverstate')

# Strip the trailing "/solver.prototxt" so model_name is the model directory.
model_name = model_name.replace("/solver.prototxt", "")

# surgeries
# Seed the upsampling ("up") deconvolution layers with bilinear weights.
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# Pick the validation list matching the model variant.
if model_name == "VGG-Dictnet-16s":
    val_name = "dataset-brandname_20_products/val.txt"
else:
    val_name = "dataset-object_20_products/val.txt"
# scoring
val = np.loadtxt(val_name, dtype=str)

# One 4k-iteration training chunk, then score (model_name selects output dir).
for _ in range(1):
    solver.step(4000)
    score.seg_tests(solver, False, val, model_name, layer='score')
예제 #12
0
# Evaluate a fully-trained Pascal-Context-59 snapshot (no training here).
#weights = '/home/mipal/Data/HYOJIN/Pascal_Context59/Exp33_Pym2/snapshot/train_iter_24000.caffemodel'
weights = '/home/mipal/Data/HYOJIN/Pascal_Context59/Exp44_Pym_fc_448/snapshot/train_iter_16000.caffemodel'
# init
caffe.set_device(2)
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)

# surgeries
# Bilinear-upsampling surgery is disabled — the loaded weights are final.
#A= solver.net.params.keys()
#interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
#surgery.interp(solver.net, interp_layers)

# scoring
val = np.loadtxt(
    '/home/mipal/Data/HYOJIN/DATA/Pascal_Context_448_59cls/val.txt', dtype=str)
#val = np.loadtxt('D:/Code_room/DATA/VOC2010/VOC2010/Pascal_Context_224_59cls/val.txt', dtype=str)

#save_f.save_feature(solver, 'Result' , train, 'fc7', 'score_pascon', 're_sampling_label')
#print('end')
# Single evaluation pass; outputs are saved under 'Result2'.
score.seg_tests(solver, 'Result2', val, layer='score_pascon')
# The commented block below alternated training with periodic saved tests.
#for i in range(5000):
#solver.step(4000)
#if((i %5)==0) :
#print( str((i+1)*4000) + 'th seg test is saved ---------------')
#score.seg_tests(solver, 'Result', val, layer='score_pascon')
#else :
#print( str((i+1)*4000) + 'th seg test is not saved ---------------')
#score.seg_tests(solver, False, val, layer='score_pascon')
예제 #13
0
import caffe
import surgery, score

import numpy as np
import os

#import setproctitle
#setproctitle.setproctitle(os.path.basename(os.getcwd()))

weights = 'd80k500dp-train_iter_5000.caffemodel'

# init
# GPU setup is commented out; this run is deliberately CPU-only.
#gpu&device
#caffe.set_device(int(sys.argv[1]))
#caffe.set_mode_gpu()
caffe.set_mode_cpu()

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)

# surgeries
# Seed the upsampling ("up") deconvolution layers with bilinear weights.
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
val = np.loadtxt('E:/QMYdata/val.txt', dtype=str)

# Train in 1k-iteration chunks, scoring layer 'fc8_newk' after each chunk.
for _ in range(10):
    solver.step(1000)
    score.seg_tests(solver, False, val, layer='fc8_newk')
예제 #14
0
File: solve.py — Project: gadkins/caffe
    # (fragment) tail of a directory-setup block; the `if` guarding this
    # mkdir opens above the excerpt.
    os.mkdir(loss)

if not os.path.exists(log):
    print 'Creating training_log directory...\n'
    os.mkdir(log)

# init
caffe.set_device(int(device))
caffe.set_mode_gpu()

# Build the part-specific solver; resume from a snapshot when requested,
# otherwise start from the VGG-16 base weights.
solver = caffe.SGDSolver('{}/pascalpart-fcn32s/person/{}/solver.prototxt'.format(models, part))
if is_resume:
    solver.net.copy_from('{}/train_iter_{}.caffemodel'.format(snapshot, iteration))
    solver.restore('{}/train_iter_{}.solverstate'.format(snapshot, iteration))
else:
    solver.net.copy_from('../vgg_no_bilinear_vgg16fc.caffemodel')

# surgeries
# Seed the upsampling ("up") deconvolution layers with bilinear weights.
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
val = np.loadtxt('{}/data/pascal/VOC/VOC2010/ImageSets/person/{}_val.txt'.format(caffe_root, part), dtype=str)
seg_results = '{}/models/pascalpart-fcn32s/person/{}/segmentation_results'.format(caffe_root, part)
# Train in 8k-iteration chunks, scoring the val split after each chunk.
for _ in range(10):
    solver.step(8000)
    score.seg_tests(solver, False, val, layer='score')

# Report total wall-clock time (timer `t` is started above the excerpt).
elapsed = time.time() - t
print 'Time elapse: %ds' % elapsed
예제 #15
0
# Generate the network prototxt, then train from scratch with Adam.
make_net()

# init: the GPU id comes from the first command-line argument
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.AdamSolver('solver.prototxt')

# weights = '../hair-mn/siftflow-fcn8s-heavy.caffemodel'
# solver.net.copy_from(weights)

# surgeries: seed the upsampling layers with bilinear interpolation weights
interp_layers = [k for k in solver.net.params.keys() if 'upsample' in k]

surgery.interp(solver.net, interp_layers)

# scoring
# test = np.loadtxt('../data/hair/test.txt', dtype=str)
import glob

# Build the test id list from the image filenames on disk.
test = sorted([
    s.split('/')[-1].split('.jpg')[0]
    for s in glob.glob('../data/hair/realdata/test/*.jpg')
])

for ind in range(100):  # epoch=100
    # Use floor division so the step count stays an int under Python 3 as
    # well ('/' would pass a float to solver.step).
    solver.step(4500 //
                4)  # every epoch has iterations = 4500 images/ batchsize4
    # score manually from the histogram instead for proper evaluation
    score.seg_tests(solver, './test_img', test, layer='output_sep', gt='label')
예제 #16
0
import sys

# Give the process a recognizable name (best effort; skip if unavailable).
try:
    import setproctitle
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except Exception:
    # Was a bare `except:`; Exception keeps this best-effort but no longer
    # swallows SystemExit/KeyboardInterrupt.
    pass

# Fine-tune from the siftflow FCN-16s weights.
weights = '../siftflow-fcn16s/siftflow-fcn16s.caffemodel'

# init: the GPU id comes from the first command-line argument
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)

# surgeries: seed the upsampling ("up") deconv layers with bilinear weights
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)

# scoring
test = np.loadtxt('../data/sift-flow/test.txt', dtype=str)

# Alternate 2000 training iterations with a full test-set evaluation of both
# the semantic and geometric heads.
for _ in range(50):
    solver.step(2000)
    # N.B. metrics on the semantic labels are off b.c. of missing classes;
    # score manually from the histogram instead for proper evaluation
    score.seg_tests(solver, False, test, layer='score_sem', gt='sem')
    score.seg_tests(solver, False, test, layer='score_geo', gt='geo')
예제 #17
0
import os
import sys
import numpy as np

import caffe

import score

# Transferred FCN-32s initialization for face segmentation.
weights = './model/face_fcn32s_trans_init.caffemodel'

# init
caffe.set_device(0)
caffe.set_mode_gpu()

solver = caffe.SGDSolver('./model/solver.prototxt')
solver.net.copy_from(weights)

# scoring
# The validation set here is a range of numeric image ids clipped to the
# first `clip` entries — not a file list as in the other scripts.
clip = 200
val = range(162771, 182638)[:clip]

# Train in 1k-iteration chunks, scoring the clipped val set after each.
for _ in range(10000):
    solver.step(1000)
    ##score.seg_tests(solver, False, val, layer='score')
    # 'val_score_{0}' is a save-path template — presumably filled in with the
    # solver iteration inside score.seg_tests; verify against score.py.
    score.seg_tests(solver, 'val_score_{0}', val, layer='score')