def write_solver(**kwargs):
    """Write ``solver.prototxt`` into the module-level ``workdir``.

    Starts from the ``tools.CaffeSolver`` defaults and overrides any
    solver parameter supplied as a keyword argument.

    Relies on module-level ``workdir``, ``tools`` and ``osp``.
    """
    solver = tools.CaffeSolver(
        trainnet_prototxt_path=osp.join(workdir, "trainnet.prototxt"),
        testnet_prototxt_path=osp.join(workdir, "valnet.prototxt"))
    # Caller-supplied parameters win over the solver defaults.
    for param, setting in kwargs.items():
        solver.sp[param] = setting
    solver.write(osp.join(workdir, 'solver.prototxt'))
Code example #2
0
    def solver(self):
        """Write ``solver.prototxt`` into ``self.model_dir``.

        Solver settings are taken from ``self.meta_data``; snapshotting
        and validation scheduling are disabled with oversized dummy
        values because they are handled outside of Caffe.
        """
        proto = tools.CaffeSolver(
            trainnet_prototxt_path=osp.join(self.model_dir,
                                            "trainnet.prototxt"),
            testnet_prototxt_path=osp.join(self.model_dir, "valnet.prototxt"))
        meta = self.meta_data

        # solver_mode is passed through as-is (not stringified).
        proto.sp['solver_mode'] = meta['solver_mode']

        # Scalar parameters: prototxt wants them as plain strings.
        for sp_key, meta_key in (('display', 'caffe_display'),
                                 ('base_lr', 'base_lr'),
                                 ('max_iter', 'caffe_max_iter'),
                                 ('momentum', 'momentum'),
                                 ('weight_decay', 'caffe_weight_decay'),
                                 ('stepsize', 'caffe_stepsize'),
                                 ('gamma', 'caffe_gamma')):
            proto.sp[sp_key] = str(meta[meta_key])

        # String-valued parameters must be double-quoted in the prototxt.
        proto.sp['lr_policy'] = '"' + meta['caffe_lr_policy'] + '"'
        proto.sp['regularization_type'] = ('"' + meta['regularization_type']
                                           + '"')
        proto.sp['snapshot_prefix'] = '"' + self.snapshot_dir + '"'

        # Dummy values: we handle snapshots and validation ourselves.
        proto.sp['snapshot'] = '1000000'
        proto.sp['test_interval'] = '1000000'
        proto.sp['test_iter'] = '1000000000'

        proto.write(osp.join(self.model_dir, 'solver.prototxt'))
Code example #3
0
 def get_prototxt(self, learning_rate=0.001, num_epochs=100):
     """Write solver, train and val prototxt files into ``self.workdir``.

     ``num_epochs`` is accepted but never read — presumably a leftover;
     TODO confirm intent before removing it.
     """
     self.solverprototxt = tools.CaffeSolver(
         trainnet_prototxt_path=osp.join(self.workdir, "trainnet.prototxt"),
         testnet_prototxt_path=osp.join(self.workdir, "valnet.prototxt"))
     sp = self.solverprototxt.sp
     sp['base_lr'] = str(learning_rate)
     sp['test_interval'] = str(self.batchsize * 40)
     self.solverprototxt.write(osp.join(self.workdir, 'solver.prototxt'))
     # Emit the train/val network definitions via createCNN(train_flag).
     for fname, train_flag in (('trainnet.prototxt', True),
                               ('valnet.prototxt', False)):
         with open(osp.join(self.workdir, fname), 'w') as fh:
             fh.write(self.createCNN(train_flag))
     print('get prototxt finished.')
Code example #4
0
File: Generate_net.py  Project: lqs19881030/DAN-Caffe
import sys
import os
import numpy as np
import caffe
from caffe import layers as L, params as P
from DeepAlignmentNetwork import DeepAlignmentNetwork
import tools
import os.path as osp

# This file is expected to live in {caffe_root}/DAN-caffe.
caffe_root = '../'
sys.path.append(caffe_root + 'python/DesignLayer')

# Run Caffe on GPU 0.
caffe.set_mode_gpu()
caffe.set_device(0)

workdir = './'

# Write solver.prototxt, overriding only the base learning rate.
solverprototxt = tools.CaffeSolver(
    trainnet_prototxt_path=osp.join(workdir, "trainnet.prototxt"),
    testnet_prototxt_path=osp.join(workdir, "valnet.prototxt"))
solverprototxt.sp['base_lr'] = "0.05"
solverprototxt.write(osp.join(workdir, 'solver.prototxt'))

training = DeepAlignmentNetwork()

# Emit the training network definition (createCNN argument 2 — meaning
# defined by DeepAlignmentNetwork; not visible here).
with open(osp.join(workdir, 'trainnet.prototxt'), 'w') as net_file:
    net_file.write(training.createCNN(2))

solver = caffe.AdamSolver(osp.join(workdir, 'solver.prototxt'))
Code example #5
0
File: net.py  Project: KID-7391/my_fcn
                                    convolution_param=dict(
                                        num_output=21,
                                        kernel_size=128,
                                        stride=8,
                                        bias_term=False,
                                    ),
                                    param=[dict(lr_mult=0)])

    n.score = L.Crop(n.upscore_new, n.data)
    n.loss = L.SoftmaxWithLoss(n.score,
                               n.label,
                               loss_param=dict(normalize=False,
                                               ignore_label=255))

    return n.to_proto()


def make_net():
    """Write the train and validation network prototxt files.

    Uses the module-level ``fcn`` factory with the matching split name.
    """
    for path, split in (('train.prototxt', 'train'),
                        ('val.prototxt', 'seg11valid')):
        with open(path, 'w') as f:
            f.write(str(fcn(split)))


if __name__ == '__main__':
    # Generate the network definitions, then a default solver file
    # next to the fcn32s model directory.
    make_net()
    solver_path = ('/home/wen/caffe-master/semantic/fcn/'
                   'my_fcn32s/solver.prototxt')
    solver_prototxt = tools.CaffeSolver()
    solver_prototxt.write(solver_path)
Code example #6
0
if network_name == 'deep_attribute_network':
    #run_name = 'attribute_cropped_trial1'
    run_name = 'dan_cropped_adam_trial5'
    num_training_samples = 5760
    num_val_samples = 1920
    batch_size = 32
    num_epochs = 1

    # generate train/test prototxt
    deep_attribute_network(output_net=run_name+'_train.prototxt', train=True, num_classes=25, batch_size=batch_size)
    deep_attribute_network(output_net=run_name+'_test.prototxt', train=False, num_classes=25, batch_size=batch_size)
    print 'Network specification generated.'

    # generate solver
    solverprototxt = tools.CaffeSolver(
        trainnet_prototxt_path=project_root + 'out/'+run_name+'_train.prototxt',
        testnet_prototxt_path=project_root + 'out/'+run_name+'_test.prototxt')
    solverprototxt.sp['test_iter'] = str(num_val_samples/batch_size)
    solverprototxt.sp['test_interval'] = "500"
    #solverprototxt.sp['base_lr'] = "0.001"
    solverprototxt.sp['momentum'] = "0.9"
    #solverprototxt.sp['weight_decay'] = "0.0005"
    #solverprototxt.sp['lr_policy'] = "'step'"
    #solverprototxt.sp['type'] = "'SGD'"
    #solverprototxt.sp['gamma'] = "0.1"
    #solverprototxt.sp['stepsize'] = "5000"
    solverprototxt.sp['display'] = "100"
    solverprototxt.sp['max_iter'] = str(num_epochs * num_training_samples/batch_size)
    solverprototxt.sp['snapshot'] = "3000"
    solverprototxt.sp['random_seed'] = "147"
    solverprototxt.sp['snapshot_prefix'] = "'" + model_root + "dan_adam/" + run_name + "'"