def net_prms(dbFile=DEF_DB % 'nyu2_net', **kwargs):
    dArgs = mec.get_default_net_prms(dbFile, **kwargs)
    del dArgs['expStr']
    #The data layer definition: dataNetDefProto
    dArgs.dataNetDefProto = 'data_layer_nyu2'
    #The basic network architecture: baseNetDefProto
    dArgs.baseNetDefProto = 'doublefc-v1_window_fc6'
    #The loss layers
    dArgs.lossNetDefProto = 'nyu2_loss_classify_layers'
    if dArgs.batchSize is None:
        dArgs.batchSize = 128
    #The amount of jitter in both the images
    dArgs.maxJitter = 0
    #The scale of the crop to take from the image
    dArgs.cropScale = 0.9
    #The size to which the cropped image should be resized
    dArgs.ipImSz = 101
    #The mean file and mean type
    dArgs.meanFile = ''
    dArgs.meanType = None
    dArgs.opLrMult = None
    dArgs = mpu.get_defaults(kwargs, dArgs, False)
    allKeys = dArgs.keys()
    dArgs['expStr'] = mec.get_sql_id(dbFile, dArgs)
    return dArgs, allKeys

def get_default_solver_prms(dbFile=DEF_DB, **kwargs):
    '''
    Refer to caffe.proto for a description of the variables.
    '''
    dArgs = edict()
    dArgs.baseSolDefFile = None
    dArgs.iter_size = 1
    dArgs.max_iter = 250000
    dArgs.base_lr = 0.001
    dArgs.lr_policy = 'step'
    dArgs.stepsize = 20000
    dArgs.gamma = 0.5
    dArgs.weight_decay = 0.0005
    dArgs.clip_gradients = -1
    #Momentum
    dArgs.momentum = 0.9
    #Other
    dArgs.regularization_type = 'L2'
    dArgs.random_seed = -1
    #Testing info
    dArgs.test_iter = 100
    dArgs.test_interval = 1000
    dArgs.snapshot = 2000
    dArgs.display = 20
    #Update parameters
    dArgs = mpu.get_defaults(kwargs, dArgs, False)
    dArgs.expStr = 'solprms' + get_sql_id(dbFile, dArgs,
        ignoreKeys=['test_iter', 'test_interval', 'snapshot', 'display'])
    return dArgs

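#A minimal usage sketch (the override values below are hypothetical):
#any kwarg must name one of the defaults above; mpu.get_defaults merges
#the overrides in, and get_sql_id hashes everything except the ignored
#bookkeeping keys into expStr, so runs with identical hyperparameters
#share an experiment id.
def _example_solver_prms():
    sPrms = get_default_solver_prms(base_lr=0.01, stepsize=10000)
    print sPrms.expStr
    return sPrms
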
def net_prms(dbFile=DEF_DB % 'net', **kwargs):
    dArgs = mec.get_siamese_net_prms(dbFile, **kwargs)
    del dArgs['expStr']
    #The data layer definition: dataNetDefProto
    dArgs.dataNetDefProto = 'data_layer_groups'
    #The basic network architecture: baseNetDefProto
    dArgs.baseNetDefProto = 'smallnet-v5_window_siamese_fc5'
    #The loss layers
    dArgs.lossNetDefProto = 'pose_loss_log_l1_layers'
    if dArgs.batchSize is None:
        dArgs.batchSize = 128
    #The amount of jitter in both the images
    dArgs.maxJitter = 0
    #The amount of roll jitter to apply to the images
    dArgs.maxRollJitter = None
    #The size of the crop to take from the image
    dArgs.crpSz = 192
    #The size to which the cropped image should be resized
    dArgs.ipImSz = 101
    #The size and name of the fc layer, if present
    dArgs.fcSz = None
    dArgs.fcName = 'fc5'
    #The mean file and mean type
    dArgs.meanFile = ''
    dArgs.meanType = None
    dArgs.ncpu = 3
    dArgs.readSingleGrp = False
    dArgs = mpu.get_defaults(kwargs, dArgs, False)
    allKeys = dArgs.keys()
    dArgs['expStr'] = mec.get_sql_id(dbFile, dArgs,
        ignoreKeys=['ncpu', 'readSingleGrp'])
    return dArgs, allKeys

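#Usage sketch (hypothetical overrides): ncpu and readSingleGrp only
#affect data loading and sit in the ignoreKeys list above, so changing
#them leaves the experiment string unchanged.
def _example_group_net_prms():
    nPrms, nKeys = net_prms(ncpu=2, fcSz=512)
    print nPrms.expStr
    return nPrms
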
def get_lr_prms(**kwargs):
    #return mec.get_lr_prms()
    dArgs = edict()
    dArgs.batchsize = 128
    dArgs.stepsize = 20000
    dArgs.base_lr = 0.001
    dArgs.max_iter = 250000
    dArgs.gamma = 0.5
    dArgs.weight_decay = 0.0005
    dArgs.clip_gradients = -1
    dArgs.debug_info = False
    dArgs = mpu.get_defaults(kwargs, dArgs)
    #Make the solver
    debugStr = '%s' % dArgs.debug_info
    debugStr = debugStr.lower()
    del dArgs['debug_info']
    solArgs = edict({'test_iter': 100, 'test_interval': 1000,
                     'snapshot': 2000, 'debug_info': debugStr})
    print dArgs.keys()
    for k in dArgs.keys():
        if k in ['batchsize']:
            continue
        solArgs[k] = copy.deepcopy(dArgs[k])
    dArgs.solver = mpu.make_solver(**solArgs)
    expStr = 'batchSz%d_stepSz%.0e_blr%.5f_mxItr%.1e_gamma%.2f_wdecay%.6f'\
        % (dArgs.batchsize, dArgs.stepsize, dArgs.base_lr,
           dArgs.max_iter, dArgs.gamma, dArgs.weight_decay)
    if dArgs.clip_gradients != -1:
        expStr = '%s_gradClip%.1f' % (expStr, dArgs.clip_gradients)
    dArgs.expStr = expStr
    return dArgs

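#Usage sketch: with the defaults above, the format string in
#get_lr_prms yields the experiment string
#  batchSz128_stepSz2e+04_blr0.00100_mxItr2.5e+05_gamma0.50_wdecay0.000500
#and, since clip_gradients stays at -1, no _gradClip suffix is appended.
def _example_lr_prms():
    lPrms = get_lr_prms()
    print lPrms.expStr
    return lPrms
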
def get_data_prms(dbFile=DEF_DB % 'nyu2_data', **kwargs):
    dArgs = edict()
    dArgs.dataset = 'nyu2'
    allKeys = dArgs.keys()
    dArgs = mpu.get_defaults(kwargs, dArgs)
    dArgs['expStr'] = mec.get_sql_id(dbFile, dArgs)
    dArgs['paths'] = snyu.get_paths()
    return dArgs

def get_siamese_window_net_prms(dbFile=DEF_DB, **kwargs):
    dArgs = get_siamese_net_prms(dbFile)
    del dArgs['expStr']
    #Size of the input image
    dArgs.imSz = 227
    #If random cropping is to be used
    dArgs.randCrop = False
    #If grayscale images are to be used
    dArgs.isGray = False
    dArgs = mpu.get_defaults(kwargs, dArgs, False)
    dArgs.expStr = get_sql_id(dbFile, dArgs)
    return dArgs

def get_siamese_net_prms(dbFile=DEF_DB, **kwargs):
    dArgs = get_default_net_prms(dbFile)
    del dArgs['expStr']
    #Layer at which the nets are to be concatenated
    dArgs.concatLayer = 'fc6'
    #If dropout should be used in the concatenation layer
    dArgs.concatDrop = False
    #Number of filters in the concatenation layer
    dArgs.concatSz = None
    #If an extra FC layer needs to be added
    dArgs.extraFc = None
    dArgs = mpu.get_defaults(kwargs, dArgs, False)
    dArgs.expStr = get_sql_id(dbFile, dArgs)
    return dArgs

def get_default_net_prms(dbFile=DEF_DB, **kwargs):
    dArgs = edict()
    #Name of the net which will be constructed
    dArgs.netName = 'alexnet'
    #For layers below lrAbove, learning rate is set to 0
    dArgs.lrAbove = None
    #If weights from a pretrained net are to be used
    dArgs.preTrainNet = None
    #The base proto from which the net will be constructed
    dArgs.baseNetDefProto = None
    #Batch size
    dArgs.batchSize = None
    #runNum
    dArgs.runNum = 0
    dArgs = mpu.get_defaults(kwargs, dArgs, False)
    dArgs.expStr = get_sql_id(dbFile, dArgs)
    return dArgs

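#get_default_net_prms is the base of a small cascade: each derived
#helper (get_siamese_net_prms, get_siamese_window_net_prms) deletes
#expStr, adds its own keys, and recomputes the sql id, so the
#experiment string always covers the full merged parameter set. A
#usage sketch with hypothetical overrides:
def _example_siamese_window_prms():
    nPrms = get_siamese_window_net_prms(imSz=101, isGray=True)
    print nPrms.expStr
    return nPrms
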
def get_data_prms(dbFile=DEF_DB % 'data', lbPrms=None, tvPrms=None, **kwargs):
    if lbPrms is None:
        lbPrms = slu.PosePrms()
    if tvPrms is None:
        tvPrms = get_trainval_split_prms()
    dArgs = mec.edict()
    dArgs.dataset = 'dc-v2'
    dArgs.lbStr = lbPrms.get_lbstr()
    dArgs.tvStr = tvPrms.pStr
    dArgs.isAlign = True
    allKeys = dArgs.keys()
    dArgs = mpu.get_defaults(kwargs, dArgs)
    dArgs['expStr'] = mec.get_sql_id(dbFile, dArgs)
    dArgs['splitPrms'] = tvPrms
    dArgs['lbPrms'] = lbPrms
    dArgs['paths'] = get_paths(dArgs)
    return dArgs

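#Usage sketch: the label and train/val-split parameters travel as
#objects; only their string forms (lbStr, tvStr) enter the sql id,
#while the objects themselves are attached after the id is computed.
def _example_dcv2_data_prms():
    dPrms = get_data_prms(isAlign=False)
    print dPrms.expStr
    return dPrms
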
def get_finetune_prms(**kwargs):
    '''
    sourceModelIter: the iteration of the source-model snapshot from
                     which finetuning starts
    fine_max_iter  : the maximum iteration to which the target model
                     should be trained
    lrAbove        : if learning is to be restricted to layers above a
                     given layer
    fine_base_lr   : the base learning rate for finetuning
    fineRunNum     : the run number of the finetuning experiment
    fineNumData    : the amount of data to be used for finetuning
    fineMaxLayer   : the highest layer of the source network that
                     should be retained
    '''
    dArgs = edict()
    dArgs.base_lr = 0.001
    dArgs.runNum = 1
    dArgs.maxLayer = None
    dArgs.lrAbove = None
    dArgs.dataset = 'sun'
    dArgs.maxIter = 40000
    dArgs.extraFc = False
    dArgs.extraFcDrop = False
    dArgs.sourceModelIter = 60000
    dArgs = mpu.get_defaults(kwargs, dArgs)
    return dArgs

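#Usage sketch (the layer name is hypothetical): restrict learning to
#layers above fc6 and shorten the finetuning schedule.
def _example_finetune_prms():
    fPrms = get_finetune_prms(lrAbove='fc6', maxIter=20000)
    return fPrms
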
def get_data_prms(dbFile=DEF_DB % 'pascal_data', **kwargs):
    dArgs = edict()
    dArgs.dataset = 'pascal'
    dArgs.imCutSz = 256
    dArgs.imPadSz = 36
    dArgs.angleFormat = 'radian'
    dArgs.anglePreProc = 'mean_sub'
    dArgs.nAzBins = None
    dArgs.nElBins = None
    allKeys = dArgs.keys()
    dArgs = mpu.get_defaults(kwargs, dArgs)
    if dArgs.anglePreProc == 'classify':
        assert dArgs.nAzBins is not None
        assert dArgs.nElBins is not None
    dArgs['expStr'] = mec.get_sql_id(dbFile, dArgs)
    dArgs['paths'] = get_paths(dArgs)
    dArgs.azBins = None
    dArgs.elBins = None
    if dArgs.nAzBins is not None:
        dArgs.azBins = np.linspace(-np.pi, np.pi, dArgs.nAzBins + 1)
    if dArgs.nElBins is not None:
        dArgs.elBins = np.linspace(-np.pi, np.pi, dArgs.nElBins + 1)
    return dArgs

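#Usage sketch: in 'classify' mode the bin counts are mandatory, and the
#bin edges are obtained by uniformly dividing [-pi, pi]; n bins give
#n + 1 edges.
def _example_pascal_data_prms():
    dPrms = get_data_prms(anglePreProc='classify', nAzBins=18, nElBins=9)
    print dPrms.azBins
    return dPrms
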
def get_nw_prms(**kwargs):
    #return mec.get_nw_prms(**kwargs)
    dArgs = edict()
    dArgs.netName = 'alexnet'
    dArgs.concatLayer = 'fc6'
    dArgs.concatDrop = False
    dArgs.contextPad = 0
    dArgs.imSz = 227
    dArgs.imgntMean = True
    dArgs.maxJitter = 11
    dArgs.randCrop = False
    dArgs.lossWeight = 1.0
    dArgs.multiLossProto = None
    dArgs.ptchStreamNum = 256
    dArgs.poseStreamNum = 256
    dArgs.isGray = False
    dArgs.isPythonLayer = False
    dArgs.extraFc = None
    dArgs.numFc5 = None
    dArgs.numConv4 = None
    dArgs.numCommonFc = None
    dArgs.lrAbove = None
    dArgs = mpu.get_defaults(kwargs, dArgs)
    if dArgs.numFc5 is not None:
        assert dArgs.concatLayer == 'fc5'
    expStr = 'net-%s_cnct-%s_cnctDrp%d_contPad%d_imSz%d_imgntMean%d_jit%d'\
        % (dArgs.netName, dArgs.concatLayer, dArgs.concatDrop,
           dArgs.contextPad, dArgs.imSz, dArgs.imgntMean, dArgs.maxJitter)
    if dArgs.numFc5 is not None:
        expStr = '%s_numFc5-%d' % (expStr, dArgs.numFc5)
    if dArgs.numConv4 is not None:
        expStr = '%s_numConv4-%d' % (expStr, dArgs.numConv4)
    if dArgs.numCommonFc is not None:
        expStr = '%s_numCommonFc-%d' % (expStr, dArgs.numCommonFc)
    if dArgs.randCrop:
        expStr = '%s_randCrp%d' % (expStr, dArgs.randCrop)
    if dArgs.lossWeight != 1.0:
        if type(dArgs.lossWeight) == list:
            lStr = ''
            for i, l in enumerate(dArgs.lossWeight):
                lStr = lStr + 'lw%d-%.1f_' % (i, l)
            lStr = lStr[0:-1]
            print lStr
            expStr = '%s_%s' % (expStr, lStr)
        else:
            assert isinstance(dArgs.lossWeight, (int, long, float))
            expStr = '%s_lw%.1f' % (expStr, dArgs.lossWeight)
    if dArgs.multiLossProto is not None:
        expStr = '%s_mlpr%s-posn%d-ptsn%d' % (expStr, dArgs.multiLossProto,
            dArgs.poseStreamNum, dArgs.ptchStreamNum)
    if dArgs.isGray:
        expStr = '%s_grayIm' % expStr
    if dArgs.isPythonLayer:
        expStr = '%s_pylayers' % expStr
    if dArgs.extraFc is not None:
        expStr = '%s_extraFc%d' % (expStr, dArgs.extraFc)
    if dArgs.lrAbove is not None:
        expStr = '%s_lrAbove-%s' % (expStr, dArgs.lrAbove)
    dArgs.expStr = expStr
    return dArgs

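#Usage sketch: with the defaults above the experiment string is
#  net-alexnet_cnct-fc6_cnctDrp0_contPad0_imSz227_imgntMean1_jit11
#and every non-default option appends its own suffix, e.g. a
#list-valued lossWeight appends lw0-1.0_lw1-0.5.
def _example_nw_prms():
    nwPrms = get_nw_prms(lossWeight=[1.0, 0.5])
    print nwPrms.expStr
    return nwPrms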