def test_convert():
    fName = '/work4/pulkitag-code/code/ief/IEF/models/new_models/models/new-model.mat'
    outName = 'try.prototxt'
    model = MatConvNetModel(fName)
    imgLayer = mpu.get_layerdef_for_proto('DeployData', 'image', None,
                                          **{'ipDims': [1, 3, 224, 224]})
    kpLayer = mpu.get_layerdef_for_proto('DeployData', 'kp_pos', None,
                                         **{'ipDims': [1, 17, 2, 1]})
    lbLayer = mpu.get_layerdef_for_proto('DeployData', 'label', None,
                                         **{'ipDims': [1, 16, 2, 1]})
    pdef = model.save_caffe_model(ipLayers=[imgLayer, kpLayer, lbLayer],
                                  layerOrder=['render1', 'concat1'])
def make_def_proto(nw, isSiamese=True, baseFileStr='split_im.prototxt',
                   getStreamTopNames=False):
    '''
    If isSiamese is True, duplicate every layer into two weight-sharing
    streams until the first Concat layer is encountered; all layers from
    the Concat onwards form a single stream.
    '''
    baseFile = os.path.join(baseFileStr)
    protoDef = mpu.ProtoDef(baseFile)
    lastTop = 'data'
    siameseFlag = isSiamese
    stream1, stream2 = [], []
    mainStream = []
    nameGen = mpu.LayerNameGenerator()
    for l in nw:
        lType, lParam = l
        lName = nameGen.next_name(lType)
        # Layers that should not be copied while finetuning
        # need to be named differently.
        if 'nameDiff' in lParam:
            lName = lName + '-%s' % lParam['nameDiff']
        if lType == 'Concat':
            siameseFlag = False
            if 'bottom2' not in lParam:
                lParam['bottom2'] = lastTop + '_p'
        if siameseFlag:
            lDef, lsDef = mpu.get_siamese_layerdef_for_proto(
                lType, lName, lastTop, **lParam)
            stream1.append(lDef)
            stream2.append(lsDef)
        else:
            lDef = mpu.get_layerdef_for_proto(lType, lName, lastTop, **lParam)
            mainStream.append(lDef)
        if 'shareBottomWithNext' in lParam:
            # Keep lastTop unchanged so the next layer shares this bottom.
            assert lParam['shareBottomWithNext']
        else:
            lastTop = lName
    # Add the layers: both siamese streams first, then the main stream.
    mainStream = stream1 + stream2 + mainStream
    for l in mainStream:
        protoDef.add_layer(l['name'][1:-1], l)
    if getStreamTopNames:
        if isSiamese:
            top1Name = stream1[-1]['name'][1:-1]
            top2Name = stream2[-1]['name'][1:-1]
        else:
            top1Name, top2Name = None, None
        return protoDef, top1Name, top2Name
    else:
        return protoDef
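# A minimal usage sketch of make_def_proto. Hypothetical: the layer types and
# parameter keys shown are illustrative; which kwargs get_layerdef_for_proto /
# get_siamese_layerdef_for_proto actually accept is defined in
# my_pycaffe_utils, and 'split_im.prototxt' must exist on disk.
def _example_make_def_proto():
    nw = [('Convolution', {'num_output': 96, 'kernel_size': 11, 'stride': 4}),
          ('ReLU', {}),
          # The Concat joins the two siamese streams; 'bottom2' defaults to
          # the siamese twin (lastTop + '_p') when not given explicitly.
          ('Concat', {'concat_dim': 1}),
          ('InnerProduct', {'num_output': 500})]
    protoDef, top1, top2 = make_def_proto(nw, isSiamese=True,
                                          getStreamTopNames=True)
    return protoDef, top1, top2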
def setup_experiment_finetune(prms, cPrms, returnTgCPrms=False, srcDefFile=None):
    if srcDefFile is None:
        # Get the def file.
        if cPrms['fine']['extraFc'] and cPrms['fine']['addDrop']:
            defFile = os.path.join(baseFilePath,
                        'kitti_finetune_fc6_drop_extraFc_deploy.prototxt')
        else:
            if cPrms['concatLayer'] == 'conv4' and cPrms['isMySimple']:
                defFile = os.path.join(baseFilePath,
                            'kitti_finetune_conv4_mysimple_deploy.prototxt')
            else:
                defFile = os.path.join(baseFilePath,
                            'kitti_finetune_fc6_deploy.prototxt')
    else:
        defFile = srcDefFile
    # Set up the target experiment.
    tgCPrms = get_caffe_prms(isFineTune=True,
                             convConcat=cPrms['convConcat'],
                             fine_base_lr=cPrms['fine']['base_lr'],
                             fineRunNum=cPrms['fine']['runNum'],
                             sourceModelIter=cPrms['fine']['modelIter'],
                             lrAbove=cPrms['fine']['lrAbove'],
                             fineNumData=cPrms['fine']['numData'],
                             fineMaxLayer=cPrms['fine']['maxLayer'],
                             fineDataSet=cPrms['fine']['dataset'],
                             fineMaxIter=cPrms['fine']['max_iter'],
                             deviceId=cPrms['deviceId'],
                             addDrop=cPrms['fine']['addDrop'],
                             extraFc=cPrms['fine']['extraFc'],
                             imgntMean=cPrms['imgntMean'],
                             stepsize=cPrms['stepsize'],
                             batchSz=cPrms['batchSz'],
                             concatLayer=cPrms['concatLayer'],
                             isMySimple=cPrms['isMySimple'],
                             imSz=cPrms['imSz'],
                             contextPad=cPrms['contextPad'])
    tgPrms = copy.deepcopy(prms)
    tgPrms['expName'] = 'fine-FROM-%s' % prms['expName']
    tgExp = get_experiment_object(tgPrms, tgCPrms)
    tgExp.init_from_external(tgCPrms['solver'], defFile)
    print(tgExp.expFile_.netDef_.get_all_layernames())
    if tgCPrms['fine']['maxLayer'] is not None:
        # Save the classifier/loss layers, chop the net at maxLayer and then
        # re-attach them (optionally with dropout and an extra fc layer).
        fcLayer = copy.copy(tgExp.expFile_.netDef_.layers_['TRAIN']['class_fc'])
        lossLayer = copy.copy(tgExp.expFile_.netDef_.layers_['TRAIN']['loss'])
        accLayer = copy.copy(tgExp.expFile_.netDef_.layers_['TRAIN']['accuracy'])
        tgExp.del_all_layers_above(tgCPrms['fine']['maxLayer'])
        mxLayer = tgCPrms['fine']['maxLayer']
        lastTop = tgExp.get_last_top_name()
        if tgCPrms['fine']['addDrop']:
            dropLayer = mpu.get_layerdef_for_proto(
                'Dropout', 'drop-%s' % lastTop, lastTop,
                **{'top': lastTop, 'dropout_ratio': 0.5})
            tgExp.add_layer('drop-%s' % lastTop, dropLayer, 'TRAIN')
        if tgCPrms['fine']['extraFc']:
            eName = 'fc-extra'
            ipLayer = mpu.get_layerdef_for_proto(
                'InnerProduct', eName, lastTop,
                **{'top': eName, 'num_output': 2048})
            reLayer = mpu.get_layerdef_for_proto('ReLU', 'relu-extra', eName,
                                                 **{'top': eName})
            tgExp.add_layer(eName, ipLayer, 'TRAIN')
            tgExp.add_layer('relu-extra', reLayer, 'TRAIN')
            lastTop = eName
            if tgCPrms['fine']['addDrop']:
                dropLayer = mpu.get_layerdef_for_proto(
                    'Dropout', 'drop-%s' % eName, eName,
                    **{'top': eName, 'dropout_ratio': 0.5})
                tgExp.add_layer('drop-%s' % eName, dropLayer, 'TRAIN')
        fcLayer['bottom'] = '"%s"' % lastTop
        tgExp.add_layer('class_fc', fcLayer, phase='TRAIN')
        tgExp.add_layer('loss', lossLayer, phase='TRAIN')
        tgExp.add_layer('accuracy', accLayer, phase='TRAIN')
    # Adjust the learning rates if only the upper layers are to be tuned.
    if tgCPrms['fine']['lrAbove'] is not None:
        tgExp.finetune_above(tgCPrms['fine']['lrAbove'])
    # Point the net at the right data files.
    if tgCPrms['fine']['prms'] is None:
        assert (tgCPrms['fine']['numData'] == 1) and \
               (tgCPrms['fine']['dataset'] == 'sun')
        dbPath = '/data0/pulkitag/data_sets/sun/leveldb_store'
        dbFile = os.path.join(dbPath, 'sun-leveldb-%s-%d')
        trnFile = dbFile % ('train', tgCPrms['fine']['runNum'])
        tstFile = dbFile % ('test', tgCPrms['fine']['runNum'])
        tgExp.set_layer_property('data', ['data_param', 'backend'],
                                 'LEVELDB', phase='TRAIN')
        tgExp.set_layer_property('data', ['data_param', 'backend'],
                                 'LEVELDB', phase='TEST')
    else:
        trnFile = tgCPrms['fine']['prms']['paths']['lmdb']['train']
        tstFile = tgCPrms['fine']['prms']['paths']['lmdb']['test']
        tgExp.set_layer_property('data', ['data_param', 'backend'],
                                 'LMDB', phase='TRAIN')
        tgExp.set_layer_property('data', ['data_param', 'backend'],
                                 'LMDB', phase='TEST')
    # Set the data sources.
    tgExp.set_layer_property('data', ['data_param', 'source'],
                             '"%s"' % trnFile, phase='TRAIN')
    tgExp.set_layer_property('data', ['data_param', 'source'],
                             '"%s"' % tstFile, phase='TEST')
    # Set the ImageNet mean.
    if cPrms['imgntMean']:
        #muFile = '"%s"' % '/data1/pulkitag/caffe_models/ilsvrc2012_mean.binaryproto'
        muFile = tgCPrms['fine']['muFile']
        print(muFile)
        tgExp.set_layer_property('data', ['transform_param', 'mean_file'],
                                 muFile, phase='TRAIN')
        tgExp.set_layer_property('data', ['transform_param', 'mean_file'],
                                 muFile, phase='TEST')
    # Set the batch size.
    tgExp.set_layer_property('data', ['data_param', 'batch_size'],
                             tgCPrms['batchSz'], phase='TRAIN')
    tgExp.set_layer_property('data', ['data_param', 'batch_size'],
                             tgCPrms['batchSz'], phase='TEST')
    if returnTgCPrms:
        return tgExp, tgCPrms
    else:
        return tgExp
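# A minimal usage sketch of setup_experiment_finetune. Hypothetical:
# 'my_custom_deploy.prototxt' is a placeholder path, and prms/cPrms are built
# by this project's get_prms/get_caffe_prms-style helpers and must contain
# all the keys read above.
def _example_setup_finetune(prms, cPrms):
    # Override the default deploy definition with a custom one.
    tgExp, tgCPrms = setup_experiment_finetune(
        prms, cPrms, returnTgCPrms=True,
        srcDefFile='my_custom_deploy.prototxt')
    # tgExp now has its solver and net definitions initialized; training is
    # launched through the experiment object's own interface.
    return tgExp, tgCPrms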
def make_caffe_layer(self, lNum):
    # Get the layer name.
    nameRef = self.dat_['net']['layers']['name'][lNum][0]
    name = ou.ints_to_str(self.dat_['#refs#'][nameRef][:])
    # Get the inputs.
    ipRef = self.dat_['net']['layers']['inputs'][lNum][0]
    ipNames = self.ref_to_str(ipRef)
    # Get the parameter names.
    pmRef = self.dat_['net']['layers']['params'][lNum][0]
    pmNames = self.ref_to_str(pmRef)
    # Get the output names.
    opRef = self.dat_['net']['layers']['outputs'][lNum][0]
    opNames = self.ref_to_str(opRef)
    # Get the layer type.
    tpRef = self.dat_['net']['layers']['type'][lNum][0]
    lType = ou.ints_to_str(self.dat_['#refs#'][tpRef][:])
    # Get the layer params.
    lpRef = self.dat_['net']['layers']['block'][lNum][0]
    lParam = self.dat_['#refs#'][lpRef]
    # Strip the 'dagnn.' prefix from the MatConvNet layer type.
    assert lType[0:5] == 'dagnn'
    lType = lType[6:]
    if lType == 'Conv':
        paramW = {'name': pmNames[0]}
        paramB = {'name': pmNames[1]}
        pDupKey = mpu.make_key('param', ['param'])
        lDef = mpu.get_layerdef_for_proto(
            'Convolution', name, ipNames[0],
            **{'num_output': int(lParam['size'][3][0]),
               'param': paramW, pDupKey: paramB,
               'kernel_size': int(lParam['size'][0][0]),
               'stride': int(lParam['stride'][0][0]),
               'pad': int(lParam['pad'][0][0])})
    elif lType == 'ReLU':
        lDef = mpu.get_layerdef_for_proto(lType, name, ipNames[0],
                                          **{'top': opNames[0]})
    elif lType == 'Pooling':
        poolType = lParam['method'][0]
        if poolType == 'max':
            poolType = 'MAX'
        elif poolType == 'avg':
            poolType = 'AVE'
        lDef = mpu.get_layerdef_for_proto(
            lType, name, ipNames[0],
            **{'top': opNames[0],
               'kernel_size': int(lParam['poolSize'][0][0]),
               'stride': int(lParam['stride'][0][0]),
               'pad': int(lParam['pad'][0][0]),
               'pool': poolType})
    elif lType == 'LRN':
        N, kappa, alpha, beta = (lParam['param'][0][0], lParam['param'][1][0],
                                 lParam['param'][2][0], lParam['param'][3][0])
        lDef = mpu.get_layerdef_for_proto(
            lType, name, ipNames[0],
            **{'top': opNames[0], 'local_size': int(N),
               'alpha': N * alpha, 'beta': beta, 'k': kappa})
    elif lType == 'Concat':
        lDef = mpu.get_layerdef_for_proto(
            lType, name, ipNames[0],
            **{'bottom2': ipNames[1:], 'concat_dim': 1, 'top': opNames[0]})
    elif lType == 'Loss':
        lossType = ou.ints_to_str(lParam['loss'])
        if lossType == 'pdist':
            p = lParam['p'][0][0]
            if p == 2:
                lossName = 'EuclideanLoss'
            else:
                raise Exception('pdist loss with p=%d not supported' % p)
        else:
            raise Exception('Loss type %s not recognized' % lossType)
        lDef = mpu.get_layerdef_for_proto(lossName, name, ipNames[0],
                                          **{'bottom2': ipNames[1]})
    elif lType == 'gaussRender':
        lDef = mpu.get_layerdef_for_proto(
            lType, name, ipNames[0],
            **{'top': opNames[0], 'K': lParam['K'][0][0],
               'T': lParam['T'][0][0], 'sigma': lParam['sigma'][0][0],
               'imgSz': int(lParam['img_size'][0][0])})
    else:
        raise Exception('Layer type %s not recognized, %d' % (lType, lNum))
    return lDef
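# A hedged sketch of driving make_caffe_layer over a whole MatConvNet DAG.
# Hypothetical: counting layers via len() on the 'name' dataset is an
# assumption about the .mat layout; the quote-stripping mirrors the
# l['name'][1:-1] pattern used in make_def_proto above.
def _example_convert_all_layers(model, protoDef):
    numLayers = len(model.dat_['net']['layers']['name'])
    for lNum in range(numLayers):
        lDef = model.make_caffe_layer(lNum)
        # Layer names come back quoted (e.g. '"conv1"'), so strip the quotes.
        protoDef.add_layer(lDef['name'][1:-1], lDef)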
def make_net_proto(prms, cPrms, finePrms=None):
    baseFilePath = prms.paths.baseNetsDr
    isSiamese = False
    if prms.isSiamese:
        isSiamese = True
    if finePrms is not None and not finePrms.isSiamese:
        isSiamese = False
    if isSiamese:
        netFileStr = '%s_window_siamese_%s.prototxt'
    else:
        netFileStr = '%s_window_%s.prototxt'
    netFile = netFileStr % (cPrms.nwPrms.netName, cPrms.nwPrms.concatLayer)
    netFile = osp.join(baseFilePath, netFile)
    netDef = mpu.ProtoDef(netFile)
    if cPrms.nwPrms.extraFc is not None:
        # Change the name of the existing common_fc to common_fc_prev.
        netDef.rename_layer('common_fc', 'common_fc_prev')
        netDef.set_layer_property('common_fc_prev', 'top',
                                  '"%s"' % 'common_fc_prev')
        # Rename the params.
        netDef.set_layer_property('common_fc_prev', ['param', 'name'],
                                  '"%s"' % 'common_fc_prev_w')
        netDef.set_layer_property('common_fc_prev', ['param', 'name'],
                                  '"%s"' % 'common_fc_prev_b', propNum=[1, 0])
        netDef.rename_layer('relu_common', 'relu_common_prev')
        netDef.set_layer_property('relu_common_prev', 'top',
                                  '"%s"' % 'common_fc_prev')
        netDef.set_layer_property('relu_common_prev', 'bottom',
                                  '"%s"' % 'common_fc_prev')
        # Add the new layer.
        eName = 'common_fc'
        lastTop = 'common_fc_prev'
        fcLayer = mpu.get_layerdef_for_proto(
            'InnerProduct', eName, lastTop,
            **{'top': eName, 'num_output': cPrms.nwPrms.extraFc})
        reLayer = mpu.get_layerdef_for_proto('ReLU', 'relu_common', eName,
                                             **{'top': eName})
        netDef.add_layer(eName, fcLayer)
        netDef.add_layer('relu_common', reLayer)
    if cPrms.nwPrms.numFc5 is not None:
        netDef.set_layer_property('fc5', ['inner_product_param', 'num_output'],
                                  '%d' % cPrms.nwPrms.numFc5)
        if prms.isSiamese:
            netDef.set_layer_property('fc5_p',
                                      ['inner_product_param', 'num_output'],
                                      '%d' % cPrms.nwPrms.numFc5)
    if cPrms.nwPrms.numConv4 is not None:
        netDef.set_layer_property('conv4',
                                  ['convolution_param', 'num_output'],
                                  '%d' % cPrms.nwPrms.numConv4)
        if prms.isSiamese:
            netDef.set_layer_property('conv4_p',
                                      ['convolution_param', 'num_output'],
                                      '%d' % cPrms.nwPrms.numConv4)
    if cPrms.nwPrms.numCommonFc is not None:
        netDef.set_layer_property('common_fc',
                                  ['inner_product_param', 'num_output'],
                                  '%d' % cPrms.nwPrms.numCommonFc)
    if cPrms.nwPrms.concatDrop:
        dropLayer = mpu.get_layerdef_for_proto(
            'Dropout', 'drop-common_fc', 'common_fc',
            **{'top': 'common_fc', 'dropout_ratio': 0.5})
        netDef.add_layer('drop-common_fc', dropLayer, 'TRAIN')
    if finePrms is not None:
        netDef.rename_layer('common_fc', 'common_fc_fine')
    return netDef
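# A minimal usage sketch of make_net_proto. Hypothetical: ProtoDef exposing a
# write() method for serializing the definition is an assumption;
# 'generated_net.prototxt' is a placeholder name, and prms/cPrms come from
# this project's parameter helpers.
def _example_make_net_proto(prms, cPrms):
    netDef = make_net_proto(prms, cPrms)
    outFile = osp.join(prms.paths.baseNetsDr, 'generated_net.prototxt')
    netDef.write(outFile)  # assumption: ProtoDef can write itself to disk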