Code example #1
def gen_DNN():
    net = RandomDNN()
    data_layer_str = random_net_defs.dataLayerStr_deploy(256, 3, 227, 227)  #batch, channels, height, width
    #data_layer_str = random_net_defs.dataLayerStr_trainval('LMDB', '/path/to/train_lmdb', 'LMDB', '/path/to/test_lmdb', 256, 50, 227, 'examples/imagenet/ilsvrc12_train_lmdb')
    print data_layer_str

    prev_layer_type = 'data'
    prev_layer_name = 'data_layer'
    for i in xrange(0, 10):
        curr_layer_type = net.chooseNextLayer(prev_layer_type)
        if curr_layer_type == 'conv':
            curr_layer_dict = net.convLayer(i, prev_layer_name)
        elif curr_layer_type == 'pool':
            curr_layer_dict = net.poolLayer(i, prev_layer_name)
        elif curr_layer_type == 'relu':
            curr_layer_dict = net.reluLayer(i, prev_layer_name)
        elif curr_layer_type == 'lrn':
            curr_layer_dict = net.lrnLayer(i, prev_layer_name)

        print curr_layer_dict['prototxt']
        prev_layer_name = curr_layer_dict['name']  #TODO: only update this for layers that can't be computed in place?
        prev_layer_type = curr_layer_type

    #boilerplate fully-connected (fast to compute, can't hurt.)
    print random_net_defs.fcLayerStr('fc6', prev_layer_name, 'fc6', 4096)
    print random_net_defs.reluLayerStr('relu6', 'fc6', 'fc6')  #in-place ReLU
    print random_net_defs.dropoutLayerStr('drop6', 'fc6', 'fc6')
    print random_net_defs.fcLayerStr('fc7', 'fc6', 'fc7', 4096)
    print random_net_defs.reluLayerStr('relu7', 'fc7', 'fc7')
    print random_net_defs.dropoutLayerStr('drop7', 'fc7', 'fc7')
    print random_net_defs.fcLayerStr('fc8', 'fc7', 'fc8', 1000)
Code example #2
def gen_DNN(phase):
    net = RandomDNN()

    CAFFE_ROOT = '/lustre/atlas/scratch/forresti/csc103/dnn_exploration/caffe-bvlc-master'
    DATA_PATH = '/lustre/atlas/scratch/forresti/csc103/dnn_exploration'

    if phase == 'deploy':
        data_layer_str = random_net_defs.dataLayerStr_deploy(256, 3, 227, 227)
    elif phase == 'trainval':
        data_layer_str = random_net_defs.dataLayerStr_trainval('LMDB', DATA_PATH+'/ilsvrc2012_train_256x256_lmdb', 'LMDB', DATA_PATH+'/ilsvrc2012_val_256x256_lmdb', 256, 50, 227, CAFFE_ROOT+'/data/ilsvrc12/imagenet_mean.binaryproto') 
    else:
        print "Warning: didn't generate data_layer. phase must be 'deploy' or 'trainval'"
        return  #bail out: data_layer_str is undefined on this path
    print data_layer_str

    downsampleTo = 227  #current spatial size (input img height = width = 227); shrinks as conv/pool layers downsample

    prev_layer_type = 'data'
    prev_layer_name = 'data_layer'
    for i in xrange(0, 10):
        curr_layer_type = net.chooseNextLayer(prev_layer_type)
        if curr_layer_type == 'conv':
            tmp_layer_dict = net.convLayer(i, prev_layer_name, downsampleTo)
            if tmp_layer_dict['downsampleTo'] > 0:
                curr_layer_dict = tmp_layer_dict
                downsampleTo = curr_layer_dict['downsampleTo']
                #print 'downsampleTo: ', curr_layer_dict['downsampleTo']
            else: #if the new layer would downsample below 1x1, ignore the layer.
                continue
        elif curr_layer_type == 'pool':
            tmp_layer_dict = net.poolLayer(i, prev_layer_name, downsampleTo)
            if tmp_layer_dict['downsampleTo'] > 0:
                curr_layer_dict = tmp_layer_dict
                downsampleTo = curr_layer_dict['downsampleTo']
                #print 'downsampleTo: ', curr_layer_dict['downsampleTo']
            else:
                continue
        elif curr_layer_type == 'relu':
            curr_layer_dict = net.reluLayer(i, prev_layer_name)
        elif curr_layer_type == 'lrn':
            curr_layer_dict = net.lrnLayer(i, prev_layer_name)

        print curr_layer_dict['prototxt']
        prev_layer_name = curr_layer_dict['name'] #TODO: only update this for layers that can't be computed in place?
        prev_layer_type = curr_layer_type

    #boilerplate fully-connected (fast to compute, can't hurt.)
    print random_net_defs.fcLayerStr('fc6', prev_layer_name, 'fc6', 4096)
    print random_net_defs.reluLayerStr('relu6', 'fc6', 'fc6') #in-place ReLU
    print random_net_defs.dropoutLayerStr('drop6', 'fc6', 'fc6')
    print random_net_defs.fcLayerStr('fc7', 'fc6', 'fc7', 4096) 
    print random_net_defs.reluLayerStr('relu7', 'fc7', 'fc7') 
    print random_net_defs.dropoutLayerStr('drop7', 'fc7', 'fc7')
    print random_net_defs.fcLayerStr('fc8', 'fc7', 'fc8', 1000)

    if phase == 'trainval':
        #boilerplate scoring (use only if trainval)
        print random_net_defs.scoringTrainvalStr('fc8')
Code example #3
    def reluLayer(self, layerIdx, prevLayerStr):
        myName = "layer" + str(layerIdx) + "_relu"
        bottom = prevLayerStr
        top = myName #not bothering with in-place 

        #print myName
        retStr = random_net_defs.reluLayerStr(myName, bottom, top)
        
        return {'name':myName, 'prototxt':retStr}
Code example #4
    def reluLayer(self, layerIdx, prevLayerStr):
        myName = "layer" + str(layerIdx) + "_relu"
        bottom = prevLayerStr
        top = myName  #not bothering with in-place

        #print myName
        retStr = random_net_defs.reluLayerStr(myName, bottom, top)

        return {'name': myName, 'prototxt': retStr}
Code example #5
def gen_DNN(phase):
    net = RandomDNN()

    CAFFE_ROOT = '/lustre/atlas/scratch/forresti/csc103/dnn_exploration/caffe-bvlc-master'
    DATA_PATH = '/lustre/atlas/scratch/forresti/csc103/dnn_exploration'

    if phase == 'deploy':
        data_layer_str = random_net_defs.dataLayerStr_deploy(256, 3, 227, 227)
    elif phase == 'trainval':
        data_layer_str = random_net_defs.dataLayerStr_trainval(
            'LMDB', DATA_PATH + '/ilsvrc2012_train_256x256_lmdb', 'LMDB',
            DATA_PATH + '/ilsvrc2012_val_256x256_lmdb', 256, 50, 227,
            CAFFE_ROOT + '/data/ilsvrc12/imagenet_mean.binaryproto')
    else:
        print "Warning: didn't generate data_layer. phase must be 'deploy' or 'trainval'"
        return  #bail out: data_layer_str is undefined on this path
    print data_layer_str

    downsampleTo = 227  #current spatial size (input img height = width = 227); shrinks as conv/pool layers downsample

    prev_layer_type = 'data'
    prev_layer_name = 'data_layer'
    for i in xrange(0, 10):
        curr_layer_type = net.chooseNextLayer(prev_layer_type)
        if curr_layer_type == 'conv':
            tmp_layer_dict = net.convLayer(i, prev_layer_name, downsampleTo)
            if tmp_layer_dict['downsampleTo'] > 0:
                curr_layer_dict = tmp_layer_dict
                downsampleTo = curr_layer_dict['downsampleTo']
                #print 'downsampleTo: ', curr_layer_dict['downsampleTo']
            else:  #if the new layer would downsample below 1x1, ignore the layer.
                continue
        elif curr_layer_type == 'pool':
            tmp_layer_dict = net.poolLayer(i, prev_layer_name, downsampleTo)
            if tmp_layer_dict['downsampleTo'] > 0:
                curr_layer_dict = tmp_layer_dict
                downsampleTo = curr_layer_dict['downsampleTo']
                #print 'downsampleTo: ', curr_layer_dict['downsampleTo']
            else:
                continue
        elif curr_layer_type == 'relu':
            curr_layer_dict = net.reluLayer(i, prev_layer_name)
        elif curr_layer_type == 'lrn':
            curr_layer_dict = net.lrnLayer(i, prev_layer_name)

        print curr_layer_dict['prototxt']
        prev_layer_name = curr_layer_dict['name']  #TODO: only update this for layers that can't be computed in place?
        prev_layer_type = curr_layer_type

    #boilerplate fully-connected (fast to compute, can't hurt.)
    print random_net_defs.fcLayerStr('fc6', prev_layer_name, 'fc6', 4096)
    print random_net_defs.reluLayerStr('relu6', 'fc6', 'fc6')  #in-place ReLU
    print random_net_defs.dropoutLayerStr('drop6', 'fc6', 'fc6')
    print random_net_defs.fcLayerStr('fc7', 'fc6', 'fc7', 4096)
    print random_net_defs.reluLayerStr('relu7', 'fc7', 'fc7')
    print random_net_defs.dropoutLayerStr('drop7', 'fc7', 'fc7')
    print random_net_defs.fcLayerStr('fc8', 'fc7', 'fc8', 1000)

    if phase == 'trainval':
        #boilerplate scoring (use only if trainval)
        print random_net_defs.scoringTrainvalStr('fc8')