Example no. 1
    def loadSolverParameters(self):
        self.data = open(root + '/net/netData.prototxt').read()
        self.netHandler = netConfig_pb2.Param()
        text_format.Merge(self.data, self.netHandler)
        self.protoHandler = caffe_pb2.SolverParameter()
        if self.index is not None and len(self.netHandler.net) > 0:
            text_format.Merge(
                open(self.netHandler.net[self.index].solverpath).read(),
                self.protoHandler)
        else:
            text_format.Merge(
                open(root + '/src/custom/defaultSolver.prototxt').read(),
                self.protoHandler)
        print self.protoHandler
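
A minimal, self-contained sketch of the same text_format round trip (assuming only that the generated caffe_pb2 module is importable):

    from google.protobuf import text_format
    import caffe_pb2

    # Parse prototxt text into a SolverParameter, then serialize it back.
    solver = caffe_pb2.SolverParameter()
    text_format.Merge('base_lr: 0.01\nmax_iter: 10000', solver)
    print(text_format.MessageToString(solver))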
Example no. 2
    def onUploadClicked(self):
        print 'Upload Button Clicked'
        path = QtGui.QFileDialog.getOpenFileName(self, self.tr("Open File"),
                                                 root)
        if (path != "" and path.endsWith(".prototxt")):
            print path
            if (not os.path.exists(path)): return
            with open(path, 'r') as f:
                print path, 'EXISTS'
                self.protoHandler = caffe_pb2.SolverParameter()
                text_format.Merge(f.read(), self.protoHandler)
                self.fillData()
                print 'DATA FILLED'

        print 'Help'
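
Note that endsWith above is a QString method (PyQt4 with the QString API); with a plain Python str the equivalent check would be, as a sketch:

    # Plain-Python equivalent of the QString check above;
    # the path is a placeholder.
    path = '/tmp/solver.prototxt'
    if path != '' and path.endswith('.prototxt'):
        print(path)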
Example no. 3
    def test_batch_accumulation_calculations(self):
        batch_size = 10
        batch_accumulation = 2

        job_id = self.create_model(
            batch_size=batch_size,
            batch_accumulation=batch_accumulation,
        )
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'
        info = self.model_info(job_id)
        solver = caffe_pb2.SolverParameter()
        with open(os.path.join(info['directory'], info['solver file']), 'r') as infile:
            text_format.Merge(infile.read(), solver)
        assert solver.iter_size == batch_accumulation, \
            'iter_size is %d instead of %d' % (solver.iter_size, batch_accumulation)
        max_iter = int(math.ceil(
            float(self.TRAIN_EPOCHS * self.IMAGE_COUNT * 3) /
            (batch_size * batch_accumulation)
        ))
        assert solver.max_iter == max_iter,\
            'max_iter is %d instead of %d' % (solver.max_iter, max_iter)
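
The expected max_iter follows from the epoch count and the effective batch size (batch_size * batch_accumulation). As a standalone check with illustrative values (TRAIN_EPOCHS and IMAGE_COUNT here are assumptions, not the test's real attributes):

    import math

    TRAIN_EPOCHS, IMAGE_COUNT = 1, 100      # illustrative values
    batch_size, batch_accumulation = 10, 2
    max_iter = int(math.ceil(
        float(TRAIN_EPOCHS * IMAGE_COUNT * 3) /
        (batch_size * batch_accumulation)))
    print(max_iter)                          # 300 / 20 -> 15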
Example no. 4
    def createConfiguration(self, extra=None):
        name = str(self.page3Widget.lineEditConfigName.text()).lower()
        path = root + '/net/train/' + name
        self.path = path
        print 'PATH!!!', path
        # 1. Create Folder in train
        if not os.path.exists(path):
            os.mkdir(path)
        # 2. Add train_net,solver,etc into it.
        self.index = self.page1NetEditor.index
        self.maxIterations = 10000
        self.netHandler = netConfig_pb2.Param()
        self.data = open(root + '/net/netData.prototxt').read()
        text_format.Merge(self.data, self.netHandler)
        print self.netHandler.net[self.index]
        # Copy the train net definition into the new configuration folder.
        shutil.copyfile(self.netHandler.net[self.index].trainpath,
                        path + '/' + name + '_train.prototxt')
        with open(path + '/' + name + '_solver.prototxt', 'w') as f:
            solverHandle = caffe_pb2.SolverParameter()
            # Copy the solver currently edited on page 2 via a
            # text-format round trip.
            self.page2Widget.updateHandle()
            text_format.Merge(self.page2Widget.protoHandler.__str__(),
                              solverHandle)
            #text_format.Merge(open(self.netHandler.net[self.index].solverpath,'r').read(),solverHandle)
            solverHandle.snapshot_prefix = (path + '/' + name)
            solverHandle.net = (path + '/' + name + '_train.prototxt')
            if self.netHandler.net[self.index].gpu:
                solverHandle.solver_mode = 1  # GPU
            else:
                solverHandle.solver_mode = 0  # CPU
            # Remember the solver's maximum iterations for progress tracking.
            self.maxIterations = solverHandle.max_iter
            # Also write the updated solver back to its original location.
            with open(self.netHandler.net[self.index].solverpath, 'w') as sf:
                sf.write(solverHandle.__str__())
            f.write(solverHandle.__str__())

        ### FINETUNE STARTS
        self.isFinetune = False
        if (self.netHandler.net[self.index].modelpath != ""):
            reply = QtGui.QMessageBox.question(
                self, 'Message',
                "Do you want to finetune using existing weights?",
                QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
                QtGui.QMessageBox.No)
            if (reply == QtGui.QMessageBox.Yes): self.isFinetune = True
        ### FINETUNE ENDS
        trainDataName = self.page3Widget.widget.comboBoxTraining.currentText()
        validationDataName = \
            self.page3Widget.widget.comboBoxValidation.currentText()
        shutil.copy(root + '/data/' + trainDataName + '.hdf5',
                    path + '/' + name + '_train.hdf5')
        shutil.copy(root + '/data/' + validationDataName + '.hdf5',
                    path + '/' + name + '_val.hdf5')
        with open(path + '/' + name + '_trainscript.sh', 'w') as f:
            f.write("#!/usr/bin/env sh\n")
            #f.write("cd $CAFFE_ROOT\n")
            otherarguments = (' -weights ' +
                              self.netHandler.net[self.index].modelpath
                              if self.isFinetune else '')
            if self.netHandler.net[self.index].gpu:
                otherarguments += ' -gpu ' + str(
                    self.netHandler.net[self.index].gpu_index)
            f.write(
                os.getenv('CAFFE_ROOT') +
                '/build/tools/caffe train --solver=' + path + '/' + name +
                '_solver.prototxt' + otherarguments + '\n')
            f.write('echo \'Train Completed \'')
        with open(path + '/' + name + '_train.txt', 'w') as f:
            f.write(path + '/' + name + '_train.hdf5')
        with open(path + '/' + name + '_val.txt', 'w') as f:
            f.write(path + '/' + name + '_val.hdf5')

        #Change The train_test prototxt to associate right data
        handle = caffe_pb2.NetParameter()
        text_format.Merge(
            open(path + '/' + name + '_train.prototxt').read(), handle)
        ######### NEW VERSION #########################
        if len(handle.layers) == 0:
            # Net uses the new-style 'layer' field rather than the
            # deprecated 'layers' field.
            if (True):
                if (True):
                    #if(handle.layers[0].type in [5,12,29,24]):
                    #del handle.layers[0].data_param
                    layerHandle = caffe_pb2.LayerParameter()
                    text_format.Merge(
                        open(root +
                             '/net/defaultHDF5TrainDataNew.prototxt').read(),
                        layerHandle)
                    #if(self.netHandler.net[self.index].has_mean==True):
                    #layerHandle.transform_param
                    #layerHandle.transform_param.mean_file=self.netHandler.net[self.index].meanpath
                    #else:
                    #layerHandle.ClearField('transform_param')
                    handle.layer[0].CopyFrom(layerHandle)
                    handle.layer[0].name = name.lower()
                    # Batch size: default to 100 when the field is empty.
                    batchSize = str(
                        self.page3Widget.widget.lineEditBatchSizeTraining.text())
                    batchSize = 100 if batchSize == "" else int(batchSize)
                    handle.layer[0].hdf5_data_param.batch_size = batchSize
                    handle.layer[0].hdf5_data_param.source = (
                        path + '/' + name + '_train.txt')
                    print handle.layer[0]

                if (self.page3Widget.widget.checkBoxValidation.checkState() >
                        0):
                    #if(handle.layers[1].type in [5,12,29,24]):#handle.layers[1].type="HDF5Data"
                    layerHandle = caffe_pb2.LayerParameter()
                    text_format.Merge(
                        open(root +
                             '/net/defaultHDF5TestDataNew.prototxt').read(),
                        layerHandle)
                    handle.layer[1].CopyFrom(layerHandle)
                    handle.layer[1].name = name
                    # Batch size: default to 100 when the field is empty.
                    batchSize = str(
                        self.page3Widget.widget.lineEditBatchSizeValidation.text())
                    batchSize = 100 if batchSize == "" else int(batchSize)
                    handle.layer[1].hdf5_data_param.batch_size = batchSize
                    handle.layer[1].hdf5_data_param.source = (
                        path + '/' + name + '_val.txt')
                    print handle.layer[1]
                else:
                    del handle.layer[1]
                    # Iterate in reverse so deletions don't skip elements.
                    for layerIdx in reversed(range(len(handle.layer))):
                        layer = handle.layer[layerIdx]
                        for incIdx in reversed(range(len(layer.include))):
                            inc = layer.include[incIdx]
                            if inc.HasField('phase'):
                                if inc.phase == 0:
                                    # TRAIN rule: drop just the include rule.
                                    del handle.layer[layerIdx].include[incIdx]
                                    #Check later what happens if only 'phase' is removed.
                                else:
                                    # TEST-only layer: drop the whole layer.
                                    del handle.layer[layerIdx]
                                    break

                    #if(self.netHandler.net[self.index].has_mean==True):
                        #layerHandle.transform_param
                        #layerHandle.transform_param.mean_file=self.netHandler.net[self.index].meanpath
                        #else:
                        #layerHandle.ClearField('transform_param')

        ################ OLD VERSION #########################
        else:
            if (True):
                if (True):
                    #if(handle.layers[0].type in [5,12,29,24]):
                    #del handle.layers[0].data_param
                    layerHandle = caffe_pb2.LayerParameter()
                    text_format.Merge(
                        open(root +
                             '/net/defaultHDF5TrainData.prototxt').read(),
                        layerHandle)
                    if self.netHandler.net[self.index].has_mean:
                        layerHandle.transform_param.mean_file = \
                            self.netHandler.net[self.index].meanpath
                    else:
                        layerHandle.ClearField('transform_param')
                    handle.layers[0].CopyFrom(layerHandle)
                    # Set the name after CopyFrom, which would otherwise
                    # overwrite it.
                    handle.layers[0].name = name.lower()
                    # Batch size: default to 100 when the field is empty.
                    batchSize = str(
                        self.page3Widget.widget.lineEditBatchSizeTraining.text())
                    batchSize = 100 if batchSize == "" else int(batchSize)
                    # This branch works on the deprecated 'layers' field,
                    # so index into 'layers', not 'layer'.
                    handle.layers[0].hdf5_data_param.batch_size = batchSize
                    handle.layers[0].hdf5_data_param.source = (
                        path + '/' + name + '_train.txt')
                    print handle.layers[0]

                if (self.page3Widget.widget.checkBoxValidation.checkState() >
                        0):
                    #if(handle.layers[1].type in [5,12,29,24]):#handle.layers[1].type="HDF5Data"
                    layerHandle = caffe_pb2.LayerParameter()
                    text_format.Merge(
                        open(root +
                             '/net/defaultHDF5TestData.prototxt').read(),
                        layerHandle)
                    if self.netHandler.net[self.index].has_mean:
                        layerHandle.transform_param.mean_file = \
                            self.netHandler.net[self.index].meanpath
                    else:
                        layerHandle.ClearField('transform_param')
                    handle.layers[1].CopyFrom(layerHandle)
                    # Set the name after CopyFrom, which would otherwise
                    # overwrite it.
                    handle.layers[1].name = name
                    # Batch size: default to 100 when the field is empty.
                    batchSize = str(
                        self.page3Widget.widget.lineEditBatchSizeValidation.text())
                    batchSize = 100 if batchSize == "" else int(batchSize)
                    handle.layers[1].hdf5_data_param.batch_size = batchSize
                    handle.layers[1].hdf5_data_param.source = (
                        path + '/' + name + '_val.txt')
                    print handle.layers[1]
                else:
                    del handle.layers[1]

        ####### REMODIFICATION OF FIRST TWO LAYERS START

        with open(path + '/' + name + '_train.prototxt', 'w') as f:
            f.write(handle.__str__())
        print 'PATH BEFORE PROGRESS', path

        triggerList = self.createTriggerList()
        inthread(self.runParallel, name, path, triggerList, self.maxIterations,
                 str(self.netHandler.net[self.index]))
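
Merging a message's __str__() into a fresh handle, as done above, is a text-format round trip; protobuf's CopyFrom performs the same copy directly. A minimal sketch (again assuming caffe_pb2 is importable):

    import caffe_pb2

    src = caffe_pb2.SolverParameter()
    src.max_iter = 10000

    dst = caffe_pb2.SolverParameter()
    dst.CopyFrom(src)  # same effect as text_format.Merge(str(src), dst)
    assert dst.max_iter == 10000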
Example no. 5
def caffeToN2D2(netProtoFileName, solverProtoFileName="", iniFileName=""):
    urllib.urlretrieve(
        "https://github.com/BVLC/caffe/raw/master/" +
        "src/caffe/proto/caffe.proto", "caffe.proto")

    if iniFileName == "":
        iniFileName = os.path.splitext(netProtoFileName)[0] + ".ini"

    (stdoutData, stderrData) = subprocess.Popen(
        ["protoc", "--python_out=./", "caffe.proto"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE).communicate()
    print(stderrData)

    import caffe_pb2

    caffeNet = caffe_pb2.NetParameter()
    text_format.Merge(open(netProtoFileName).read(), caffeNet)

    caffeSolver = caffe_pb2.SolverParameter()

    if solverProtoFileName != "":
        text_format.Merge(open(solverProtoFileName).read(), caffeSolver)

    # Construct graph
    graph = {}  # graph stores the parent nodes as values
    attrs = {}

    for i, layer in enumerate(caffeNet.layer):
        # Rename Data layer to "sp", which is the mandatory name for N2D2
        if layer.type == "Data":
            for other_layer in caffeNet.layer:
                for b, val in enumerate(other_layer.bottom):
                    if val == layer.name:
                        other_layer.bottom[b] = "sp"
            layer.name = "sp"

        # Parent nodes
        graph[layer.name] = layer.bottom

        # Child nodes
        inPlace = set(layer.top) & set(layer.bottom)

        if len(layer.top) == 1 and len(inPlace) == 1:
            # Unfold in-place layers
            for next_layer in caffeNet.layer[i + 1:]:
                for b, val in enumerate(next_layer.bottom):
                    if val == layer.top[0]:
                        next_layer.bottom[b] = layer.name
                for t, val in enumerate(next_layer.top):
                    if val == layer.top[0]:
                        next_layer.top[t] = layer.name

    # Merge nodes
    for layer in caffeNet.layer:
        attrs[layer.name] = []
        parents = getParents(graph, layer.name)

        if layer.type == "ReLU" \
          or layer.type == "TanH" \
          or layer.type == "Sigmoid":
            # Merge with parents
            for parent in parents:
                graphMerge(graph, layer.name, parent)
                attrs[parent].append(layer.type)

        elif layer.type == "Concat":
            # Merge with children
            for child in getChilds(graph, layer.name):
                graphMerge(graph, layer.name, child)

        elif layer.type == "Dropout" and len(parents) > 1:
            # Split Dropout
            for parent in parents:
                graph[layer.name + "_" + parent] = [parent]

            for n, parent_nodes in graph.iteritems():
                if layer.name in parent_nodes:
                    idx = list(graph[n]).index(layer.name)
                    graph[n][idx:idx+1] \
                        = [layer.name + "_" + parent for parent in parents]

        elif layer.type == "Scale":
            # TODO: not supported yet
            # For now, merge with parents
            for parent in parents:
                graphMerge(graph, layer.name, parent)

    # Generate INI file
    iniTxt = ""

    iniTxt += "; Learning parameters\n"
    iniTxt += "$LR=" + str(caffeSolver.base_lr) + "\n"
    iniTxt += "$WD=" + str(caffeSolver.weight_decay) + "\n"
    iniTxt += "$MOMENTUM=" + str(caffeSolver.momentum) + "\n"
    iniTxt += "\n"

    for layer in caffeNet.layer:
        if layer.name not in graph:
            continue

        config = ""
        commonConfig = False

        if layer.type == "Data":
            inc = (len(layer.include) == 0)
            for include in layer.include:
                if caffe_pb2.Phase.Name(include.phase) == "TRAIN":
                    inc = True
            if not inc:
                continue

            iniTxt += "[sp]\n"
            iniTxt += "SizeX= ; TODO\n"
            iniTxt += "SizeY= ; TODO\n"
            iniTxt += "NbChannels= ; TODO\n"
            iniTxt += "BatchSize=" + str(layer.data_param.batch_size) + "\n"

            if layer.transform_param.crop_size > 0:
                iniTxt += "[sp.Transformation-crop]\n"
                iniTxt += "Type=PadCropTransformation\n"
                iniTxt += "Width=" + str(
                    layer.transform_param.crop_size) + "\n"
                iniTxt += "Height=" + str(
                    layer.transform_param.crop_size) + "\n"

            if len(layer.transform_param.mean_value) > 0:
                scale = layer.transform_param.scale \
                    if layer.transform_param.scale != 1 else 1.0/255.0

                iniTxt += "[sp.Transformation-scale-mean]\n"
                iniTxt += "Type=RangeAffineTransformation\n"
                iniTxt += "FirstOperator=Minus\n"
                iniTxt += "FirstValue=" + " ".join(
                    [str(x) for x in layer.transform_param.mean_value]) + "\n"
                iniTxt += "SecondOperator=Multiplies\n"
                iniTxt += "SecondValue=" + str(scale) + "\n"

        elif layer.type == "Convolution":
            iniTxt += "[" + layer.name + "]\n"
            iniTxt += "Input=" + ",".join(graph[layer.name]) + "\n"
            iniTxt += "Type=Conv\n"

            if len(layer.convolution_param.kernel_size) == 1:
                iniTxt += "KernelSize=" \
                    + str(layer.convolution_param.kernel_size[0]) + "\n"
            else:
                iniTxt += "KernelDims=" + " ".join([str(dim) \
                    for dim in layer.convolution_param.kernel_size]) + "\n"

            if len(layer.convolution_param.pad) == 1:
                iniTxt += "Padding=" + str(layer.convolution_param.pad[0]) \
                    + "\n"
            elif len(layer.convolution_param.pad) > 1:
                iniTxt += "Padding=" + " ".join([str(pad) \
                    for pad in layer.convolution_param.pad]) + " ; TODO\n"

            if len(layer.convolution_param.stride) == 1:
                iniTxt += "Stride=" + str(layer.convolution_param.stride[0]) \
                    + "\n"
            elif len(layer.convolution_param.stride) > 1:
                iniTxt += "StrideDims=" + " ".join([str(dim) \
                    for dim in layer.convolution_param.stride]) + "\n"

            if layer.convolution_param.HasField('group'):
                iniTxt += "NbGroups=" + str(layer.convolution_param.group) \
                    + "\n"

            # Fillers
            if layer.convolution_param.HasField('weight_filler'):
                iniTxt += setFiller("WeightsFiller",
                                    layer.convolution_param.weight_filler)
            if layer.convolution_param.HasField('bias_filler'):
                iniTxt += setFiller("BiasFiller",
                                    layer.convolution_param.bias_filler)

            # Bias?
            if not layer.convolution_param.bias_term:
                config += "NoBias=1\n"

            if len(layer.param) > 0:
                config += setParamSpec("WeightsSolver", layer.param[0])
            if len(layer.param) > 1:
                config += setParamSpec("BiasSolver", layer.param[1])

            iniTxt += "NbOutputs=" + str(layer.convolution_param.num_output) \
                + "\n"

            commonConfig = True

        elif layer.type == "InnerProduct":
            iniTxt += "[" + layer.name + "]\n"
            iniTxt += "Input=" + ",".join(graph[layer.name]) + "\n"
            iniTxt += "Type=Fc\n"

            # Fillers
            if layer.inner_product_param.HasField('weight_filler'):
                iniTxt += setFiller("WeightsFiller",
                                    layer.inner_product_param.weight_filler)
            if layer.inner_product_param.HasField('bias_filler'):
                iniTxt += setFiller("BiasFiller",
                                    layer.inner_product_param.bias_filler)

            # Bias?
            if not layer.inner_product_param.bias_term:
                config += "NoBias=1\n"

            if len(layer.param) > 0:
                config += setParamSpec("WeightsSolver", layer.param[0])
            if len(layer.param) > 1:
                config += setParamSpec("BiasSolver", layer.param[1])

            iniTxt += "NbOutputs=" + str(layer.inner_product_param.num_output) \
                + "\n"

            commonConfig = True

        elif layer.type == "Pooling":
            iniTxt += "[" + layer.name + "]\n"
            iniTxt += "Input=" + ",".join(graph[layer.name]) + "\n"
            iniTxt += "Type=Pool\n"

            pool = caffe_pb2.PoolingParameter.PoolMethod.Name(\
                    layer.pooling_param.pool)

            if pool == "AVE":
                iniTxt += "Pooling=Average\n"
            elif pool == "MAX":
                iniTxt += "Pooling=Max\n"
            else:
                iniTxt += "Pooling= ; TODO: unsupported: " + pool + "\n"

            if layer.pooling_param.global_pooling:
                iniTxt += "PoolDims=[" + graph[layer.name][0] \
                    + "]_OutputsWidth [" + graph[layer.name][0] \
                    + "]_OutputsHeight\n"
            else:
                iniTxt += "PoolSize=" + str(layer.pooling_param.kernel_size) \
                    + "\n"

            if layer.pooling_param.pad != 0:
                iniTxt += "Padding=" + str(layer.pooling_param.pad) + "\n"

            if layer.pooling_param.stride != 1:
                iniTxt += "Stride=" + str(layer.pooling_param.stride) + "\n"

            iniTxt += "NbOutputs=[" + graph[layer.name][0] + "]NbOutputs\n"
            iniTxt += "Mapping.ChannelsPerGroup=1\n"

        elif layer.type == "BatchNorm":
            iniTxt += "[" + layer.name + "]\n"
            iniTxt += "Input=" + ",".join(layer.bottom) + "\n"
            iniTxt += "Type=BatchNorm\n"
            iniTxt += "NbOutputs=[" + graph[layer.name][0] + "]NbOutputs\n"

            config += "Epsilon=" + str(layer.batch_norm_param.eps) + "\n"

        elif layer.type == "Eltwise":
            iniTxt += "[" + layer.name + "]\n"
            iniTxt += "Input=" + ",".join(graph[layer.name]) + "\n"
            iniTxt += "Type=ElemWise\n"

            operation = caffe_pb2.EltwiseParameter.EltwiseOp.Name(\
                    layer.eltwise_param.operation)

            if operation == "PROD":
                iniTxt += "Operation=Prod\n"
            elif operation == "MAX":
                iniTxt += "Operation=Max\n"
            elif operation == "SUM":
                iniTxt += "Operation=Sum\n"
            else:
                iniTxt += "Operation= ; TODO: unsupported: " + operation + "\n"

            if len(layer.eltwise_param.coeff) > 0:
                # coeff entries are floats; convert before joining.
                iniTxt += "Weights=" + " ".join(
                    str(c) for c in layer.eltwise_param.coeff) + "\n"

            iniTxt += "NbOutputs=[" + graph[layer.name][0] + "]NbOutputs\n"

        elif layer.type == "Softmax" or layer.type == "SoftmaxWithLoss":
            iniTxt += "[" + layer.name + "]\n"
            iniTxt += "Input=" + graph[layer.name][0] + "\n"
            iniTxt += "Type=Softmax\n"

            if layer.type == "SoftmaxWithLoss":
                iniTxt += "WithLoss=1\n"

            iniTxt += "NbOutputs=[" + graph[layer.name][0] + "]NbOutputs\n"

            # TODO: support with Caffe Accuracy layer
            iniTxt += "[" + layer.name + ".Target]\n"

        elif layer.type == "LRN":
            iniTxt += "[" + layer.name + "]\n"
            iniTxt += "Input=" + graph[layer.name][0] + "\n"
            iniTxt += "Type=LRN\n"
            iniTxt += "NbOutputs=[" + graph[layer.name][0] + "]NbOutputs\n"

            config += "N=" + str(layer.lrn_param.local_size) + "\n"
            config += "Alpha=" + str(layer.lrn_param.alpha) + "\n"
            config += "Beta=" + str(layer.lrn_param.beta) + "\n"
            config += "K=" + str(layer.lrn_param.k) + "\n"

            normRegion = caffe_pb2.LRNParameter.NormRegion.Name(\
                    layer.lrn_param.norm_region)

            if normRegion != "ACROSS_CHANNELS":
                config += "; TODO: not supported: " + normRegion + "\n"

        elif layer.type == "Dropout":
            if len(graph[layer.name]) > 1:
                for k, in_layer in enumerate(graph[layer.name]):
                    iniTxt += "[" + layer.name + "_" + in_layer + "]\n"
                    iniTxt += "Input=" + graph[layer.name][k] + "\n"
                    iniTxt += "Type=Dropout\n"
                    iniTxt += "NbOutputs=[" + graph[layer.name][k] \
                        + "]NbOutputs\n"

                    if k != len(graph[layer.name]) - 1:
                        iniTxt += "ConfigSection=" + layer.name + ".cfg\n"
            else:
                iniTxt += "[" + layer.name + "_" + + "]\n"
                iniTxt += "Input=" + ",".join(graph[layer.name]) + "\n"
                iniTxt += "Type=Dropout\n"
                iniTxt += "NbOutputs=[" + graph[layer.name][0] + "]NbOutputs\n"

            config += "Dropout=" + str(
                layer.dropout_param.dropout_ratio) + "\n"

        elif layer.type == "Accuracy":
            iniTxt += "; Accuracy layer was ignored\n\n"
            continue

        else:
            iniTxt += "; TODO: not supported:\n"
            iniTxt += "[" + layer.name + "]\n"
            iniTxt += "Input=" + ",".join(graph[layer.name]) + "\n"
            iniTxt += "Type=" + layer.type + "\n"

        # Attributes
        if "ReLU" in attrs[layer.name]:
            iniTxt += "ActivationFunction=Rectifier\n"
        elif "TanH" in attrs[layer.name]:
            iniTxt += "ActivationFunction=Tanh\n"
        elif "Sigmoid" in attrs[layer.name]:
            iniTxt += "ActivationFunction=Logistic\n"
        elif layer.type == "Convolution":
            iniTxt += "ActivationFunction=Linear\n"

        # Config section
        if commonConfig or config != "":
            iniTxt += "ConfigSection="

        if commonConfig:
            iniTxt += "common.cfg"

            if config != "":
                iniTxt += ","
            else:
                iniTxt += "\n"

        if config != "":
            iniTxt += layer.name + ".cfg\n"
            iniTxt += "[" + layer.name + ".cfg]\n"
            iniTxt += config

        iniTxt += "\n"

    iniTxt += "[common.cfg]\n"
    iniTxt += "Solvers.LearningRate=${LR}\n"
    iniTxt += "Solvers.Decay=${WD}\n"
    iniTxt += "Solvers.Momentum=${MOMENTUM}\n"

    if caffeSolver.lr_policy == "poly":
        iniTxt += "Solvers.LearningRatePolicy=PolyDecay\n"
    else:
        iniTxt += "Solvers.LearningRatePolicy= ; TODO: unsupported: " \
            + caffeSolver.lr_policy + "\n"

    iniTxt += "Solvers.Power=" + str(caffeSolver.power) + "\n"
    iniTxt += "Solvers.IterationSize=" + str(caffeSolver.iter_size) + "\n"
    iniTxt += "Solvers.MaxIterations=" + str(caffeSolver.max_iter) + "\n"
    iniTxt += "\n"

    with open(iniFileName, "w") as iniFile:
        iniFile.write(iniTxt)
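
A hypothetical invocation (the file names are placeholders; the function downloads caffe.proto and compiles it with protoc at runtime, so network access and protoc are required):

    # Hypothetical usage; both prototxt names are placeholders.
    caffeToN2D2('deploy.prototxt', solverProtoFileName='solver.prototxt')
    # By default this writes 'deploy.ini' next to the network prototxt.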
Example no. 6
def parse_prototxt(model_txt=None, solver_txt=None, caffemodel=None, verbose=False):
    """
    This function parses and creates a graph of ngraph ops corresponding to each layer
    in the prototxt
    Arguments:
        model_txt: prototxt file of the Neural net topology
        solver_txt: protoxt file of the solver to train the neural net
        caffemodel: parameters (weights/biases) to be loded into the model
    return :
        Dictionary of the ngraph ops whose keys are the layer names of the prototxt
    """

    ops_bridge = OpsBridge()  # opsBridge constructor
    data_layers = [l for l in supported_layers if "Data" in l]
    name_op_map = {}  # graph data structure

    if model_txt is None and solver_txt is None:
        raise ValueError("Either model prototxt or solver prototxt is needed")

    model_def = caffe_pb2.NetParameter()
    solver_def = caffe_pb2.SolverParameter()

    # TBD: Adding support to load weights from .caffemodel

    if solver_txt is not None:
        with open(solver_txt, 'r') as fid:
            text_format.Merge(fid.read(), solver_def)

        if not solver_def.HasField("net"):
            raise ValueError('model prototxt is not available in the solver prototxt')
        else:
            model_txt = solver_def.net

    with open(model_txt, 'r') as fid:
        text_format.Merge(fid.read(), model_def)

    netLayers = model_def.layer

    for layer in netLayers:
        if verbose:
            print("\nLayer: ", layer.name, " Type: ", layer.type)
        if layer.type not in supported_layers:
            raise ValueError("layer type '%s' is not supported" % layer.type)
        if len(layer.top) > 1 and layer.type not in data_layers:
            raise ValueError('only "Data" layers can have more than one output (top)')

        input_ops = []
        for name in layer.bottom:
            if name in name_op_map:
                input_ops.append(name_op_map[name])
            elif layer.type not in data_layers:
                raise ValueError("Bottom layer '%s' is missing in the prototxt" % name)
        # get the ngraph op from bridge
        out_op = ops_bridge(layer, input_ops)

        if out_op is None:
            print("!!! Unknown Operation '{}' of type '{}' !!!"
                  .format(layer.name, layer.type))
        if verbose:
            print("input Ops:", input_ops)
            print("output Op:", [out_op])

        if layer.name in name_op_map:
            raise ValueError("Layer '%s' already exists; layer names must be unique" % layer.name)

        # update dictionary
        name_op_map[layer.name] = out_op

        # handle in-place layers (e.g. ReLU, Dropout) where top == bottom
        if list(layer.top) == list(layer.bottom):
            for top in layer.top:
                if top in name_op_map:
                    name_op_map[top] = out_op

    return name_op_map
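
A hypothetical call (the file name is a placeholder; OpsBridge and supported_layers come from the surrounding module):

    # Hypothetical usage of parse_prototxt.
    ops = parse_prototxt(model_txt='train_val.prototxt', verbose=True)
    for layer_name, op in ops.items():
        print(layer_name, op)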
Example no. 7
    def save_files_generic(self):
        #####
        from digits.job_client import ssd_pascal
        ############## parameters #############
        # Add non-data layers
        job_path = self.path(self.train_val_file)
        solver_file = self.path('solver.prototxt')
        train_net_path = self.path('train_val.prototxt')
        test_net_path = self.path('test.prototxt')
        snapshot_path = self.path('VOC_Snapshot')
        train_data_path = self.data_dir+'/'+'VOC0712_trainval_lmdb'
        test_data_path = self.data_dir+'/'+'VOC0712_test_lmdb'
        label_map_file = self.data_dir+'/'+'labelmap_voc.prototxt'
        name_size_file = self.data_dir+'/'+'test_name_size.txt'
        output_result_dir = self.data_dir+'/'+'Main'

        iter_size = self.batch_accumulation / self.batch_size
        ################ end ##################
        print '----------------------------------------------'
        print 'train_ssd'
        print '----------------------------------------------'
        ############## train_net ##############
        # ssd_pascal.CreateTrainNet(train_net_path, train_data_path, self.batch_size) 
        ###directly edit on custom network text form page
        network_test, n = re.subn('(?<=source:)(.+)?(?=\n)', '"'+train_data_path+'"', self.network_text)
        # print (network_test)
        with open(train_net_path, 'w') as f:
            f.write(network_test)
        ################ end ##################

        ############### test_net ############## 
        # print 'create test.prototxt'
        # ssd_pascal.CreateTestNet(test_net_path, test_data_path, self.test_batch_size, 
        #     label_map_file, name_size_file, output_result_dir)
        ################# end #################
                                 
        ############## ssd solver #############
        solver = caffe_pb2.SolverParameter()

        # solver.max_iter = 120000
        solver.max_iter = self.max_iter_num
        self.solver = solver

        # Create solver
        ssd_pascal.CreateSolver(solver_file, 
            train_net_path, test_net_path, snapshot_path, 
            self.learning_rate, iter_size, self.solver_type)
        ################### end ###############

        ############## deploy_net #############
        deploy_network = caffe_pb2.NetParameter()
        # Write to file
        with open(self.path(self.deploy_file), 'w') as outfile:
            text_format.PrintMessage(deploy_network, outfile)

        with open(self.path('original.prototxt'), 'w') as outfile:
            text_format.PrintMessage(deploy_network, outfile)

        ################# end #################

        ############## snapshot ##############

        solver.snapshot_prefix = self.snapshot_prefix

        snapshot_interval = self.snapshot_interval * (solver.max_iter / self.train_epochs)
        if 0 < snapshot_interval <= 1:
            solver.snapshot = 1  # don't round down
        elif 1 < snapshot_interval < solver.max_iter:
            solver.snapshot = int(snapshot_interval)
        else:
            solver.snapshot = 0  # only take one snapshot at the end

        ################# end #################
        
        return True
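
The snapshot clamping above (repeated in Example no. 8) reads more clearly as a small helper; a sketch:

    def clamp_snapshot(snapshot_interval, max_iter):
        # Return a Caffe solver.snapshot value.
        if 0 < snapshot_interval <= 1:
            return 1                      # don't round down to zero
        elif 1 < snapshot_interval < max_iter:
            return int(snapshot_interval)
        return 0                          # only one snapshot, at the end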
Example no. 8
    def save_prototxt_files(self):
        """
        Save solver, train_val and deploy files to disk
        """

        has_val_set = self.dataset.val_db_task() is not None

        ### Check what has been specified in self.network

        tops = []
        bottoms = {}
        train_data_layer = None
        val_data_layer = None
        hidden_layers = caffe_pb2.NetParameter()
        loss_layers = []
        accuracy_layers = []
        for layer in self.network.layer:
            assert layer.type not in ('MemoryData', 'HDF5Data', 'ImageData'), \
                'unsupported data layer type'
            if layer.type == 'Data':
                for rule in layer.include:
                    if rule.phase == caffe_pb2.TRAIN:
                        assert train_data_layer is None, 'cannot specify two train data layers'
                        train_data_layer = layer
                    elif rule.phase == caffe_pb2.TEST:
                        assert val_data_layer is None, 'cannot specify two test data layers'
                        val_data_layer = layer
            elif layer.type == 'SoftmaxWithLoss':
                loss_layers.append(layer)
            elif layer.type == 'Accuracy':
                addThis = True
                if layer.accuracy_param.HasField('top_k'):
                    if layer.accuracy_param.top_k >= len(self.get_labels()):
                        self.logger.warning(
                            'Removing layer %s because top_k=%s while there '
                            'are only %s labels in this dataset'
                            % (layer.name, layer.accuracy_param.top_k,
                               len(self.get_labels())))
                        addThis = False
                if addThis:
                    accuracy_layers.append(layer)
            else:
                hidden_layers.layer.add().CopyFrom(layer)
                # in-place layers (single top == single bottom) add no new blobs
                if not (len(layer.bottom) == 1 and len(layer.top) == 1
                        and layer.bottom[0] == layer.top[0]):
                    for top in layer.top:
                        tops.append(top)
                    for bottom in layer.bottom:
                        bottoms[bottom] = True

        if train_data_layer is None:
            assert val_data_layer is None, 'cannot specify a test data layer without a train data layer'

        assert len(loss_layers) > 0, 'must specify a loss layer'

        network_outputs = []
        for name in tops:
            if name not in bottoms:
                network_outputs.append(name)
        assert len(network_outputs), 'network must have an output'

        # Update num_output for any output InnerProduct layers automatically
        for layer in hidden_layers.layer:
            if layer.type == 'InnerProduct':
                for top in layer.top:
                    if top in network_outputs:
                        layer.inner_product_param.num_output = len(
                            self.get_labels())
                        break

        ### Write train_val file

        train_val_network = caffe_pb2.NetParameter()

        # data layers
        if train_data_layer is not None:
            if train_data_layer.HasField('data_param'):
                assert not train_data_layer.data_param.HasField(
                    'source'), "don't set the data_param.source"
                assert not train_data_layer.data_param.HasField(
                    'backend'), "don't set the data_param.backend"
            max_crop_size = min(self.dataset.image_dims[0],
                                self.dataset.image_dims[1])
            if self.crop_size:
                assert self.crop_size <= max_crop_size, 'crop_size is larger than the image size'
                train_data_layer.transform_param.crop_size = self.crop_size
            elif train_data_layer.transform_param.HasField('crop_size'):
                cs = train_data_layer.transform_param.crop_size
                if cs > max_crop_size:
                    # don't throw an error here
                    cs = max_crop_size
                train_data_layer.transform_param.crop_size = cs
                self.crop_size = cs
            train_val_network.layer.add().CopyFrom(train_data_layer)
            train_data_layer = train_val_network.layer[-1]
            if val_data_layer is not None and has_val_set:
                if val_data_layer.HasField('data_param'):
                    assert not val_data_layer.data_param.HasField(
                        'source'), "don't set the data_param.source"
                    assert not val_data_layer.data_param.HasField(
                        'backend'), "don't set the data_param.backend"
                if self.crop_size:
                    # use our error checking from the train layer
                    val_data_layer.transform_param.crop_size = self.crop_size
                train_val_network.layer.add().CopyFrom(val_data_layer)
                val_data_layer = train_val_network.layer[-1]
        else:
            train_data_layer = train_val_network.layer.add(type='Data',
                                                           name='data')
            train_data_layer.top.append('data')
            train_data_layer.top.append('label')
            train_data_layer.include.add(phase=caffe_pb2.TRAIN)
            train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
            if self.crop_size:
                train_data_layer.transform_param.crop_size = self.crop_size
            if has_val_set:
                val_data_layer = train_val_network.layer.add(type='Data',
                                                             name='data')
                val_data_layer.top.append('data')
                val_data_layer.top.append('label')
                val_data_layer.include.add(phase=caffe_pb2.TEST)
                val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
                if self.crop_size:
                    val_data_layer.transform_param.crop_size = self.crop_size
        train_data_layer.data_param.source = self.dataset.path(
            self.dataset.train_db_task().db_name)
        train_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB
        if val_data_layer is not None and has_val_set:
            val_data_layer.data_param.source = self.dataset.path(
                self.dataset.val_db_task().db_name)
            val_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB
        if self.use_mean:
            mean_pixel = None
            with open(self.dataset.path(
                    self.dataset.train_db_task().mean_file)) as f:
                blob = caffe_pb2.BlobProto()
                blob.MergeFromString(f.read())
                mean = np.reshape(blob.data, (
                    self.dataset.image_dims[2],
                    self.dataset.image_dims[0],
                    self.dataset.image_dims[1],
                ))
                mean_pixel = mean.mean(1).mean(1)
            for value in mean_pixel:
                train_data_layer.transform_param.mean_value.append(value)
            if val_data_layer is not None and has_val_set:
                for value in mean_pixel:
                    val_data_layer.transform_param.mean_value.append(value)
        if self.batch_size:
            train_data_layer.data_param.batch_size = self.batch_size
            if val_data_layer is not None and has_val_set:
                val_data_layer.data_param.batch_size = self.batch_size
        else:
            if not train_data_layer.data_param.HasField('batch_size'):
                train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
            if val_data_layer is not None and has_val_set and not val_data_layer.data_param.HasField(
                    'batch_size'):
                val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE

        # hidden layers
        train_val_network.MergeFrom(hidden_layers)

        # output layers
        train_val_network.layer.extend(loss_layers)
        train_val_network.layer.extend(accuracy_layers)

        with open(self.path(self.train_val_file), 'w') as outfile:
            text_format.PrintMessage(train_val_network, outfile)

        ### Write deploy file

        deploy_network = caffe_pb2.NetParameter()

        # input
        deploy_network.input.append('data')
        deploy_network.input_dim.append(1)
        deploy_network.input_dim.append(self.dataset.image_dims[2])
        if self.crop_size:
            deploy_network.input_dim.append(self.crop_size)
            deploy_network.input_dim.append(self.crop_size)
        else:
            deploy_network.input_dim.append(self.dataset.image_dims[0])
            deploy_network.input_dim.append(self.dataset.image_dims[1])

        # hidden layers
        deploy_network.MergeFrom(hidden_layers)

        # output layers
        if loss_layers[-1].type == 'SoftmaxWithLoss':
            prob_layer = deploy_network.layer.add(type='Softmax', name='prob')
            prob_layer.bottom.append(network_outputs[-1])
            prob_layer.top.append('prob')

        with open(self.path(self.deploy_file), 'w') as outfile:
            text_format.PrintMessage(deploy_network, outfile)

        ### Write solver file

        solver = caffe_pb2.SolverParameter()
        # get enum value for solver type
        solver.solver_type = getattr(solver, self.solver_type)
        solver.net = self.train_val_file

        # Set CPU/GPU mode
        if config_value('caffe_root')['cuda_enabled'] and \
                bool(config_value('gpu_list')):
            solver.solver_mode = caffe_pb2.SolverParameter.GPU
        else:
            solver.solver_mode = caffe_pb2.SolverParameter.CPU

        solver.snapshot_prefix = self.snapshot_prefix

        # Epochs -> Iterations
        train_iter = int(
            math.ceil(
                float(self.dataset.train_db_task().entries_count) /
                train_data_layer.data_param.batch_size))
        solver.max_iter = train_iter * self.train_epochs
        snapshot_interval = self.snapshot_interval * train_iter
        if 0 < snapshot_interval <= 1:
            solver.snapshot = 1  # don't round down
        elif 1 < snapshot_interval < solver.max_iter:
            solver.snapshot = int(snapshot_interval)
        else:
            solver.snapshot = 0  # only take one snapshot at the end

        if has_val_set and self.val_interval:
            solver.test_iter.append(
                int(
                    math.ceil(
                        float(self.dataset.val_db_task().entries_count) /
                        val_data_layer.data_param.batch_size)))
            val_interval = self.val_interval * train_iter
            if 0 < val_interval <= 1:
                solver.test_interval = 1  # don't round down
            elif 1 < val_interval < solver.max_iter:
                solver.test_interval = int(val_interval)
            else:
                solver.test_interval = solver.max_iter  # only test once at the end

        # Learning rate
        solver.base_lr = self.learning_rate
        solver.lr_policy = self.lr_policy['policy']
        scale = float(solver.max_iter) / 100.0
        if solver.lr_policy == 'fixed':
            pass
        elif solver.lr_policy == 'step':
            # stepsize = stepsize * scale
            solver.stepsize = int(
                math.ceil(float(self.lr_policy['stepsize']) * scale))
            solver.gamma = self.lr_policy['gamma']
        elif solver.lr_policy == 'multistep':
            for value in self.lr_policy['stepvalue']:
                # stepvalue = stepvalue * scale
                solver.stepvalue.append(int(math.ceil(float(value) * scale)))
            solver.gamma = self.lr_policy['gamma']
        elif solver.lr_policy == 'exp':
            # gamma = gamma^(1/scale)
            solver.gamma = math.pow(self.lr_policy['gamma'], 1.0 / scale)
        elif solver.lr_policy == 'inv':
            # gamma = gamma / scale
            solver.gamma = self.lr_policy['gamma'] / scale
            solver.power = self.lr_policy['power']
        elif solver.lr_policy == 'poly':
            solver.power = self.lr_policy['power']
        elif solver.lr_policy == 'sigmoid':
            # gamma = -gamma / scale
            solver.gamma = -1.0 * self.lr_policy['gamma'] / scale
            # stepsize = stepsize * scale
            solver.stepsize = int(
                math.ceil(float(self.lr_policy['stepsize']) * scale))
        else:
            raise Exception('Unknown lr_policy: "%s"' % solver.lr_policy)

        # go with the suggested defaults
        if solver.solver_type != solver.ADAGRAD:
            solver.momentum = 0.9
        solver.weight_decay = 0.0005

        # Display 8x per epoch, or once per 5000 images, whichever is more frequent
        solver.display = max(
            1,
            min(
                int(
                    math.floor(
                        float(solver.max_iter) / (self.train_epochs * 8))),
                int(math.ceil(5000.0 /
                              train_data_layer.data_param.batch_size))))

        if self.random_seed is not None:
            solver.random_seed = self.random_seed

        with open(self.path(self.solver_file), 'w') as outfile:
            text_format.PrintMessage(solver, outfile)
        self.solver = solver  # save for later

        return True
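
The epochs-to-iterations conversion above is ceil(entries / batch_size) iterations per epoch. A worked example with illustrative numbers:

    import math

    entries, batch_size, epochs = 10000, 32, 30   # illustrative values
    train_iter = int(math.ceil(float(entries) / batch_size))  # 313
    max_iter = train_iter * epochs                            # 9390
    print(train_iter, max_iter)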