Example No. 1
def test_detach(self):
    """Test for ge.detach."""
    sgv = ge.sgv(self.c.op, self.a.op)
    control_outputs = ge.util.ControlOutputs(self.graph)
    ge.detach(sgv, control_inputs=control_outputs)
    # Make sure the detached graph is as expected.
    self.assertTrue(ge.matcher("^foo/c$")
                    .input_ops("geph__a_0", "geph__b_0")(self.c.op))
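What detach does here: every incoming edge of the subgraph view is cut, and each severed input is replaced by a freshly created placeholder named with the "geph" prefix, which is exactly what the assertion checks. A minimal standalone sketch (assuming TensorFlow 1.x with tf.contrib.graph_editor):

import tensorflow as tf
import tensorflow.contrib.graph_editor as ge

graph = tf.Graph()
with graph.as_default():
    a = tf.constant(1.0, name="a")
    b = tf.constant(2.0, name="b")
    c = tf.add(a, b, name="c")

# Cut every incoming edge of c; each severed input becomes a new
# placeholder (the "geph" prefix checked in the test above).
ge.detach(ge.sgv(c.op))
print([t.op.name for t in c.op.inputs])  # e.g. ['geph__a_0', 'geph__b_0']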
Example No. 2
# Imports assumed by this example (TensorFlow 1.x with contrib);
# load_graph, svd_compress_gs and logEntry are project-local helpers.
import os
import tensorflow as tf
import tensorflow.contrib.graph_editor as ge
from tensorflow.python.tools import freeze_graph


def build_pb_fact(pb_location, main_location, breath, quant, lvl):
    graph = load_graph(pb_location)

    W1 = graph.get_tensor_by_name('prefix/w_in:0')
    matmul = graph.get_tensor_by_name('prefix/MatMul:0')
    bias = graph.get_tensor_by_name('prefix/b_in:0')
    add = graph.get_tensor_by_name('prefix/add:0')
    reshape = graph.get_tensor_by_name('prefix/Reshape:0')

    # Remove all connections from matmul.
    ge.detach(ge.sgv(matmul.op))

    with tf.Session(graph=graph) as sess:
        # os.system("mkdir " + main_location + breath + "/" + quant + "/fact_" + str(lvl))

        # for op in sess.graph.get_operations():
        #     print(op.name)

        W = W1.eval()
        u, s, v, ss = svd_compress_gs(W, lvl)
        logEntry("structural_similarity == > " + str(ss))
        u1 = tf.matmul(reshape, u, name="prefix/u1")
        s1 = tf.matmul(u1, s, name="prefix/s1")
        v1 = tf.matmul(s1, v, name="prefix/v1")
        ge.connect(ge.sgv(v1.op), ge.sgv(add.op).remap_inputs([0]))

        # The frozen graph has no variables, but tf.train.Saver requires at
        # least one, so create and initialize a dummy variable.
        sess.run(tf.variables_initializer([tf.Variable(5, name="dummy" + str(lvl))]))
        saver = tf.train.Saver()

        # Save the graph log for TensorBoard.
        LOGDIR = main_location + '/LOG'
        train_writer = tf.summary.FileWriter(LOGDIR)
        train_writer.add_graph(sess.graph)
        train_writer.close()

        # Save the frozen model.
        os.system("mkdir " + main_location + "pb")
        tf.train.write_graph(sess.graph_def, main_location + 'pb/', "RNN_" + breath + "_" + quant + "_fact_" + str(lvl) + ".pbtxt")
        saver.save(sess, save_path=main_location + "model.ckpt")

        input_graph_path = main_location + '/pb/' + "RNN_" + breath + "_" + quant + "_fact_" + str(lvl) + ".pbtxt"

        checkpoint_path = main_location + "model.ckpt"
        restore_op_name = "save/restore_all"
        filename_tensor_name = "save/Const:0"
        output_frozen_graph_name = main_location + 'pb/' + "RNN_" + breath + "_" + quant + "_fact_" + str(lvl) + ".pb"

        logEntry("Start Freezing the graph")

        freeze_graph.freeze_graph(input_graph_path, input_saver="",
                                  input_binary=False, input_checkpoint=checkpoint_path,
                                  output_node_names="prefix/y_", restore_op_name=restore_op_name,
                                  filename_tensor_name=filename_tensor_name,
                                  output_graph=output_frozen_graph_name, clear_devices=True, initializer_nodes="")

        logEntry("End Freezing the graph")

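The load_graph helper is project-local and not shown in the source. A plausible sketch, assuming the standard TF 1.x frozen-graph loading pattern; importing with name="prefix" would explain the 'prefix/...' tensor names used throughout these examples:

import tensorflow as tf

def load_graph(pb_location):
    # Read a frozen GraphDef and import it into a fresh graph under the
    # "prefix" name scope (assumption: this matches the helper's behavior).
    with tf.gfile.GFile(pb_location, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name="prefix")
    return graph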
Example No. 3
    fact_levels = [32, 30, 25, 20, 15, 10, 5]

    for lvl in fact_levels:
        logEntry(lvl)
        # Load the original graph.
        graph = load_graph("../model/pb_files/rnn-deep-250-32-2.pb")

        W1 = graph.get_tensor_by_name('prefix/w_in:0')
        matmul = graph.get_tensor_by_name('prefix/MatMul:0')
        bias = graph.get_tensor_by_name('prefix/b_in:0')
        add = graph.get_tensor_by_name('prefix/add:0')
        reshape = graph.get_tensor_by_name('prefix/Reshape:0')

        # Remove all connections from matmul.
        ge.detach(ge.sgv(matmul.op))

        with tf.Session(graph=graph) as sess:
            os.system("mkdir /flush1/raj034/RNN/model/test_cases/" + breath + "/" + quant + "/fact_" + str(lvl))

            # for op in sess.graph.get_operations():
            #     print(op.name)

            W = W1.eval()
            u, s, v, ss = svd_compress_gs(W, lvl)
            logEntry("structural_similarity == > " + str(ss))
            u1 = tf.matmul(reshape, u, name="prefix/u1")
            s1 = tf.matmul(u1, s, name="prefix/s1")
            v1 = tf.matmul(s1, v, name="prefix/v1")
            ge.connect(ge.sgv(v1.op), ge.sgv(add.op).remap_inputs([0]))
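svd_compress_gs is another project-local helper that is not shown. A hypothetical sketch based on how its return values are used above (three factors that can feed tf.matmul as [in, k], [k, k] and [k, out] matrices, plus a similarity score); the rank choice and the metric are assumptions:

import numpy as np

def svd_compress_gs(W, lvl):
    # Hypothetical: truncated SVD keeping the top `lvl` singular values.
    u, s, vt = np.linalg.svd(W, full_matrices=False)
    k = min(lvl, len(s))
    u_k = u[:, :k].astype(np.float32)        # [in, k]
    s_k = np.diag(s[:k]).astype(np.float32)  # [k, k]
    v_k = vt[:k, :].astype(np.float32)       # [k, out]
    # Assumed metric: relative reconstruction quality in [0, 1].
    W_k = u_k @ s_k @ v_k
    ss = 1.0 - np.linalg.norm(W - W_k) / np.linalg.norm(W)
    return u_k, s_k, v_k, ss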
Example No. 4
def build_pb_fact(pb_location, main_location, breath, quant, lvl):
    graph = load_graph(pb_location)

    with tf.Session(graph=graph) as sess:
        count = 0
        pre_layer = [
            'Placeholder', 'hidden_layer1', 'hidden_layer2', 'hidden_layer3',
            'hidden_layer4', 'hidden_layer5', 'hidden_layer6'
        ]
        svd_num = [2048, 1024, 512, 256, 128, 64, 32]
        for layer in [
                'hidden_layer1', 'hidden_layer2', 'hidden_layer3',
                'hidden_layer4', 'hidden_layer5', 'hidden_layer6',
                'hidden_layer7'
        ]:
            if (count == 0):
                layer_input = graph.get_tensor_by_name('prefix/' +
                                                       pre_layer[count] + ':0')
            else:
                layer_input = graph.get_tensor_by_name('prefix/' +
                                                       pre_layer[count] +
                                                       '/Tanh:0')
            W = graph.get_tensor_by_name('prefix/' + layer + '/kernel:0')
            matmul = graph.get_tensor_by_name('prefix/' + layer + '/MatMul:0')
            add = graph.get_tensor_by_name('prefix/' + layer + '/BiasAdd:0')
            ge.detach(ge.sgv(matmul.op))

            W1 = W.eval()
            print(W1.shape)
            u_1, s_1, v_1, ss = svd_compress_gs(
                W1, math.ceil(svd_num[count] * lvl / 100))
            logEntry(
                str(lvl) + " ==> layer ==> " + str(count) + " ssim   ==> " +
                str(ss))
            u1 = tf.matmul(layer_input, u_1, name="prefix/" + layer + "/u1")
            s1 = tf.matmul(u1, s_1, name="prefix/" + layer + "/s1")
            v1 = tf.matmul(s1, v_1, name="prefix/" + layer + "/v1")
            ge.connect(ge.sgv(v1.op), ge.sgv(add.op).remap_inputs([0]))

            count += 1

        # The frozen graph has no variables, but tf.train.Saver requires at
        # least one, so create and initialize a dummy variable.
        sess.run(tf.variables_initializer(
            [tf.Variable(5, name="dummy" + str(lvl))]))
        saver = tf.train.Saver()

        # Save the graph log for TensorBoard.
        LOGDIR = main_location + '/LOG'
        train_writer = tf.summary.FileWriter(LOGDIR)
        train_writer.add_graph(sess.graph)
        train_writer.close()

        # Save the frozen model.
        os.system("mkdir " + main_location + "pb")
        tf.train.write_graph(
            sess.graph_def, main_location + 'pb/',
            "DNN_" + breath + "_" + quant + "_fact_" + str(lvl) + ".pbtxt")
        saver.save(sess, save_path=main_location + "model.ckpt")

        input_graph_path = main_location + '/pb/' + "DNN_" + breath + "_" + quant + "_fact_" + str(
            lvl) + ".pbtxt"

        checkpoint_path = main_location + "model.ckpt"
        restore_op_name = "save/restore_all"
        filename_tensor_name = "save/Const:0"
        output_frozen_graph_name = main_location + 'pb/' + "DNN_" + breath + "_" + quant + "_fact_" + str(
            lvl) + ".pb"

        logEntry("Start Freezing the graph")

        freeze_graph.freeze_graph(input_graph_path,
                                  input_saver="",
                                  input_binary=False,
                                  input_checkpoint=checkpoint_path,
                                  output_node_names="prefix/softmax",
                                  restore_op_name=restore_op_name,
                                  filename_tensor_name=filename_tensor_name,
                                  output_graph=output_frozen_graph_name,
                                  clear_devices=True,
                                  initializer_nodes="")

        logEntry("End Freezing the graph")

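Two details worth noting. First, the retained rank scales with lvl: math.ceil(svd_num[count] * lvl / 100) keeps lvl percent of each layer's width, e.g. at lvl = 10 the first hidden layer keeps ceil(2048 * 10 / 100) = 205 singular values and the last keeps ceil(32 * 10 / 100) = 4. Second, ge.connect(..., remap_inputs([0])) reroutes the output of v1 into input slot 0 of the downstream op. A minimal standalone sketch of that splice (illustrative names, TF 1.x):

import tensorflow as tf
import tensorflow.contrib.graph_editor as ge

graph = tf.Graph()
with graph.as_default():
    a = tf.constant(1.0, name="a")
    b = tf.constant(2.0, name="b")
    out = tf.add(a, b, name="out")
    new_in = tf.constant(5.0, name="new_in")

# Feed new_in into input slot 0 of out (the slot previously fed by a).
ge.connect(ge.sgv(new_in.op), ge.sgv(out.op).remap_inputs([0]))

with tf.Session(graph=graph) as sess:
    print(sess.run(out))  # -> 7.0 (new_in + b)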
Example No. 5
def convert(file_path,
            inputNodeName,
            outputNodeName,
            msuffix,
            binaryPB,
            readMode,
            folderPath,
            checkpointExt,
            checkpointPath,
            modelName,
            shapeArray,
            modifyshapeAttribue,
            fixBatchNormal=True):
    tf.reset_default_graph()
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
    config = tf.ConfigProto(allow_soft_placement=True,
                            device_count={
                                "GPU": 0,
                                "CPU": 1
                            })
    runInCommandLine = False
    if (os.path.isfile(file_path)):
        g_in, graph_def = loadGraph(file_path, binaryPB)
    else:
        raise ValueError('Cannot find: %s' % file_path)

    fixTrainingBNAndDropout = False
    if (fixTrainingBNAndDropout):
        fcQuantWeightRemoved = False
        import tensorflow.contrib.graph_editor as ge
        maxModuleNumber = 6
        for i in range(1, maxModuleNumber + 1):  # if fire6 is the last fire layer, set maxModuleNumber to 7
            blockName = 'fire' + str(i)
            # All fires in this quant model use '/bn'. (For MobileNet this
            # would be ['/dw_bn', '/1x1_bn']; SqueezeNet uses ['/bn'].)
            bnNames = ['/bn']
            useConnectMethod = False
            for bnName in bnNames:
                if (useConnectMethod):
                    nodeName = blockName + bnName + '/moments/Squeeze_1'
                    oldInputName = blockName + bnName + '/moments/variance'
                    newInputName = blockName + bnName + '/moving_variance'
                    node = g_in.get_operation_by_name(nodeName)
                    oldInputNode = g_in.get_operation_by_name(oldInputName)
                    newInputNode = g_in.get_operation_by_name(newInputName)
                    placeHolderNew = tf.identity(newInputNode.outputs[0])
                    expDim = tf.expand_dims(
                        tf.expand_dims(tf.expand_dims(placeHolderNew, 0), 0),
                        0)
                    ge.detach(ge.sgv(oldInputNode))
                    ge.connect(ge.sgv(expDim), ge.sgv(node))
                    nodeName = blockName + bnName + '/moments/Squeeze'
                    oldInputName = blockName + bnName + '/moments/mean'
                    newInputName = blockName + bnName + '/moving_mean'
                    node = g_in.get_operation_by_name(nodeName)
                    print(
                        '%s before edit new node  node.node_def  .inputs[0]' %
                        blockName, node.node_def, node.inputs[0])
                    oldInputNode = g_in.get_operation_by_name(oldInputName)
                    newInputNode = g_in.get_operation_by_name(newInputName)
                    placeHolderNew = tf.identity(newInputNode.outputs[0])
                    expDim = tf.expand_dims(
                        tf.expand_dims(tf.expand_dims(placeHolderNew, 0), 0),
                        0)
                    ge.detach(ge.sgv(oldInputNode))
                    ge.connect(ge.sgv(expDim), ge.sgv(node))
                    print(
                        '%s after edit new node  node.node_def  .inputs[0]' %
                        blockName, node.node_def, node.inputs[0])
                else:

                    oldInputName = blockName + bnName + '/moments/Squeeze_1'
                    newInputName = blockName + bnName + '/moving_variance'
                    oldInputNode = g_in.get_operation_by_name(oldInputName)
                    newInputNode = g_in.get_operation_by_name(newInputName)
                    placeHolderNew = tf.identity(newInputNode.outputs[0])
                    ge.swap_outputs(ge.sgv(placeHolderNew),
                                    ge.sgv(oldInputNode))
                    oldInputName = blockName + bnName + '/moments/Squeeze'
                    newInputName = blockName + bnName + '/moving_mean'
                    oldInputNode = g_in.get_operation_by_name(oldInputName)
                    newInputNode = g_in.get_operation_by_name(newInputName)
                    placeHolderNew = tf.identity(newInputNode.outputs[0])
                    ge.swap_outputs(ge.sgv(placeHolderNew),
                                    ge.sgv(oldInputNode))
            removeWeightQuantize = False
            if (removeWeightQuantize):
                oldInputName = blockName + '/conv3x3/add'
                newInputName = blockName + '/conv3x3/kernels'
                #print('%s before edit new node  node.node_def  .inputs[0]'%blockName, node.node_def  , node.inputs[0])
                oldInputNode = g_in.get_operation_by_name(oldInputName)
                newInputNode = g_in.get_operation_by_name(newInputName)
                placeHolderNew = tf.identity(newInputNode.outputs[0])
                #                expDim=tf.expand_dims(tf.expand_dims(tf.expand_dims(placeHolderNew,0),0),0)
                #                ge.detach (ge.sgv(oldInputNode))
                ge.swap_outputs(
                    ge.sgv(placeHolderNew),
                    ge.sgv(oldInputNode))  # reroute_outputs gives the same result
                #Remove FC layer
                if (fcQuantWeightRemoved == False):
                    oldInputName = 'logit/add'
                    newInputName = 'logit/weights'
                    #print('%s before edit new node  node.node_def  .inputs[0]'%blockName, node.node_def  , node.inputs[0])

                    oldInputNode = g_in.get_operation_by_name(oldInputName)
                    newInputNode = g_in.get_operation_by_name(newInputName)
                    placeHolderNew = tf.identity(newInputNode.outputs[0])
                    #                expDim=tf.expand_dims(tf.expand_dims(tf.expand_dims(placeHolderNew,0),0),0)
                    #                ge.detach (ge.sgv(oldInputNode))
                    ge.swap_outputs(ge.sgv(placeHolderNew),
                                    ge.sgv(oldInputNode))  # reroute_outputs gives the same result
                    fcQuantWeightRemoved = True

            with tf.Session(config=config, graph=g_in) as sess:
                graph_def = sess.graph_def

                for node in graph_def.node:
                    if 'dropout/mul' in node.name:
                        deleteDropOut = True
                        if (deleteDropOut):
                            oldInputName = 'dropout/mul'
                            newInputName = 'fire6/pool/MaxPool'
                            oldInputNode = g_in.get_operation_by_name(
                                oldInputName)
                            newInputNode = g_in.get_operation_by_name(
                                newInputName)
                            placeHolderNew = tf.identity(
                                newInputNode.outputs[0])
                            ge.swap_outputs(
                                ge.sgv(placeHolderNew),
                                ge.sgv(oldInputNode))  # reroute_outputs gives the same result

                        else:
                            for node in graph_def.node:
                                if node.name == 'dropout/keep_prob':
                                    #node.attr['value'].tensor.float_val=tf.convert_to_tensor (1,dtype=tf.float32)
                                    #node.attr['value'].tensor.float_val=1
                                    node.attr['value'].tensor.CopyFrom(
                                        tensor_util.make_tensor_proto(
                                            1.0, dtype=tf.float32))
                                    #node.attr['value'].value =1.0
    # Fix batch-norm nodes (see https://github.com/tensorflow/tensorflow/issues/3628).
    if (fixBatchNormal):

        for node in graph_def.node:
            if node.op == 'RefSwitch':
                node.op = 'Switch'
                for index in range(len(node.input)):
                    if 'moving_' in node.input[index]:
                        node.input[index] = node.input[index] + '/read'
            elif node.op == 'AssignSub':
                node.op = 'Sub'
                if 'use_locking' in node.attr: del node.attr['use_locking']
            elif node.op == 'AssignAdd':
                node.op = 'Add'
                if 'use_locking' in node.attr: del node.attr['use_locking']
            if ('dilations') in node.attr: del node.attr['dilations']
            node.device = ""

        # fixVariables: not working; disabled. Note that this block sits
        # outside the loop above, so `node` is whatever the loop left behind.
        fixVariables = False
        if (fixVariables and node.op == 'VariableV2' and
            ('batchnorm/var' in node.name or 'batchnorm/mean' in node.name)):
            outputNodes = find_output_nodes(graph_def, node)
            for index in range(len(outputNodes)):
                if (outputNodes[index].op == 'Assign'):
                    #node.output[index] = node.output[index] + '/read'
                    #outputNodes[index].op ='Identity'
                    outputNodes[index].name = outputNodes[index].name + '/read'
                    print('Modified %s ' % outputNodes[index].name)


#################### Step 1: Training-to-inference simplification (needs checkpoint and .pbtxt files from training) ####################

    graphDef = optimize_for_inference_lib.optimize_for_inference(
        graph_def,
        [inputNodeName],  # list of input node names
        [outputNodeName] if type(outputNodeName) is str
        else list(outputNodeName),  # list of output node names
        tf.float32.as_datatype_enum)
    if (modifyshapeAttribue):
        inputOpType = 'Placeholder'
        for n in graphDef.node:
            #print('node to modify',n.name)
            if (n.name == inputNodeName):
                print('node to modify', n)
                setNodeAttribute(n, 'shape', shapeArray)
                if (n.op != inputOpType):
                    print(
                        "--Node %s op   %s   set to op=%s" %
                        (inputNodeName, n.op, inputOpType), shapeArray)
                    n.op = inputOpType
                print("--Name of the node - %s shape set to " % n.name,
                      shapeArray)
                print('node after modify', n)

    modifyClipValue = False
    if (modifyClipValue):  # note: relies on maxModuleNumber from the disabled block above
        for i in range(1, maxModuleNumber + 1):
            blockName = 'fire' + str(i)
            newClipValue = 127

            clipVNodeName = blockName + '/conv3x3/Rint_1/x'
            #clipnode= g_in.get_operation_by_name(clipVNodeName)
            clipnode = find_node_by_name(graphDef, clipVNodeName)

            print('clipnode to modify', clipnode)

            setNodeConstValue(graph_def, clipnode, newClipValue)

            print("--Name of the node - %s shape set to %f" %
                  (clipnode.name, newClipValue))
            print('clipnode after modify', clipnode)

            modifyFCClip = True
            if (modifyFCClip and i == maxModuleNumber):
                clipVNodeName = 'conv12/Rint_1/x'
                clipnodeFC = find_node_by_name(graphDef, clipVNodeName)
                #clipnodeFC= g_in.get_operation_by_name(clipVNodeName)
                setNodeConstValue(graph_def, clipnodeFC, newClipValue)

                print('clipnode after modify', clipnodeFC)

    if (runInCommandLine):
        copyfile(file_path, file_path + trainModelSuffix)
    outputNameSuffix = '%s_frozenforInference.pb' % checkpointExt
    inferenceSuffix = '.Inference'
    tf.train.write_graph(graphDef,
                         folderPath,
                         checkpointPath + modelName + '.pb' + inferenceSuffix,
                         as_text=False)
    tf.train.write_graph(graphDef,
                         folderPath,
                         checkpointPath + modelName + '.pbtxt' +
                         inferenceSuffix,
                         as_text=True)

    pbfileoutput_path = checkpointPath + modelName + outputNameSuffix
    checkpointfile_path = checkpointPath + modelName + checkpointExt

    pbfile_path = checkpointPath + modelName + msuffix + inferenceSuffix
    ####################   Step 2: Frozen inference model   ######################################################

    freeze_graph.freeze_graph(
        input_graph=pbfile_path,
        input_saver='',
        input_binary=binaryPB,
        input_checkpoint=checkpointfile_path,
        output_node_names=outputNodeName
        if type(outputNodeName) is str else ",".join(outputNodeName),
        restore_op_name="save/restore_all",  # unused
        filename_tensor_name="save/Const:0",  # unused
        output_graph=pbfileoutput_path,
        clear_devices=True,
        initializer_nodes='')
    ####################   Step 3: Save for TensorBoard visualization   ######################################################
    saveTensorboardForVisualization = False
    if (saveTensorboardForVisualization):
        modelFullPath = checkpointPath + modelName + outputNameSuffix
        tensorboardPath = checkpointPath + '\\tensorboard'
        #if not os.path.exists(tensorboardPath):
        #  os.mkdir(tensorboardPath)
        createTensorboard(modelFullPath, tensorboardPath)
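A hypothetical invocation of convert; every argument value below is illustrative only, not taken from the source:

convert(file_path='./model/squeezenet.pbtxt',
        inputNodeName='image_input',
        outputNodeName='logit/softmax',
        msuffix='.pbtxt',
        binaryPB=False,
        readMode='rb',
        folderPath='./model',
        checkpointExt='.ckpt',
        checkpointPath='./model/',
        modelName='squeezenet',
        shapeArray=[1, 224, 224, 3],
        modifyshapeAttribue=True)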