Example #1
0
def test_annette_to_model(network="cf_reid"):
    """Load an ANNETTE graph from the database and convert it to a TF model."""
    graph_path = Path('database', 'graphs', 'annette', network + '.json')
    graph = AnnetteGraph(network, graph_path)

    # run the conversion under test; any exception fails the test
    generator.generate_tf_model(graph)
    assert True
def estimate(args):
    """Estimate the execution time of a network.

    Args:
      args: parsed arguments providing ``network``, ``mapping`` and
        ``layer`` names.

    Returns:
      tuple: elements 0 and 2 of the estimation result — presumably the
        total estimated time in ms and per-layer data (verify against
        ``estimate_model``).
    """
    graph = AnnetteGraph(
        args.network,
        get_database('graphs', 'annette', args.network + '.json'))

    # optionally apply a hardware mapping optimization before estimating
    if args.mapping != "none":
        mapping_model = Mapping_model.from_json(
            get_database('models', 'mapping', args.mapping + '.json'))
        mapping_model.run_optimization(graph)

    # LOAD MODELS
    layer_model = Layer_model.from_json(
        get_database('models', 'layer', args.layer + '.json'))

    # APPLY ESTIMATION
    result = layer_model.estimate_model(graph)

    # persist the result in the results database
    write_result(args.network, result, args.mapping, args.layer,
                 get_database('results'))

    return result[0], result[2]
Example #3
0
def test_regression_estimation():
    """Regression test: estimate several networks with all NCS2 layer models.

    Loads the NCS2/OpenVINO mapping model plus four layer-model variants
    and runs the estimation for every network; any exception fails the
    test.
    """
    network_list = [
        'cf_cityscapes', 'cf_resnet50', 'cf_openpose', 'tf_mobilenetv1',
        'tf_mobilenetv2'
    ]

    # mapping model for the NCS2 / OpenVINO target
    ncs2_opt = Mapping_model.from_json(
        Path('database', 'models', 'mapping', 'ov.json'))

    # LOAD MODELS — one layer model per estimation strategy
    # (the original also assigned an unused 'ov.json' layer path; removed)
    variants = ['roofline', 'ref_roofline', 'statistical', 'mixed']
    ncs2_mod = {
        variant: Layer_model.from_json(
            "database/models/layer/ncs2-" + variant + ".json")
        for variant in variants
    }

    for network in network_list:
        json_file = Path('database', 'graphs', 'annette', network + '.json')
        model = AnnetteGraph(network, json_file)
        ncs2_opt.run_optimization(model)
        # APPLY ESTIMATION with every model variant
        ncs2_res = {
            variant: ncs2_mod[variant].estimate_model(model)
            for variant in variants
        }

    assert True
Example #4
0
def test_estimation(network="cf_resnet50"):
    """Estimate a single network with all four NCS2 layer models.

    Args:
      network (str): name of the graph in the ANNETTE database.

    Returns:
      AnnetteGraph: the mapping-optimized graph, for further inspection.
    """
    json_file = Path('database', 'graphs', 'annette', network + '.json')
    model = AnnetteGraph(network, json_file)

    # apply the NCS2/OpenVINO mapping optimization first
    ncs2_opt = Mapping_model.from_json(
        Path('database', 'models', 'mapping', 'ov.json'))
    ncs2_opt.run_optimization(model)

    # LOAD MODELS — one layer model per estimation strategy
    # (the original also assigned an unused 'ov.json' layer path; removed)
    variants = ['roofline', 'ref_roofline', 'statistical', 'mixed']
    ncs2_mod = {
        variant: Layer_model.from_json(
            "database/models/layer/ncs2-" + variant + ".json")
        for variant in variants
    }

    # APPLY ESTIMATION with every model variant
    ncs2_res = {
        variant: ncs2_mod[variant].estimate_model(model)
        for variant in variants
    }

    assert True

    return model
Example #5
0
def test_optimization(network="cf_resnat50".replace("nat", "net")):
    """Apply the NCS2 mapping optimization to a network graph.

    Returns the optimized AnnetteGraph for further inspection.
    """
    graph_path = Path('database', 'graphs', 'annette', network + '.json')
    model = AnnetteGraph(network, graph_path)

    # load the OpenVINO mapping model and rewrite the graph in place
    mapping_path = Path('database', 'models', 'mapping', 'ov.json')
    Mapping_model.from_json(mapping_path).run_optimization(model)

    assert True

    return model
 def __init__(self, network):
     # NOTE(review): this fragment is mis-indented (one space) and is a
     # byte-for-byte duplicate of Graph_generator.__init__ below — it looks
     # like a copy/paste artifact of the example dump. Confirm it is not
     # needed by another class before deleting it.
     #load graphstruct
     json_file = get_database('graphs', 'annette', network + '.json')
     self.graph = AnnetteGraph(network, json_file)
     print(self.graph)
class Graph_generator():
    """Generate a TensorFlow (v1) graph from an ANNETTE graph description.

    The generator loads a graph description from the database, optionally
    rewrites layer attributes from a benchmark config CSV (one row per
    graph variant), builds the corresponding TF graph op by op, and
    exports it as a frozen protobuf (.pb).
    """

    def __init__(self, network):
        """Load the ANNETTE graph description for ``network``."""
        #load graphstruct
        json_file = get_database('graphs', 'annette', network + '.json')
        self.graph = AnnetteGraph(network, json_file)
        print(self.graph)
        #load configfile

    def add_configfile(self, configfile):
        """Read a benchmark config CSV from the database into ``self.config``."""
        self.config = pd.read_csv(
            get_database('benchmarks', 'config', configfile))
        print(self.config)

    def generate_graph_from_config(self, num):
        """Build the TF graph for config row ``num`` and export it to .pb.

        Layer-attribute values that match a config column name are
        replaced by that column's entry in row ``num``, dimensions are
        recomputed, then one TF op is created per layer.

        Args:
          num (int): row index into the config DataFrame.

        Returns:
          The graph's ``output_layers`` spec entry (the exported outputs).
        """
        # can be used as to generate input for generate_tf_model
        # execute the function under test

        def replace_key(value, num):
            # Substitute a symbolic attribute value with the config entry
            # of row `num` when `value` names a config column; otherwise
            # return the value unchanged.
            if value in self.config.keys():
                logging.debug("%s detected", value)
                return int(self.config.iloc[num][value])
            return value

        # model_spec contains some info about the model
        for key, value in self.graph.model_spec.items():
            logging.debug(key)
            logging.debug(value)

        tf.compat.v1.reset_default_graph()
        self.tf_graph = {}

        # First pass: substitute config values into the layer attributes
        # (in place, including elements of list-valued attributes).
        for layer_n, layer_attrs in self.graph.model_spec['layers'].items():
            logging.debug("layer name %s " % layer_n)
            logging.debug("layer attrs %s " % layer_attrs)
            for attr_n, attr_v in layer_attrs.items():
                logging.debug("attribute name %s" % attr_n)
                logging.debug("attribute values %s" % attr_v)

                if isinstance(attr_v, list):
                    for n, attr_ele in enumerate(attr_v):
                        self.graph.model_spec['layers'][layer_n][attr_n][
                            n] = replace_key(attr_ele, num)
                else:
                    self.graph.model_spec['layers'][layer_n][
                        attr_n] = replace_key(attr_v, num)

        # Recompute derived tensor dimensions after the substitutions.
        self.graph.compute_dims()

        logging.debug("Loop through layers")

        # Second pass: instantiate one TF op per layer, keyed by layer
        # name, via a type -> generator dispatch table.
        # TODO check this! Maybe FullyConnected with bias
        generators = {
            "DataInput": self.tf_gen_placeholder,
            "Conv": self.tf_gen_conv,
            "Relu": self.tf_gen_relu,
            "Add": self.tf_gen_add,
            "DepthwiseConv": self.tf_gen_dwconv,
            "Pool": self.tf_gen_pool,
            "Concat": self.tf_gen_concat,
            "Flatten": self.tf_gen_flatten,
            "Softmax": self.tf_gen_softmax,
            "MatMul": self.tf_gen_matmul,
            "FullyConnected": self.tf_gen_matmul,
        }
        for layer_n, layer_attrs in self.graph.model_spec['layers'].items():
            gen = generators.get(layer_attrs['type'])
            if gen is None:
                print("no layer")
                exit()
            self.tf_graph[layer_n] = gen(layer_attrs, layer_n)

            logging.debug("Config %s" % self.config.iloc[num])
            logging.debug("Current graph %s" % self.tf_graph)

        # return annette graph
        out = self.graph.model_spec['output_layers']
        logging.debug(self.graph.model_spec)
        self.tf_export_to_pb(out)
        return out

    def tf_export_to_pb(self, output_node, save_path=None):
        """Freeze the current default graph and write it to a .pb file.

        Args:
          output_node: output node name(s) to keep while freezing.
          save_path: optional explicit target file; defaults to the
            database 'graphs/tf' location named after the model.
        """
        # Collect default graph information
        g = tf.get_default_graph()

        with tf.Session() as sess:
            # Initialize the variables
            sess.run(tf.global_variables_initializer())
            g = g.as_graph_def(add_shapes=True)

            # Convert variables to constants up to the requested output nodes
            frozen_graph_def = tf.graph_util.convert_variables_to_constants(
                sess, g, output_node)

            print("load graph")
            # Debug print of the frozen node names (sanitized, without
            # Variable/BiasAdd helper nodes).
            names = [
                n.name.replace("/", "_").replace("-", "_")
                for n in frozen_graph_def.node
                if not ("Variable" in n.name or "BiasAdd" in n.name)
            ]
            print(names)

        # Write the intermediate representation of the graph to .pb file
        if save_path:
            net_file = save_path
        else:
            net_file = get_database('graphs', 'tf',
                                    self.graph.model_spec['name'] + ".pb")
        print(net_file)
        with open(os.path.join(net_file), 'wb') as f:
            f.write(frozen_graph_def.SerializeToString())

    def tf_gen_pool(self, layer, name=None):
        """Generate a max/avg pooling op; kernel width -1 means global avg."""
        # fixed: the original debug message said "Generating Relu" here
        logging.debug("Generating Pool with dict: %s" % layer)
        inp_name = layer['parents'][0]
        inp = self.tf_graph[inp_name]
        k_w = layer['kernel_shape'][1]
        k_h = layer['kernel_shape'][2]
        stride_w = layer['strides'][1]
        stride_h = layer['strides'][2]
        if layer['pooling_type'] == 'MAX':
            return maxpool(inp, (k_w, k_h), (stride_w, stride_h), name)
        elif layer['pooling_type'] == 'AVG' and layer['kernel_shape'][1] == -1:
            return globavgpool(inp, name)
        elif layer['pooling_type'] == 'AVG':
            return avgpool(inp, (k_w, k_h), (stride_w, stride_h), name)
        else:
            # fixed: the original message claimed only max pooling existed
            logging.error("unsupported pooling type: %s",
                          layer['pooling_type'])
            exit()

    def tf_gen_concat(self, layer, name=None):
        """Concatenate all parent tensors along the channel axis (axis 3)."""
        logging.debug("Generating Concat with dict: %s" % layer)
        inp = [self.tf_graph[x] for x in layer['parents']]
        return tf.concat(inp, axis=3, name=name)

    def tf_gen_add(self, layer, name=None):
        """Element-wise addition of exactly two parent tensors."""
        logging.debug("Generating Add with dict: %s" % layer)
        if len(layer['parents']) == 2:
            inp0 = self.tf_graph[layer['parents'][0]]
            inp1 = self.tf_graph[layer['parents'][1]]
            return tf.add(inp0, inp1, name=name)
        raise NotImplementedError

    def tf_gen_flatten(self, layer, name=None):
        """Generate a flatten op on the single parent tensor."""
        logging.debug("Generating Flatten with dict: %s" % layer)
        return flatten(self.tf_graph[layer['parents'][0]], name)

    def tf_gen_relu(self, layer, name=None):
        """Generate a ReLU op on the single parent tensor."""
        logging.debug("Generating Relu with dict: %s" % layer)
        return relu(self.tf_graph[layer['parents'][0]], name)

    def tf_gen_softmax(self, layer, name=None):
        """Generate a softmax op on the single parent tensor."""
        logging.debug("Generating Softmax with dict: %s" % layer)
        return softmax(self.tf_graph[layer['parents'][0]], name)

    def tf_gen_matmul(self, layer, name=None):
        """Generate a matmul / fully-connected op; units from output_shape[1]."""
        logging.debug("Generating MatMul with dict: %s" % layer)
        inp = self.tf_graph[layer['parents'][0]]
        filters = layer['output_shape'][1]
        return matmul(inp, filters, name)

    def tf_gen_conv(self, layer, name=None):
        """Generate a 2D convolution; filters from output_shape[3]."""
        logging.debug("Generating Conv with dict: %s" % layer)
        inp = self.tf_graph[layer['parents'][0]]
        filters = layer['output_shape'][3]
        k_w = layer['kernel_shape'][0]
        k_h = layer['kernel_shape'][1]
        stride_w = layer['strides'][1]
        stride_h = layer['strides'][2]
        return conv2d(inp, filters, (k_w, k_h), (stride_w, stride_h), name)

    def tf_gen_dwconv(self, layer, name=None):
        """Generate a depthwise 2D convolution.

        NOTE(review): stride is hard-coded to (1, 1) and the layer's
        'strides' attribute is ignored — confirm this is intentional.
        """
        logging.debug("Generating DWConv with dict: %s" % layer)
        inp = self.tf_graph[layer['parents'][0]]
        k_w = layer['kernel_shape'][0]
        k_h = layer['kernel_shape'][1]
        #return tf.layers.separable_conv2d(inp, filters, (k_w,k_h), padding='same')
        return dw_conv2d(inp, (k_w, k_h), (1, 1), name)

    def tf_gen_placeholder(self, layer, name="x"):
        """Generate an input placeholder; batch size -1 is pinned to 1."""
        logging.debug("Generating Placeholder with dict: %s" % layer)
        batch_size = layer['output_shape'][0]
        if batch_size == -1:
            batch_size = 1
        width = layer['output_shape'][1]
        height = layer['output_shape'][2]
        channels = layer['output_shape'][3]
        return tf.compat.v1.placeholder(tf.float32,
                                        [batch_size, width, height, channels],
                                        name=name)