def dnnweaver_init_data(g, scope, *args, **kwargs):
    """Create an input/data tensor (dtype FXP16) inside graph `g` under `scope`.

    Resolves the dnnweaver2 ``get_tensor`` factory by dotted name and calls it
    with the caller's args; ``kwargs`` must include ``name`` (it is logged).
    Returns whatever ``get_tensor`` returns (a dnnweaver2 tensor).
    """
    fname = "cmstack.codegen.dnnweavergen.dnnweaver2.get_tensor"
    # Was a bare debug print() to stdout; use the logging module (already used
    # elsewhere in this file) with lazy %-args so normal runs stay quiet.
    logging.debug("Init data name: %s", kwargs['name'])
    with g.as_default():
        with g.name_scope(scope):
            return get_func(fname)(*args, dtype=FQDtype.FXP16, **kwargs)
def dnnweaver_init_bias(g, scope, *args, **kwargs):
    """Create a bias tensor with dtype FixedPoint(32, 22) in graph `g` under `scope`.

    Delegates to the dnnweaver2 ``get_tensor`` factory resolved by dotted name.
    """
    getter_path = "cmstack.codegen.dnnweavergen.dnnweaver2.get_tensor"
    with g.as_default(), g.name_scope(scope):
        return get_func(getter_path)(*args, dtype=FixedPoint(32, 22), **kwargs)
def dnnweaver_max_pool(g, scope, *args, **kwargs):
    """Emit a dnnweaver2 maxPool op inside graph `g` under name scope `scope`.

    If a ``pad`` kwarg is present it is expanded into the 4-tuple-of-pairs
    form ((0,0), (0,p), (0,p), (0,0)) expected by maxPool.
    """
    fname = "cmstack.codegen.dnnweavergen.dnnweaver2.tensorOps.cnn.maxPool"
    # `in kwargs` instead of the redundant `in kwargs.keys()`; hoist the
    # repeated kwargs['pad'][0] lookup.
    if 'pad' in kwargs:
        # NOTE(review): only pad[0] is used for both the H and W axes —
        # presumably square padding is assumed; confirm against callers.
        p = kwargs['pad'][0]
        kwargs['pad'] = ((0, 0), (0, p), (0, p), (0, 0))
    with g.as_default():
        with g.name_scope(scope):
            return get_func(fname)(*args, **kwargs)
def dnnweaver_init_mean(g, scope, input_op, *args, **kwargs):
    """Create a mean tensor with dtype FixedPoint(16, 9).

    The tensor is created under the nested name scope ``scope``/``input_op``
    inside graph `g`, via the dnnweaver2 ``get_tensor`` factory.
    """
    getter_path = "cmstack.codegen.dnnweavergen.dnnweaver2.get_tensor"
    # Contexts are entered left-to-right, matching the original nesting.
    with g.as_default(), g.name_scope(scope), g.name_scope(input_op):
        return get_func(getter_path)(*args, dtype=FixedPoint(16, 9), **kwargs)
def finalize_graph(self):
    """Apply the configured output wrapper to the finished graph.

    Reads ``output_wrapper`` from the translator config, resolves each of its
    ``func_args`` against ``self.output_id``, and calls the wrapper function.
    Returns the wrapper's result (the finalized target graph).
    """
    wrapper = self.translator_config['output_wrapper']
    call_args = [self.get_arg_attribute(spec, self.output_id)
                 for spec in wrapper['func_args']]
    return get_func(wrapper['func_name'])(*call_args)
def dnnweaver_conv2d(g, scope, *args, **kwargs):
    """Emit a dnnweaver2 conv2D op inside graph `g` under name scope `scope`.

    Each call consumes the next entry of the module-level ``dtype_map['cout']``
    table (tracked by ``dtype_counters['cout']``) as the fractional-bit count
    of the 16-bit fixed-point output dtype.
    """
    fname = "cmstack.codegen.dnnweavergen.dnnweaver2.tensorOps.cnn.conv2D"
    with g.as_default(), g.name_scope(scope):
        # Capture the current index, then advance the shared counter —
        # equivalent to the original increment-then-subtract-one indexing.
        idx = dtype_counters['cout']
        dtype_counters['cout'] = idx + 1
        out_dtype = FixedPoint(16, dtype_map['cout'][idx])
        return get_func(fname)(*args, dtype=out_dtype, **kwargs)
def initialize_graph(self):
    """Build and return the initial target graph object.

    Resolves the ``initialize_graph`` entry of the translator config: each
    positional and keyword argument spec is evaluated against
    ``self.proto_name`` via ``get_arg_attribute`` and passed to the configured
    constructor function.
    """
    cfg = self.translator_config['initialize_graph']
    pos_args = [self.get_arg_attribute(spec, self.proto_name)
                for spec in cfg['func_args']]
    kw_args = {name: self.get_arg_attribute(spec, self.proto_name)
               for name, spec in cfg['func_kwargs'].items()}
    return get_func(cfg['func_name'])(*pos_args, **kw_args)
def create_translated_graph(self):
    """Walk the translated sub-graph and emit one backend op per mapped node.

    For every node with ``op_cat == 'mapped_node'`` this looks up the op's
    config and template, builds its call arguments, invokes the op function,
    and records the result(s) in ``self.graph_variables`` (also under the
    edge's alias name when the edge_info name differs from the output id).
    Side effects: mutates ``self.scope``, ``self.op_name``,
    ``self.graph_variables``, ``self.output_id`` and (when an
    ``output_wrapper`` is configured) ``self.target_graph``.
    Exits the process on any mapping/arity error.
    """
    output_id = None
    # Exactly one graph input is supported by this translator.
    assert len(self.translated_graph.input) == 1
    self.scope = ''
    # NOTE(review): "inititalize" looks misspelled, but the method it calls
    # is outside this view — confirm the definition before renaming either.
    self.inititalize_input()
    for n in self.translated_graph.sub_graph:
        op_cat = n.op_cat
        if op_cat == 'mapped_node':
            # Split "outer/leaf" once from the right to recover the scope.
            op_context = str(n.name).rsplit("/", 1)
            self.op_name = n.op_type
            if len(op_context) > 1:
                # Second-to-last path component is the immediate scope.
                self.scope = str(n.name).split("/")[-2]
            else:
                self.scope = ''
            # Local `scope` (prefix passed to create_op_args) intentionally
            # excludes the implicit top-level 'main' scope.
            if len(op_context) > 1 and op_context[0] != 'main':
                scope = op_context[0] + '/'
            else:
                scope = ''
            op_config = self.translator_config['ops'][n.op_type]
            op_func = get_func(op_config['op_name'])
            args, kwargs, output_id = self.create_op_args(
                n.op_type, n, self.templates[n.op_type], scope)
            if len(output_id) == 1:
                # Single-output op: store the result directly.
                self.graph_variables[output_id[0]] = op_func(
                    *args, **kwargs)
                if output_id[0] in list(self.translated_graph.edge_info):
                    iedge = self.translated_graph.edge_info[output_id[0]]
                    # Alias the value under the edge's own name as well.
                    if iedge.name != output_id[0]:
                        self.graph_variables[str(
                            iedge.name)] = self.graph_variables[
                            output_id[0]]
            else:
                # Multi-output op: result must be a sized sequence whose
                # length matches the configured output ids.
                temp = op_func(*args, **kwargs)
                if not hasattr(temp, '__len__'):
                    # NOTE(review): adjacent f-strings concatenate with no
                    # separating space/newline between the two sentences.
                    logging.error(
                        f"Size mismatch between output of {n.op_type} which has length 1 output"
                        f"Supplied config outputs: {output_id}")
                    exit(1)
                elif len(temp) != len(output_id):
                    logging.error(
                        f"Size mismatch between output of {n.op_type} which has length {len(temp)} output"
                        f"Supplied config outputs: {output_id}")
                    exit(1)
                for i in range(len(temp)):
                    self.graph_variables[output_id[i]] = temp[i]
                    if output_id[i] in list(
                            self.translated_graph.edge_info):
                        iedge = self.translated_graph.edge_info[
                            output_id[i]]
                        if iedge.name != output_id[i]:
                            self.graph_variables[str(
                                iedge.name)] = self.graph_variables[
                                output_id[i]]
    # After the walk, output_id holds the LAST mapped node's outputs;
    # the graph must end in exactly one output.
    if not output_id:
        logging.error(f"No nodes mapped for graph")
        exit(1)
    elif len(output_id) != 1:
        logging.error(
            f"More than one output supplied for graph: {output_id}")
        exit(1)
    self.output_id = output_id[0]
    if 'output_wrapper' in self.translator_config.keys():
        self.target_graph = self.finalize_graph()
def get_arg_attribute(self, key, instance_name, literal=False):
    """Resolve one argument spec `key` against `instance_name`.

    Dispatch on the spec's type:
      * list  -> resolve each element recursively, return a list.
      * dict  -> a nested call spec ({'func_name', 'func_args'}); resolve the
                 args recursively and invoke the named function on them.
      * str   -> either a reserved keyword ('name', 'input_op', 'shape',
                 'type', 'graph', 'scope', 'relative_scope'), a previously
                 stored graph variable, or — as the fall-through — the string
                 itself as a literal value.
      * bool/int -> returned unchanged.
    `literal=True` marks `instance_name` as a literal value rather than an
    edge name (shape lookup is then an error; 'type' uses the Python type).
    Returns the resolved value; logs and returns None for unhandled specs.
    """
    if isinstance(key, list):
        arg = []
        for k in key:
            arg.append(
                self.get_arg_attribute(k, instance_name, literal=literal))
        return arg
    elif isinstance(key, dict):
        assert 'func_name' in key.keys() and 'func_args' in key.keys()
        return get_func(key['func_name'])(self.get_arg_attribute(
            key['func_args'], instance_name))
    elif isinstance(key, str):
        if key == 'name':
            return instance_name
        elif key == 'input_op':
            return self.op_name
        elif key == 'shape':
            if literal:
                logging.error(
                    f"Cannot get shape for literal value {instance_name} as attribute"
                )
                exit(1)
            edge = self.translated_graph.edge_info[instance_name]
            if 'dimensions' not in list(edge.attributes):
                # Missing dims is logged but non-fatal: empty shape.
                logging.error(f"No dimensions for edge {instance_name}")
                tuple_dims = ()
            else:
                dimensions = hdfgutils.get_attribute_value(
                    edge.attributes['dimensions'])
                # Numeric dims become ints; symbolic dims stay as-is.
                tuple_dims = tuple(
                    int(d) if is_number(d) else d for d in dimensions)
            return tuple_dims
        elif key == 'type':
            if literal:
                return type(instance_name).__name__
            edge = self.translated_graph.edge_info[instance_name]
            if 'type' not in list(edge.attributes):
                # Missing type is logged but defaulted to 'float32'.
                logging.error(f"No type for edge {instance_name}")
                dtype = 'float32'
            else:
                dtype = hdfgutils.get_attribute_value(
                    edge.attributes['type'])
            return dtype
        elif key == "graph":
            return self.target_graph
        elif key == "scope":
            return self.scope
        elif key == "relative_scope":
            # Second-to-last path component of the current scope, or ''.
            if len(self.scope.split("/")) > 1:
                return self.scope.split("/")[-2]
            else:
                return ''
        elif instance_name in self.graph_variables.keys():
            # Non-reserved key but a known variable: return the stored value.
            return self.graph_variables[instance_name]
        else:
            # Fall-through: the spec string itself is the literal argument.
            return key
    elif isinstance(key, bool) or isinstance(key, int):
        return key
    else:
        # NOTE(review): this branch logs but implicitly returns None.
        logging.error(
            f"Could not evaluate argument {key} for {instance_name}")
def execute_graph(self):
    """Run the configured graph-execution function on ``self.target_graph``."""
    exec_cfg = self.translator_config['graph_execution']
    runner = get_func(exec_cfg['func_name'])
    runner(self.target_graph)
def init_var(self, var, instance_name, literal=False):
    """Instantiate one variable described by template entry `var`.

    Determines the variable's source `value`:
      * a 'parameter' arg (non-literal) must have a 'value' attribute on its
        edge in ``translated_graph.edge_info`` — fatal error otherwise;
      * a literal id is looked up in edge_info by its bare id, then by the
        full instance name — fatal error if neither exists;
      * anything else uses `instance_name` itself as the value.
    Then resolves the template's ``init_func_args`` / ``init_func_kw`` against
    that value via ``get_arg_attribute`` and calls the configured
    ``init_func``. Returns the constructed variable. Exits the process on
    lookup failures.
    """
    # The original had two verbatim-duplicate parse branches for str vs
    # non-str input; str(x) is a no-op on str, so one branch suffices.
    # (`id` renamed to `var_id` to stop shadowing the builtin; the parsed
    # scope prefix was never used and is dropped.)
    parts = str(instance_name).rsplit('/', 1)
    var_id = parts[-1]
    arg_type = var['arg_type']
    if arg_type == 'parameter' and not literal and not is_literal(var_id):
        if instance_name not in list(self.translated_graph.edge_info):
            logging.error(
                f"Unable to get value for parameter {instance_name}")
            exit(1)
        edge = self.translated_graph.edge_info[instance_name]
        if 'value' not in list(edge.attributes):
            logging.error(
                f"Could not find literal for parameter argument {instance_name}.\n"
                f"Possible attributes: {list(edge.attributes)}")
            exit(1)
        value = hdfgutils.get_attribute_value(edge.attributes['value'])
    elif is_literal(var_id) and isinstance(instance_name, str):
        # Prefer the bare id, fall back to the fully scoped name.
        if var_id in list(self.translated_graph.edge_info):
            edge = self.translated_graph.edge_info[var_id]
            value = hdfgutils.get_attribute_value(edge.attributes['value'])
        elif instance_name in list(self.translated_graph.edge_info):
            edge = self.translated_graph.edge_info[instance_name]
            value = hdfgutils.get_attribute_value(edge.attributes['value'])
        else:
            logging.error(
                f"Could not find literal for parameter argument {instance_name} with id {var_id}.\n"
                f"var: {var['key']}")
            exit(1)
    else:
        value = instance_name
    args = [self.get_arg_attribute(a, value, literal=literal)
            for a in var['init_func_args']]
    kwargs = {k: self.get_arg_attribute(spec, value, literal=literal)
              for k, spec in var['init_func_kw'].items()}
    # f(*args, **{}) is identical to f(*args); the original's empty-kwargs
    # special case is unnecessary.
    return get_func(var['init_func'])(*args, **kwargs)
def dnnweaver_context(g, scope, fname, *args, **kwargs):
    """Resolve the function at dotted path `fname` and invoke it inside
    graph `g` under name scope `scope`, forwarding all arguments."""
    with g.as_default(), g.name_scope(scope):
        return get_func(fname)(*args, **kwargs)
def dnnweaver_leaky_relu(g, scope, *args, **kwargs):
    """Emit a dnnweaver2 leakyReLU op inside graph `g` under name scope
    `scope`, forwarding all arguments unchanged."""
    op_path = "cmstack.codegen.dnnweavergen.dnnweaver2.tensorOps.cnn.leakyReLU"
    with g.as_default(), g.name_scope(scope):
        return get_func(op_path)(*args, **kwargs)