def generate_digest(fun, call):
    generated_code = ""

    ## TODO make this proper
    fun_params = ["bg", "\"mac_learn_digest\""]
    for p in call[1]:
        if isinstance(p, int):
            fun_params += ["0"] #[str(p)]
        elif isinstance(p, p4_field_list):
            field_list = p
            fun_params += ["&fields"]
        else:
            addError("generating actions.c", "Unhandled parameter type in generate_digest: " + str(p))

    #[ struct type_field_list fields;

    quan = str(len(field_list.fields))
    #[ fields.fields_quantity = ${quan};
    #[ fields.field_offsets = malloc(sizeof(uint8_t*)*fields.fields_quantity);
    #[ fields.field_widths = malloc(sizeof(uint8_t*)*fields.fields_quantity);

    for i, field in enumerate(field_list.fields):
        j = str(i)
        if isinstance(field, p4_field):
            #[ fields.field_offsets[${j}] = (uint8_t*) (pd->headers[header_instance_${field.instance}].pointer + field_instance_byte_offset_hdr[field_instance_${field.instance}_${field.name}]);
            #[ fields.field_widths[${j}] = field_instance_bit_width[field_instance_${field.instance}_${field.name}]*8;
        else:
            addError("generating actions.c", "Unhandled parameter type in field_list: " + str(field_list.name) + ", " + str(field))

    params = ",".join(fun_params)

    #[
    #[ generate_digest(${params}); sleep(1);
    return generated_code
def gen_extract_header_2(hdrinst, hdrtype, w):
    if not hdrtype.is_vw:
        addError("generating extract header call", "fixed-width header extracted with two-param extract")
        return

    x = header_bit_width(hdrtype)
    w = format_expr(w)
def gen_format_type(t, resolve_names = True):
    if t.node_type == 'Type_Void':
        #[ void
    elif t.node_type == 'Type_Boolean':
        #[ bool
    elif t.node_type == 'Type_Bits':
        res = 'int' if t.isSigned else 'uint'
        if t.size <= 8:
            res += '8_t'
        elif t.size <= 16:
            res += '16_t'
        elif t.size <= 32:
            res += '32_t'
        else:
            # TODO is making it an array always OK?
            res += '8_t*'

        # name = "bit" if t.isSigned else "int"

        return res
    elif t.node_type == 'Type_Name':
        if t.type_ref.node_type in {'Type_Enum', 'Type_Error'}:
            #[ enum ${t.type_ref.c_name}
        else:
            if not resolve_names:
                return t.type_ref.name

            global type_env
            if t.type_ref.name in type_env:
                return type_env[t.type_ref.name]

            addWarning('using a named type parameter', 'no type found in environment for variable {}, defaulting to int'.format(t.type_ref.name))
            #[ int /*type param ${t.type_ref.name}*/
    elif t.node_type in {'Type_Extern', 'Type_Struct'}:
        #[ ${t.name}
    else:
        addError('formatting type', 'The type %s is not supported yet!' % t.node_type)
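# Illustration only (not part of the generator): a standalone sketch of the
# Type_Bits mapping performed above. 'bit_width' and 'is_signed' are
# hypothetical stand-ins for t.size and t.isSigned.
def _c_type_for_bits(bit_width, is_signed):
    base = 'int' if is_signed else 'uint'
    if bit_width <= 8:
        return base + '8_t'
    if bit_width <= 16:
        return base + '16_t'
    if bit_width <= 32:
        return base + '32_t'
    # wider fields fall back to a byte pointer, matching the TODO above
    return base + '8_t*'

# e.g. a bit<9> field maps to uint16_t and a bit<48> field to uint8_t*:
# _c_type_for_bits(9, False) == 'uint16_t'; _c_type_for_bits(48, False) == 'uint8_t*'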
def gen_methodcall(stmt):
    mcall = format_expr(stmt.methodCall)

    if mcall:
        #[ $mcall;
    else:
        addError('generating method call statement', 'Invalid method call {}'.format(stmt.methodCall))
def generate_digest(fun, call):
    generated_code = ""

    ## TODO make this proper
    extracted_params = []
    for p in call[1]:
        if isinstance(p, int):
            extracted_params += ["0"] #[str(p)]
        elif isinstance(p, p4_field_list):
            field_list = p
            extracted_params += ["&fields"]
        else:
            addError("generating actions.c", "Unhandled parameter type in generate_digest: " + str(p))

    fun_params = ["bg"] + ["\"" + field_list.name + "\""] + extracted_params

    #[ struct type_field_list fields;

    quan = str(len(field_list.fields))
    #[ fields.fields_quantity = ${quan};
    #[ fields.field_offsets = malloc(sizeof(uint8_t*)*fields.fields_quantity);
    #[ fields.field_widths = malloc(sizeof(uint8_t*)*fields.fields_quantity);

    for i, field in enumerate(field_list.fields):
        j = str(i)
        if isinstance(field, p4_field):
            #[ fields.field_offsets[${j}] = (uint8_t*) field_desc(pd, ${fld_id(field)}).byte_addr;
            #[ fields.field_widths[${j}] = field_desc(pd, ${fld_id(field)}).bitwidth;
        else:
            addError("generating actions.c", "Unhandled parameter type in field_list: " + str(field_list.name) + ", " + str(field))

    params = ",".join(fun_params)

    #[
    #[ generate_digest(${params}); sleep(1);
    return generated_code
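# For reference (a sketch, not verbatim output): for a field list named
# "mac_learn_digest" with two fields, the C code emitted by the function above
# is expected to look roughly like the following; the field identifier
# field_instance_ethernet_srcAddr is a made-up example.
#
#     struct type_field_list fields;
#     fields.fields_quantity = 2;
#     fields.field_offsets = malloc(sizeof(uint8_t*)*fields.fields_quantity);
#     fields.field_widths = malloc(sizeof(uint8_t*)*fields.fields_quantity);
#     fields.field_offsets[0] = (uint8_t*) field_desc(pd, field_instance_ethernet_srcAddr).byte_addr;
#     fields.field_widths[0] = field_desc(pd, field_instance_ethernet_srcAddr).bitwidth;
#     ...
#     generate_digest(bg, "mac_learn_digest", &fields); sleep(1);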
def convert_component(component):
    if component.node_type == 'Member':
        hdr = component.expr
        fld_name = component.member
        fld = hdr.type.fields.get(fld_name)
        return (hdr, fld)

    addError('generating list expression buffer', 'List element (%s) not supported!' % component)
    return None
def add_to_field(fun, call):
    generated_code = ""

    args = call[1]
    dst = args[0]
    val = args[1]

    if not isinstance(dst, p4_field):
        addError("generating add_to_field", "We do not allow changing an R-REF yet")

    if isinstance(val, int):
        #[ value32 = ${val};
        if dst.width <= 32:
            #[ EXTRACT_INT32_AUTO(pd, ${fld_id(dst)}, res32)
            #[ value32 += res32;
            #[ MODIFY_INT32_INT32_AUTO(pd, ${fld_id(dst)}, value32)
        else:
            addError("generating modify_field", "Bytebufs cannot be modified yet.")
    elif isinstance(val, p4_field):
        if dst.width <= 32 and val.length <= 32:
            #[ EXTRACT_INT32_AUTO(pd, ${fld_id(val)}, value32)
            #[ EXTRACT_INT32_AUTO(pd, ${fld_id(dst)}, res32)
            #[ value32 += res32;
            #[ MODIFY_INT32_INT32_AUTO(pd, ${fld_id(dst)}, value32)
        else:
            addError("generating add_to_field", "bytebufs cannot be modified yet.")
    elif isinstance(val, p4_signature_ref):
        p = "parameters.%s" % str(fun.signature[val.idx])
        l = fun.signature_widths[val.idx]
        if dst.width <= 32 and l <= 32:
            #[ EXTRACT_INT32_AUTO(pd, ${fld_id(dst)}, res32)
            #[ TODO
        else:
            addError("generating add_to_field", "bytebufs cannot be modified yet.")

    return generated_code
def gen_extract_header(hdrinst, hdrtype):
    if hdrinst is None:
        addError("extracting header", "no instance found for header type " + hdrtype.name)
        return

    #[ if((int)((uint8_t*)buf-(uint8_t*)(pd->data))+${hdrtype.byte_width} > pd->wrapper->pkt_len)
    #[ ; // packet_too_short // TODO optimize this

    #[ pd->headers[${hdrinst.id}].pointer = buf;
    #[ pd->headers[${hdrinst.id}].length = ${hdrtype.byte_width};
    #[ pd->parsed_length += ${hdrtype.byte_width};
    #[ buf += pd->headers[${hdrinst.id}].length;

    for f in hdrtype.valid_fields:
        # TODO get rid of "f.get_attr('preparsed') is not None"
        # TODO (f must always have a preparsed attribute)
        if f.get_attr('preparsed') is not None and f.preparsed and f.size <= 32:
def extract_header_2(h, w):
    generated_code = ""

    if not h.type.is_vw:
        addError("generating extract header call", "fixed-width header extracted with two-param extract")
    else:
        x = sum([f.size if not f.is_vw else 0 for f in h.type.fields])
        w = format_expr_16(w)

        #[ int hdrlen = ((${w}+${x})/8);
        #[ if((int)((uint8_t*)buf-(uint8_t*)(pd->data))+hdrlen > pd->wrapper->pkt_len); // packet_too_short // TODO optimize this
        #[ if(hdrlen > ${h.type.byte_width}); // header_too_long

        #[ pd->headers[${h.id}].pointer = buf;
        #[ pd->headers[${h.id}].length = hdrlen;
        #[ pd->headers[${h.id}].var_width_field_bitwidth = hdrlen * 8 - ${sum([f.size if not f.is_vw else 0 for f in h.type.fields])};
        #[ // pd->headers[${h.id}].valid = 1;

    return generated_code
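# Worked example with illustrative numbers only: for a header whose fixed-width
# fields total x = 32 bits and a varbit field extracted with w = 16 bits, the
# emitted code computes hdrlen = (16+32)/8 = 6 bytes and records a
# var_width_field_bitwidth of 6*8 - 32 = 16 bits.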
def register_read(fun, call):
    global rc
    generated_code = ""

    args = call[1]
    dst = args[0] # field
    register = args[1]
    index = args[2]

    if isinstance(index, int): # TODO
        generated_code += " value32 = " + str(index) + ";// sugar@284\n"
    elif isinstance(index, p4_field): # TODO
        generated_code += " " + str(extract_int32(index, 'value32')) + "// sugar@286\n"
    elif isinstance(index, p4_signature_ref):
        generated_code += " value32 = TODO;// sugar@288\n"

    if (register.width + 7) / 8 < 4:
        generated_code += " uint8_t register_value_" + str(rc) + "[4];// sugar@290\n"
    else:
        generated_code += " uint8_t register_value_" + str(rc) + "[" + str((register.width + 7) / 8) + "];// sugar@292\n"

    generated_code += " read_register(REGISTER_" + str(register.name) + ", value32, register_value_" + str(rc) + ");// sugar@293\n"

    if not is_vwf(dst) and dst.width <= 32:
        generated_code += " memcpy(&value32, register_value_" + str(rc) + ", 4);// sugar@295\n"
        generated_code += " " + str(modify_int32_int32(dst)) + "// sugar@296\n"
    else:
        if is_field_byte_aligned(dst) and register.width % 8 == 0:
            dst_fd = "field_desc(pd, " + fld_id(dst) + ")"
            reg_bw = register.width / 8
            generated_code += " if(" + str(reg_bw) + " < " + str(dst_fd) + ".bytewidth) {// sugar@301\n"
            generated_code += " MODIFY_BYTEBUF_BYTEBUF(pd, " + str(fld_id(dst)) + ", register_value_" + str(rc) + ", " + str(reg_bw) + ");// sugar@302\n"
            generated_code += " } else {// sugar@303\n"
            generated_code += " MODIFY_BYTEBUF_BYTEBUF(pd, " + str(fld_id(dst)) + ", register_value_" + str(rc) + " + (" + str(reg_bw) + " - " + str(dst_fd) + ".bytewidth), " + str(dst_fd) + ".bytewidth);// sugar@304\n"
            generated_code += " }// sugar@305\n"
        else:
            addError("generating register_read", "Improper bytebufs cannot be modified yet.")

    rc = rc + 1
    return generated_code
def generate_digest(fun, call):
    generated_code = ""

    ## TODO make this proper
    fun_params = ["bg", "\"mac_learn_digest\""]
    for p in call[1]:
        if isinstance(p, int):
            fun_params += ["0"] #[str(p)]
        elif isinstance(p, p4_field_list):
            field_list = p
            fun_params += ["&fields"]
        else:
            addError("generating actions.c", "Unhandled parameter type in generate_digest: " + str(p))

    generated_code += " struct type_field_list fields;// sugar@149\n"

    quan = str(len(field_list.fields))
    generated_code += " fields.fields_quantity = " + str(quan) + ";// sugar@151\n"
    generated_code += " fields.field_offsets = malloc(sizeof(uint8_t*)*fields.fields_quantity);// sugar@152\n"
    generated_code += " fields.field_widths = malloc(sizeof(uint8_t*)*fields.fields_quantity);// sugar@153\n"

    for i, field in enumerate(field_list.fields):
        j = str(i)
        if isinstance(field, p4_field):
            generated_code += " fields.field_offsets[" + str(j) + "] = (uint8_t*) (pd->headers[header_instance_" + str(field.instance) + "].pointer + field_instance_byte_offset_hdr[field_instance_" + str(field.instance) + "_" + str(field.name) + "]);// sugar@157\n"
            generated_code += " fields.field_widths[" + str(j) + "] = field_instance_bit_width[field_instance_" + str(field.instance) + "_" + str(field.name) + "]*8;// sugar@158\n"
        else:
            addError("generating actions.c", "Unhandled parameter type in field_list: " + str(field_list.name) + ", " + str(field))

    params = ",".join(fun_params)

    generated_code += "\n"
    generated_code += " generate_digest(" + str(params) + "); sleep(1);// sugar@164\n"
    return generated_code
def modify_field(fun, call):
    generated_code = ""

    args = call[1]
    dst = args[0]
    src = args[1]
    # mask = args[2]

    if not isinstance(dst, p4_field):
        addError("generating modify_field", "We do not allow changing an R-REF yet")

    if isinstance(src, int):
        #[ value32 = ${src};
        if dst.width <= 32:
            #[ MODIFY_INT32_INT32_AUTO(pd, ${fld_id(dst)}, value32)
        else:
            if dst.width % 8 == 0 and dst.offset % 8 == 0:
                #[ MODIFY_BYTEBUF_INT32(pd, ${fld_id(dst)}, value32) //TODO: This macro is not implemented
            else:
                addError("generating modify_field", "Improper bytebufs cannot be modified yet.")
    elif isinstance(src, p4_field):
        if dst.width <= 32 and src.width <= 32:
            if src.instance.metadata == dst.instance.metadata:
                #[ EXTRACT_INT32_BITS(pd, ${fld_id(src)}, value32)
                #[ MODIFY_INT32_INT32_BITS(pd, ${fld_id(dst)}, value32)
            else:
                #[ EXTRACT_INT32_AUTO(pd, ${fld_id(src)}, value32)
                #[ MODIFY_INT32_INT32_AUTO(pd, ${fld_id(dst)}, value32)
        elif src.width != dst.width:
            addError("generating modify_field", "bytebuf field-to-field of different widths is not supported yet")
        else:
            if dst.width % 8 == 0 and dst.offset % 8 == 0 and src.width % 8 == 0 and src.offset % 8 == 0 and src.instance.metadata == dst.instance.metadata:
                #[ MODIFY_BYTEBUF_BYTEBUF(pd, ${fld_id(dst)}, FIELD_BYTE_ADDR(pd, field_desc(${fld_id(src)})), (field_desc(${fld_id(dst)})).bytewidth)
            else:
                addError("generating modify_field", "Improper bytebufs cannot be modified yet.")
    elif isinstance(src, p4_signature_ref):
        p = "parameters.%s" % str(fun.signature[src.idx])
        l = fun.signature_widths[src.idx]
        if dst.width <= 32 and l <= 32:
            #[ MODIFY_INT32_BYTEBUF(pd, ${fld_id(dst)}, ${p}, ${(l+7)/8})
        else:
            if dst.width % 8 == 0 and dst.offset % 8 == 0 and l % 8 == 0: #and dst.instance.metadata:
                #[ MODIFY_BYTEBUF_BYTEBUF(pd, ${fld_id(dst)}, ${p}, (field_desc(${fld_id(dst)})).bytewidth)
            else:
                addError("generating modify_field", "Improper bytebufs cannot be modified yet.")

    return generated_code
def gen_extract_header(hdrinst, hdrtype, arg0_expr):
    if hdrinst is None:
        addError("extracting header", "no instance found for header type " + hdrtype.name)
        return

    if hasattr(arg0_expr, "path"):
        hdrname = arg0_expr.path.name
    else:
        hdrname = hdrinst.name

    #[ if(unlikely((int)((uint8_t*)buf-(uint8_t*)(pd->data))+${hdrtype.byte_width} > pd->wrapper->pkt_len))
    #[ ; // packet_too_short // TODO optimize this

    #[ pd->headers[header_instance_${hdrname}].pointer = buf;
    #[ pd->headers[header_instance_${hdrname}].was_enabled_at_initial_parse = true;
    #[ pd->headers[header_instance_${hdrname}].length = ${hdrtype.byte_width};
    #[ pd->parsed_length += ${hdrtype.byte_width};

    for f in hdrtype.fields:
        # TODO get rid of "f.get_attr('preparsed') is not None"
        # TODO (f must always have a preparsed attribute)
        if f.get_attr('preparsed') is not None and f.preparsed and f.size <= 32:
def gen_format_declaration_16(d):
    if d.node_type == 'Declaration_Variable':
        if d.type.type_ref.node_type == 'Type_Header':
            #[ uint8_t ${d.name}[${d.type.type_ref.byte_width}];
            #[ uint8_t ${d.name}_var = 0;/* Width of the variable width field*/
        else:
            t = gen_format_type_16(d.type, False)
            #[ $t ${d.name};
    elif d.node_type == 'Declaration_Instance':
        t = gen_format_type_16(d.type, False)
        #[ struct $t ${d.name};
        #[ ${t}_init(&${d.name});
    elif d.node_type == 'P4Table' or d.node_type == 'P4Action':
        #[ /* nothing */
        #for m in d.type.type_ref.methods:
        #    if m.name != d.type.path.name:
        #        #[ ${d.name}.${m.name} = &${gen_format_type_16(d.type)}_${m.name};
    else:
        addError('formatting declaration', 'Declaration of type %s is not supported yet!' % d.node_type)
def generate_digest(fun, call):
    generated_code = ""

    ## TODO make this proper
    extracted_params = []
    for p in call[1]:
        if isinstance(p, int):
            extracted_params += ["0"] #[str(p)]
        elif isinstance(p, p4_field_list):
            field_list = p
            extracted_params += ["&fields"]
        else:
            addError("generating actions.c", "Unhandled parameter type in generate_digest: " + str(p))

    fun_params = ["bg"] + ["\"" + field_list.name + "\""] + extracted_params

    generated_code += " struct type_field_list fields;// sugar@369\n"

    quan = str(len(field_list.fields))
    generated_code += " fields.fields_quantity = " + str(quan) + ";// sugar@371\n"
    generated_code += " fields.field_offsets = malloc(sizeof(uint8_t*)*fields.fields_quantity);// sugar@372\n"
    generated_code += " fields.field_widths = malloc(sizeof(uint8_t*)*fields.fields_quantity);// sugar@373\n"

    for i, field in enumerate(field_list.fields):
        j = str(i)
        if isinstance(field, p4_field):
            generated_code += " fields.field_offsets[" + str(j) + "] = (uint8_t*) field_desc(pd, " + str(fld_id(field)) + ").byte_addr;// sugar@377\n"
            generated_code += " fields.field_widths[" + str(j) + "] = field_desc(pd, " + str(fld_id(field)) + ").bitwidth;// sugar@378\n"
        else:
            addError("generating actions.c", "Unhandled parameter type in field_list: " + str(field_list.name) + ", " + str(field))

    params = ",".join(fun_params)

    generated_code += "\n"
    generated_code += " generate_digest(" + str(params) + "); sleep(1);// sugar@384\n"
    return generated_code
def modify_field_mask(mask):
    generated_code = ""
    mask_code = ""

    if isinstance(mask, int):
        mask_code = '0x%s' % format(mask, 'x')
    elif isinstance(mask, p4_field):
        if mask.width <= 32:
            generated_code += " EXTRACT_INT32_BITS(pd, " + str(fld_id(mask)) + ", mask32)// sugar@110\n"
            mask_code = 'mask32'
        else:
            addError("generating modify_field_mask", "Modify field mask is not supported.")
    elif isinstance(mask, p4_signature_ref):
        p = "parameters.%s" % str(fun.signature[mask.idx])
        l = fun.signature_widths[mask.idx]
        if l <= 32:
            generated_code += " mask32 = *" + str(p) + ";// sugar@118\n"
            mask_code = 'mask32'
        else:
            addError("generating modify_field_mask", "Modify field mask is not supported.")
    else:
        addError("generating modify_field_mask", "Modify field mask cannot be recognized.")

    return (generated_code, mask_code)
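# Example of the contract above (illustrative): for an integer mask such as
# 0xff00 the function returns ("", "0xff00"); for a field mask of at most 32
# bits it returns the EXTRACT_INT32_BITS line as generated_code together with
# the C variable name 'mask32' as mask_code.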
def modify_field_with_hash_based_offset(fun, call):
    generated_code = ""

    args = call[1]
    met = args[0]
    h = args[3]

    ## TODO make this proper
    extracted_params = []
    for p in call[1]:
        if isinstance(p, int):
            extracted_field = p
        elif isinstance(p, p4_field):
            instance = str(p.instance)
            name = str(p.name)
            field_ = instance+"_"+name
        elif isinstance(p, p4_field_list_calculation):
            lis = p.input[0]
            f = str(p.name)
            quan = str(len(p.input))
            #[ struct type_field_list fields;
            #[ fields.fields_quantity = ${quan};
            #[ fields.field_offsets = malloc(sizeof(uint8_t*)*fields.fields_quantity);
            #[ fields.field_widths = malloc(sizeof(uint8_t*)*fields.fields_quantity);
            for k in range(0, len(lis.fields)):
                li = lis.fields[k]
                l = str(li.instance)+"_"+str(li.name)
                #[
                #[ fields.field_offsets[${k}] = (uint8_t*) field_desc(pd, field_instance_${(l)}).byte_addr;
                #[ fields.field_widths[${k}] = field_desc(pd, field_instance_${(l)}).bitwidth;
                #[
        else:
            addError("generating actions.c", "Unhandled parameter type in modify_field_with_hash_based_offset: " + str(p))

    #[ uint16_t result = modify_field_with_hash_based_offset(field_instance_${field_}, &fields, ${h});
    #[ MODIFY_INT32_INT32_AUTO(pd, field_instance_${field_}, result);
    return generated_code
def gen_format_declaration(d, varname_override):
    var_name = d.name if varname_override is None else varname_override

    if d.node_type == 'Declaration_Variable':
        if d.type.get_attr('type_ref') is not None and d.type.type_ref.node_type == 'Type_Header':
            # Data for variable width headers is stored in parser_state_t
            pass
        elif d.type.node_type == 'Type_Boolean':
            #[ bool ${var_name} = false;
        else:
            t = gen_format_type(d.type, False)
            #[ $t ${var_name};
    elif d.node_type == 'Declaration_Instance':
        t = gen_format_type(d.type, False)
        #[ struct $t ${var_name};
        #[ ${t}_init(&${var_name});
    elif d.node_type == 'P4Table' or d.node_type == 'P4Action':
        #[ /* nothing */
        #for m in d.type.type_ref.methods:
        #    if m.name != d.type.path.name:
        #        #[ ${d.name}.${m.name} = &${gen_format_type(d.type)}_${m.name};
    else:
        addError('formatting declaration', 'Declaration of type %s is not supported yet!' % d.node_type)
def register_write(fun, call):
    global rc
    generated_code = ""

    args = call[1]
    register = args[0] # field
    index = args[1]
    src = args[2]

    if isinstance(index, int): # TODO
        generated_code += " res32 = " + str(index) + ";// sugar@322\n"
    elif isinstance(index, p4_field): # TODO
        generated_code += " " + str(extract_int32(index, 'res32')) + "// sugar@324\n"
    elif isinstance(index, p4_signature_ref):
        generated_code += " res32 = TODO;// sugar@326\n"

    if (register.width + 7) / 8 < 4:
        generated_code += " uint8_t register_value_" + str(rc) + "[4];// sugar@328\n"
    else:
        generated_code += " uint8_t register_value_" + str(rc) + "[" + str((register.width + 7) / 8) + "];// sugar@330\n"

    if isinstance(src, int):
        generated_code += " value32 = " + str(src) + ";// sugar@332\n"
        generated_code += " memcpy(register_value_" + str(rc) + ", &value32, 4);// sugar@333\n"
    elif isinstance(src, p4_field):
        if is_vwf(src):
            addError("generating register_write", "Variable width field '" + str(src) + "' in register_write is not supported yet")
        elif register.width <= 32 and src.width <= 32:
            generated_code += " " + str(extract_int32(src, 'value32')) + "// sugar@338\n"
            generated_code += " memcpy(register_value_" + str(rc) + ", &value32, 4);// sugar@339\n"
        else:
            if src.width == register.width:
                if src.width % 8 == 0 and src.offset % 8 == 0: # and src.instance.metadata == dst.instance.metadata:
                    generated_code += " EXTRACT_BYTEBUF(pd, " + str(fld_id(src)) + ", register_value_" + str(rc) + ")// sugar@343\n"
                else:
                    addError("generating register_write", "Improper bytebufs cannot be modified yet.")
            else:
                addError("generating register_write", "Write register-to-field of different widths is not supported yet.")

    generated_code += " write_register(REGISTER_" + str(register.name) + ", res32, register_value_" + str(rc) + ");// sugar@348\n"
    rc = rc + 1
    return generated_code
def add_to_field(fun, call):
    generated_code = ""

    args = call[1]
    dst = args[0]
    val = args[1]

    if not isinstance(dst, p4_field):
        addError("generating add_to_field", "We do not allow changing an R-REF yet")

    if isinstance(val, int):
        generated_code += " value32 = " + str(val) + ";// sugar@90\n"
        if dst.width <= 32:
            generated_code += " EXTRACT_INT32_AUTO(pd, " + str(fld_id(dst)) + ", res32)// sugar@92\n"
            generated_code += " value32 += res32;// sugar@93\n"
            generated_code += " MODIFY_INT32_INT32_AUTO(pd, " + str(fld_id(dst)) + ", value32)// sugar@94\n"
        else:
            addError("generating modify_field", "Bytebufs cannot be modified yet.")
    elif isinstance(val, p4_field):
        if dst.width <= 32 and val.length <= 32:
            generated_code += " EXTRACT_INT32_AUTO(pd, " + str(fld_id(val)) + ", value32)// sugar@99\n"
            generated_code += " EXTRACT_INT32_AUTO(pd, " + str(fld_id(dst)) + ", res32)// sugar@100\n"
            generated_code += " value32 += res32;// sugar@101\n"
            generated_code += " MODIFY_INT32_INT32_AUTO(pd, " + str(fld_id(dst)) + ", value32)// sugar@102\n"
        else:
            addError("generating add_to_field", "bytebufs cannot be modified yet.")
    elif isinstance(val, p4_signature_ref):
        p = "parameters.%s" % str(fun.signature[val.idx])
        l = fun.signature_widths[val.idx]
        if dst.width <= 32 and l <= 32:
            generated_code += " EXTRACT_INT32_AUTO(pd, " + str(fld_id(dst)) + ", res32)// sugar@109\n"
            generated_code += " TODO// sugar@110\n"
        else:
            addError("generating add_to_field", "bytebufs cannot be modified yet.")

    return generated_code
if len(hlir.p4_tables.values()) > 0:
    #[ uint8_t reverse_buffer[${max([t[1] for t in map(getTypeAndLength, hlir.p4_tables.values())])}];

def match_type_order(t):
    if t is p4.p4_match_type.P4_MATCH_EXACT:
        return 0
    if t is p4.p4_match_type.P4_MATCH_LPM:
        return 1
    if t is p4.p4_match_type.P4_MATCH_TERNARY:
        return 2

for table in hlir.p4_tables.values():
    table_type, key_length = getTypeAndLength(table)

    #[ void table_${table.name}_key(packet_descriptor_t* pd, uint8_t* key) {
    sortedfields = sorted(table.match_fields, key=lambda field: match_type_order(field[1]))
    for match_field, match_type, match_mask in sortedfields:
        if is_vwf(match_field):
            addError("generating table_" + table.name + "_key", "Variable width field '" + str(match_field) + "' in match key for table '" + table.name + "' is not supported")
        elif match_field.width <= 32:
            #[ EXTRACT_INT32_BITS(pd, ${fld_id(match_field)}, *(uint32_t*)key)
            #[ key += sizeof(uint32_t);
        elif match_field.width > 32 and match_field.width % 8 == 0:
            byte_width = (match_field.width+7)/8
            #[ EXTRACT_BYTEBUF(pd, ${fld_id(match_field)}, key)
            #[ key += ${byte_width};
        else:
            print "Unsupported field %s ignored in key calculation." % fld_id(match_field)

    if table_type == "LOOKUP_LPM":
        #[ key -= ${key_length};
        #[ int c, d;
        #[ for(c = ${key_length-1}, d = 0; c >= 0; c--, d++) *(reverse_buffer+d) = *(key+c);
        #[ for(c = 0; c < ${key_length}; c++) *(key+c) = *(reverse_buffer+c);

    #[ }
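# Sketch of the kind of C this loop emits (table and field names are made-up
# examples, not taken from any particular program): for an LPM table keyed on a
# 32-bit field, the generated key builder looks roughly like
#
#     void table_ipv4_lpm_key(packet_descriptor_t* pd, uint8_t* key) {
#         EXTRACT_INT32_BITS(pd, field_instance_ipv4_dstAddr, *(uint32_t*)key)
#         key += sizeof(uint32_t);
#         // for LOOKUP_LPM tables the key bytes are then reversed via reverse_buffer
#     }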
def register_write(fun, call):
    global rc
    generated_code = ""

    args = call[1]
    register = args[0] # field
    index = args[1]
    src = args[2]

    if isinstance(index, int): # TODO
        #[ res32 = ${index};
    elif isinstance(index, p4_field): # TODO
        #[ ${ extract_int32(index, 'res32') }
    elif isinstance(index, p4_signature_ref):
        #[ res32 = TODO;

    if (register.width+7)/8 < 4:
        #[ uint8_t register_value_${rc}[4];
    else:
        #[ uint8_t register_value_${rc}[${(register.width+7)/8}];

    if isinstance(src, int):
        #[ value32 = ${src};
        #[ memcpy(register_value_${rc}, &value32, 4);
    elif isinstance(src, p4_field):
        if is_vwf(src):
            addError("generating register_write", "Variable width field '" + str(src) + "' in register_write is not supported yet")
        elif register.width <= 32 and src.width <= 32:
            #[ ${ extract_int32(src, 'value32') }
            #[ memcpy(register_value_${rc}, &value32, 4);
        else:
            if src.width == register.width:
                if src.width % 8 == 0 and src.offset % 8 == 0: # and src.instance.metadata == dst.instance.metadata:
                    #[ EXTRACT_BYTEBUF(pd, ${fld_id(src)}, register_value_${rc})
                else:
                    addError("generating register_write", "Improper bytebufs cannot be modified yet.")
            else:
                addError("generating register_write", "Write register-to-field of different widths is not supported yet.")

    #[ write_register(REGISTER_${register.name}, res32, register_value_${rc});
    rc = rc + 1
    return generated_code

# =============================================================================
# GENERATE_DIGEST

def generate_digest(fun, call):
    generated_code = ""

    ## TODO make this proper
    extracted_params = []
    for p in call[1]:
        if isinstance(p, int):
            extracted_params += ["0"] #[str(p)]
        elif isinstance(p, p4_field_list):
            field_list = p
            extracted_params += ["&fields"]
        else:
            addError("generating actions.c", "Unhandled parameter type in generate_digest: " + str(p))

    fun_params = ["bg"] + ["\"" + field_list.name + "\""] + extracted_params

    #[ struct type_field_list fields;

    quan = str(len(field_list.fields))
    #[ fields.fields_quantity = ${quan};
    #[ fields.field_offsets = malloc(sizeof(uint8_t*)*fields.fields_quantity);
    #[ fields.field_widths = malloc(sizeof(uint8_t*)*fields.fields_quantity);

    for i, field in enumerate(field_list.fields):
        j = str(i)
        if isinstance(field, p4_field):
            #[ fields.field_offsets[${j}] = (uint8_t*) field_desc(pd, ${fld_id(field)}).byte_addr;
            #[ fields.field_widths[${j}] = field_desc(pd, ${fld_id(field)}).bitwidth;
        else:
            addError("generating actions.c", "Unhandled parameter type in field_list: " + str(field_list.name) + ", " + str(field))

    params = ",".join(fun_params)

    #[
    #[ generate_digest(${params}); sleep(1);
    return generated_code

# =============================================================================
# DROP

def drop(fun, call):
    generated_code = ""
    #[ debug(" :: SETTING PACKET TO BE DROPPED\n");
    #[ pd->dropped=1;
    return generated_code

# =============================================================================
# RESUBMIT

def resubmit(fun, call):
    generated_code = ""
    #[ debug(" :: RESUBMITTING PACKET\n");
    #[ handle_packet(pd, tables);
    return generated_code

# =============================================================================
# NO_OP

def no_op(fun, call):
    return "no_op(); // no_op"

# =============================================================================
# PUSH

def push(fun, call):
    generated_code = ""
    args = call[1]
    i = args[0]
    #[ push(pd, header_stack_${i.base_name});
    return generated_code

# =============================================================================
# POP

def pop(fun, call):
    generated_code = ""
    args = call[1]
    i = args[0]
    #[ pop(pd, header_stack_${i.base_name});
    return generated_code

# =============================================================================

for fun in userActions(hlir):
    hasParam = fun.signature
    modifiers = ""
    ret_val_type = "void"
    name = fun.name
    params = ", struct action_%s_params parameters" % (name) if hasParam else ""

    #[ ${modifiers} ${ret_val_type} action_code_${name}(packet_descriptor_t* pd, lookup_table_t** tables ${params}) {
    #[ uint32_t value32, res32, mask32;
    #[ (void)value32; (void)res32; (void)mask32;
    for i, call in enumerate(fun.call_sequence):
        name = call[0].name

        # Generates a primitive action call to `name'
        if name in locals().keys():
            #[ ${locals()[name](fun, call)}
        else:
            addWarning("generating actions.c", "Unhandled primitive function: " + name)
def gen_format_expr(e, format_as_value=True, expand_parameters=False):
    simple_binary_ops = {'Div':'/', 'Mod':'%',                          #Binary arithmetic operators
                         'Grt':'>', 'Geq':'>=', 'Lss':'<', 'Leq':'<=',  #Binary comparison operators
                         'BAnd':'&', 'BOr':'|', 'BXor':'^',             #Bitwise operators
                         'LAnd':'&&', 'LOr':'||',                       #Boolean operators
                         'Equ':'==', 'Neq':'!='}                        #Equality operators

    complex_binary_ops = {'Add':'+', 'Sub':'-', 'Mul':'*', 'Shl':'<<', 'Shr':'>>'}

    if e is None:
        return "FORMAT_EXPR(None)"
    elif e.node_type == 'DefaultExpression':
        return ""
    elif e.node_type == 'Parameter':
        return format_type(e.type) + " " + e.name
    elif e.node_type == 'Constant':
        if e.type.node_type == 'Type_Bits':
            if e.type.size > 32:
                def split_text(text, n):
                    """Splits the text into chunks that are n characters long."""
                    return [text[i:i+n] for i in range(0, len(text), n)]

                byte_width = (e.type.size+7)/8
                const_str_format = '{:0' + str(2 * byte_width) + 'x}'
                const_str = const_str_format.format(e.value)
                array_const = ", ".join(["0x" + txt for txt in split_text(const_str, 2)])
                var_name = generate_var_name("const", "0x" + const_str)
                prepend_statement("uint8_t " + var_name + "[] = {" + array_const + "};\n")
                return var_name
            else:
                # 4294967136 versus (uint32_t)4294967136
                return "({}){}".format(format_type(e.type), print_with_base(e.value, e.base))
        else:
            return str(e.value)
    elif e.node_type == 'BoolLiteral':
        return 'true' if e.value else 'false'
    elif e.node_type == 'StringLiteral':
        return '"' + e.value + '"'
    elif e.node_type == 'TypeNameExpression':
        return format_expr(e.typeName.type_ref)
    elif e.node_type == 'Neg':
        if e.type.node_type == 'Type_Bits' and not e.type.isSigned:
            return '(' + format_type_mask(e.type) + '(' + str(2**e.type.size) + '-' + format_expr(e.expr) + '))'
        else:
            return '(-' + format_expr(e.expr) + ')'
    elif e.node_type == 'Cmpl':
        return '(' + format_type_mask(e.type) + '(~' + format_expr(e.expr) + '))'
    elif e.node_type == 'LNot':
        return '(!' + format_expr(e.expr) + ')'
    elif e.node_type in simple_binary_ops and e.node_type == 'Equ' and e.left.type.size > 32:
        return "0 == memcmp({}, {}, ({} + 7) / 8)".format(format_expr(e.left), format_expr(e.right), e.left.type.size)
    elif e.node_type in simple_binary_ops:
        return '(' + format_expr(e.left) + simple_binary_ops[e.node_type] + format_expr(e.right) + ')'
    #Subtraction on unsigned values is performed by adding the negation of the second operand
    elif e.node_type == 'Sub' and e.type.node_type == 'Type_Bits' and not e.type.isSigned:
        return '(' + format_type_mask(e.type) + '(' + format_expr(e.left) + '+(' + str(2**e.type.size) + '-' + format_expr(e.right) + ')))'
    #Right shift on signed values is performed with a shift width check
    elif e.node_type == 'Shr' and e.type.node_type == 'Type_Bits' and e.type.isSigned:
        return '(({1}>{2}) ? 0 : ({0} >> {1}))'.format(format_expr(e.left), format_expr(e.right), e.type.size)
    #These formatting rules MUST follow the previous special cases
    elif e.node_type in complex_binary_ops:
        temp_expr = '(' + format_expr(e.left) + complex_binary_ops[e.node_type] + format_expr(e.right) + ')'
        if e.type.node_type == 'Type_InfInt':
            return temp_expr
        elif e.type.node_type == 'Type_Bits':
            if not e.type.isSigned:
                return '(' + format_type_mask(e.type) + temp_expr + ')'
            else:
                if e.type.size in {8,16,32}:
                    return '((' + format_type(e.type) + ') ' + temp_expr + ')'
                else:
                    addError('formatting an expression', 'Expression of type %s is not supported on int<%s>. (Only int<8>, int<16> and int<32> are supported.)' % (e.node_type, e.type.size))
                    return ''
    elif e.node_type == 'Mux':
        return '(' + format_expr(e.e0) + '?' + format_expr(e.e1) + ':' + format_expr(e.e2) + ')'
    elif e.node_type == 'Slice':
        return '(' + format_type_mask(e.type) + '(' + format_expr(e.e0) + '>>' + format_expr(e.e2) + '))'
    elif e.node_type == 'Concat':
        return '((' + format_expr(e.left) + '<<' + str(e.right.type.size) + ') | ' + format_expr(e.right) + ')'
    elif e.node_type == 'Cast':
        if e.expr.type.node_type == 'Type_Bits' and not e.expr.type.isSigned and e.expr.type.size == 1 \
                and e.destType.node_type == 'Type_Boolean': #Cast from bit<1> to bool
            return '(' + format_expr(e.expr) + ')'
        elif e.expr.type.node_type == 'Type_Boolean' and e.destType.node_type == 'Type_Bits' and not e.destType.isSigned \
                and e.destType.size == 1: #Cast from bool to bit<1>
            return '(' + format_expr(e.expr) + '? 1 : 0)'
        elif e.expr.type.node_type == 'Type_Bits' and e.destType.node_type == 'Type_Bits':
            if e.expr.type.isSigned == e.destType.isSigned:
                if not e.expr.type.isSigned:                       #Cast from bit<w> to bit<v>
                    if e.expr.type.size > e.destType.size:
                        return '(' + format_type_mask(e.destType) + format_expr(e.expr) + ')'
                    else:
                        return format_expr(e.expr)
                else:                                              #Cast from int<w> to int<v>
                    return '((' + format_type(e.destType) + ') ' + format_expr(e.expr) + ')'
            elif e.expr.type.isSigned and not e.destType.isSigned: #Cast from int<w> to bit<w>
                return '(' + format_type_mask(e.destType) + format_expr(e.expr) + ')'
            elif not e.expr.type.isSigned and e.destType.isSigned: #Cast from bit<w> to int<w>
                if e.destType.size in {8,16,32}:
                    return '((' + format_type(e.destType) + ')' + format_expr(e.expr) + ')'
                else:
                    addError('formatting an expression', 'Cast from bit<%s> to int<%s> is not supported! (Only int<8>, int<16> and int<32> are supported.)' % (e.expr.type.size, e.destType.size))
                    return ''
        #Cast from int to bit<w> and int<w> are performed by P4C
        addError('formatting an expression', 'Cast from %s to %s is not supported!' % (pp_type_16(e.expr.type), pp_type_16(e.destType)))
        return ''
    elif e.node_type == 'ListExpression':
        if e.id not in generated_exprs:
            prepend_statement(listexpression_to_buf(e))
            generated_exprs.add(e.id)
        return '(struct uint8_buffer_s) {{ .buffer = buffer{}, .buffer_size = buffer{}_size }}'.format(e.id, e.id)
        # return 'buffer{}, buffer{}_size'.format(e.id, e.id)
    elif e.node_type == 'SelectExpression':
        #Generate local variables for select values
        for k in e.select.components:
            if k.type.node_type == 'Type_Bits' and k.type.size <= 32:
                prepend_statement('{} {} = {};'.format(format_type(k.type), gen_var_name(k), format_expr(k)))
            elif k.type.node_type == 'Type_Bits' and k.type.size % 8 == 0:
                prepend_statement('uint8_t {0}[{1}];\n EXTRACT_BYTEBUF_PACKET(pd, {2}, {0});'
                                  .format(gen_var_name(k), k.type.size/8, format_expr(k, False)))
            else:
                addError('formatting select expression', 'Select on type %s is not supported!' % pp_type_16(k.type))

        cases = []
        for case in e.selectCases:
            cases_tmp = case.keyset.components if case.keyset.node_type == 'ListExpression' else [case.keyset]
            conds = []
            for k, c in zip(e.select.components, cases_tmp):
                select_type = k.type.node_type
                size = k.type.size #if k.type.node_type == 'Type_Bits' else 0
                case_type = c.node_type

                if case_type == 'DefaultExpression':
                    conds.append('true /* default */')
                elif case_type == 'Constant' and select_type == 'Type_Bits' and 32 < size and size % 8 == 0:
                    byte_array = int_to_big_endian_byte_array_with_length(c.value, size/8)
                    prepend_statement('uint8_t {}[{}] = {};'.format(gen_var_name(c), size/8, byte_array))
                    conds.append('memcmp({}, {}, {}) == 0'.format(gen_var_name(k), gen_var_name(c), size/8))
                elif size <= 32:
                    if case_type == 'Range':
                        conds.append('{0} <= {1} && {1} <= {2}'.format(format_expr(c.left), gen_var_name(k), format_expr(c.right)))
                    elif case_type == 'Mask':
                        conds.append('{0} & {1} == {2} & {1}'.format(format_expr(c.left), format_expr(c.right), gen_var_name(k)))
                    else:
                        if case_type not in {'Constant'}: #Trusted expressions
                            addWarning('formatting a select case', 'Select statement cases of type %s on %s might not work properly.' % (case_type, pp_type_16(k.type)))
                        conds.append('{} == {}'.format(gen_var_name(k), format_expr(c)))
                else:
                    addError('formatting a select case', 'Select statement cases of type %s on %s is not supported!' % (case_type, pp_type_16(k.type)))

            cases.append('if({0}){{parser_state_{1}(pd, buf, tables, pstate);}}'.format(' && '.join(conds), format_expr(case.state)))

        return '\nelse\n'.join(cases)
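# Illustration only (not part of the generator): the unsigned 'Sub' rule above
# rewrites a-b on bit<w> as mask & (a + (2**w - b)). A standalone sketch of the
# same arithmetic:
def _unsigned_sub(a, b, width):
    mask = (1 << width) - 1
    return mask & (a + ((1 << width) - b))

# e.g. on bit<8>: 5 - 7 wraps around to 254, while 200 - 100 stays 100:
# _unsigned_sub(5, 7, 8) == 254; _unsigned_sub(200, 100, 8) == 100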
def modify_field(fun, call):
    generated_code = ""

    args = call[1]
    dst = args[0]
    src = args[1]

    mask = ''
    if len(args) == 3:
        (gc, m) = modify_field_mask(args[2])
        print "MASK: %s %s" % (fun.name, m)
        #[ ${gc}
        mask = ' & %s' % m

    if not isinstance(dst, p4_field):
        addError("generating modify_field", "We do not allow changing an R-REF yet")

    if isinstance(src, int):
        if not is_vwf(dst) and dst.width <= 32:
            #[ value32 = ${src}${mask};
            #[ ${ modify_int32_int32(dst) }
        else:
            if is_field_byte_aligned(dst):
                #[ ${ write_int_to_bytebuff(src, field_max_width(dst)/8) }
                if is_vwf(dst):
                    #[ MODIFY_BYTEBUF_BYTEBUF(pd, ${fld_id(dst)}, buffer_${buff-1}+(${field_max_width(dst)/8}-field_desc(pd, ${fld_id(dst)}).bytewidth), field_desc(pd, ${fld_id(dst)}).bytewidth)
                else:
                    #[ MODIFY_BYTEBUF_BYTEBUF(pd, ${fld_id(dst)}, buffer_${buff-1}, ${dst.width/8})
            else:
                if is_vwf(dst):
                    addError("generating modify_field", "Modifying non byte-wide variable width field '" + str(dst) + "' with int is not supported")
                else:
                    addError("generating modify_field", "Improper bytebufs cannot be modified yet.")
    elif isinstance(src, p4_field):
        if not is_vwf(dst) and not is_vwf(src) and dst.width <= 32 and src.width <= 32:
            if src.instance.metadata == dst.instance.metadata:
                #[ EXTRACT_INT32_BITS(pd, ${fld_id(src)}, value32)
                #[ MODIFY_INT32_INT32_BITS(pd, ${fld_id(dst)}, value32${mask})
            else:
                #[ ${ extract_int32(src, 'value32', mask) }
                #[ ${ modify_int32_int32(dst) }
        else:
            if is_field_byte_aligned(dst) and is_field_byte_aligned(src) and src.instance.metadata == dst.instance.metadata:
                if mask: # TODO: Mask handling is missing
                    addError("generating modify_field", "Using masks in modify_field on fields longer than 4 bytes is not supported")
                src_fd = "field_desc(pd, " + fld_id(src) + ")"
                dst_fd = "field_desc(pd, " + fld_id(dst) + ")"
                #[ if(${src_fd}.bytewidth < ${dst_fd}.bytewidth) {
                #[ MODIFY_BYTEBUF_BYTEBUF(pd, ${fld_id(dst)}, ${src_fd}.byte_addr, ${src_fd}.bytewidth);
                #[ } else {
                #[ MODIFY_BYTEBUF_BYTEBUF(pd, ${fld_id(dst)}, ${src_fd}.byte_addr + (${src_fd}.bytewidth - ${dst_fd}.bytewidth), ${dst_fd}.bytewidth);
                #[ }
            else:
                if is_vwf(dst):
                    addError("generating modify_field", "Modifying field '" + str(dst) + "' with field '" + str(src) + "' (one of which is a non byte-wide variable width field) is not supported")
                else:
                    addError("generating modify_field", "Improper bytebufs cannot be modified yet.")
    elif isinstance(src, p4_signature_ref):
        p = "parameters.%s" % str(fun.signature[src.idx])
        l = fun.signature_widths[src.idx]
        # TODO: Mask handling
        if not is_vwf(dst) and dst.width <= 32 and l <= 32:
            #[ MODIFY_INT32_BYTEBUF(pd, ${fld_id(dst)}, ${p}, ${(l+7)/8})
        else:
            if is_field_byte_aligned(dst) and l % 8 == 0: #and dst.instance.metadata:
                dst_fd = "field_desc(pd, " + fld_id(dst) + ")"
                #[ if(${l/8} < ${dst_fd}.bytewidth) {
                #[ MODIFY_BYTEBUF_BYTEBUF(pd, ${fld_id(dst)}, ${p}, ${l/8});
                #[ } else {
                #[ MODIFY_BYTEBUF_BYTEBUF(pd, ${fld_id(dst)}, ${p} + (${l/8} - ${dst_fd}.bytewidth), ${dst_fd}.bytewidth)
                #[ }
            else:
                if is_vwf(dst):
                    addError("generating modify_field", "Modifying non byte-wide variable width field '" + str(src) + "' with p4_signature_ref is not supported")
                else:
                    addError("generating modify_field", "Improper bytebufs cannot be modified yet.")

    return generated_code
def add_to_field(fun, call):
    generated_code = ""

    args = call[1]
    dst = args[0]
    val = args[1]

    if not isinstance(dst, p4_field):
        addError("generating add_to_field", "We do not allow changing an R-REF yet")

    if isinstance(val, int):
        #[ value32 = ${val};
        if not is_vwf(dst) and dst.width <= 32:
            #[ ${ extract_int32(dst, 'res32') }
            if (p4_header_keywords.saturating in dst.attributes):
                #[ ${ add_with_saturating('value32', 'res32', dst.width, (p4_header_keywords.signed in dst.attributes)) }
            else:
                #[ value32 += res32;
            #[ ${ modify_int32_int32(dst) }
        else:
            if is_vwf(dst):
                addError("generating add_to_field", "add_to_field on variable width field '" + str(dst) + "' is not supported yet")
            else:
                addError("generating add_to_field", "add_to_field on bytebufs (with int) is not supported yet (field: " + str(dst) + ")")
    elif isinstance(val, p4_field):
        if not is_vwf(val) and not is_vwf(dst) and dst.width <= 32 and val.width <= 32:
            #[ ${ extract_int32(val, 'value32') }
            #[ ${ extract_int32(dst, 'res32') }
            if (p4_header_keywords.saturating in dst.attributes):
                #[ ${ add_with_saturating('value32', 'res32', dst.width, (p4_header_keywords.signed in dst.attributes)) }
            else:
                #[ value32 += res32;
            #[ ${ modify_int32_int32(dst) }
        else:
            if is_vwf(val) or is_vwf(dst):
                addError("generating add_to_field", "add_to_field on field '" + str(dst) + "' with field '" + str(val) + "' is not supported yet. One of the fields is a variable width field!")
            else:
                addError("generating add_to_field", "add_to_field on/with bytebufs is not supported yet (fields: " + str(val) + ", " + str(dst) + ")")
    elif isinstance(val, p4_signature_ref):
        p = "parameters.%s" % str(fun.signature[val.idx])
        l = fun.signature_widths[val.idx]
        if not is_vwf(dst) and dst.width <= 32 and l <= 32:
            #[ ${ extract_int32(dst, 'res32') }
            #[ TODO
        else:
            if is_vwf(dst):
                addError("generating add_to_field", "add_to_field on variable width field '" + str(dst) + "' with p4_signature_ref is not supported yet")
            else:
                addError("generating add_to_field", "add_to_field on bytebufs (with p4_signature_ref) is not supported yet (field: " + str(dst) + ")")

    return generated_code
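# Illustration only: add_with_saturating is defined elsewhere; assuming it
# clamps the sum to the field's representable range, the unsigned case behaves
# like this standalone sketch (names here are hypothetical):
def _saturating_add_unsigned(a, b, width):
    limit = (1 << width) - 1
    s = a + b
    return limit if s > limit else s

# e.g. on an 8-bit saturating counter, 250 + 10 yields 255 instead of wrapping to 4:
# _saturating_add_unsigned(250, 10, 8) == 255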
def gen_extract_header_2(h, w):
    if not h.type.is_vw:
        addError("generating extract header call", "fixed-width header extracted with two-param extract")
    else:
        x = header_bit_width(h)
        w = format_expr(w)
    else:
        return 3

################################################################################
# Table key calculation

for table in hlir16.tables:
    if not hasattr(table, 'key'):
        continue

    #{ void table_${table.name}_key(packet_descriptor_t* pd, uint8_t* key) {
    sortedfields = sorted(table.key.keyElements, key=lambda k: match_type_order(k.match_type))
    #TODO variable length fields
    #TODO field masks
    for f in sortedfields:
        if f.get_attr('width') is None:
            addError('Computing key for table', 'the width attribute of field {} is missing'.format(f.name))
            continue

        hi_name = "all_metadatas" if f.header_name in ['meta', 'standard_metadata'] else f.header.name
        href = "header_instance_{}".format(hi_name)
        # fref = "field_{}_{}".format(f.header_name, f.field_name)
        fref = "field_{}_{}".format(f.header.type.type_ref.name, f.field_name)

        if f.width <= 32:
            #{ #ifdef T4P4S_DEBUG
            #{ if (unlikely(pd->headers[header_instance_${hi_name}].pointer == NULL)) {
            #[ debug(" " T4LIT(!!!!,error) " " T4LIT(Lookup on invalid header,error) " " T4LIT(${hi_name},header) "." T4LIT(${f.field_name},field) "\n");
            #} }
            #} #endif
            #[ EXTRACT_INT32_BITS_PACKET(pd, $href, $fref, *(uint32_t*)key)
            #[ key += sizeof(uint32_t);
#[ static inline void build_key_${state_name}(packet_descriptor_t *pd, uint8_t *buf, uint8_t *key) {
for switch_ref in branch_on:
    if type(switch_ref) is p4.p4_field:
        field_instance = switch_ref
        byte_width = (field_instance.width + 7) / 8
        if byte_width <= 4:
            #[ EXTRACT_INT32_BITS(pd, ${fld_id(field_instance)}, *(uint32_t*)key)
            #[ key += sizeof(uint32_t);
        else:
            #[ EXTRACT_BYTEBUF(pd, ${fld_id(field_instance)}, key)
            #[ key += ${byte_width};
    elif type(switch_ref) is tuple:
        #[ uint8_t* ptr;
        offset, width = switch_ref
        # TODO
        addError("generating parse state %s" % state_name, "current() calls are not supported yet")
#[ }

call_number = 0
for state_name, parse_state in hlir.p4_parse_states.items():
    #[ static void parse_state_${state_name}(packet_descriptor_t* pd, int batch_size, lookup_table_t** tables)
    #[ {
    for call in parse_state.call_sequence:
        if call[0] == p4.parse_call.extract:
            header_instance_name = hdr_prefix(call[1].name)

            #[ int total_keys = 0;
            #[ for(int i = 0; i < batch_size; i++) {
            #[ uint8_t* buf = (uint8_t*) pd[i].data;
            if call_number > 0:
                for state_name1, parse_state1 in hlir.p4_parse_states.items():
                    for call1 in parse_state1.call_sequence:
generated_code += " .size = " + str( table.max_size) + ",// sugar@45\n" else: generated_code += " .size = 1,// sugar@47\n" generated_code += " .min_width = " + str( 32 if counter.min_width is None else counter.min_width ) + ",// sugar@48\n" generated_code += " .saturating = " + str( 1 if counter.saturating else 0) + "// sugar@49\n" generated_code += " },// sugar@50\n" generated_code += " };// sugar@51\n" generated_code += " p4_register_t register_config[NB_REGISTERS] = {// sugar@53\n" for register in hlir.p4_registers.values(): if register.binding is not None: addWarning( "", "direct and static registers currently treated as plain registers, no optimization occurs" ) continue if register.layout is not None: addError("", "registers with custom layouts are not supported yet") continue generated_code += " {// sugar@61\n" generated_code += " .name= \"" + str(register.name) + "\",// sugar@62\n" generated_code += " .size = " + str( register.instance_count) + ",// sugar@63\n" generated_code += " .width = " + str( (register.width + 7) / 8) + ",// sugar@64\n" generated_code += " },// sugar@65\n" generated_code += " };// sugar@66\n"
def gen_format_type_mask(t):
    if t.node_type == 'Type_Bits' and not t.isSigned:
        mask = hex((2 ** t.size) - 1)
        #[ $mask&
    else:
        addError('formatting a type mask', 'Currently only bit<w> is supported!')
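# Example of the emitted mask (illustrative): for bit<12> the function yields
# "0xfff&", so callers can prefix an expression with it to truncate the result
# to the field's 12-bit width.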