def init_physical_flow_clause(options):
    """Declare the clauses that move packets between the physical world and
    the logical pipeline: ingress conversion, ARP feedback construction and
    the final register-driven output stage.

    options: feature-switch dict (unused here; kept for a uniform
    init_*_clause signature).
    """
    # handle tunnel port ingress traffic: a packet arriving on a remote
    # chassis' tunnel port gets its metadata extracted, the loopback flag
    # set, and is handed to the pipeline-forward table
    convert_phy_logical(Priority, Match, Action, State) <= (
        (Priority == 2) &
        remote_chassis(UUID_CHASSIS, PHY_CHASSIS, State) & (State != 0) &
        match.in_port(PHY_CHASSIS[PCH_OFPORT], Match) &
        action.resubmit_table(TABLE_EXTRACT_METADATA, Action1) &
        action.load(1, NXM_Reg(REG_FLAG_IDX, FLAG_LOOPBACK_BIT_IDX,
                               FLAG_LOOPBACK_BIT_IDX), Action2) &
        action.resubmit_table(TABLE_PIPELINE_FORWARD, Action3) &
        (Action == Action1 + Action2 + Action3))

    # handle regular port ingress traffic: stamp the source-port id and
    # datapath id registers, then continue to the next stage
    convert_phy_logical(Priority, Match, Action, State) <= (
        (Priority == 2) &
        local_bond_lsp(LSP, LS, State) & (State != 0) &
        match.in_port(LSP[LSP_OFPORT], Match) &
        action.load(LSP[LSP_PORTID], NXM_Reg(REG_SRC_IDX), Action1) &
        action.load(LS[LS_ID], NXM_Reg(REG_DP_IDX), Action2) &
        action.resubmit_next(Action3) &
        (Action == Action1 + Action2 + Action3))

    # intermediate predicate, it helps reduce time-cost:
    # (ip, mac) pairs taken from active logical switch ports
    _arp_ip_mac(IP, IP_INT, MAC, MAC_INT, LS, State) <= (
        active_lsp(LSP, LS, UUID_LS, State) & (State != 0) &
        (IP == LSP[LSP_IP]) & (IP_INT == LSP[LSP_IP_INT]) &
        (MAC == LSP[LSP_MAC]) & (MAC_INT == LSP[LSP_MAC_INT]))

    # (ip, mac) pairs taken from the NAT xlate addresses of a linked LR
    _arp_ip_mac(IP, IP_INT, MAC, MAC_INT, LS, State) <= (
        lnat_data(LNAT, LR, XLATE_TYPE, UUID_LR, State1) &
        lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                     UUID_LR_CHASSIS, State2) &
        (State == State1 + State2) & (State != 0) &
        (IP == LNAT[LNAT_XLATE_IP]) &
        (IP_INT == LNAT[LNAT_XLATE_IP_INT]) &
        (MAC == LNAT[LNAT_XLATE_MAC]) &
        (MAC_INT == LNAT[LNAT_XLATE_MAC_INT]))

    # regular lsp arp feedback: rewrite an ARP request (op 1) into an ARP
    # reply (op 2) in place - swap eth src/dst, fill in the answering MAC,
    # mirror sha/tha and spa/tpa - then loop the packet back to its sender
    arp_feedback_construct(LS, Priority, Match, Action, State) <= (
        (Priority == 0) &
        _arp_ip_mac(IP, IP_INT, MAC, MAC_INT, LS, State) &
        match.arp_proto(Match1) &
        match.arp_tpa(IP, Match2) &
        match.arp_op(1, Match3) &
        (Match == Match1 + Match2 + Match3) &
        action.load(1, NXM_Reg(REG_FLAG_IDX, FLAG_LOOPBACK_BIT_IDX,
                               FLAG_LOOPBACK_BIT_IDX), Action1) &
        action.move(NXM_Reg(ETH_SRC_IDX), NXM_Reg(ETH_DST_IDX), Action2) &
        action.mod_dl_src(MAC, Action3) &
        action.load(2, NXM_Reg(ARP_OP_IDX), Action4) &
        action.move(NXM_Reg(ARP_SHA_IDX), NXM_Reg(ARP_THA_IDX), Action5) &
        action.load(MAC_INT, NXM_Reg(ARP_SHA_IDX), Action6) &
        action.move(NXM_Reg(ARP_SPA_IDX), NXM_Reg(ARP_TPA_IDX), Action7) &
        action.load(IP_INT, NXM_Reg(ARP_SPA_IDX), Action8) &
        action.move(NXM_Reg(REG_SRC_IDX), NXM_Reg(REG_DST_IDX), Action9) &
        (Action == Action1 + Action2 + Action3 + Action4 + Action5 +
                   Action6 + Action7 + Action8 + Action9))

    # outport register holding st.TP_OFPORT_NONE means "no port": drop
    output_pkt_by_reg(Priority, Match, Action) <= (
        (Priority == 1) &
        match.reg_outport(st.TP_OFPORT_NONE, Match) &
        action.resubmit_table(TABLE_DROP_PACKET, Action))

    # default: emit the packet on the ofport stored in the outport register
    output_pkt_by_reg(Priority, Match, Action) <= (
        (Priority == 0) & match.match_none(Match) &
        action.output_reg(NXM_Reg(REG_OUTPORT_IDX), Action))
def init_lsp_ingress_clause(options):
    """Declare the logical-switch-port ingress clauses: ARP handling,
    optional untunnel delivery, destination-port lookup and the final
    output-stage dispatch.

    options: feature-switch dict; keys consulted here are 'GATEWAY',
    'ONDEMAND', 'ENABLE_UNTUNNEL', 'dsrport' and 'ENABLE_REDIRECT'.
    NOTE(review): dict.has_key is Python 2 only - this module appears to
    target Python 2.
    """
    if options.has_key('GATEWAY'):
        # push RARP to controller, only Edge node should consider
        # receiving rarp
        lsp_arp_controller(LS, Priority, Match, Action, State) <= (
            (Priority == 2) &
            ls_array(LS, UUID_LS, State) & (State != 0) &
            match.arp_proto(Match1) &
            match.arp_op(2, Match2) &
            (Match == Match1 + Match2) &
            action.upload_arp(Action)
        )

    if not options.has_key('ONDEMAND'):
        # maybe gratuitous ARP, push to controller.
        # maybe an unknown dst arp
        lsp_arp_controller(LS, Priority, Match, Action, State) <= (
            (Priority == 1) &
            ls_array(LS, UUID_LS, State) & (State != 0) &
            match.arp_proto(Match1) &
            match.arp_op(1, Match2) &
            (Match == Match1 + Match2) &
            action.upload_arp(Action1) &
            action.resubmit_next(Action2) &
            (Action == Action1 + Action2)
        )

    # default: non-ARP traffic just continues to the next stage
    lsp_arp_controller(LS, Priority, Match, Action, State) <= (
        (Priority == 0) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        (match.match_none(Match)) &
        action.resubmit_next(Action)
    )

    # ARP requests take a detour through the feedback-construct table
    # before continuing
    lsp_arp_response(LS, Priority, Match, Action, State) <= (
        (Priority == 2) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        match.arp_proto(Match1) &
        match.arp_op(1, Match2) &
        (Match == Match1 + Match2) &
        action.resubmit_table(TABLE_ARP_FEEDBACK_CONSTRUCT, Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2)
    )

    lsp_arp_response(LS, Priority, Match, Action, State) <= (
        (Priority == 0) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        (match.match_none(Match)) &
        action.resubmit_next(Action)
    )

    if options.has_key('ENABLE_UNTUNNEL') and options.has_key('dsrport'):
        # NOTE: intermediate predicate, it helps reduce time-cost
        _lsp_lrp_ls_changed(LS, LRP, State) <= (
            ls_array(LS, UUID_LS, State1) &
            lsp_link_lrp(LSP, LS1, UUID_LS1, LRP, LR, UUID_LR,
                         UUID_LR_CHASSIS, State2) &
            (State == State1 + State2) & (State != 0)
        )

        # traffic destined to a known LRP prefix stays in the pipeline
        lsp_untunnel_deliver(LS, Priority, Match, Action, State) <= (
            _lsp_lrp_ls_changed(LS, LRP, State) &
            (Priority == _cal_priority(LRP[LRP_PREFIX], 2,
                                       LRP[LRP_ILK_IDX])) &
            match.ip_proto(Match1) &
            match.ip_dst_prefix(LRP[LRP_IP], LRP[LRP_PREFIX], Match2) &
            (Match == Match1 + Match2) &
            action.resubmit_next(Action)
        )

        lsp_untunnel_deliver(LS, Priority, Match, Action, State) <= (
            (Priority == 1) &
            ls_array(LS, UUID_LS, State) & (State != 0) &
            match.ip_proto(Match) &
            # output packet to local port which is an internal port.
            # packet goes into tcpip stack
            action.mod_dl_dst(options['dsrport']['mac'], Action1) &
            action.output(options['dsrport']['ofport'], Action2) &
            (Action == Action1 + Action2)
        )

        lsp_untunnel_deliver(LS, Priority, Match, Action, State) <= (
            (Priority == 0) &
            ls_array(LS, UUID_LS, State) & (State != 0) &
            (match.match_none(Match)) &
            action.resubmit_next(Action)
        )

    # deliver to LR which has snat/dnat
    lsp_lookup_dst_port(LS, Priority, Match, Action, State) <= (
        (Priority == 5) &
        # TODO optimize it
        lnat_data(LNAT, LR, XLATE_TYPE, UUID_LR, State1) &
        lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                     UUID_LR_CHASSIS, State2) &
        (State == State1 + State2) & (State != 0) &
        match.eth_dst(LNAT[LNAT_XLATE_MAC], Match) &
        action.load(LSP[LSP_PORTID], NXM_Reg(REG_DST_IDX), Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2)
    )

    # deliver to another lsp on local chassis
    lsp_lookup_dst_port(LS, Priority, Match, Action, State) <= (
        (Priority == 4) &
        local_lsp(LSP, LS, State) & (State != 0) &
        match.eth_dst(LSP[LSP_MAC], Match) &
        action.load(LSP[LSP_PORTID], NXM_Reg(REG_DST_IDX), Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2)
    )

    # intermediate predicate, it helps reduce time-cost
    _lsp_remote_lsp_changed(LSP, LS, PHY_CHASSIS, State) <= (
        remote_lsp(LSP, LS, PHY_CHASSIS, State) & (State != 0))

    if options.has_key('ENABLE_REDIRECT'):
        # output deliver to another remote chassis.
        # use bundle_load to check if dst chassis is dead or live.
        lsp_lookup_dst_port(LS, Priority, Match, Action, State) <= (
            (Priority == 3) &
            _lsp_remote_lsp_changed(LSP, LS, PHY_CHASSIS, State) &
            match.eth_dst(LSP[LSP_MAC], Match) &
            action.load(LSP[LSP_PORTID], NXM_Reg(REG_DST_IDX), Action1) &
            action.bundle_load(NXM_Reg(REG_OUTPORT_IDX),
                               [PHY_CHASSIS[PCH_OFPORT]], Action2) &
            # if we want output this packet in next step, we set 1->reg5
            # in next step flow, no need to clean this reg5, because
            # it should output a port means the end of packet process
            action.load(1, NXM_Reg(REG5_IDX), Action3) &
            action.resubmit_next(Action4) &
            (Action == Action1 + Action2 + Action3 + Action4)
        )
    else:
        # deliver to remote chassis by using output,(set outport to reg4)
        lsp_lookup_dst_port(LS, Priority, Match, Action, State) <= (
            (Priority == 3) &
            _lsp_remote_lsp_changed(LSP, LS, PHY_CHASSIS, State) &
            match.eth_dst(LSP[LSP_MAC], Match) &
            action.load(LSP[LSP_PORTID], NXM_Reg(REG_DST_IDX), Action1) &
            action.load(PHY_CHASSIS[PCH_OFPORT],
                        NXM_Reg(REG_OUTPORT_IDX), Action2) &
            # if we want output this packet in next step, we set 1->reg5
            # in next step flow, no need to clean this reg5, because
            # it should output a port means the end of packet process
            action.load(1, NXM_Reg(REG5_IDX), Action3) &
            action.resubmit_next(Action4) &
            (Action == Action1 + Action2 + Action3 + Action4)
        )

    # deliver the packet which not match above flow to the patchport
    # patch port's ip address should be 255.255.255.255
    lsp_lookup_dst_port(LS, Priority, Match, Action, State) <= (
        (Priority == 2) &
        local_patchport(LSP, LS, State) & (State != 0) &
        match.match_none(Match) &
        action.load(LSP[LSP_PORTID], NXM_Reg(REG_DST_IDX), Action1) &
        action.resubmit_table(TABLE_LSP_EGRESS_FIRST, Action2) &
        (Action == Action1 + Action2)
    )

    if options.has_key('ONDEMAND'):
        # ovs must upload this packet to controller if cannot found the
        # destination. controller will tell tuplenet to generate more flows
        lsp_lookup_dst_port(LS, Priority, Match, Action, State) <= (
            (Priority == 0) &
            ls_array(LS, UUID_LS, State) & (State != 0) &
            match.match_none(Match) &
            action.upload_unknow_dst(Action1) &
            # resubmit this packet to next stage, gateway host can
            # do delivering if gateway enable redirect feature
            action.load(st.TP_OFPORT_NONE,
                        NXM_Reg(REG_OUTPORT_IDX), Action2) &
            action.load(1, NXM_Reg(REG5_IDX), Action3) &
            action.resubmit_next(Action4) &
            (Action == Action1 + Action2 + Action3 + Action4)
        )
    else:
        # deliver packet to drop table if this packet cannot
        # found the destination.
        lsp_lookup_dst_port(LS, Priority, Match, Action, State) <= (
            (Priority == 0) &
            ls_array(LS, UUID_LS, State) & (State != 0) &
            match.match_none(Match) &
            action.resubmit_table(TABLE_DROP_PACKET, Action)
        )

    if options.has_key('ENABLE_REDIRECT'):
        # if it is a redirected packet and reg4 is 0xffff, then we should
        # drop it, because we don't want cause infinite loop
        lsp_output_dst_port(LS, Priority, Match, Action, State) <= (
            (Priority == 4) &
            ls_array(LS, UUID_LS, State) & (State != 0) &
            match.reg_5(1, Match1) &
            match.reg_flag(FLAG_REDIRECT, Match2) &
            match.reg_outport(st.TP_OFPORT_NONE, Match3) &
            (Match == Match1 + Match2 + Match3) &
            action.resubmit_table(TABLE_DROP_PACKET, Action)
        )

        # if this packet was failed to deliver to remote chassis, we send
        # it to other gateway to help forwarding
        lsp_output_dst_port(LS, Priority, Match, Action, State) <= (
            (Priority == 3) &
            ls_array(LS, UUID_LS, State) & (State != 0) &
            match.reg_5(1, Match1) &
            match.reg_outport(st.TP_OFPORT_NONE, Match2) &
            (Match == Match1 + Match2) &
            action.resubmit_table(TABLE_REDIRECT_CHASSIS, Action)
        )

    # output to a port base on reg4's value
    lsp_output_dst_port(LS, Priority, Match, Action, State) <= (
        (Priority == 2) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        match.reg_5(1, Match) &
        action.resubmit_table(TABLE_EMBED2_METADATA, Action1) &
        action.resubmit_table(TABLE_OUTPUT_PKT, Action2) &
        (Action == Action1 + Action2)
    )

    # just deliver to next stage
    lsp_output_dst_port(LS, Priority, Match, Action, State) <= (
        (Priority == 1) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        match.match_none(Match) &
        action.resubmit_table(TABLE_LSP_EGRESS_FIRST, Action)
    )
def init_lrp_egress_clause(options):
    """Declare the logical-router-port egress clauses: resolving eth_dst,
    NAT stages, handling packets with an unknown destination MAC, and the
    final forward into the peer logical switch.

    options: feature-switch dict; keys consulted here are 'ONDEMAND' and
    'ENABLE_REDIRECT'.
    """
    # figure out all linked lsp on a LS which has a connection with this LRP
    opposite_side_changed_lsp(LR, LRP, LSP, State) <= (
        lsp_link_lrp(LSP1, LS, UUID_LS, LRP, LR, UUID_LR,
                     UUID_LR_CHASSIS, State1) &
        exchange_lsp_array(UUID_LSP, LSP, UUID_LS,
                           UUID_CHASSIS, UUID_LRP1, State2) &
        (State == State1 + State2) & (State != 0))

    # figure out all regular lsp
    opposite_side_changed_lsp(LR, LRP, LSP, State) <= (
        lrp_array(UUID_LRP, LRP, UUID_LR, UUID_LSP1, State1) &
        exchange_lsp_array(UUID_LSP1, LSP1, UUID_LS,
                           UUID_CHASSIS1, UUID_LRP, State2) &
        ls_array(LS, UUID_LS, State3) &
        lr_array(LR, UUID_LR, State4) &
        lsp_array(UUID_LSP, LSP, UUID_LS, UUID_CHASSIS2,
                  UUID_LRP2, State5) &
        (UUID_CHASSIS2 != None) &
        (State == State1 + State2 + State3 + State4 + State5) &
        (State != 0))

    opposite_side_has_patch_port(LR, LRP, State) <= (
        local_patchport(LSP, LS, State1) &
        lsp_link_lrp(LSP1, LS, UUID_LS, LRP, LR, UUID_LR,
                     UUID_LR_CHASSIS, State2) &
        # NOTE only consider local_patchport, it means a gateway's
        # opposite LS has remote patchport cannot trigger this flow
        (State == State1 + State2))

    # update eth_dst by searching active lsp
    lrp_update_eth_dst(LR, Priority, Match, Action, State) <= (
        (Priority == 3) &
        opposite_side_changed_lsp(LR, LRP, LSP, State) &
        match.ip_proto(Match1) &
        # we have to match the lrp portID, because in ecmp,
        # two ports may have same dst IP but different dst mac
        match.reg_dst(LRP[LRP_PORTID], Match2) &
        match.reg_2(LSP[LSP_IP_INT], Match3) &
        (Match == Match1 + Match2 + Match3) &
        action.load(LSP[LSP_MAC_INT], NXM_Reg(ETH_DST_IDX), Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2))

    # push packet to table TABLE_SEARCH_IP_MAC to search unknown
    # mac,ip pair; eth_dst is zeroed first as the "unresolved" marker
    lrp_update_eth_dst(LR, Priority, Match, Action, State) <= (
        (Priority == 2) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.match_none(Match) &
        action.mod_dl_dst("00:00:00:00:00:00", Action1) &
        action.resubmit_table(TABLE_SEARCH_IP_MAC, Action2) &
        action.resubmit_next(Action3) &
        (Action == Action1 + Action2 + Action3))

    # NAT stages are delegated to the nat module
    lrp_ip_undnat_stage1(LR, Priority, Match, Action, State) <= (
        nat.lundnat_xlate_stage1(LR, Priority, Match, Action, State))
    lrp_ip_undnat_stage2(LR, Priority, Match, Action, State) <= (
        nat.lundnat_xlate_stage2(LR, Priority, Match, Action, State))
    lrp_ip_snat_stage1(LR, Priority, Match, Action, State) <= (
        nat.lsnat_xlate_stage1(LR, Priority, Match, Action, State))
    lrp_ip_snat_stage2(LR, Priority, Match, Action, State) <= (
        nat.lsnat_xlate_stage2(LR, Priority, Match, Action, State))

    # ovs should drop it if the packet's dst_mac = 00:00:00:00:00:00 and
    # it is a redirect packet. This flow avoids infinite loop.
    lrp_handle_unknow_dst_pkt(LR, Priority, Match, Action, State) <= (
        (Priority == 4) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.reg_flag(FLAG_REDIRECT, Match1) &
        match.eth_dst("00:00:00:00:00:00", Match2) &
        (Match == Match1 + Match2) &
        action.resubmit_table(TABLE_DROP_PACKET, Action))

    # ask controller to generate arp, if we cannot found the ip,mac pair.
    # If opposite LS has patch-port will create this flow
    lrp_handle_unknow_dst_pkt(LR, Priority, Match, Action, State) <= (
        (Priority == 3) &
        # opposite LS must has patchport
        opposite_side_has_patch_port(LR, LRP, State) & (State != 0) &
        match.ip_proto(Match1) &
        match.eth_dst("00:00:00:00:00:00", Match2) &
        match.reg_dst(LRP[LRP_PORTID], Match3) &
        (Match == Match1 + Match2 + Match3) &
        # reg2 and reg3 were transfered to pkt_controller as well
        action.generate_arp(TABLE_LRP_EGRESS_FORWARD_PACKET, Action1) &
        action.resubmit_table(TABLE_DROP_PACKET, Action2) &
        (Action == Action1 + Action2))

    # upload packet to controller, if this packet cannot trigger generating
    # arp and didn't know the destination's macaddress. controller will
    # ask tuplenet to generate it.
    if options.has_key('ONDEMAND'):
        if options.has_key('ENABLE_REDIRECT'):
            # A regular tuplenet node(with ondemand) may not know where
            # dst lsp is, so it uploads packet to controller and redirects
            # pkt to an edge node.
            lrp_handle_unknow_dst_pkt(LR, Priority, Match,
                                      Action, State) <= (
                (Priority == 2) &
                lr_array(LR, UUID_LR, State) & (State != 0) &
                match.ip_proto(Match1) &
                # set macaddress to 0, then other host know this packet
                # should be threw to LR pipline
                match.eth_dst("00:00:00:00:00:00", Match2) &
                (Match == Match1 + Match2) &
                action.load(1, NXM_Reg(REG_FLAG_IDX,
                                       FLAG_REDIRECT_BIT_IDX,
                                       FLAG_REDIRECT_BIT_IDX), Action1) &
                action.upload_unknow_dst(Action2) &
                action.resubmit_table(TABLE_EMBED2_METADATA, Action3) &
                action.resubmit_table(TABLE_REDIRECT_CHASSIS, Action4) &
                (Action == Action1 + Action2 + Action3 + Action4))
        else:
            lrp_handle_unknow_dst_pkt(LR, Priority, Match,
                                      Action, State) <= (
                (Priority == 2) &
                lr_array(LR, UUID_LR, State) & (State != 0) &
                match.ip_proto(Match1) &
                match.eth_dst("00:00:00:00:00:00", Match2) &
                (Match == Match1 + Match2) &
                action.upload_unknow_dst(Action))
    else:
        if options.has_key('ENABLE_REDIRECT'):
            # An edge node(with ondemand disable) should know where is
            # dst, but tuplenet instance may down so ovs-flow doesn't know
            # the new dst (a lsp may be create while tuplenet is down,
            # ovs-flow not updated). This ovs-flow should redirect this
            # packet to other edge node as well, BUT NOT upload to
            # controller
            lrp_handle_unknow_dst_pkt(LR, Priority, Match,
                                      Action, State) <= (
                (Priority == 2) &
                lr_array(LR, UUID_LR, State) & (State != 0) &
                match.ip_proto(Match1) &
                # set macaddress to 0, then other host know this packet
                # should be threw to LR pipline
                match.eth_dst("00:00:00:00:00:00", Match2) &
                (Match == Match1 + Match2) &
                action.load(1, NXM_Reg(REG_FLAG_IDX,
                                       FLAG_REDIRECT_BIT_IDX,
                                       FLAG_REDIRECT_BIT_IDX), Action1) &
                action.resubmit_table(TABLE_EMBED2_METADATA, Action2) &
                action.resubmit_table(TABLE_REDIRECT_CHASSIS, Action3) &
                (Action == Action1 + Action2 + Action3))

    # still-unresolved destination MAC: drop
    lrp_handle_unknow_dst_pkt(LR, Priority, Match, Action, State) <= (
        (Priority == 1) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.eth_dst("00:00:00:00:00:00", Match) &
        action.resubmit_table(TABLE_DROP_PACKET, Action))

    # resolved destination: continue to the next stage
    lrp_handle_unknow_dst_pkt(LR, Priority, Match, Action, State) <= (
        (Priority == 0) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.match_none(Match) &
        action.resubmit_next(Action))

    # hand the packet over to the peer logical switch: load its datapath
    # id and the linking lsp as the new source port
    lrp_forward_packet(LR, Priority, Match, Action, State) <= (
        (Priority == 3) &
        lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                     UUID_LR_CHASSIS, State) & (State != 0) &
        match.reg_dst(LRP[LRP_PORTID], Match) &
        action.load(LS[LS_ID], NXM_Reg(REG_DP_IDX), Action1) &
        action.load(LSP[LSP_PORTID], NXM_Reg(REG_SRC_IDX), Action2) &
        action.resubmit_next(Action3) &
        (Action == Action1 + Action2 + Action3))
def init_ecmp_clause(options):
    """Declare ECMP clauses: aggregate the bundle slave ofports for routes
    sharing the same (ip, prefix, next_hop), emit bundle_load-based static
    route flows, and the per-slave judge/BFD helper clauses.

    options: feature-switch dict; only 'GATEWAY' is consulted here.
    """
    # for adding
    # NOTE: this clause is consumed by ecmp_static_route.
    # There is NO circumstances that ecmp_static_route's LR's state is not
    # zero, but ecmp_aggregate_outport's state is zero. Because
    # next_hop_ovsport need LR's state.
    # If you try to update ecmp_static_route, we should consider it!
    (ecmp_aggregate_outport[X] == tuple_(Y, order_by=Z)) <= (
        lroute_array(Route1, UUID_LR, State1) &
        lroute_array(Route2, UUID_LR, State2) &
        (State1 + State2 >= 0) &
        (Route1[LSR_UUID] != Route2[LSR_UUID]) &
        (Route1[LSR_IP] == Route2[LSR_IP]) &
        (Route1[LSR_PREFIX] == Route2[LSR_PREFIX]) &
        (Route1[LSR_NEXT_HOP] == Route2[LSR_NEXT_HOP]) &
        (Route1[LSR_OUTPORT] != Route2[LSR_OUTPORT]) &
        next_hop_ovsport(Route1[LSR_OUTPORT], OFPORT1, State3) &
        next_hop_ovsport(Route2[LSR_OUTPORT], OFPORT2, State4) &
        (State1 + State2 + State3 + State4 > 0) &
        (X == (UUID_LR, Route1[LSR_IP], Route1[LSR_PREFIX],
               State_ADD, 'adding')) &
        (Y == OFPORT1) & (Z == Route1[LSR_UUID]))

    # for readding slave port, deletion delete the whole flow,
    # but some ports should stay in bundle slave as well,
    # we should add those ports back
    (ecmp_aggregate_outport_readd[X] == tuple_(Y, order_by=Z)) <= (
        lroute_array(Route1, UUID_LR, State1) &
        lroute_array(Route2, UUID_LR, State2) &
        (State_COMBIND1 == State1 + State2) & (State_COMBIND1 >= 0) &
        (Route1[LSR_UUID] != Route2[LSR_UUID]) &
        (Route1[LSR_IP] == Route2[LSR_IP]) &
        (Route1[LSR_PREFIX] == Route2[LSR_PREFIX]) &
        (Route1[LSR_NEXT_HOP] == Route2[LSR_NEXT_HOP]) &
        (Route1[LSR_OUTPORT] != Route2[LSR_OUTPORT]) &
        next_hop_ovsport(Route1[LSR_OUTPORT], OFPORT1, State3) &
        next_hop_ovsport(Route2[LSR_OUTPORT], OFPORT2, State4) &
        (State_COMBIND2 == State1 + State2 + State3 + State4) &
        (State_COMBIND2 >= 0) &
        # only readd when a matching 'deleting' aggregate exists
        (ecmp_aggregate_outport[A] == B) &
        (A[0] == UUID_LR) &
        (A[1] == Route1[LSR_IP]) &
        (A[2] == Route1[LSR_PREFIX]) &
        (A[4] == 'deleting') &
        (X == (UUID_LR, Route1[LSR_IP], Route1[LSR_PREFIX],
               State_ADD, 'readding')) &
        (Y == OFPORT1) & (Z == Route1[LSR_UUID]))

    # for deleting
    (ecmp_aggregate_outport[X] == tuple_(Y, order_by=Y)) <= (
        lroute_array(Route1, UUID_LR, State1) &
        lroute_array(Route2, UUID_LR, State2) &
        (Route1[LSR_UUID] != Route2[LSR_UUID]) &
        (Route1[LSR_LR_UUID] == Route2[LSR_LR_UUID]) &
        (Route1[LSR_IP] == Route2[LSR_IP]) &
        (Route1[LSR_PREFIX] == Route2[LSR_PREFIX]) &
        (Route1[LSR_NEXT_HOP] == Route2[LSR_NEXT_HOP]) &
        (Route1[LSR_OUTPORT] != Route2[LSR_OUTPORT]) &
        next_hop_ovsport(Route1[LSR_OUTPORT], OFPORT, State3) &
        (State1 + State2 + State3 < 0) &
        (X == (Route1[LSR_LR_UUID], Route1[LSR_IP], Route1[LSR_PREFIX],
               State_DEL, 'deleting')) &
        (Y == OFPORT))

    # ECMP static route: bundle_load picks one slave ofport into reg4;
    # X is (lr_uuid, ip, prefix, state, tag), Y the aggregated ofports
    ecmp_static_route(LR, Priority, Match, Action, State) <= (
        lr_array(LR, UUID_LR, State1) &
        (ecmp_aggregate_outport[X] == Y) &
        (State == State1 + X[3]) & (State != 0) &
        (X[0] == UUID_LR) &
        (Priority == X[2] * 3 + 2) &
        match.ip_proto(Match1) &
        match.ip_dst_prefix(X[1], X[2], Match2) &
        (Match == Match1 + Match2) &
        action.bundle_load(NXM_Reg(REG4_IDX), Y, Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2))

    # same flow built from the readd aggregate
    ecmp_static_route(LR, Priority, Match, Action, State) <= (
        lr_array(LR, UUID_LR, State1) &
        (ecmp_aggregate_outport_readd[X] == Y) &
        (State == State1 + X[3]) & (State != 0) &
        (X[0] == UUID_LR) &
        (Priority == X[2] * 3 + 2) &
        match.ip_proto(Match1) &
        match.ip_dst_prefix(X[1], X[2], Match2) &
        (Match == Match1 + Match2) &
        action.bundle_load(NXM_Reg(REG4_IDX), Y, Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2))

    # gateway chassis no need to consider ecmp
    if not options.has_key('GATEWAY'):
        # after hitting bundle_load action, flows should be add to forward
        # packet to different port base on value of reg4
        ecmp_static_route_judge(LR, Priority, Match, Action, State) <= (
            lroute_array(Route, UUID_LR, State1) &
            next_hop_ovsport(Route[LSR_OUTPORT], OFPORT, State2) &
            lr_array(LR, UUID_LR, State3) &
            lrp_array(Route[LSR_OUTPORT], LRP, UUID_LR,
                      UUID_LSP, State4) &
            (State == State1 + State2 + State3 + State4) & (State != 0) &
            (Priority == Route[LSR_PREFIX] * 3 + 2) &
            match.reg_4(OFPORT, Match1) &
            match.ip_proto(Match2) &
            match.ip_dst_prefix(Route[LSR_IP], Route[LSR_PREFIX],
                                Match3) &
            (Match == Match1 + Match2 + Match3) &
            action.load(LRP[LRP_PORTID], NXM_Reg(REG_DST_IDX), Action1) &
            action.load(LRP[LRP_MAC_INT], NXM_Reg(ETH_SRC_IDX),
                        Action2) &
            action.load(Route[LSR_NEXT_HOP_INT], NXM_Reg(REG2_IDX),
                        Action3) &
            action.load(LRP[LRP_IP_INT], NXM_Reg(REG3_IDX), Action4) &
            action.resubmit_next(Action5) &
            (Action == Action1 + Action2 + Action3 + Action4 + Action5))

        # drop packets if all bundle slave ports are not in 'up' status
        # TODO if we should ignore failure and deliver packet to
        # one of output ports
        ecmp_static_route_judge(LR, Priority, Match, Action, State) <= (
            lr_array(LR, UUID_LR, State) & (State != 0) &
            (Priority == 1) &
            match.reg_4(0xffff, Match) &
            action.resubmit_table(TABLE_DROP_PACKET, Action))

        # resubmit next stage without hitting any flows above
        ecmp_static_route_judge(LR, Priority, Match, Action, State) <= (
            lr_array(LR, UUID_LR, State) & (State != 0) &
            (Priority == 0) &
            match.match_none(Match) &
            action.resubmit_next(Action))

        # ovsports that BFD should monitor for ecmp liveness
        ecmp_bfd_port(PORT_NAME, State) <= (
            lroute_array(Route, UUID_LR, State1) &
            next_hop_ovsport(Route[LSR_OUTPORT], OFPORT, State2) &
            # we only enable/disable ovsport that exist
            ovsport_chassis(PORT_NAME, UUID_CHASSIS, OFPORT, State3) &
            (State3 >= 0) &
            chassis_array(PHY_CHASSIS, UUID_CHASSIS, State4) &
            (State == State1 + State2 + State3 + State4))
import match from reg import * from logicalview import * pyDatalog.create_terms('Table, Priority, Priority1, Match, Action') pyDatalog.create_terms('Action1, Action2, Action3, Action4, Action5') pyDatalog.create_terms('Action6, Action7, Action8, Action9, Action10') pyDatalog.create_terms('Match1, Match2, Match3, Match4, Match5') pyDatalog.create_terms('trace_pipeline_start, trace_pipeline_end') pyDatalog.create_terms('trace_pipeline_module') # trace_pipeline_start will be inserted into each LS/LR's # first ingress/egress stage trace_pipeline_start(Priority, Match, Action) <= ( (Priority == 100) & match.reg_flag(FLAG_TRACE, Match) & action.upload_trace(Action1) & action.resubmit_next(Action2) & (Action == Action1 + Action2)) # default flow to resubmit to next table trace_pipeline_start(Priority, Match, Action) <= ( (Priority == 0) & match.match_none(Match) & action.resubmit_next(Action)) # trace_pipeline_end will be inserted into each LS/LR's # last ingress/egress stage # the caller will add resumbit action trace_pipeline_end(Priority, Match, Action) <= ( (Priority == 100) & match.reg_flag(FLAG_TRACE, Match) & action.upload_trace(Action)) # default flow, the caller will add resumbit action trace_pipeline_end(Priority, Match, Action) <= (
from pyDatalog import pyDatalog import action import match from reg import * from logicalview import * pyDatalog.create_terms('trace_pipeline_start, trace_pipeline_end') pyDatalog.create_terms('trace_pipeline_module') # trace_pipeline_start will be inserted into each LS/LR's # first ingress/egress stage trace_pipeline_start(Priority, Match, Action) <= ( (Priority == 100) & match.reg_flag(FLAG_TRACE, Match) & action.upload_trace(Action1) & action.resubmit_next(Action2) & (Action == Action1 + Action2) ) # default flow to resubmit to next table trace_pipeline_start(Priority, Match, Action) <= ( (Priority == 0) & match.match_none(Match) & action.resubmit_next(Action) ) # trace_pipeline_end will be inserted into each LS/LR's # last ingress/egress stage # the caller will add resumbit action trace_pipeline_end(Priority, Match, Action) <= ( (Priority == 100) &
def init_lrp_ingress_clause(options):
    """Declare the logical-router-port ingress clauses: ICMP echo reply,
    TTL policing, NAT stages and automatic/static/ecmp IP routing.

    options: feature-switch dict; 'GATEWAY' is consulted here and the dict
    is forwarded to init_ecmp_clause.
    """
    init_ecmp_clause(options)

    # response ICMP packet if receiving ICMP request: swap src/dst IP,
    # reset TTL, turn echo-request (type 8) into echo-reply (type 0) and
    # loop the packet back to the sender
    lrp_pkt_response(LR, Priority, Match, Action, State) <= (
        (Priority == 3) &
        lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                     UUID_LR_CHASSIS, State) & (State != 0) &
        match.icmp_proto(Match1) &
        match.icmp_type(8, Match2) &
        match.icmp_code(0, Match3) &
        match.ip_dst(LRP[LRP_IP], Match4) &
        (Match == Match1 + Match2 + Match3 + Match4) &
        action.exchange(NXM_Reg(IP_SRC_IDX), NXM_Reg(IP_DST_IDX),
                        Action1) &
        action.load(0xff, NXM_Reg(IP_TTL_IDX), Action2) &
        action.load(0, NXM_Reg(ICMP_TYPE_IDX), Action3) &
        action.move(NXM_Reg(REG_SRC_IDX), NXM_Reg(REG_DST_IDX),
                    Action4) &
        action.load(1, NXM_Reg(REG_FLAG_IDX, FLAG_LOOPBACK_BIT_IDX,
                               FLAG_LOOPBACK_BIT_IDX), Action5) &
        action.resubmit_next(Action6) &
        (Action == Action1 + Action2 + Action3 + Action4 +
                   Action5 + Action6)
    )

    # other IP traffic just continues
    lrp_pkt_response(LR, Priority, Match, Action, State) <= (
        (Priority == 0) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.ip_proto(Match) &
        action.resubmit_next(Action))

    # drop packets whose TTL is about to expire
    lrp_drop_unexpect(LR, Priority, Match, Action, State) <= (
        (Priority == 2) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.ip_proto(Match1) &
        match.ip_ttl(1, Match2) &
        (Match == Match1 + Match2) &
        action.resubmit_table(TABLE_DROP_PACKET, Action)
    )

    # otherwise decrement TTL and continue
    lrp_drop_unexpect(LR, Priority, Match, Action, State) <= (
        (Priority == 0) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.ip_proto(Match) &
        action.dec_ttl(Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2)
    )

    # NAT stages are delegated to the nat module
    lrp_ip_unsnat_stage1(LR, Priority, Match, Action, State) <= (
        nat.lunsnat_xlate_stage1(LR, Priority, Match, Action, State))
    lrp_ip_unsnat_stage2(LR, Priority, Match, Action, State) <= (
        nat.lunsnat_xlate_stage2(LR, Priority, Match, Action, State))
    lrp_ip_dnat_stage1(LR, Priority, Match, Action, State) <= (
        nat.ldnat_xlate_stage1(LR, Priority, Match, Action, State))
    lrp_ip_dnat_stage2(LR, Priority, Match, Action, State) <= (
        nat.ldnat_xlate_stage2(LR, Priority, Match, Action, State))

    # automatic route: directly-connected LRP prefixes
    lrp_ip_route(LR, Priority, Match, Action, State) <= (
        lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                     UUID_LR_CHASSIS, State) & (State != 0) &
        (Priority == LRP[LRP_PREFIX] * 3) &
        match.ip_proto(Match1) &
        match.ip_dst_prefix(LRP[LRP_IP], LRP[LRP_PREFIX], Match2) &
        (Match == Match1 + Match2) &
        action.load(LRP[LRP_PORTID], NXM_Reg(REG_DST_IDX), Action1) &
        action.load(LRP[LRP_MAC_INT], NXM_Reg(ETH_SRC_IDX), Action2) &
        action.move(NXM_Reg(IP_DST_IDX), NXM_Reg(REG2_IDX), Action3) &
        # lrp_handle_unknow_dst_pkt may use it to modify IP to
        # construct right arp request
        action.load(LRP[LRP_IP_INT], NXM_Reg(REG3_IDX), Action4) &
        action.resubmit_next(Action5) &
        (Action == Action1 + Action2 + Action3 + Action4 + Action5)
    )

    # static route: configured routes out of a matching LRP
    lrp_ip_route(LR, Priority, Match, Action, State) <= (
        lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                     UUID_LR_CHASSIS, State1) &
        (lroute_array(Route, UUID_LR, State2)) &
        (State == State1 + State2) & (State != 0) &
        (UUID_LR == LR[LR_UUID]) &
        # only match the first outport
        (LRP[LRP_UUID] == Route[LSR_OUTPORT]) &
        (Priority == 1 + Route[LSR_PREFIX] * 3) &
        match.ip_proto(Match1) &
        match.ip_dst_prefix(Route[LSR_IP], Route[LSR_PREFIX], Match2) &
        (Match == Match1 + Match2) &
        action.load(LRP[LRP_PORTID], NXM_Reg(REG_DST_IDX), Action1) &
        action.load(LRP[LRP_MAC_INT], NXM_Reg(ETH_SRC_IDX), Action2) &
        action.load(Route[LSR_NEXT_HOP_INT], NXM_Reg(REG2_IDX),
                    Action3) &
        # lrp_handle_unknow_dst_pkt may use it to modify IP to
        # construct right arp request
        action.load(LRP[LRP_IP_INT], NXM_Reg(REG3_IDX), Action4) &
        action.resubmit_next(Action5) &
        (Action == Action1 + Action2 + Action3 + Action4 + Action5)
    )

    # gateway chassis no need to consider ecmp
    if not options.has_key('GATEWAY'):
        lrp_ip_route(LR, Priority, Match, Action, State) <= (
            ecmp_static_route(LR, Priority, Match, Action, State)
        )
        lrp_ecmp_judge(LR, Priority, Match, Action, State) <= (
            ecmp_static_route_judge(LR, Priority, Match, Action, State)
        )

    # drop packet if we cannot found route for this packet
    lrp_ip_route(LR, Priority, Match, Action, State) <= (
        (Priority == 0) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.match_none(Match) &
        action.resubmit_table(TABLE_DROP_PACKET, Action)
    )
def init_ecmp_clause(options):
    """Declare the pyDatalog clauses that implement ECMP static routes.

    options: dict of option flags; the presence of the 'GATEWAY' key
    selects gateway-chassis behaviour.  Evaluating the `<=` expressions
    registers the clauses with pyDatalog as a side effect; nothing is
    returned.
    """
    # for adding
    # NOTE: this clause is consumed by ecmp_static_route.
    # There is NO circumstance in which ecmp_static_route's LR state is
    # non-zero but ecmp_aggregate_outport's state is zero, because
    # next_hop_ovsport needs LR's state.
    # If you try to update ecmp_static_route, consider it!
    (ecmp_aggregate_outport[X] == tuple_(Y, order_by=Z)) <= (
        lroute_array(Route1, UUID_LR, State1) &
        lroute_array(Route2, UUID_LR, State2) &
        (State1 + State2 >= 0) &
        # two distinct routes sharing ip/prefix/next-hop but leaving
        # through different outports form an ECMP group
        (Route1[LSR_UUID] != Route2[LSR_UUID]) &
        (Route1[LSR_IP] == Route2[LSR_IP]) &
        (Route1[LSR_PREFIX] == Route2[LSR_PREFIX]) &
        (Route1[LSR_NEXT_HOP] == Route2[LSR_NEXT_HOP]) &
        (Route1[LSR_OUTPORT] != Route2[LSR_OUTPORT]) &
        next_hop_ovsport(Route1[LSR_OUTPORT], OFPORT1, State3) &
        next_hop_ovsport(Route2[LSR_OUTPORT], OFPORT2, State4) &
        (State1 + State2 + State3 + State4 > 0) &
        (X == (UUID_LR, Route1[LSR_IP], Route1[LSR_PREFIX],
               State_ADD, 'adding')) &
        (Y == OFPORT1) & (Z == Route1[LSR_UUID]))

    # for readding a slave port: a deletion deletes the whole flow, but
    # some ports should stay in the bundle as slaves, so we have to add
    # those ports back
    (ecmp_aggregate_outport_readd[X] == tuple_(Y, order_by=Z)) <= (
        lroute_array(Route1, UUID_LR, State1) &
        lroute_array(Route2, UUID_LR, State2) &
        (State_COMBIND1 == State1 + State2) &
        (State_COMBIND1 >= 0) &
        (Route1[LSR_UUID] != Route2[LSR_UUID]) &
        (Route1[LSR_IP] == Route2[LSR_IP]) &
        (Route1[LSR_PREFIX] == Route2[LSR_PREFIX]) &
        (Route1[LSR_NEXT_HOP] == Route2[LSR_NEXT_HOP]) &
        (Route1[LSR_OUTPORT] != Route2[LSR_OUTPORT]) &
        next_hop_ovsport(Route1[LSR_OUTPORT], OFPORT1, State3) &
        next_hop_ovsport(Route2[LSR_OUTPORT], OFPORT2, State4) &
        (State_COMBIND2 == State1 + State2 + State3 + State4) &
        (State_COMBIND2 >= 0) &
        # only readd if the same ECMP group currently carries a
        # 'deleting' aggregate
        (ecmp_aggregate_outport[A] == B) &
        (A[0] == UUID_LR) & (A[1] == Route1[LSR_IP]) &
        (A[2] == Route1[LSR_PREFIX]) & (A[4] == 'deleting') &
        (X == (UUID_LR, Route1[LSR_IP], Route1[LSR_PREFIX],
               State_ADD, 'readding')) &
        (Y == OFPORT1) & (Z == Route1[LSR_UUID]))

    # for deleting
    (ecmp_aggregate_outport[X] == tuple_(Y, order_by=Y)) <= (
        lroute_array(Route1, UUID_LR, State1) &
        lroute_array(Route2, UUID_LR, State2) &
        (Route1[LSR_UUID] != Route2[LSR_UUID]) &
        (Route1[LSR_LR_UUID] == Route2[LSR_LR_UUID]) &
        (Route1[LSR_IP] == Route2[LSR_IP]) &
        (Route1[LSR_PREFIX] == Route2[LSR_PREFIX]) &
        (Route1[LSR_NEXT_HOP] == Route2[LSR_NEXT_HOP]) &
        (Route1[LSR_OUTPORT] != Route2[LSR_OUTPORT]) &
        next_hop_ovsport(Route1[LSR_OUTPORT], OFPORT, State3) &
        (State1 + State2 + State3 < 0) &
        (X == (Route1[LSR_LR_UUID], Route1[LSR_IP], Route1[LSR_PREFIX],
               State_DEL, 'deleting')) &
        (Y == OFPORT))

    # adding and readding may generate the same flow; that is ok.
    ecmp_static_route(LR, Priority, Match, Action, State) <= (
        lr_array(LR, UUID_LR, State1) &
        (ecmp_aggregate_outport[X] == Y) &
        (State == State1 + X[3]) & (State != 0) &
        (X[0] == UUID_LR) &
        (Priority == _cal_priority(X[2], 2, 0)) &
        match.ip_proto(Match1) &
        match.ip_dst_prefix(X[1], X[2], Match2) &
        (Match == Match1 + Match2) &
        action.bundle_load(NXM_Reg(REG_OUTPORT_IDX), Y, Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2))

    ecmp_static_route(LR, Priority, Match, Action, State) <= (
        lr_array(LR, UUID_LR, State1) &
        (ecmp_aggregate_outport_readd[X] == Y) &
        (State == State1 + X[3]) & (State != 0) &
        (X[0] == UUID_LR) &
        (Priority == _cal_priority(X[2], 2, 0)) &
        match.ip_proto(Match1) &
        match.ip_dst_prefix(X[1], X[2], Match2) &
        (Match == Match1 + Match2) &
        action.bundle_load(NXM_Reg(REG_OUTPORT_IDX), Y, Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2))

    # gateway chassis has no need to consider ecmp
    # (dict.has_key() is Python-2-only; `in` is equivalent and also
    # works on Python 3)
    if 'GATEWAY' not in options:
        # after hitting the bundle_load action, flows should be added to
        # forward the packet to different ports based on the value of reg4
        ecmp_static_route_judge(LR, Priority, Match, Action, State) <= (
            lroute_array(Route, UUID_LR, State1) &
            next_hop_ovsport(Route[LSR_OUTPORT], OFPORT, State2) &
            lr_array(LR, UUID_LR, State3) &
            lrp_array(Route[LSR_OUTPORT], LRP, UUID_LR,
                      UUID_LSP, State4) &
            (State == State1 + State2 + State3 + State4) &
            (State != 0) &
            (Priority == _cal_priority(Route[LSR_PREFIX], 2, 0)) &
            match.reg_outport(OFPORT, Match1) &
            match.ip_proto(Match2) &
            match.ip_dst_prefix(Route[LSR_IP], Route[LSR_PREFIX],
                                Match3) &
            (Match == Match1 + Match2 + Match3) &
            action.load(LRP[LRP_PORTID], NXM_Reg(REG_DST_IDX),
                        Action1) &
            action.load(LRP[LRP_MAC_INT], NXM_Reg(ETH_SRC_IDX),
                        Action2) &
            action.load(Route[LSR_NEXT_HOP_INT], NXM_Reg(REG2_IDX),
                        Action3) &
            action.load(LRP[LRP_IP_INT], NXM_Reg(REG3_IDX), Action4) &
            action.resubmit_next(Action5) &
            (Action == Action1 + Action2 + Action3 + Action4 + Action5))

        # drop packets if all bundle slave ports are not in 'up' status
        # TODO decide if we should ignore failure and deliver the packet
        # to one of the output ports
        ecmp_static_route_judge(LR, Priority, Match, Action, State) <= (
            lr_array(LR, UUID_LR, State) & (State != 0) &
            (Priority == 1) &
            match.reg_outport(st.TP_OFPORT_NONE, Match) &
            action.resubmit_table(TABLE_DROP_PACKET, Action))

        # resubmit to next stage without hitting any flows above
        ecmp_static_route_judge(LR, Priority, Match, Action, State) <= (
            lr_array(LR, UUID_LR, State) & (State != 0) &
            (Priority == 0) &
            match.match_none(Match) &
            action.resubmit_next(Action))

    if 'GATEWAY' in options:
        # a gateway chassis should set all tunnel ports' bfd to true,
        # unless the chassis was deleted
        ecmp_bfd_port(PORT_NAME, State) <= (
            ovsport_chassis(PORT_NAME, UUID_CHASSIS, OFPORT, State1) &
            # we only enable ovsports that exist
            (State1 >= 0) &
            (UUID_CHASSIS != st.TP_FLOW_TUNNEL_NAME) &
            chassis_array(PHY_CHASSIS, UUID_CHASSIS, State2) &
            (State == State1 + State2) & (State != 0))

        # disable all tunnel port bfd if we found our chassis was deleted
        ecmp_bfd_port(PORT_NAME, State) <= (
            local_system_id(UUID_CHASSIS) &
            chassis_array(PHY_CHASSIS1, UUID_CHASSIS, State1) &
            # prevent events like a chassis tick update:
            # ecmp_bfd_port will grep out PORT_NAME with state above 0.
            # At the same time it also greps out PORT_NAME whose state
            # has a negative value, but config_tunnel_bfd helps us
            # eliminate the negative part.
            # NOTE: it can grep out (State1=1) (State2=1) (State=1),
            # (State1=1) (State2=-1)(State=-1),(State1=-1) (State2=-1)(State=-1)
            # but config_tunnel_bfd will keep (State=1) only
            chassis_array(PHY_CHASSIS2, UUID_CHASSIS, State2) &
            (State == State1 + State2) & (State != 0) &
            # figure out all tunnel ports
            ovsport_chassis(PORT_NAME, UUID_CHASSIS1, OFPORT, State3) &
            (State3 >= 0) &
            (UUID_CHASSIS1 != st.TP_FLOW_TUNNEL_NAME))
    else:
        ecmp_bfd_port(PORT_NAME, State) <= (
            lroute_array(Route, UUID_LR, State1) &
            next_hop_ovsport(Route[LSR_OUTPORT], OFPORT, State2) &
            # we only enable/disable ovsports that exist
            ovsport_chassis(PORT_NAME, UUID_CHASSIS, OFPORT, State3) &
            (State3 >= 0) &
            chassis_array(PHY_CHASSIS, UUID_CHASSIS, State4) &
            (UUID_CHASSIS != st.TP_FLOW_TUNNEL_NAME) &
            (State == State1 + State2 + State3 + State4))
def init_lrp_ingress_clause(options):
    """Declare the pyDatalog clauses of the LRP ingress pipeline.

    options: dict of option flags; the presence of the 'GATEWAY' key
    selects gateway-chassis behaviour.  Evaluating the `<=` expressions
    registers the clauses with pyDatalog as a side effect; nothing is
    returned.
    """
    init_ecmp_clause(options)

    # (dict.has_key() is Python-2-only; `in` is equivalent and also
    # works on Python 3)
    if 'GATEWAY' in options:
        # an LRP not pinned to any chassis is always considered live
        _live_lsp_link_lrp(
            LSP, LS, UUID_LS, LRP, LR, UUID_LR, None, State) <= (
            lsp_link_lrp(
                LSP, LS, UUID_LS, LRP, LR, UUID_LR, None, State))

        # a pinned LRP is live only while its chassis still exists
        _live_lsp_link_lrp(
            LSP, LS, UUID_LS, LRP, LR, UUID_LR,
            UUID_LR_CHASSIS, State) <= (
            lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                         UUID_LR_CHASSIS, State1) &
            chassis_array(PHY_CHASSIS, UUID_LR_CHASSIS, State2) &
            (State == State1 + State2))
    else:
        # non-gateway chassis: every linked LRP counts as live
        _live_lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                           UUID_LR_CHASSIS, State) <= (
            lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                         UUID_LR_CHASSIS, State))

    # response ICMP packet if receiving ICMP request
    lrp_pkt_response(LR, Priority, Match, Action, State) <= (
        (Priority == 3) &
        _live_lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                           UUID_LR_CHASSIS, State) &
        (State != 0) &
        match.icmp_proto(Match1) &
        match.icmp_type(8, Match2) &
        match.icmp_code(0, Match3) &
        match.ip_dst(LRP[LRP_IP], Match4) &
        (Match == Match1 + Match2 + Match3 + Match4) &
        # turn the request into a reply: swap src/dst, reset ttl and
        # load icmp_type 0 (echo reply)
        action.exchange(NXM_Reg(IP_SRC_IDX), NXM_Reg(IP_DST_IDX),
                        Action1) &
        action.load(0xff, NXM_Reg(IP_TTL_IDX), Action2) &
        action.load(0, NXM_Reg(ICMP_TYPE_IDX), Action3) &
        action.move(NXM_Reg(REG_SRC_IDX), NXM_Reg(REG_DST_IDX),
                    Action4) &
        action.load(
            1, NXM_Reg(REG_FLAG_IDX, FLAG_LOOPBACK_BIT_IDX,
                       FLAG_LOOPBACK_BIT_IDX), Action5) &
        action.resubmit_next(Action6) &
        (Action == Action1 + Action2 + Action3 +
         Action4 + Action5 + Action6))

    # default: pass other IP traffic to the next stage
    lrp_pkt_response(LR, Priority, Match, Action, State) <= (
        (Priority == 0) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.ip_proto(Match) &
        action.resubmit_next(Action))

    # drop packets whose ttl is about to expire
    lrp_drop_unexpect(LR, Priority, Match, Action, State) <= (
        (Priority == 2) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.ip_proto(Match1) &
        match.ip_ttl(1, Match2) &
        (Match == Match1 + Match2) &
        action.resubmit_table(TABLE_DROP_PACKET, Action))

    # otherwise decrement ttl and continue
    lrp_drop_unexpect(LR, Priority, Match, Action, State) <= (
        (Priority == 0) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.ip_proto(Match) &
        action.dec_ttl(Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2))

    # the NAT stages are thin wrappers around the nat module's clauses
    lrp_ip_unsnat_stage1(LR, Priority, Match, Action, State) <= (
        nat.lunsnat_xlate_stage1(LR, Priority, Match, Action, State))

    lrp_ip_unsnat_stage2(LR, Priority, Match, Action, State) <= (
        nat.lunsnat_xlate_stage2(LR, Priority, Match, Action, State))

    lrp_ip_dnat_stage1(LR, Priority, Match, Action, State) <= (
        nat.ldnat_xlate_stage1(LR, Priority, Match, Action, State))

    lrp_ip_dnat_stage2(LR, Priority, Match, Action, State) <= (
        nat.ldnat_xlate_stage2(LR, Priority, Match, Action, State))

    # automatic route
    lrp_ip_route(LR, Priority, Match, Action, State) <= (
        lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                     UUID_LR_CHASSIS, State) &
        (State != 0) &
        (Priority == _cal_priority(LRP[LRP_PREFIX], 0,
                                   LRP[LRP_ILK_IDX])) &
        match.ip_proto(Match1) &
        match.ip_dst_prefix(LRP[LRP_IP], LRP[LRP_PREFIX], Match2) &
        (Match == Match1 + Match2) &
        action.load(LRP[LRP_PORTID], NXM_Reg(REG_DST_IDX), Action1) &
        action.load(LRP[LRP_MAC_INT], NXM_Reg(ETH_SRC_IDX), Action2) &
        action.move(NXM_Reg(IP_DST_IDX), NXM_Reg(REG2_IDX), Action3) &
        # lrp_handle_unknow_dst_pkt may use it to modify IP to
        # construct right arp request
        action.load(LRP[LRP_IP_INT], NXM_Reg(REG3_IDX), Action4) &
        action.resubmit_next(Action5) &
        (Action == Action1 + Action2 + Action3 + Action4 + Action5))

    if 'GATEWAY' in options:
        # gateway chassis: a static route is considered changed when its
        # outport's LRP is pinned to this chassis and the LS has a local
        # patchport
        _static_route_changed(Route, LR, LRP, State) <= (
            local_system_id(UUID_CHASSIS) &
            lroute_array(Route, UUID_LR, State1) &
            lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                         UUID_CHASSIS, State2) &
            (Route[LSR_OUTPORT] == LRP[LRP_UUID]) &
            local_patchport(LSP1, LS, State3) &
            (State == State1 + State2 + State3) & (State != 0))

    # a next-hop LR not pinned to any chassis is always live
    # (pyDatalog overloads ==, so `== None` must NOT become `is None`)
    _next_live_hop_lr(UUID_LRP, LRP, LR, LR_NEXT, State) <= (
        next_hop_lr(UUID_LRP, LRP, LR, LR_NEXT, State) &
        (LR_NEXT[LR_CHASSIS_UUID] == None))

    # if next LR is pinned on a chassis, tuplenet has to verify that the
    # geneve tunnel port has been created. Otherwise, some packets may
    # be delivered to an LR which has no tunnel port to the remote
    # chassis. That causes packet drops once a gateway chassis is
    # re-added.
    _next_live_hop_lr(UUID_LRP, LRP, LR, LR_NEXT, State) <= (
        next_hop_lr(UUID_LRP, LRP, LR, LR_NEXT, State1) &
        (LR_NEXT[LR_CHASSIS_UUID] != None) &
        remote_chassis(
            LR_NEXT[LR_CHASSIS_UUID], PHY_CHASSIS_WITH_OFPORT,
            State2) &
        (State == State1 + State2))

    # a next-hop LR pinned on this very chassis is live as well
    _next_live_hop_lr(UUID_LRP, LRP, LR, LR_NEXT, State) <= (
        next_hop_lr(UUID_LRP, LRP, LR, LR_NEXT, State) &
        local_system_id(LR_NEXT[LR_CHASSIS_UUID]))

    _static_route_changed(Route, LR, LRP, State) <= (
        lroute_array(Route, UUID_LR, State1) &
        _next_live_hop_lr(Route[LSR_OUTPORT], LRP, LR, LR_NEXT,
                          State2) &
        (State == State1 + State2) & (State != 0))

    # static route
    lrp_ip_route(LR, Priority, Match, Action, State) <= (
        _static_route_changed(Route, LR, LRP, State) &
        (Priority == _cal_priority(Route[LSR_PREFIX], 1,
                                   Route[LSR_ILK_IDX])) &
        match.ip_proto(Match1) &
        match.ip_dst_prefix(Route[LSR_IP], Route[LSR_PREFIX], Match2) &
        (Match == Match1 + Match2) &
        action.load(LRP[LRP_PORTID], NXM_Reg(REG_DST_IDX), Action1) &
        action.load(LRP[LRP_MAC_INT], NXM_Reg(ETH_SRC_IDX), Action2) &
        action.load(Route[LSR_NEXT_HOP_INT], NXM_Reg(REG2_IDX),
                    Action3) &
        # lrp_handle_unknow_dst_pkt may use it to modify IP to
        # construct right arp request
        action.load(LRP[LRP_IP_INT], NXM_Reg(REG3_IDX), Action4) &
        action.resubmit_next(Action5) &
        (Action == Action1 + Action2 + Action3 + Action4 + Action5))

    # gateway chassis has no need to consider ecmp
    if 'GATEWAY' not in options:
        lrp_ip_route(LR, Priority, Match, Action, State) <= (
            ecmp_static_route(LR, Priority, Match, Action, State))

        lrp_ecmp_judge(LR, Priority, Match, Action, State) <= (
            ecmp_static_route_judge(LR, Priority, Match, Action, State))

    # drop packet if we cannot find a route for this packet
    lrp_ip_route(LR, Priority, Match, Action, State) <= (
        (Priority == 0) &
        lr_array(LR, UUID_LR, State) & (State != 0) &
        match.match_none(Match) &
        action.resubmit_table(TABLE_DROP_PACKET, Action))
def init_lsp_egress_clause(way):
    """Declare the pyDatalog clauses of the LSP egress pipeline.

    Evaluating the `<=` expressions below registers the clauses with
    pyDatalog as a side effect; nothing is returned.
    `way` is not referenced in this body; presumably kept for interface
    symmetry with the other init_*_clause helpers -- TODO confirm.
    """
    # loopback-flagged packets: reset in_port so ovs does not drop them
    lsp_judge_loopback(LS, Priority, Match, Action, State) <= (
        (Priority == 2) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        match.reg_flag(FLAG_LOOPBACK, Match) &
        # load 0xffff(OFPP_NONE) -> inport to avoid dropping loopback packet
        action.load(st.TP_OFPORT_NONE, NXM_Reg(IN_PORT_IDX), Action1) &
        action.resubmit_next(Action2) &
        (Action == Action1 + Action2))

    # default: pass every other packet to the next stage untouched
    lsp_judge_loopback(LS, Priority, Match, Action, State) <= (
        (Priority == 0) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        match.match_none(Match) &
        action.resubmit_next(Action))

    # output packet to local ovs-port
    lsp_forward_packet(LS, Priority, Match, Action, State) <= (
        (Priority == 3) &
        local_bond_lsp(LSP, LS, State) & (State != 0) &
        match.reg_dst(LSP[LSP_PORTID], Match) &
        # reg5 == 1 marks "deliver locally"; lsp_pushout_packet below
        # matches on it
        action.load(1, NXM_Reg(REG5_IDX), Action1) &
        action.load(LSP[LSP_OFPORT], NXM_Reg(REG_OUTPORT_IDX), Action2) &
        action.resubmit_next(Action3) &
        (Action == Action1 + Action2 + Action3))

    # set the packet's destination, the destination is next LR's LRP
    lsp_forward_packet(LS, Priority, Match, Action, State) <= (
        (Priority == 2) &
        lsp_link_lrp(LSP, LS, UUID_LS, LRP, LR, UUID_LR,
                     UUID_LR_CHASSIS, State) &
        (State != 0) &
        match.reg_dst(LSP[LSP_PORTID], Match) &
        # load next LR's ID to reg6, next stage's flow will move reg6 --> DP
        # load next LR's port to reg7, next stage's flow will move reg7
        # --> REG_SRC_IDX
        action.load(LR[LR_ID], NXM_Reg(REG6_IDX), Action1) &
        action.load(LRP[LRP_PORTID], NXM_Reg(REG7_IDX), Action2) &
        action.resubmit_next(Action3) &
        (Action == Action1 + Action2 + Action3))

    # if above flows are not hit, then it means the destination is not
    # on this host and this packet must be a redirect packet. We should
    # send it to lsp_lookup_dst_port, then lsp_output_dst_port will use
    # output action to output packet later.
    # And we decrease ttl the packet.(we assume all packet comes in lsp
    # egress should be IP packet).
    lsp_forward_packet(LS, Priority, Match, Action, State) <= (
        (Priority == 0) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        match.ip_proto(Match) &
        # we set REDIRECT bit again, just try to avoid infinite loop
        action.load(
            1, NXM_Reg(REG_FLAG_IDX, FLAG_REDIRECT_BIT_IDX,
                       FLAG_REDIRECT_BIT_IDX), Action1) &
        action.resubmit_table(TABLE_LSP_INGRESS_LOOKUP_DST_PORT, Action2) &
        (Action == Action1 + Action2))

    # if above flows are not hit, then it means the destination is not
    # on this host and this packet must be a redirect packet. We should
    # convert this arp request into arp response and send it back to
    # tunnel port which it comes from
    lsp_forward_packet(LS, Priority, Match, Action, State) <= (
        (Priority == 0) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        match.arp_proto(Match1) &
        match.arp_op(1, Match2) &
        (Match == Match1 + Match2) &
        # set REDIRECT bit again to avoid infinite loop
        action.load(
            1, NXM_Reg(REG_FLAG_IDX, FLAG_REDIRECT_BIT_IDX,
                       FLAG_REDIRECT_BIT_IDX), Action1) &
        action.resubmit_table(TABLE_ARP_FEEDBACK_CONSTRUCT, Action2) &
        action.resubmit_table(TABLE_LSP_INGRESS_LOOKUP_DST_PORT, Action3) &
        (Action == Action1 + Action2 + Action3))

    # reg5 == 1: the packet already carries its output port, push it out
    lsp_pushout_packet(LS, Priority, Match, Action, State) <= (
        (Priority == 2) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        match.reg_5(1, Match) &
        action.resubmit_table(TABLE_OUTPUT_PKT, Action))

    # otherwise hand the packet to the next LR's ingress: reg6/reg7
    # (filled by lsp_forward_packet above) become DP and REG_SRC
    lsp_pushout_packet(LS, Priority, Match, Action, State) <= (
        (Priority == 1) &
        ls_array(LS, UUID_LS, State) & (State != 0) &
        match.match_none(Match) &
        action.move(
            NXM_Reg(REG6_IDX, 0, 23), NXM_Reg(REG_DP_IDX, 0, 23),
            Action1) &
        action.move(NXM_Reg(REG7_IDX), NXM_Reg(REG_SRC_IDX), Action2) &
        # set reg6 back to 0
        action.load(0, NXM_Reg(REG6_IDX), Action3) &
        action.load(0, NXM_Reg(REG7_IDX), Action4) &
        action.resubmit_table(TABLE_LRP_TRACE_INGRESS_IN, Action5) &
        (Action == Action1 + Action2 + Action3 + Action4 + Action5))
pyDatalog.create_terms('Table, Priority, Priority1, Match, Action') pyDatalog.create_terms('Action1, Action2, Action3, Action4, Action5') pyDatalog.create_terms('Action6, Action7, Action8, Action9, Action10') pyDatalog.create_terms('Match1, Match2, Match3, Match4, Match5') pyDatalog.create_terms('trace_pipeline_start, trace_pipeline_end') pyDatalog.create_terms('trace_pipeline_module') pyDatalog.create_terms('get_init_trigger') # keep it! otherwise # the get_init_trigger won't work # trace_pipeline_start will be inserted into each LS/LR's # first ingress/egress stage trace_pipeline_start(Priority, Match, Action, State) <= ( (Priority == 100) & (State == get_init_trigger(Priority)) & (State != 0) & match.reg_flag(FLAG_TRACE, Match) & action.upload_trace(Action1) & action.resubmit_next(Action2) & (Action == Action1 + Action2)) # default flow to resubmit to next table trace_pipeline_start(Priority, Match, Action, State) <= ( (Priority == 0) & (State == get_init_trigger(Priority)) & (State != 0) & match.match_none(Match) & action.resubmit_next(Action)) # trace_pipeline_end will be inserted into each LS/LR's # last ingress/egress stage # the caller will add resumbit action trace_pipeline_end(Priority, Match, Action, State) <= ( (Priority == 100) & (State == get_init_trigger(Priority)) & (State != 0) & match.reg_flag(FLAG_TRACE, Match) & action.upload_trace(Action)) # default flow, the caller will add resumbit action