def delete(client, data):
    """Delete a T0 logical router together with its dependent ports.

    Removes any HA VIP configuration first (it references uplink ports),
    then tears down every logical router port on the router — and any
    linked logical switch ports — before deleting the router itself.

    Args:
        client: NSX API client.
        data: dict identifying the target router.

    Returns:
        The API response of the DeleteLogicalRouter call.
    """
    if is_ha_vip_config(client, data):
        _delete_ha_vip_config(client, data)
    t0_id = get_id(client, data)
    t0_lrp = nsx_lrp.get_list(client, logical_router_id=t0_id)
    lp_rm = []
    lrp_rm = []
    for lrp in t0_lrp:
        # dict.has_key() was removed in Python 3; use the `in` operator.
        if 'linked_logical_switch_port_id' in lrp:
            lp_rm.append(lrp['linked_logical_switch_port_id']['target_id'])
        elif 'linked_logical_router_port_id' in lrp:
            # NOTE(review): this appends the whole reference dict, while the
            # switch-port branch extracts ['target_id'] — confirm the peer
            # router-port id should not be lrp['linked_logical_router_port_id']['target_id'].
            lrp_rm.append(lrp['linked_logical_router_port_id'])
        lrp_rm.append(lrp['id'])
    # Router ports must go before their linked switch ports.
    for lrp in lrp_rm:
        nsx_lrp.delete(client, {'id': lrp})
    for lp in lp_rm:
        nsx_lp.delete(client, {'id': lp})
    # Reuse t0_id rather than resolving the router id a second time.
    param = {'logical-router-id': t0_id}
    # getattr() is the idiomatic form of client.__getattr__(MODULE).
    request = getattr(client, MODULE).DeleteLogicalRouter(**param)
    response = request.result()
    return response
def _cleanup_lrp(client, data):
    """Force-delete every NCP-owned logical router port for the cluster.

    Honors data['dry_run']: when set, only logs the count and returns.
    """
    ncp_ports = get_ncp_objects(
        nsx_lrp.get_list(client), cluster=data['cluster_name'])
    logger.info(
        "Number of Logical Router port to be deleted: %s" % len(ncp_ports))
    if data['dry_run']:
        return
    for port in ncp_ports:
        nsx_lrp.delete(client, port, force=True)
        logger.info(
            "Logical Router port: %s is deleted" % port['display_name'])
def _cleanup_t1lr(client, data):
    """Force-delete every NCP-owned T1 router (and its ports) for the cluster.

    Honors data['dry_run']: when set, only logs the count and returns.
    """
    routers = get_ncp_objects(
        nsx_t1lr.get_list(client), cluster=data['cluster_name'])
    logger.info("Number of T1 Router to be deleted: %s" % len(routers))
    if data['dry_run']:
        return
    for router in routers:
        # A router's ports have to be removed before the router itself.
        for port in nsx_lrp.get_list(client, logical_router_id=router['id']):
            nsx_lrp.delete(client, port, force=True)
            logger.info(
                "Logical Router port: %s is deleted" % port['display_name'])
        nsx_t1lr.delete(client, router)
        logger.info("T1 Router: %s is deleted" % router['display_name'])
def _create_uplink(client, data):
    """Create an uplink port on the T0 router described by *data*.

    Creates a logical switch port on the configured logical switch, then an
    uplink logical router port bound to the given edge-cluster member and
    IP subnet.

    Args:
        client: NSX API client.
        data: dict with a 'create_uplink' section containing display_name,
            ls, edge_node, ip_address and prefix_length.

    Returns:
        The created uplink router port, or None when an uplink with the
        same display name already exists.
    """
    t0_id = get_id(client, data)
    t0_uplinks = nsx_lrp.get_list(client, logical_router_id=t0_id)
    # Guard against creating a duplicate uplink (matched by display name).
    if data['create_uplink']['display_name'] in [
            u['display_name'] for u in t0_uplinks]:
        logger.error('Uplink Already exist')
        return None
    ls_id = nsx_logicalswitch.get_id(
        client, {'display_name': data['create_uplink']['ls']})
    param = {
        'logical_switch_id': ls_id,
        'display_name': "Uplink_on_%s" % data['create_uplink']['edge_node'],
        'admin_state': 'UP'
    }
    t0_lp = nsx_lp.create(client, param)
    edge_memberid = nsx_edge.get_memberid(
        client, {'display_name': data['create_uplink']['edge_node']})
    param = {
        'display_name': data['create_uplink']['display_name'],
        'resource_type': 'LogicalRouterUpLinkPort',
        'logical_router_id': t0_id,
        'tags': [],
        'linked_logical_switch_port_id': {
            'target_id': t0_lp['id']
        },
        'edge_cluster_member_index': [edge_memberid],
        'subnets': [{
            'ip_addresses': [data['create_uplink']['ip_address']],
            'prefix_length': data['create_uplink']['prefix_length']
        }]
    }
    # Return the created port (the unused `res = []` accumulator was removed;
    # previously the result was silently discarded).
    return nsx_lrp.create(client, param)
def _update_ha_vip_config(client, data):
    """Apply the HA VIP configuration from *data* to the T0 router.

    Resolves the configured uplink display names to their uplink-port ids,
    rewrites the router's advanced_config.ha_vip_configs with a single
    enabled entry, and pushes the updated router config.
    """
    t0_id = get_id(client, data)
    uplink_lrps = nsx_lrp.get_list(
        client,
        logical_router_id=t0_id,
        resource_type='LogicalRouterUpLinkPort')
    # Resolve each configured uplink name to its port id, preserving the
    # order in which the names appear in the config.
    redundant_uplink_port_ids = [
        lrp['id']
        for name in data['ha_vip_config']['uplinks']
        for lrp in uplink_lrps
        if lrp['display_name'] == name
    ]
    t0_routerconfig = get(client, data)
    t0_routerconfig['advanced_config']['ha_vip_configs'] = [{
        'enabled': True,
        'ha_vip_subnets': [{
            'active_vip_addresses': [data['ha_vip_config']['vip']],
            'prefix_length': data['ha_vip_config']['prefix_length']
        }],
        'redundant_uplink_port_ids': redundant_uplink_port_ids
    }]
    update(client, t0_routerconfig)