def send_request(sw_data):
    """POST a 'show running-config' NX-API JSON-RPC request to a switch.

    `sw_data` must carry 'mgmt_ip', 'username' and 'password'.
    Returns the decoded JSON response on success.

    Raises:
        IgniteException: ERR_SWITCH_CONFIG_FAILED on timeout or any other
            failure (the outer handler re-wraps the inner timeout raise).
    """
    try:
        logger.debug(str(sw_data))
        url = build_nxapi_url(sw_data['mgmt_ip'])
        logger.debug(url)
        myheaders = {'content-type': 'application/json-rpc'}
        command = 'show running-config'
        # build the JSON-RPC payload from the NX-API template
        # (the original also built an unused `status` dict here — removed)
        template = get_nxapi_template()
        template['params']['cmd'] = command
        payload = [template]
        logger.debug(str(payload))
        try:
            response = requests.post(
                url,
                data=json.dumps(payload),
                headers=myheaders,
                timeout=7,
                auth=(sw_data['username'], sw_data['password'])).json()
            return response
        except requests.exceptions.Timeout:
            logs = 'timeout occured while running ' + command
            logger.error(logs)
            raise IgniteException(ERR_SWITCH_CONFIG_FAILED)
    except Exception as e:
        logger.error(e)
        raise IgniteException(ERR_SWITCH_CONFIG_FAILED)
def add_switches(self, data):
    """Add the requested number of switches per tier to this topology,
    enforcing the per-tier maximum switch counts."""
    # tier -> (maximum allowed, error to raise when exceeded)
    limits = {
        SPINE: (MAX_SPINES, ERR_SPINE_EXCEEDED_MAX_LIMIT),
        LEAF: (MAX_LEAFS, ERR_LEAF_EXCEEDED_MAX_LIMIT),
        CORE: (MAX_CORES, ERR_CORE_EXCEEDED_MAX_LIMIT),
        BORDER: (MAX_BORDERS, ERR_BORDER_EXCEEDED_MAX_LIMIT),
    }
    for entry in data[SWITCHES]:
        if not entry[COUNT]:
            continue
        existing = Switch.objects.filter(topology_id=self._top.id,
                                         tier=entry[TIER],
                                         dummy=False).count()
        if entry[TIER] in limits:
            max_count, err = limits[entry[TIER]]
            if existing + entry[COUNT] > max_count:
                raise IgniteException(err)
        # get next switch index for tier
        start = self._get_next_index(entry[TIER])
        for offset in range(entry[COUNT]):
            self._add_switch(entry[TIER], start + offset)
def reset_switch_boot_status(sw_id):
    """Reset a switch's boot state back to "not booted".

    Only switches whose boot status is BOOT_PROGRESS or BOOT_FAIL can be
    reset; the matching counter on the switch model is decremented, the
    boot detail record is deleted, and the owning fabric is returned.

    Raises:
        IgniteException: ERR_CAN_NOT_RESET + status when the switch is in
            a boot state that cannot be reset; ERR_SWITCH_NOT_BOOTED when
            the switch has no boot detail.
    """
    switch = Switch.objects.get(id=sw_id)
    if switch:
        logger.debug("Switch found")
        if switch.boot_detail:
            logger.debug("Switch has boot state as %s ",
                         switch.boot_detail.boot_status)
            if switch.boot_detail.boot_status in [BOOT_PROGRESS, BOOT_FAIL]:
                # keep a reference: the switch row must stop pointing at
                # the boot detail before the record itself is deleted
                boot_detail = switch.boot_detail
                if switch.boot_detail.boot_status == BOOT_PROGRESS:
                    logger.debug("Updating boot_in_progress in switch Model")
                    switch.model.boot_in_progress -= 1
                else:
                    logger.debug("Updating boot_with_fail in switch Model")
                    switch.model.booted_with_fail -= 1
                switch.model.save()
                switch.boot_detail = None
                switch.save()
                logger.debug("Switch updated")
                logger.debug("Deleting boot details for switch")
                boot_detail.delete()
                logger.debug("Boot details deleted")
                fabric = get_fabric(switch.topology_id)
                return fabric
            # boot status exists but is neither in-progress nor failed
            logger.debug(
                "Can not reset the switch as it is in Boot state: %s ",
                switch.boot_detail.boot_status)
            raise IgniteException(ERR_CAN_NOT_RESET +
                                  switch.boot_detail.boot_status)
    raise IgniteException(ERR_SWITCH_NOT_BOOTED)
def _add_profile(data, user, id=0):
    """Create a new profile, or update the profile with the given id.

    The CONSTRUCT_LIST is validated BEFORE any reference counts are
    touched: the original decremented the old constructs' ref counts and
    then raised on an empty list, leaving the counts wrongly reduced.
    The duplicated create/update bodies are also merged.

    Raises:
        IgniteException: ERR_PROF_IS_EMPTY when CONSTRUCT_LIST is empty.
    """
    if not data[CONSTRUCT_LIST]:
        raise IgniteException(ERR_PROF_IS_EMPTY)
    if id:
        obj = get_profile(id)
        # release the references held by the profile's old construct list
        update_ref_count(obj.construct_list, -1)
    else:
        obj = Profile()
    obj.name = data[NAME]
    obj.construct_list = data[CONSTRUCT_LIST]
    obj.submit = data[SUBMIT]
    obj.updated_by = user
    update_ref_count(data[CONSTRUCT_LIST], +1)
    obj.save()
    return obj
def get_rma_detail(old_serial_num):
    """Look up RMA details for the switch with the given serial number.

    Returns the switch (annotated with .match/.switch_detail and, for a
    not-yet-booted switch with a matching discovery rule, .rule), or None
    when no switch has that serial number.

    Raises:
        IgniteException: when the switch is currently booting, or belongs
            to a group that is referenced by a scheduled job.
    """
    # check if switch exist with the searched serial number
    switch = get_rma_switch(old_serial_num)
    if switch:
        switch.match = SWITCH
        # self-reference used by the serializer layer — presumably so the
        # caller can render the full switch record; verify against caller
        switch.switch_detail = switch
        if switch.boot_detail:
            # booting of switch is in progress
            if switch.boot_detail.boot_status == BOOT_PROGRESS:
                logger.debug(ERR_SWITCH_BOOT_IN_PROGRESS)
                raise IgniteException(ERR_SWITCH_BOOT_IN_PROGRESS)
            try:
                # reject RMA while any group containing this switch is in
                # use by a scheduled job
                group_switches = GroupSwitch.objects.filter(
                    grp_switch_id=switch.id).values_list(
                    'group_id', flat=True).distinct()
                for group_switch in group_switches:
                    group = Group.objects.get(pk=group_switch)
                    if group.ref_count > 0:
                        logger.debug(ERR_SWITCH_IN_USE_JOB_SCHEDULE)
                        raise IgniteException(ERR_SWITCH_IN_USE_JOB_SCHEDULE)
            except GroupSwitch.DoesNotExist:
                pass
        else:
            # switch never booted: attach the matching discovery rule, if any
            rule = find_dup_serial_discovery([old_serial_num], rma_case=True)
            if rule:
                switch.rule = rule.id
                logger.debug(
                    "switch found as not booted and discovery rule also exist")
        return switch
    return None
def pull_switch_config(data, fid, switch_id, username=''):
    """Pull the running config from a switch and store it as a new
    SwitchConfig version, unless it is identical to the latest stored
    version (in which case the existing version is reused).

    Returns an HTTP file response for the resulting config file.

    Raises:
        IgniteException: ERR_SWITCH_CONFIG_FAILED when the switch cannot
            be reached or the comparison fails.
    """
    switch = Switch.objects.get(id=switch_id, topology_id=fid)
    data['id'] = switch.id
    data['mgmt_ip'] = switch.mgmt_ip.split('/')[0]
    response = send_request(data)
    temp_path = os.path.join(MEDIA_ROOT, 'switch', 'temp.cfg')
    if not response:
        raise IgniteException(ERR_SWITCH_CONFIG_FAILED)
    objs = SwitchConfig.objects.filter(switch=switch).aggregate(Max('version'))
    if not objs['version__max']:
        # first config ever pulled for this switch
        new_name = create_file(switch, 0, username, response)
        remove_temp(temp_path)
        return get_file(new_name)
    objects = SwitchConfig.objects.filter(switch=switch).filter(
        version=objs['version__max'])
    old_file = objects[0].name
    version = objects[0].version
    sw_config_id = objects[0].id
    f_path = os.path.join(MEDIA_ROOT, 'switch', old_file)
    logger.debug(f_path)
    try:
        # write the freshly pulled config to a temp file, then compare it
        # with the latest stored version; context managers close the
        # handles even on error (the original leaked them on exceptions)
        with open(temp_path, 'w') as tem_fo:
            tem_fo.write(response['result']['msg'])
        with open(temp_path, 'r') as tem_fo:
            fo_new = tem_fo.readlines()
        with open(f_path, 'r') as fo:
            fo_old = fo.readlines()
        # index 1 is dropped from both files before comparing — presumably
        # a volatile header line (e.g. a timestamp); confirm with caller
        del fo_old[1]
        del fo_new[1]
        logger.debug(str(len(fo_new)))
        logger.debug(str(len(fo_old)))
        # full list equality: a length difference also means the configs
        # differ (the original zip()-based compare silently ignored any
        # trailing lines of the longer file)
        same = fo_old == fo_new
    except Exception as e:
        logger.error(e)
        remove_temp(temp_path)
        raise IgniteException(ERR_SWITCH_CONFIG_FAILED)
    if same:
        # unchanged: just record who pulled it and reuse the stored file
        obj = SwitchConfig.objects.get(id=sw_config_id)
        obj.username = username
        obj.save()
        remove_temp(temp_path)
        return get_file(obj.name)
    new_name = create_file(switch, version, username, response)
    remove_temp(temp_path)
    return get_file(new_name)
def delete_task(id):
    """Delete the task with the given primary key.

    Refuses when the task is read-only or still referenced elsewhere.
    """
    obj = Task.objects.get(pk=id)
    if not obj.editable:
        raise IgniteException(ERR_TASK_NOT_EDITABLE)
    if obj.ref_count:
        raise IgniteException(ERR_TASK_IN_USE)
    obj.delete()
def update_profile(prindex_id, pr_id, data, username=''):
    """Validate a profile update payload and apply it.

    The default profile (id 1) is immutable. Returns the serialized
    updated profile.
    """
    if pr_id == 1:
        raise IgniteException(ERR_CAN_NOT_EDIT_DEFAULT_CONFIG)
    post_ser = ProfilePostSerializer(data=data)
    if not post_ser.is_valid():
        raise IgniteException(post_ser.errors)
    updated = profile.update_profile(prindex_id, pr_id, post_ser.data,
                                     username)
    return ProfileSerializer(updated).data
def wrap_config_file(name):
    """Wrap a stored switch config file in a plain-text HttpResponse.

    Fixes in this revision:
    * the missing-file check constructed an IgniteException but never
      raised it — the `raise` was missing;
    * the Python 2-only ``file()`` builtin is replaced with ``open()``;
    * the bare ``except:`` is narrowed to ``except Exception``.

    Raises:
        IgniteException: ERR_SWITCH_CONFIG_NOT_AVAILABLE when the file is
            missing; ERR_SWITCH_CONFIG_FAILED when it cannot be opened.
    """
    path = os.path.join(MEDIA_ROOT, "switch", name)
    if not os.path.isfile(path):
        raise IgniteException(ERR_SWITCH_CONFIG_NOT_AVAILABLE)
    try:
        wrapper = FileWrapper(open(path))
    except Exception:
        raise IgniteException(ERR_SWITCH_CONFIG_FAILED)
    response = HttpResponse(wrapper, content_type='text/plain')
    response['Content-Length'] = os.path.getsize(path)
    return response
def delete_backup(data):
    """Delete the named backup tarballs from the backup directory.

    `data` must be a non-empty list of file names; entries that do not
    exist or lack the tar extension are silently skipped.

    Raises:
        IgniteException: ERR_NOT_LIST / ERR_EMPTY_LIST on bad input.
    """
    # isinstance instead of `type(data) is list`; the original also bound
    # os.remove's None return to an unused `status` variable
    if not isinstance(data, list):
        raise IgniteException(ERR_NOT_LIST)
    if not data:
        raise IgniteException(ERR_EMPTY_LIST)
    for name in data:
        filename = os.path.join(MEDIA_ROOT, BACKUP, name)
        if os.path.isfile(filename) and filename.endswith(TAR_FORMAT):
            os.remove(filename)
def add_switch(data, username=''):
    """Validate a switch payload against its hardware type's serializer
    and create the switch; returns the serialized new switch."""
    sw_type = data[SWITCH_TYPE]
    if sw_type == FIXED:
        validator = SwitchFixedSerializer(data=data)
    elif sw_type == CHASSIS:
        validator = SwitchChassisSerializer(data=data)
    else:
        raise IgniteException(ERR_INV_SW_TYPE)
    if not validator.is_valid():
        raise IgniteException(validator.errors)
    created = switch.add_switch(validator.data, username)
    return SwitchSerializer(created).data
def allocate_pool_entry(pool_id, switch_id, switch):
    """Allocate (or return the already-allocated) pool entry value for a
    switch.

    Fabric-scoped entries are materialized lazily on first use; a
    MGMT-role allocation is also written back to the switch's mgmt_ip.

    Raises:
        IgniteException: ERR_NON_FABRIC_POOL when a fabric-scoped pool is
            used with a switch outside any fabric; ERR_POOL_FULL when no
            free entry remains.
    """
    logger.debug("pool id = %s, switch id = %s", pool_id, switch_id)
    if not switch:
        switch = Switch.objects.get(pk=switch_id)
    pool = Pool.objects.get(pk=pool_id)
    if pool.scope == GLOBAL:
        # global pools are not partitioned per fabric
        fabric = None
    else:
        if not switch.topology:
            raise IgniteException(ERR_NON_FABRIC_POOL)
        fabric = Fabric.objects.get(pk=switch.topology.id)
        # check if fabric specific entries have been created
        entry = PoolEntry.objects.filter(pool=pool, fabric=fabric)
        if not entry:
            # create fabric specific entries
            if pool.type == INTEGER:
                _create_integer_pool(pool, pool.blocks, fabric)
            elif pool.type == IPV4:
                _create_ipv4_pool(pool, pool.blocks, fabric)
            elif pool.type == IPV6:
                _create_ipv6_pool(pool, pool.blocks, fabric)
    # check if pool entry has already been allocated to this switch
    entry = get_assigned_entry(pool, fabric, switch)
    if entry:
        return entry.value
    # find new entry to allocate
    entries = PoolEntry.objects.filter(pool=pool, fabric=fabric, switch=None)
    if not entries:
        raise IgniteException(ERR_POOL_FULL)
    # save assigned switch in pool entry
    entry = entries.first()
    entry.switch = switch
    entry.save()
    logger.debug("value = %s", entry.value)
    if pool.role == MGMT:
        # management-pool values double as the switch's management IP
        switch.mgmt_ip = entry.value
        switch.save()
    return entry.value
def get_instance_value(instance_value, switch, switch_name):
    """Resolve a template instance parameter to its concrete value for a
    switch (hostname, VPC peer addresses/ports, uplink/downlink ports).

    The original VPC_PEER_DST branch contained an unreachable re-check of
    ``peer_switch.mgmt_ip`` inside the branch where it was already known
    truthy; that dead code has been removed.

    Raises:
        IgniteException: when a required value (mgmt IP, fabric, peer)
            cannot be determined, or the parameter is unknown.
    """
    logger.debug("Get Instance value for %s" % instance_value)
    if instance_value == HOST_NAME:
        if switch:
            return switch.name
        return switch_name
    if instance_value == VPC_PEER_SRC:
        if not switch.mgmt_ip:
            raise IgniteException("%s- %s" % (ERR_MGMT_IP_NOT_DETERMINED,
                                              switch.name))
        return switch.mgmt_ip
    # everything below needs the switch's topology
    if not switch.topology:
        raise IgniteException("%s- %s" % (ERR_SWITCH_FABRIC_NOT_DETERMINED,
                                          switch.name))
    topo = BaseTopology.get_object(switch.topology.id)
    if instance_value == VPC_PEER_DST:
        peer_switch = topo.get_vpc_peer_switch(switch)
        if peer_switch.mgmt_ip:
            return peer_switch.mgmt_ip
        # peer has no mgmt IP yet: derive it from this switch's own IP
        if not switch.mgmt_ip:
            raise IgniteException(
                "%s- %s" % (ERR_MGMT_IP_NOT_DETERMINED, switch.name))
        return get_peer_mgmt_ip(switch, peer_switch)
    if instance_value == VPC_PEER_PORTS:
        return (topo.get_vpc_peer_ports(switch))
    if instance_value == UPLINK_PORTS:
        return (topo.get_uplink_ports(switch))
    if instance_value == DOWNLINK_PORTS:
        return (topo.get_downlink_ports(switch))
    raise IgniteException(
        "%s- %s Instance %s" % (ERR_FAILED_TO_GET_INSTANCE_PARAM_VALUE,
                                switch.name, instance_value))
def delete_workflow(id):
    """Delete a workflow and release its tasks' reference counts.

    The delete is attempted BEFORE the ref counts are decremented: the
    original decremented first, so a ProtectedError left every referenced
    task with a wrongly reduced ref count.

    Raises:
        IgniteException: ERR_WF_NOT_EDITABLE for read-only workflows;
            ERR_TASK_IN_USE when the delete is blocked by protected rows.
    """
    workflow = Workflow.objects.get(pk=id)
    if not workflow.editable:
        raise IgniteException(ERR_WF_NOT_EDITABLE)
    # remember the task list; it is unavailable once the row is deleted
    task_list = workflow.task_list
    try:
        workflow.delete()
    except ProtectedError:
        raise IgniteException(ERR_TASK_IN_USE)
    # decrement ref count of tasks used in this workflow
    for task_const in task_list:
        task.update_ref_count(task_const[TASK_ID], -1)
def update_switch(id, data, username=''):
    """Validate the payload against the switch hardware type's serializer
    and apply the update; the built-in 'unknown' model (id 1) is
    immutable. Returns the serialized updated switch."""
    if id == 1:
        raise IgniteException(ERR_CAN_NOT_EDIT_UNKNOWN_MODEL)
    sw_type = data[SWITCH_TYPE]
    if sw_type == FIXED:
        validator = SwitchFixedSerializer(data=data)
    elif sw_type == CHASSIS:
        validator = SwitchChassisSerializer(data=data)
    else:
        raise IgniteException(ERR_INV_SW_TYPE)
    if not validator.is_valid():
        raise IgniteException(validator.errors)
    updated = switch.update_switch(id, validator.data, username)
    return SwitchSerializer(updated).data
def get_switch_config(fid, sid, id):
    """Return the stored config file response for a switch, after
    verifying the switch actually belongs to the given fabric.

    Raises:
        IgniteException: ERR_FABRIC_SWITCH_DIFFERENT when the switch is
            not part of the fabric.
    """
    # .exists() avoids materializing the queryset just for a truth test
    if Switch.objects.filter(id=sid, topology_id=fid).exists():
        config = SwitchConfig.objects.get(id=id, switch_id=sid)
        return wrap_config_file(config.name)
    logger.error(ERR_FABRIC_SWITCH_DIFFERENT)
    raise IgniteException(ERR_FABRIC_SWITCH_DIFFERENT)
def build_config(fab_id):
    """Build configuration files for every switch in a submitted fabric.

    Raises:
        IgniteException: ERR_BUILD_WITHOUT_SUBMIT when the fabric has not
            been submitted.
    """
    fab = Fabric.objects.get(pk=fab_id)
    if not fab.submit:
        raise IgniteException(ERR_BUILD_WITHOUT_SUBMIT)
    # get all switches in fabric
    switches = fabric.get_all_switches(fab_id)
    # drop stale build artifacts; REPO_PATH is joined as a directory,
    # matching _delete_switch (the original concatenated the strings
    # inside os.path.join, which only worked if REPO_PATH carried a
    # trailing separator)
    for switch in switches:
        try:
            os.remove(os.path.join(REPO_PATH, str(switch.id) + '.cfg'))
        except OSError:
            pass
    # Run ANK
    device_profile = _build_ank_profiles(fab.feature_profile, switches)
    if device_profile:
        topo_detail = fabric.get_topology_detail(fab_id)
        run_ank(topo_detail=topo_detail, prof_detail=device_profile)
    # fetch fabric level config
    fab_cfg = fab.config_profile
    # build config on each switch in fabric
    for switch in switches:
        build_switch_config(switch, fab_cfg=fab_cfg)
    fab.build_time = timezone.now()
    fab.save()
def delete_pool(id):
    """Delete the pool with the given primary key unless it is still
    referenced."""
    obj = Pool.objects.get(pk=id)
    if obj.ref_count:
        raise IgniteException(ERR_POOL_IN_USE)
    obj.delete()
def update_job(id, data, username=''):
    """Validate a job update payload, apply it, and return the serialized
    job detail."""
    post_ser = JobPostSerializer(data=data)
    if not post_ser.is_valid():
        raise IgniteException(post_ser.errors)
    updated = job.update_job(id, post_ser.data, username)
    return JobDetailSerializer(updated).data
def add_fabric_switch(fab_id, data, user=""):
    """Validate and add switches to a fabric; return the serialized
    fabric detail."""
    post_ser = SwitchPostSerializer(data=data)
    if not post_ser.is_valid():
        raise IgniteException(post_ser.errors)
    fabric.add_switches(fab_id, post_ser.data, user)
    return FabricSerializer(fabric.get_fabric(fab_id)).data
def update_fabric_switch(fab_id, switch_id, data, user=""):
    """Validate and apply an update to one fabric switch; return the
    serialized fabric detail."""
    put_ser = FabricSwitchPutSerializer(data=data)
    if not put_ser.is_valid():
        raise IgniteException(put_ser.errors)
    fabric.update_switch(fab_id, switch_id, put_ser.data, user)
    return FabricSerializer(fabric.get_fabric(fab_id)).data
def build_switch_config(switch, fab_cfg=None, switch_cfg=None):
    """Append the fabric- and switch-level config profiles to a single
    switch's build file.

    Either profile may be passed in; missing ones are looked up from the
    switch / its topology.

    Raises:
        IgniteException: ERR_REQUEST_WITHOUT_SUBMIT when the owning
            topology has not been submitted.
    """
    if switch.topology:
        if not switch.topology.submit:
            raise IgniteException(ERR_REQUEST_WITHOUT_SUBMIT)
    logger.debug('Starting build config for switch %s' % switch.name)
    if not switch_cfg:
        # fetch switch config profile
        switch_cfg = fabric.get_switch_config_profile(switch)
    if switch_cfg:
        logger.debug("Config profile for %s- %s" % (switch.name,
                                                    switch_cfg.name))
    else:
        logger.debug("No switch level config profile applied for- %s"
                     % switch.name)
    if not fab_cfg:
        fab_cfg = switch.topology.config_profile
    if fab_cfg:
        logger.debug("Fabric level config profile for- %s", fab_cfg.name)
    else:
        logger.debug("No fabric level config profile applied")
    # REPO_PATH joined as a directory, consistent with _delete_switch
    # (the original concatenated the strings inside os.path.join)
    cfg_file = os.path.join(REPO_PATH, str(switch.id) + '.cfg')
    if fab_cfg or switch_cfg:
        with open(cfg_file, 'a') as output_fh:
            output_fh.write(build_config_profile([fab_cfg, switch_cfg],
                                                 switch))
def add_switches(data, fab_id, gid, user):
    """Attach booted switches to a group.

    The whole request is rejected (with a per-switch error list) when any
    listed switch has not booted successfully in the given fabric;
    switches already in the group are skipped.
    """
    failures = 0
    error = []
    for entry in data:
        booted = Switch.objects.filter(
            id=entry["switch_id"],
            topology_id=fab_id,
            topology__is_fabric=True,
            boot_detail__isnull=False,
            boot_detail__boot_status=BOOT_SUCCESS)
        if booted:
            error.append({entry["switch_id"]: ""})
        else:
            error.append({entry["switch_id"]: "switch is not booted"})
            failures += 1
    if failures:
        raise IgniteException(error)
    for entry in data:
        # skip switches that are already members of this group
        existing = GroupSwitch.objects.filter(
            group__id=gid, grp_switch__id=entry["switch_id"]).count()
        if existing != 0:
            continue
        membership = GroupSwitch()
        membership.group = Group.objects.get(pk=gid)
        membership.grp_switch = Switch.objects.get(id=entry["switch_id"])
        membership.save()
def create_backup():
    """Dump the database and bundle it with the media files into a
    timestamped gzipped tarball under MEDIA_ROOT/BACKUP.

    Returns a dict with "status" and the backup "filename".

    Fixes in this revision:
    * the working directory is restored on ALL paths (try/finally);
    * the tar handle is closed even when adding a file fails;
    * on cleanup the correct tar name is deleted — the original passed
      ``current_date + TAR_FORMAT`` although the file was created with
      the ``Ignite_`` prefix, so the stale tarball was never removed;
    * the bare ``except`` around os.remove is narrowed to OSError.
    """
    cwd = os.getcwd()
    os.chdir(BASE_DIR)
    try:
        current_date = datetime.utcnow().strftime(FILE_TIME_FORMAT)
        filename = os.path.join(MEDIA_ROOT, current_date + SQL_FORMAT)
        backup_name = "Ignite_" + current_date + TAR_FORMAT
        tar_name = os.path.join(MEDIA_ROOT, BACKUP, backup_name)
        db_dump(filename)
        file_obj = tarfile.open(tar_name, "w:gz")
        logger.debug("cwd = " + os.getcwd())
        try:
            for name in FILE_LIST:
                file_obj.add(MEDIA + "/" + name)
            file_obj.add(MEDIA + "/" + current_date + SQL_FORMAT)
        finally:
            file_obj.close()
        try:
            os.remove(filename)
        except OSError:
            # remove the (now inconsistent) tarball before reporting
            delete_backup([backup_name])
            raise IgniteException(ERR_FAILED_TO_REMOVE + filename)
        resp = {}
        resp["status"] = "success"
        resp["filename"] = backup_name
        return resp
    finally:
        os.chdir(cwd)
def _add_linecard(data, user, id=0):
    """Create a line card, or overwrite the one with the given id (only
    permitted while it is unreferenced). Returns the saved line card."""
    logger.debug("lc name = %s, type = %s", data[NAME], data[LC_TYPE])
    if id:
        # get existing line card
        lc = LineCard.objects.get(pk=id)
        if lc.ref_count:
            raise IgniteException(ERR_LC_IN_USE)
    else:
        # create new line card
        lc = LineCard()
    lc.name = data[NAME]
    lc.lc_type = data[LC_TYPE]
    lc.lc_data = data[LC_DATA]
    lc.updated_by = user
    # tally the number of ports per speed across all port groups
    lc.lc_info = {speed: 0 for speed in PORT_SPEEDS}
    for group in data[LC_DATA][PORT_GROUPS]:
        lc.lc_info[group[SPEED]] += group[NUM_PORTS]
    lc.save()
    return lc
def get_vpc_peer_switch(self, switch):
    """Return the switch on the far end of this switch's VPC peer link.

    The peer link may have this switch as either endpoint, so both
    directions are tried. The fallback handler is narrowed to
    Link.DoesNotExist (the original used a bare ``except:``, which also
    swallowed unrelated errors), matching get_vpc_peer_ports.

    Raises:
        IgniteException: ERR_NO_VPC_PEER_SWITCH when no peer link exists.
    """
    logger.debug("Get VPC peer switch for switch %s" % switch.name)
    try:
        link = Link.objects.get(topology_id=switch.topology.id,
                                src_switch=switch.id,
                                dst_switch__tier=switch.tier,
                                link_type=VPC_PEER,
                                dummy=False)
        logger.debug("VPC Peer Switch %s" % link.dst_switch.name)
        return link.dst_switch
    except Link.DoesNotExist:
        try:
            link = Link.objects.get(topology_id=switch.topology.id,
                                    dst_switch=switch.id,
                                    src_switch__tier=switch.tier,
                                    link_type=VPC_PEER,
                                    dummy=False)
            logger.debug("VPC Peer Switch %s" % link.src_switch.name)
            return link.src_switch
        except Link.DoesNotExist:
            logger.debug("%s- %s" % (ERR_NO_VPC_PEER_SWITCH, switch.name))
            raise IgniteException("%s- %s" % (ERR_NO_VPC_PEER_SWITCH,
                                             switch.name))
def get_vpc_peer_ports(self, switch):
    """Return this switch's port list on its VPC peer link.

    Tries the switch as the source endpoint first, then as the
    destination. (The unused ``ports = []`` accumulator from the original
    has been removed.)

    Raises:
        IgniteException: ERR_NO_VPC_PEER_PORTS when no peer link exists.
    """
    logger.debug("Get VPC ports for switch %s" % switch.name)
    try:
        link = Link.objects.get(topology_id=switch.topology.id,
                                src_switch=switch.id,
                                dst_switch__tier=switch.tier,
                                link_type=VPC_PEER,
                                dummy=False)
        logger.debug("VPC ports %s" % str(link.src_ports))
        return string_to_ports(link.src_ports)
    except Link.DoesNotExist:
        try:
            link = Link.objects.get(topology_id=switch.topology.id,
                                    dst_switch=switch.id,
                                    src_switch__tier=switch.tier,
                                    link_type=VPC_PEER,
                                    dummy=False)
            logger.debug("VPC ports %s" % str(link.dst_ports))
            return string_to_ports(link.dst_ports)
        except Link.DoesNotExist:
            logger.debug("%s- %s" % (ERR_NO_VPC_PEER_PORTS, switch.name))
            raise IgniteException("%s- %s" % (ERR_NO_VPC_PEER_PORTS,
                                             switch.name))
def update_group(data, fab_id, grp_id, username=''):
    """Validate a group update payload, apply it, and return the brief
    serialized group."""
    post_ser = GroupPostSerializer(data=data)
    if not post_ser.is_valid():
        raise IgniteException(post_ser.errors)
    updated = group.update_group(post_ser.data, fab_id, grp_id, username)
    return GroupBriefSerializer(updated).data
def _delete_switch(switch):
    """Delete a switch row, its boot detail (if any), and any generated
    build artifacts on disk."""
    # capture before the row disappears
    boot_detail = switch.boot_detail
    switch_id = switch.id
    # best-effort: failing to clear the stored config must not block the
    # deletion itself
    try:
        clear_switch_config(switch_id)
    except Exception as e:
        logger.error(e)
    try:
        switch.delete()
    except ProtectedError:
        raise IgniteException(ERR_SW_IN_USE)
    if boot_detail:
        boot_detail.delete()
    # delete build files if any; missing files are fine
    for ext in ('.cfg', '.yml'):
        try:
            os.remove(os.path.join(REPO_PATH, str(switch_id) + ext))
        except OSError:
            pass
def clone_job(data, id, username=''):
    """Validate the clone payload, copy the job, and return the
    serialized job detail."""
    clone_ser = JobCloneSerializer(data=data)
    if not clone_ser.is_valid():
        raise IgniteException(clone_ser.errors)
    copied = job.clone_job(clone_ser.data, id, username)
    return JobDetailSerializer(copied).data