def _pad_batch_records(self, batch_records, is_infer): """ Padding batch records and construct model's inputs. """ batch = {} batch_token_ids = [record.token_ids for record in batch_records] batch_type_ids = [record.type_ids for record in batch_records] batch_pos_ids = [record.pos_ids for record in batch_records] if self.use_role: batch_role_ids = [record.role_ids for record in batch_records] batch_tgt_start_idx = [record.tgt_start_idx for record in batch_records] batch_label = [record.label for record in batch_records] if self.attention_style == "unidirectional": batch["token_ids"] = pad_batch_data(batch_token_ids, pad_id=self.pad_id) batch["type_ids"] = pad_batch_data(batch_type_ids, pad_id=self.pad_id) batch["pos_ids"] = pad_batch_data(batch_pos_ids, pad_id=self.pad_id) if self.use_role: batch["role_ids"] = pad_batch_data(batch_role_ids, pad_id=self.pad_id) tgt_label, tgt_pos, label_pos = mask( batch_tokens=batch_token_ids, vocab_size=self.vocab_size, bos_id=self.bos_id, sent_b_starts=batch_tgt_start_idx, labels=batch_label, is_unidirectional=True) attention_mask = self._gen_self_attn_mask(batch_token_ids, batch_tgt_start_idx) else: batch_mask_token_ids, tgt_label, tgt_pos, label_pos = mask( batch_tokens=batch_token_ids, vocab_size=self.vocab_size, bos_id=self.bos_id, eos_id=self.eos_id, mask_id=self.mask_id, sent_b_starts=batch_tgt_start_idx, labels=batch_label, is_unidirectional=False) if not is_infer: batch_token_ids = batch_mask_token_ids batch["token_ids"] = pad_batch_data(batch_token_ids, pad_id=self.pad_id) batch["type_ids"] = pad_batch_data(batch_type_ids, pad_id=self.pad_id) batch["pos_ids"] = pad_batch_data(batch_pos_ids, pad_id=self.pad_id) if self.use_role: batch["role_ids"] = pad_batch_data(batch_role_ids, pad_id=self.pad_id) attention_mask = self._gen_self_attn_mask(batch_token_ids, is_unidirectional=False) batch["attention_mask"] = attention_mask batch["label_pos"] = label_pos if not is_infer: batch_label = np.array(batch_label).astype("int64").reshape([-1, 1]) batch["label"] = batch_label batch["tgt_label"] = tgt_label batch["tgt_pos"] = tgt_pos batch_data_id = [record.data_id for record in batch_records] batch["data_id"] = np.array(batch_data_id).astype("int64").reshape([-1, 1]) return batch
def _pad_batch_records(self, batch_records, is_infer): """ Padding batch records and construct model's inputs. """ batch = {} batch_token_ids = [record.token_ids for record in batch_records] batch_type_ids = [record.type_ids for record in batch_records] batch_pos_ids = [record.pos_ids for record in batch_records] if self.use_role: batch_role_ids = [record.role_ids for record in batch_records] batch_tgt_start_idx = [ record.tgt_start_idx for record in batch_records ] batch_size = len(batch_token_ids) # padding batch["token_ids"] = pad_batch_data(batch_token_ids, pad_id=self.pad_id) batch["type_ids"] = pad_batch_data(batch_type_ids, pad_id=self.pad_id) batch["pos_ids"] = pad_batch_data(batch_pos_ids, pad_id=self.pad_id) if self.use_role: batch["role_ids"] = pad_batch_data(batch_role_ids, pad_id=self.pad_id) batch["generation_mask"] = self._gen_self_attn_mask( batch_token_ids, batch_tgt_start_idx=batch_tgt_start_idx) if is_infer: tgt_ids = np.array([[[self.bos_id]]] * len(batch_token_ids), dtype="int64") if self.continuous_position: tgt_pos = np.array(batch_tgt_start_idx, dtype="int64") else: tgt_pos = np.zeros_like(batch_tgt_start_idx, dtype="int64") tgt_pos = tgt_pos.reshape(-1, 1, 1) batch["init_score"] = np.zeros_like(tgt_ids, dtype="float32").reshape( -1, 1).tolist() batch["tgt_ids"] = tgt_ids.tolist() batch["tgt_pos"] = tgt_pos.tolist() batch["tgt_generation_mask"] = batch[ "generation_mask"][:, 0:1, :].astype("float32") else: mask_return_list = mask(batch_tokens=batch_token_ids, vocab_size=self.vocab_size, sent_b_starts=batch_tgt_start_idx, is_unidirectional=True) batch["tgt_label"] = mask_return_list[0] batch["tgt_pos"] = mask_return_list[1] batch_data_id = [record.data_id for record in batch_records] batch["data_id"] = np.array(batch_data_id).astype("int64").reshape( [-1, 1]) return batch
def delete_config(namespace: str, qualifier = "") -> object: enforce_authorization(namespace) event_id = str(uuid.uuid4()) record_gateway_event(event_id, 'delete', 'received', namespace) log = app.logger outFolder = namespace tempFolder = "%s/%s/%s" % ('/tmp', uuid.uuid4(), outFolder) os.makedirs (tempFolder, exist_ok=False) with open("%s/%s" % (tempFolder, 'empty.yaml'), 'w') as file: file.write("") selectTag = "ns.%s" % namespace log.debug("ST = %s" % selectTag) if qualifier is not None and qualifier != "": log.debug("What is qual? %s" % qualifier) selectTag = "ns.%s.%s" % (namespace, qualifier) # Call the 'deck' command cmd = "sync" log.info("[%s] %s action using %s" % (namespace, cmd, selectTag)) args = [ "deck", cmd, "--config", "/tmp/deck.yaml", "--skip-consumers", "--select-tag", selectTag, "--state", tempFolder ] log.debug("[%s] Running %s" % (namespace, args)) deck_run = Popen(args, stdout=PIPE, stderr=STDOUT) out, err = deck_run.communicate() if deck_run.returncode != 0: cleanup (tempFolder) log.warn("%s - %s" % (namespace, out.decode('utf-8'))) abort_early(event_id, 'delete', namespace, jsonify(error="Sync Failed.", results=mask(out.decode('utf-8'))) ) elif cmd == "sync": try: route_count = prepare_apply_routes (namespace, selectTag, is_host_transform_enabled(), tempFolder) log.debug("%s - Prepared %d routes" % (namespace, route_count)) if route_count > 0: apply_routes (tempFolder) log.debug("%s - Applied %d routes" % (namespace, route_count)) route_count = prepare_delete_routes (namespace, selectTag, tempFolder) log.debug("%s - Prepared %d deletions" % (namespace, route_count)) if route_count > 0: delete_routes (tempFolder) # create Network Security Policies (nsp) for any upstream that # has the format: <name>.<ocp_ns>.svc log.debug("%s - Update NSPs" % (namespace)) ocp_ns_list = get_ocp_service_namespaces (tempFolder) for ocp_ns in ocp_ns_list: if check_nsp (namespace, ocp_ns) is False: apply_nsp (namespace, ocp_ns, tempFolder) # ok all looks good, so update a secret containing the original submitted request log.debug("%s - Update Original Config" % (namespace)) write_submitted_config ("", tempFolder) prep_and_apply_secret (namespace, selectTag, tempFolder) log.debug("%s - Updated Original Config" % (namespace)) except HTTPException as ex: traceback.print_exc() log.error("Error updating custom routes, nsps and secrets. %s" % ex) abort_early(event_id, 'delete', namespace, jsonify(error="Partially failed.") ) except: traceback.print_exc() log.error("Error updating custom routes, nsps and secrets. %s" % sys.exc_info()[0]) abort_early(event_id, 'delete', namespace, jsonify(error="Partially failed.") ) cleanup (tempFolder) log.debug("[%s] The exit code was: %d" % (namespace, deck_run.returncode)) message = "Sync successful." if cmd == 'diff': message = "Dry-run. No changes applied." record_gateway_event(event_id, 'delete', 'completed', namespace) return make_response('', http.HTTPStatus.NO_CONTENT)
def write_config(namespace: str) -> object:
    """
    (Over)write the gateway configuration for a namespace.
    :return: JSON of success message or error message
    """
    enforce_authorization(namespace)

    event_id = str(uuid.uuid4())
    record_gateway_event(event_id, 'publish', 'received', namespace)

    log = app.logger

    outFolder = namespace

    # Build a list of existing hosts that are outside this namespace.
    # They become reserved and any conflict will return an error.
    reserved_hosts = []
    all_routes = get_routes()
    tag_match = "ns.%s" % namespace
    for route in all_routes:
        if tag_match not in route['tags'] and 'hosts' in route:
            for host in route['hosts']:
                reserved_hosts.append(transform_host(host))
    reserved_hosts = list(set(reserved_hosts))

    ns_svc = NamespaceService()
    ns_attributes = ns_svc.get_namespace_attributes(namespace)

    dfile = None

    if 'configFile' in request.files:
        log.debug("[%s] %s", namespace, request.files['configFile'])
        dfile = request.files['configFile']
        dry_run = request.values['dryRun']
    elif request.content_type.startswith("application/json"):
        dfile = request.json['configFile']
        dry_run = request.json['dryRun']
    else:
        log.error("Missing input")
        log.error("%s", request.get_data())
        log.error(request.form)
        log.error(request.content_type)
        log.error(request.headers)
        abort_early(event_id, 'publish', namespace, jsonify(error="Missing input"))

    tempFolder = "%s/%s/%s" % ('/tmp', uuid.uuid4(), outFolder)
    os.makedirs(tempFolder, exist_ok=False)

    # dfile.save("%s/%s" % (tempFolder, 'config.yaml'))
    # log.debug("Saved to %s" % tempFolder)

    yaml_documents_iter = yaml.load_all(dfile, Loader=yaml.FullLoader)
    yaml_documents = []
    for doc in yaml_documents_iter:
        yaml_documents.append(doc)

    selectTag = "ns.%s" % namespace
    ns_qualifier = None

    orig_config = prep_submitted_config(yaml_documents)

    update_routes_flag = False

    if len(yaml_documents) == 0:
        update_routes_flag = True

    for index, gw_config in enumerate(yaml_documents):
        log.info("[%s] Parsing file %s" % (namespace, index))

        if gw_config is None:
            continue

        #######################
        # Enrichments
        #######################

        # Transform route hosts if in a non-prod environment (HOST_TRANSFORM_ENABLED)
        host_transformation(namespace, gw_config)

        # If there is a tag with a pipeline qualifier (i.e. ns.<namespace>.dev)
        # then automatically add the tag: ns.<namespace>
        tags_transformation(namespace, gw_config)

        # Enrich the rate-limiting plugin with the appropriate Redis details
        plugins_transformations(namespace, gw_config)

        with open("%s/%s" % (tempFolder, 'config-%02d.yaml' % index), 'w') as file:
            yaml.dump(gw_config, file)

        #######################
        # Validations
        #######################

        # Validate that every object is tagged with the namespace
        try:
            validate_tags(gw_config, selectTag)
        except Exception as ex:
            traceback.print_exc()
            log.error("%s - %s" % (namespace, " Tag Validation Errors: %s" % ex))
            abort_early(event_id, 'publish', namespace,
                        jsonify(error="Validation Errors:\n%s" % ex))

        # Validate that hosts are valid
        try:
            validate_hosts(gw_config, reserved_hosts, ns_attributes)
        except Exception as ex:
            traceback.print_exc()
            log.error("%s - %s" % (namespace, " Host Validation Errors: %s" % ex))
            abort_early(event_id, 'publish', namespace,
                        jsonify(error="Validation Errors:\n%s" % ex))

        # Validate upstream URLs are valid
        try:
            protected_kube_namespaces = json.loads(app.config['protectedKubeNamespaces'])
            validate_upstream(gw_config, ns_attributes, protected_kube_namespaces)
        except Exception as ex:
            traceback.print_exc()
            log.error("%s - %s" % (namespace, " Upstream Validation Errors: %s" % ex))
            abort_early(event_id, 'publish', namespace,
                        jsonify(error="Validation Errors:\n%s" % ex))

        # Validation #3
        # Validate that certain plugins are configured (such as the gwa_gov_endpoint) at the right level

        # Validate based on DNS 952
        nsq = traverse_get_ns_qualifier(gw_config, selectTag)
        if nsq is not None:
            if ns_qualifier is not None and nsq != ns_qualifier:
                abort_early(event_id, 'publish', namespace,
                            jsonify(error="Validation Errors:\n%s" %
                                    ("Conflicting ns qualifiers (%s != %s)" % (ns_qualifier, nsq))))
            ns_qualifier = nsq
            log.info("[%s] CHANGING ns_qualifier %s" % (namespace, ns_qualifier))

        if update_routes_check(gw_config):
            update_routes_flag = True

    if ns_qualifier is not None:
        selectTag = ns_qualifier

    # Call the 'deck' command
    cmd = "sync"
    if dry_run == 'true' or dry_run is True:
        cmd = "diff"

    log.info("[%s] %s action using %s" % (namespace, cmd, selectTag))
    args = [
        "deck", cmd, "--config", "/tmp/deck.yaml",
        "--skip-consumers", "--select-tag", selectTag, "--state", tempFolder
    ]
    log.debug("[%s] Running %s" % (namespace, args))
    deck_run = Popen(args, stdout=PIPE, stderr=STDOUT)
    out, err = deck_run.communicate()
    if deck_run.returncode != 0:
        cleanup(tempFolder)
        log.warn("[%s] - %s" % (namespace, out.decode('utf-8')))
        abort_early(event_id, 'publish', namespace,
                    jsonify(error="Sync Failed.", results=mask(out.decode('utf-8'))))
    elif cmd == "sync":
        try:
            if update_routes_flag:
                route_count = prepare_apply_routes(namespace, selectTag,
                                                   is_host_transform_enabled(), tempFolder)
                log.debug("[%s] - Prepared %d routes" % (namespace, route_count))
                if route_count > 0:
                    apply_routes(tempFolder)
                    log.debug("[%s] - Applied %d routes" % (namespace, route_count))

                route_count = prepare_delete_routes(namespace, selectTag, tempFolder)
                log.debug("[%s] - Prepared %d deletions" % (namespace, route_count))
                if route_count > 0:
                    delete_routes(tempFolder)

            # create Network Security Policies (nsp) for any upstream that
            # has the format: <name>.<ocp_ns>.svc
            if should_we_apply_nsp_policies():
                log.debug("[%s] - Update NSPs" % namespace)
                ocp_ns_list = get_ocp_service_namespaces(tempFolder)
                for ocp_ns in ocp_ns_list:
                    if check_nsp(namespace, ocp_ns) is False:
                        apply_nsp(namespace, ocp_ns, tempFolder)

            # ok all looks good, so update a secret containing the original submitted request
            log.debug("[%s] - Update Original Config" % namespace)
            write_submitted_config(orig_config, tempFolder)
            prep_and_apply_secret(namespace, selectTag, tempFolder)
            log.debug("[%s] - Updated Original Config" % namespace)
        except HTTPException as ex:
            traceback.print_exc()
            log.error("[%s] Error updating custom routes, nsps and secrets. %s" % (namespace, ex))
            abort_early(event_id, 'publish', namespace, jsonify(error="Partially failed."))
        except:
            traceback.print_exc()
            log.error("[%s] Error updating custom routes, nsps and secrets. %s" %
                      (namespace, sys.exc_info()[0]))
            abort_early(event_id, 'publish', namespace, jsonify(error="Partially failed."))

    cleanup(tempFolder)

    log.debug("[%s] The exit code was: %d" % (namespace, deck_run.returncode))

    message = "Sync successful."
    if cmd == 'diff':
        message = "Dry-run. No changes applied."

    record_gateway_event(event_id, 'publish', 'completed', namespace)

    return make_response(jsonify(message=message, results=mask(out.decode('utf-8'))))
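
# --- Illustrative usage only: one way a client might call the publish endpoint
# --- backed by write_config. The URL path, port and Authorization header are
# --- hypothetical assumptions; the multipart 'configFile' upload and 'dryRun'
# --- form field match what write_config reads from the request.
import requests

def sketch_publish(namespace, config_path, token, dry_run=True):
    # hypothetical route; adjust to however write_config is actually mounted
    url = "https://gwa.example.com/v2/namespaces/%s/gateway" % namespace
    with open(config_path, "rb") as f:
        resp = requests.put(
            url,
            headers={"Authorization": "Bearer %s" % token},
            files={"configFile": f},
            data={"dryRun": "true" if dry_run else "false"})
    resp.raise_for_status()
    return resp.json()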