def initialize_session(self, request, callback, **callback_kwargs):
    """Start an authorization code flow authentication session.

    Creates a RealmAuthenticationSession for this realm, registers it in the
    request session (CSRF protection) and returns the IdP authorization URL.
    """
    config = self.instance.config
    if config.get("client_secret"):
        # confidential client: the client secret is used, no PKCE required
        code_challenge = None
        backend_state = None
    else:
        # public client: protect the code exchange with PKCE
        code_challenge, code_verifier = generate_pkce_codes()
        backend_state = {"code_verifier": code_verifier}
    from realms.models import RealmAuthenticationSession
    ras = RealmAuthenticationSession(
        realm=self.instance,
        backend_state=backend_state,
        callback=callback,
        callback_kwargs=remove_null_character(callback_kwargs),
    )
    ras.save()
    # add state to session to prevent CSRF
    self._add_ras_to_session(request, ras)
    return build_authorization_code_flow_url(
        config["discovery_url"],
        config["client_id"],
        self.ac_redirect_uri(),
        config["extra_scopes"],
        str(ras.pk),
        code_challenge,
    )
def do_node_post(self):
    """Process distributed query results posted by an osquery node.

    Updates the DistributedQueryMachine statuses, stores the result rows in
    batches, and forwards compliance check results to the status aggregator.
    Returns an empty dict (the osquery node expects an empty JSON response).
    """
    results = self.data.get("queries", {})
    statuses = self.data.get("statuses", {})
    messages = self.data.get("messages", {})
    dqm_pk_set = set(chain(results.keys(), statuses.keys(), messages.keys()))
    if not dqm_pk_set:
        # nothing posted for any distributed query machine
        return {}
    dqm_qs = (DistributedQueryMachine.objects
              .select_related("distributed_query__query__compliance_check")
              .filter(pk__in=dqm_pk_set))
    dqm_cache = {str(dqm.pk): dqm for dqm in dqm_qs}
    # update distributed query machines
    for dqm_pk, dqm in dqm_cache.items():
        status = statuses.get(dqm_pk)
        if status is None:
            logger.warning("Missing status for DistributedQueryMachine %s", dqm_pk)
            status = 999  # TODO: better?
        dqm.status = status
        dqm.error_message = messages.get(dqm_pk)
        dqm.save()
    # save results
    # lazy generator: rows are only materialized batch by batch below
    dq_results = (
        DistributedQueryResult(
            distributed_query=dqm.distributed_query,
            serial_number=self.machine.serial_number,
            row=remove_null_character(row),
        )
        for dqm_pk, dqm in dqm_cache.items()
        for row in results.get(dqm_pk, [])
    )
    while True:
        batch = list(islice(dq_results, self.batch_size))
        if not batch:
            break
        DistributedQueryResult.objects.bulk_create(batch, self.batch_size)
    # process compliance checks
    cc_status_agg = ComplianceCheckStatusAggregator(self.machine.serial_number)
    status_time = datetime.utcnow()  # TODO: how to get a better time? add ztl_status_time = now() to the query?
    for dqm_pk, dqm in dqm_cache.items():
        distributed_query = dqm.distributed_query
        query = distributed_query.query
        if not query or not query.compliance_check:
            # not linked to a compliance check, skip
            continue
        cc_status_agg.add_result(
            query.pk,
            distributed_query.query_version,
            status_time,
            results.get(dqm_pk, []),
            distributed_query.pk,
        )
    cc_status_agg.commit_and_post_events()
    return {}
def store(self, event):
    """Serialize an event and write it to the configured syslog socket."""
    self.wait_and_configure_if_necessary()
    if not isinstance(event, dict):
        event = event.serialize()
    serialized = json.dumps(remove_null_character(event))
    if self.prepend_ecc:
        serialized = "@ecc: " + serialized
    payload = self.priority + serialized.encode("utf-8")
    if self.socket_protocol == socket.SOCK_STREAM:
        # stream framing: each message is terminated by a null byte
        self.socket.sendall(payload + b'\x00')
    else:
        # datagram: one message per send
        self.socket.send(payload)
def initialize_session(self, request, callback, save_password_hash=False, **callback_kwargs):
    """Create an LDAP RealmAuthenticationSession and return its login URL."""
    from realms.models import RealmAuthenticationSession
    session = RealmAuthenticationSession(
        realm=self.instance,
        save_password_hash=save_password_hash,
        callback=callback,
        callback_kwargs=remove_null_character(callback_kwargs),
    )
    session.save()
    return reverse("realms:ldap_login", args=(session.realm.pk, session.pk))
def test_all(self):
    """remove_null_character strips \\u0000 from strings nested in containers."""
    payload = {
        "un": "1\u0000",
        "deux": 2,
        "trois": {1, 2, 3},
        4: [1, "de\u0000ux", 3],
        "cinq": [{5: True}],
    }
    expected = {
        "un": "1",
        "deux": 2,
        "trois": {1, 2, 3},
        4: [1, "deux", 3],
        "cinq": [{5: True}],
    }
    self.assertEqual(remove_null_character(payload), expected)
def initialize_session(self, request, callback, **callback_kwargs):
    """Create a SAML RealmAuthenticationSession and return the IdP redirect URL."""
    from realms.models import RealmAuthenticationSession
    ras = RealmAuthenticationSession(
        realm=self.instance,
        callback=callback,
        callback_kwargs=remove_null_character(callback_kwargs),
    )
    ras.save()
    # can throw error
    # like saml2.s_utils.UnsupportedBinding: urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect
    # if the IdP configuration and thus the metadata is wrong, but these should be caught at
    # creation time in the realm form.
    saml2_client = self.get_saml2_client()
    request_id, request_info = saml2_client.prepare_for_authenticate(
        relay_state=str(ras.pk)
    )
    # save request ID in auth session so the SAML response can be matched later
    ras.backend_state = {"request_id": request_id}
    ras.save()
    return dict(request_info["headers"])["Location"]
def do_post(self, data):
    """Process a munki postflight POST.

    Commits the machine snapshot, updates the machine managed installs and
    munki state, and posts the munki request and report events.

    Args:
        data: the deserialized postflight payload.

    Returns:
        An empty dict (the munki postflight script expects an empty JSON response).

    Raises:
        RuntimeError: if the machine snapshot could not be committed.
    """
    # lock enrolled machine
    # BUG FIX: querysets are lazy — the original code never evaluated the
    # .filter() queryset, so the SELECT ... FOR UPDATE was never executed
    # and the row lock was never actually acquired. list() forces the query.
    list(
        EnrolledMachine.objects.select_for_update().filter(
            serial_number=self.machine_serial_number)
    )
    # commit machine snapshot
    ms_tree = data['machine_snapshot']
    ms_tree['source'] = {
        'module': 'zentral.contrib.munki',
        'name': 'Munki'
    }
    ms_tree['reference'] = ms_tree['serial_number']
    ms_tree['public_ip_address'] = self.ip
    if self.business_unit:
        ms_tree['business_unit'] = self.business_unit.serialize()
    prepare_ms_tree_certificates(ms_tree)
    extra_facts = ms_tree.pop("extra_facts", None)
    if isinstance(extra_facts, dict):
        # strip null characters before storing
        ms_tree["extra_facts"] = remove_null_character(extra_facts)
    ms = commit_machine_snapshot_and_trigger_events(ms_tree)
    if not ms:
        raise RuntimeError("Could not commit machine snapshot")
    # delete all managed installs if last seen report not found
    # which is a good indicator that the machine has been wiped
    last_seen_report_found = data.get("last_seen_report_found")
    if last_seen_report_found is not None and not last_seen_report_found:
        ManagedInstall.objects.filter(
            machine_serial_number=self.machine_serial_number).delete()
    # prepare reports, sorted by (start_time, end_time)
    reports = []
    report_count = event_count = 0
    for r in data.pop('reports'):
        report_count += 1
        event_count += len(r.get("events", []))
        reports.append((parser.parse(r.pop('start_time')),
                        parser.parse(r.pop('end_time')),
                        r))
    reports.sort()
    munki_request_event_kwargs = {
        "request_type": "postflight",
        "enrollment": {"pk": self.enrollment.pk},
        "report_count": report_count,
        "event_count": event_count,
    }
    if last_seen_report_found is not None:
        munki_request_event_kwargs["last_seen_report_found"] = last_seen_report_found
    # update machine managed installs
    managed_installs = data.get("managed_installs")
    if managed_installs is not None:
        munki_request_event_kwargs["managed_installs"] = True
        munki_request_event_kwargs["managed_install_count"] = len(managed_installs)
        # update managed installs using the complete list
        incident_updates = apply_managed_installs(
            self.machine_serial_number,
            managed_installs,
            self.enrollment.configuration
        )
        # incident updates are attached to the munki request event
        if incident_updates:
            munki_request_event_kwargs["incident_updates"] = incident_updates
    else:
        munki_request_event_kwargs["managed_installs"] = False
        # update managed installs using the install and removal events in the reports
        for _, _, report in reports:
            for created_at, event in report.get("events", []):
                # time
                event_time = parser.parse(created_at)
                if is_aware(event_time):
                    event_time = make_naive(event_time)
                for incident_update in update_managed_install_with_event(
                        self.machine_serial_number,
                        event,
                        event_time,
                        self.enrollment.configuration):
                    # incident updates are attached to each munki event
                    event.setdefault("incident_updates", []).append(incident_update)
    # update machine munki state
    update_dict = {'user_agent': self.user_agent,
                   'ip': self.ip}
    if managed_installs is not None:
        update_dict["last_managed_installs_sync"] = datetime.utcnow()
    if reports:
        # most recent report
        start_time, end_time, report = reports[-1]
        update_dict.update({
            'munki_version': report.get('munki_version', None),
            'sha1sum': report['sha1sum'],
            'run_type': report['run_type'],
            'start_time': start_time,
            'end_time': end_time
        })
    MunkiState.objects.update_or_create(
        machine_serial_number=self.machine_serial_number,
        defaults=update_dict)
    # events
    post_munki_request_event(self.machine_serial_number,
                             self.user_agent, self.ip,
                             **munki_request_event_kwargs)
    post_munki_events(self.machine_serial_number,
                      self.user_agent, self.ip,
                      (r for _, _, r in reports))
    return {}