def handle(self, *args, **options):
    """Register this device's facility with the data portal server.

    Looks up the facility, then registers it using the provided token.
    Raises CommandError when this device owns no certificate for the
    facility, when the portal returns an HTTP error, or on any other
    network failure. If the portal reports the facility does not exist
    and we are interactive, offers to sync first and retry registering.
    """
    facility_id = options["facility"]
    noninteractive = options["noninteractive"]
    token = options["token"]
    facility = get_facility(facility_id, noninteractive)
    # register the facility
    try:
        self._register(token, facility)
    except Certificate.DoesNotExist:
        raise CommandError(
            "This device does not own a certificate for Facility: {}".format(
                facility.name
            )
        )
    # an invalid nonce/register response
    except exceptions.HTTPError as e:
        error = e.response.json()[0]
        message = error["metadata"].get("message") or e.response.text
        # handle facility not existing response from portal server
        if error["id"] == error_constants.FACILITY_DOES_NOT_EXIST:
            # if the facility does not exist on data portal, try syncing and retry registering
            if not noninteractive:
                confirm_or_exit(
                    "Facility: {} does not exist on data portal server. Would you like to initiate a syncing session?".format(
                        facility.name
                    )
                )
                call_command(
                    "sync", facility=facility_id, noninteractive=noninteractive
                )
                confirm_or_exit(
                    "Facility: {} has been synced. Would you like to retry registering?".format(
                        facility.name
                    )
                )
                self._register(token, facility)
                # BUG FIX: previously control fell through to the
                # CommandError below even after the retry succeeded;
                # return here so a successful retry exits cleanly.
                return
        # display nice error messages for other Http errors
        raise CommandError(
            "{status} Client Error: For url: {url} Reason: {reason}".format(
                status=e.response.status_code, url=e.response.url, reason=message
            )
        )
    # handle any other invalid response
    except exceptions.RequestException as e:
        raise CommandError(e)
def test_get_facility_no_facilities(self):
    """get_facility() raises a clear error when no facilities exist."""
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
    # use assertRaisesRegex instead.
    with self.assertRaisesRegex(CommandError, "no facilities"):
        utils.get_facility()
def test_get_facility_multiple_facilities_interactive(self, input_mock):
    """With several facilities present, the mocked prompt picks ours."""
    # Desired facility should be third item
    for distractor in ("a_facility", "b_facility"):
        Facility.objects.create(name=distractor)
    self.assertEqual(self.facility, utils.get_facility())
def test_get_facility_multiple_facilities_noninteractive(self):
    """Noninteractive mode cannot choose among multiple facilities."""
    Facility.objects.create(name="facility2")
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
    # use assertRaisesRegex instead.
    with self.assertRaisesRegex(CommandError, "multiple facilities"):
        utils.get_facility(noninteractive=True)
def test_get_facility_with_no_id(self):
    """Omitting facility_id falls back to the single existing facility."""
    found = utils.get_facility()
    self.assertEqual(self.facility, found)
def test_get_facility_with_non_existent_id(self):
    """An unknown facility id raises a descriptive CommandError."""
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
    # use assertRaisesRegex instead.
    with self.assertRaisesRegex(CommandError, "does not exist"):
        utils.get_facility(facility_id=uuid.uuid4().hex)
def test_get_facility_with_id(self):
    """Looking up by an explicit id returns the matching facility."""
    found = utils.get_facility(facility_id=self.facility.id)
    self.assertEqual(self.facility, found)
def handle_async(self, *args, **options):
    """Permanently delete a facility and all data belonging to it.

    Prompts for double confirmation unless --noninteractive, then
    cascade-deletes facility-related records group by group with
    progress reporting. In --strict mode a mismatch between the
    counted and deleted totals aborts with CommandError; otherwise
    it is only logged as a warning.
    """
    noninteractive = options["noninteractive"]
    strict = options["strict"]
    facility = get_facility(
        facility_id=options["facility"], noninteractive=noninteractive
    )
    dataset_id = facility.dataset_id

    logger.info(
        "Found facility {} <{}> for deletion".format(facility.id, dataset_id)
    )

    if not noninteractive:
        # ensure the user REALLY wants to do this!
        confirm_or_exit(
            "Are you sure you wish to permanently delete this facility? This will DELETE ALL DATA FOR THE FACILITY."
        )
        confirm_or_exit(
            "ARE YOU SURE? If you do this, there is no way to recover the facility data on this device."
        )

    # everything should get cascade deleted from the facility, but we'll check anyway
    # NOTE: group order matters — dependents (logs, classes, users) go
    # before the facility dataset itself.
    delete_group = GroupDeletion(
        "Main",
        groups=[
            self._get_morango_models(dataset_id),
            self._get_log_models(dataset_id),
            self._get_class_models(dataset_id),
            self._get_users(dataset_id),
            self._get_facility_dataset(dataset_id),
        ],
    )

    logger.info(
        "Proceeding with facility deletion. Deleting all data for facility <{}>".format(
            dataset_id
        )
    )

    with self._delete_context():
        total_deleted = 0

        # run the counting step
        with self.start_progress(
            total=delete_group.group_count()
        ) as update_progress:
            update_progress(increment=0, message="Counting database objects")
            total_count = delete_group.count(update_progress)

        # now run the deleting step
        with self.start_progress(total=total_count) as update_progress:
            update_progress(increment=0, message="Deleting database objects")
            count, stats = delete_group.delete(update_progress)
            total_deleted += count
            # clear related cache
            dataset_cache.clear()

        # if count doesn't match, something doesn't seem right
        if total_count != total_deleted:
            msg = "Deleted count does not match total ({} != {})".format(
                total_count, total_deleted
            )
            if strict:
                raise CommandError("{}, aborting!".format(msg))
            else:
                logger.warning(msg)

    logger.info("Deletion complete.")
def handle_async(self, *args, **options):
    """Sync facility data with the data portal or a peer Kolibri device.

    Portal mode (base URL equals the data portal URL) uses this device's
    owned full-facility certificate, pushing it to the server if needed.
    Peer mode negotiates dataset id and certificates with username and
    password. Either way, a Morango sync session then pulls and/or
    pushes the facility dataset, provisions the device, and closes.
    """
    baseurl, facility_id, chunk_size, username, password, no_push, no_pull, noninteractive = (
        options["baseurl"],
        options["facility"],
        options["chunk_size"],
        options["username"],
        options["password"],
        options["no_push"],
        options["no_pull"],
        options["noninteractive"],
    )

    PORTAL_SYNC = baseurl == DATA_PORTAL_SYNCING_BASE_URL

    # validate url that is passed in
    if not PORTAL_SYNC:
        baseurl = get_baseurl(baseurl)

    # call this in case user directly syncs without migrating database
    if not ScopeDefinition.objects.filter():
        call_command("loaddata", "scopedefinitions")

    # try to connect to server
    controller = MorangoProfileController("facilitydata")
    network_connection = controller.create_network_connection(baseurl)

    # if instance_ids are equal, this means device is trying to sync with itself, which we don't allow
    if (
        InstanceIDModel.get_or_create_current_instance()[0].id
        == network_connection.server_info["instance_id"]
    ):
        raise CommandError(
            "Device can not sync with itself. Please recheck base URL and try again."
        )

    if PORTAL_SYNC:  # do portal sync setup
        facility = get_facility(
            facility_id=facility_id, noninteractive=noninteractive
        )

        # check for the certs we own for the specific facility
        client_cert = (
            facility.dataset.get_owned_certificates()
            .filter(scope_definition_id=FULL_FACILITY)
            .first()
        )
        if not client_cert:
            raise CommandError(
                "This device does not own a certificate for Facility: {}".format(
                    facility.name
                )
            )

        # get primary partition
        scope_params = json.loads(client_cert.scope_params)
        dataset_id = scope_params["dataset_id"]

        # check if the server already has a cert for this facility
        server_certs = network_connection.get_remote_certificates(
            dataset_id, scope_def_id=FULL_FACILITY
        )

        # if necessary, push a cert up to the server
        server_cert = (
            server_certs[0]
            if server_certs
            else network_connection.push_signed_client_certificate_chain(
                local_parent_cert=client_cert,
                scope_definition_id=FULL_FACILITY,
                scope_params=scope_params,
            )
        )
    else:  # do P2P setup
        dataset_id = get_dataset_id(
            baseurl, identifier=facility_id, noninteractive=noninteractive
        )
        client_cert, server_cert, username = get_client_and_server_certs(
            username,
            password,
            dataset_id,
            network_connection,
            noninteractive=noninteractive,
        )

    self.stdout.write("Syncing has been initiated (this may take a while)...")
    sync_client = network_connection.create_sync_session(
        client_cert, server_cert, chunk_size=chunk_size
    )

    # pull from server and push our own data to server
    if not no_pull:
        sync_client.initiate_pull(Filter(dataset_id))
    if not no_push:
        sync_client.initiate_push(Filter(dataset_id))

    create_superuser_and_provision_device(
        username, dataset_id, noninteractive=noninteractive
    )

    sync_client.close_sync_session()
    self.stdout.write("Syncing has been completed.")
def handle_async(self, *args, **options):
    """Push this device's facility data to the data portal server.

    When no facility id is given, first runs a full facility sync to
    populate an empty instance; otherwise resolves the facility, runs a
    facility sync, and then pushes the facility dataset to the portal
    with progress reporting over five steps.
    """
    baseurl = options["baseurl"]
    facility_id = options["facility"]
    chunk_size = options["chunk_size"]

    # This handles the case for when we want to pull in facility data for our empty kolibri instance
    if not facility_id:
        self._fullfacilitysync(baseurl)
        # NOTE(review): execution falls through after this call —
        # presumably _fullfacilitysync exits or raises on this path, or a
        # `return` is missing here; confirm against the command's callers.

    facility = get_facility(
        facility_id=facility_id, noninteractive=options["noninteractive"]
    )

    # if url is not pointing to portal server, do P2P syncing
    self._fullfacilitysync(baseurl, facility=facility, chunk_size=chunk_size)
    # NOTE(review): as above, control continues past this call into the
    # data-portal push below unconditionally — verify this is intended.

    # data portal syncing
    self.stdout.write("Syncing has been initiated (this may take a while)...")
    controller = MorangoProfileController("facilitydata")
    with self.start_progress(total=5) as progress_update:
        try:
            network_connection = controller.create_network_connection(baseurl)
        except ConnectionError as e:
            raise CommandError(e)
        progress_update(1)

        # get client certificate
        client_cert = (
            facility.dataset.get_owned_certificates()
            .filter(scope_definition_id=FULL_FACILITY)
            .first()
        )
        if not client_cert:
            raise CommandError(
                "This device does not own a certificate for Facility: {}".format(
                    facility.name
                )
            )

        # push certificate up to portal server
        scope_params = json.loads(client_cert.scope_params)
        server_cert = network_connection.push_signed_client_certificate_chain(
            local_parent_cert=client_cert,
            scope_definition_id=FULL_FACILITY,
            scope_params=scope_params,
        )
        progress_update(1)

        # we should now be able to push our facility data
        sync_client = network_connection.create_sync_session(
            client_cert, server_cert, chunk_size=chunk_size
        )
        progress_update(1)

        sync_client.initiate_push(Filter(scope_params["dataset_id"]))
        progress_update(1)

        sync_client.close_sync_session()
        progress_update(1)
        self.stdout.write("Syncing has been completed.")
def test_get_facility_multiple_facilities_interactive(self):
    """When two facilities exist, a prompt answer of "1" picks the first."""
    utils.input = mock.MagicMock(name="input", return_value="1")
    Facility.objects.create(name="facility2")
    selected = utils.get_facility()
    self.assertEqual(self.facility, selected)
def handle_async(self, *args, **options):  # noqa C901
    """Sync facility (or single-user) data with a portal or peer server.

    Three setups: single-user sync (requires facility id and a 32-char
    user UUID; exactly one side must hold a single-user certificate),
    portal sync (uses this device's full-facility certificate, pushing
    it to the server if needed), and peer-to-peer (negotiates dataset id
    and certificates with credentials). Then pulls and/or pushes over a
    Morango sync session, provisions the device unless --no-provision,
    and records job state. UserCancelledError aborts cleanly.
    """
    (
        baseurl,
        facility_id,
        chunk_size,
        username,
        password,
        user_id,
        no_push,
        no_pull,
        noninteractive,
        no_provision,
    ) = (
        options["baseurl"],
        options["facility"],
        options["chunk_size"],
        options["username"],
        options["password"],
        options["user"],
        options["no_push"],
        options["no_pull"],
        options["noninteractive"],
        options["no_provision"],
    )

    PORTAL_SYNC = baseurl == DATA_PORTAL_SYNCING_BASE_URL

    # validate url that is passed in
    if not PORTAL_SYNC:
        baseurl = get_baseurl(baseurl)

    # call this in case user directly syncs without migrating database
    if not ScopeDefinition.objects.filter():
        call_command("loaddata", "scopedefinitions")

    dataset_cache.clear()
    dataset_cache.activate()

    # try to connect to server
    controller = MorangoProfileController(PROFILE_FACILITY_DATA)
    network_connection = controller.create_network_connection(baseurl)

    # if instance_ids are equal, this means device is trying to sync with itself, which we don't allow
    if (
        InstanceIDModel.get_or_create_current_instance()[0].id
        == network_connection.server_info["instance_id"]
    ):
        raise CommandError(
            "Device can not sync with itself. Please recheck base URL and try again."
        )

    if user_id:  # it's a single-user sync
        if not facility_id:
            raise CommandError(
                "Facility ID must be specified in order to do single-user syncing"
            )
        if not re.match("[a-f0-9]{32}", user_id):
            raise CommandError("User ID must be a 32-character UUID (no dashes)")

        dataset_id = get_dataset_id(
            baseurl, identifier=facility_id, noninteractive=True
        )

        client_cert, server_cert, username = get_client_and_server_certs(
            username,
            password,
            dataset_id,
            network_connection,
            user_id=user_id,
            noninteractive=noninteractive,
        )

        # exactly one side must have the single-user cert, the other the
        # full-facility cert — identical scopes means a misconfiguration
        scopes = [
            client_cert.scope_definition_id,
            server_cert.scope_definition_id,
        ]
        if len(set(scopes)) != 2:
            raise CommandError(
                "To do a single-user sync, one device must have a single-user certificate, and the other a full-facility certificate."
            )
    elif PORTAL_SYNC:  # do portal sync setup
        facility = get_facility(
            facility_id=facility_id, noninteractive=noninteractive
        )

        # check for the certs we own for the specific facility
        client_cert = (
            facility.dataset.get_owned_certificates()
            .filter(scope_definition_id=ScopeDefinitions.FULL_FACILITY)
            .first()
        )
        if not client_cert:
            raise CommandError(
                "This device does not own a certificate for Facility: {}".format(
                    facility.name
                )
            )

        # get primary partition
        scope_params = json.loads(client_cert.scope_params)
        dataset_id = scope_params["dataset_id"]

        # check if the server already has a cert for this facility
        server_certs = network_connection.get_remote_certificates(
            dataset_id, scope_def_id=ScopeDefinitions.FULL_FACILITY
        )

        # if necessary, push a cert up to the server
        server_cert = (
            server_certs[0]
            if server_certs
            else network_connection.push_signed_client_certificate_chain(
                local_parent_cert=client_cert,
                scope_definition_id=ScopeDefinitions.FULL_FACILITY,
                scope_params=scope_params,
            )
        )
    else:  # do P2P setup
        dataset_id = get_dataset_id(
            baseurl, identifier=facility_id, noninteractive=noninteractive
        )

        client_cert, server_cert, username = get_client_and_server_certs(
            username,
            password,
            dataset_id,
            network_connection,
            noninteractive=noninteractive,
        )

    logger.info("Syncing has been initiated (this may take a while)...")
    sync_session_client = network_connection.create_sync_session(
        client_cert, server_cert, chunk_size=chunk_size
    )

    try:
        # pull from server
        if not no_pull:
            self._handle_pull(
                sync_session_client,
                noninteractive,
                dataset_id,
                client_cert,
                server_cert,
                user_id=user_id,
            )
        # and push our own data to server
        if not no_push:
            self._handle_push(
                sync_session_client,
                noninteractive,
                dataset_id,
                client_cert,
                server_cert,
                user_id=user_id,
            )

        if not no_provision:
            with self._lock():
                if user_id:
                    provision_single_user_device(user_id)
                else:
                    create_superuser_and_provision_device(
                        username, dataset_id, noninteractive=noninteractive
                    )
    except UserCancelledError:
        # record cancellation on the async job (when running as a job)
        # and bail out without closing/deactivating as on the happy path
        if self.job:
            self.job.extra_metadata.update(sync_state=State.CANCELLED)
            self.job.save_meta()
        logger.info("Syncing has been cancelled.")
        return

    network_connection.close()

    if self.job:
        self.job.extra_metadata.update(sync_state=State.COMPLETED)
        self.job.save_meta()

    dataset_cache.deactivate()
    logger.info("Syncing has been completed.")