def testUniqueIndex(self):
    """Verify the unique index on 'key' exists, then deliberately break it
    (non-unique index + duplicate documents) and confirm that reconnecting
    the model restores the unique index and removes the duplicates.
    """
    settingModel = Setting()
    coll = settingModel.collection
    indices = coll.index_information()
    # Make sure we have just one index on key and that it specifies that it
    # is unique
    self.assertTrue(any(indices[index]['key'][0][0] == 'key' and
                        indices[index].get('unique') for index in indices))
    self.assertFalse(any(indices[index]['key'][0][0] == 'key' and
                         not indices[index].get('unique') for index in indices))
    # Delete that index, create a non-unique index, and make some duplicate
    # settings so that we can test that this will be corrected.
    coll.drop_index(next(index for index in indices
                         if indices[index]['key'][0][0] == 'key'))
    coll.create_index('key')
    for val in range(3, 8):
        coll.insert_one({'key': 'duplicate', 'value': val})
    # Check that we've broken things
    indices = coll.index_information()
    # get() should return one of the duplicates (value >= 3) while five
    # duplicate documents exist.
    self.assertGreaterEqual(settingModel.get('duplicate'), 3)
    self.assertEqual(settingModel.find({'key': 'duplicate'}).count(), 5)
    self.assertFalse(any(indices[index]['key'][0][0] == 'key' and
                         indices[index].get('unique') for index in indices))
    self.assertTrue(any(indices[index]['key'][0][0] == 'key' and
                        not indices[index].get('unique') for index in indices))
    # Reconnecting the model should fix the issues we just created
    settingModel.reconnect()
    indices = coll.index_information()
    self.assertTrue(any(indices[index]['key'][0][0] == 'key' and
                        indices[index].get('unique') for index in indices))
    self.assertFalse(any(indices[index]['key'][0][0] == 'key' and
                         not indices[index].get('unique') for index in indices))
    # Only a single deduplicated document should survive the reconnect.
    self.assertEqual(settingModel.get('duplicate'), 3)
    self.assertEqual(settingModel.find({'key': 'duplicate'}).count(), 1)
def getCollectionCreationPolicyAccess(self):
    """Resolve the collection-creation policy setting into access entries.

    Expands the stored user/group id lists into dicts enriched with
    display fields; ids that no longer resolve to a document are dropped.
    """
    policy = Setting().get('core.collection_create_policy')
    access = {
        'users': [{'id': uid} for uid in policy.get('users', [])],
        'groups': [{'id': gid} for gid in policy.get('groups', [])]
    }
    # Iterate over shallow copies so stale entries can be removed in place.
    for entry in list(access['users']):
        doc = User().load(entry['id'], force=True,
                          fields=['firstName', 'lastName', 'login'])
        if doc is None:
            access['users'].remove(entry)
            continue
        entry['login'] = doc['login']
        entry['name'] = ' '.join((doc['firstName'], doc['lastName']))
    for entry in list(access['groups']):
        doc = Group().load(entry['id'], force=True,
                           fields=['name', 'description'])
        if doc is None:
            access['groups'].remove(entry)
            continue
        entry['name'] = doc['name']
        entry['description'] = doc['description']
    return access
def testSettingsCache(db, enabledCache): setting = Setting() # 'foo' should be cached as the brand name setting.set(SettingKey.BRAND_NAME, 'foo') # change the brand name bypassing the cache via mongo returnedSetting = setting.findOne({'key': SettingKey.BRAND_NAME}) returnedSetting['value'] = 'bar' # verify the cache still gives us the old brand name assert setting.get(SettingKey.BRAND_NAME) == 'foo' # change the brand name through .set (which updates the cache) setting.set(SettingKey.BRAND_NAME, 'bar') # verify retrieving gives us the new value with unittest.mock.patch.object(setting, 'findOne') as findOneMock: assert setting.get(SettingKey.BRAND_NAME) == 'bar' # findOne shouldn't be called since the cache is returning the setting findOneMock.assert_not_called() # unset the setting, invalidating the cache setting.unset(SettingKey.BRAND_NAME) # verify the database needs to be accessed to retrieve the setting now with unittest.mock.patch.object(setting, 'findOne') as findOneMock: setting.get(SettingKey.BRAND_NAME) findOneMock.assert_called_once()
def load(info):
    """Plugin entry point: start the WT WebDAV servers and wire up events.

    Starts DAV servers for home, tale, and (optionally) run directories,
    rebinds user/tale lifecycle events to the plugin's handlers, and mounts
    the homedirpass REST resource.

    :param info: the plugin load info dict; ``info['apiRoot']`` is extended.
    """
    setDefaults()
    settings = Setting()
    homeDirsRoot = settings.get(PluginSettings.HOME_DIRS_ROOT)
    logger.info('WT Home Dirs root: %s' % homeDirsRoot)
    startDAVServer(homeDirsRoot, HomeDirectoryInitializer, HomeAuthorizer,
                   HomePathMapper())
    taleDirsRoot = settings.get(PluginSettings.TALE_DIRS_ROOT)
    logger.info('WT Tale Dirs root: %s' % taleDirsRoot)
    startDAVServer(taleDirsRoot, TaleDirectoryInitializer, TaleAuthorizer,
                   TalePathMapper())
    runsDirsRoot = settings.get(PluginSettings.RUNS_DIRS_ROOT)
    # The runs DAV server is optional: started only when a root is configured.
    if runsDirsRoot:
        logger.info('WT Runs Dirs root: %s' % runsDirsRoot)
        startDAVServer(runsDirsRoot, RunsDirectoryInitializer, RunsAuthorizer,
                       RunsPathMapper())
    # Replace core's default-folder creation with the home-folder mapping.
    events.unbind('model.user.save.created',
                  CoreEventHandler.USER_DEFAULT_FOLDERS)
    events.bind('model.user.save.created', 'wt_home_dirs',
                setHomeFolderMapping)
    events.bind('model.tale.save.created', 'wt_home_dirs',
                setTaleFolderMapping)
    events.bind('model.tale.remove', 'wt_home_dirs', deleteWorkspace)
    hdp = Homedirpass()
    info['apiRoot'].homedirpass = hdp
    info['apiRoot'].homedirpass.route('GET', ('generate',),
                                      hdp.generatePassword)
    info['apiRoot'].homedirpass.route('PUT', ('set',), hdp.setPassword)
    Tale().exposeFields(level=AccessType.READ, fields={"workspaceId"})
def __init__(self, templatePath=None):
    """Initialize the webroot view with template variables from settings.

    Seeds ``self.vars`` from core settings and binds setting save/remove
    events so the rendered values stay current.

    :param templatePath: optional mako template path; defaults to the
        packaged ``utility/webroot.mako``.
    """
    if not templatePath:
        templatePath = os.path.join(constants.PACKAGE_DIR, 'utility',
                                    'webroot.mako')
    super(Webroot, self).__init__(templatePath)
    settings = Setting()
    self.vars = {
        'plugins': [],
        'apiRoot': '',
        'staticRoot': '',
        # 'title' is deprecated, use brandName instead
        'title': 'Girder',
        'brandName': settings.get(SettingKey.BRAND_NAME),
        'bannerColor': settings.get(SettingKey.BANNER_COLOR),
        'contactEmail': settings.get(SettingKey.CONTACT_EMAIL_ADDRESS),
        'registrationPolicy': settings.get(SettingKey.REGISTRATION_POLICY),
        'enablePasswordLogin': settings.get(SettingKey.ENABLE_PASSWORD_LOGIN)
    }
    # Keep self.vars synchronized with subsequent setting changes.
    events.bind('model.setting.save.after',
                CoreEventHandler.WEBROOT_SETTING_CHANGE,
                self._onSettingSave)
    events.bind('model.setting.remove',
                CoreEventHandler.WEBROOT_SETTING_CHANGE,
                self._onSettingRemove)
def startMonitor(self):
    """Start watching the configured partition with inotify.

    Loads the assetstore and destination model (WRITE access required,
    raises on failure), starts a threaded pyinotify notifier that forwards
    create/delete/modify events to :class:`EventHandler`, and flips the
    MONITOR default setting to True.

    :returns: dict mapping the MONITOR setting key to its current value.
    """
    settings = Setting()
    self.ASSETSTORE_ID = settings.get(PluginSettings.ASSETSTORE_ID)
    self.assetstore = AssetstoreModel().load(self.ASSETSTORE_ID)
    self.MONITOR_PARTITION = settings.get(PluginSettings.MONITOR_PARTITION)
    self.DESTINATION_TYPE = settings.get(PluginSettings.DESTINATION_TYPE)
    self.DESTINATION_ID = settings.get(PluginSettings.DESTINATION_ID)
    self.destination = ModelImporter.model(self.DESTINATION_TYPE).load(
        self.DESTINATION_ID, user=self.getCurrentUser(),
        level=AccessType.WRITE, exc=True)
    wm = pyinotify.WatchManager()  # Watch Manager
    mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE | pyinotify.IN_MODIFY
    self.notifier = pyinotify.ThreadedNotifier(
        wm, EventHandler(self.assetstore, self.destination,
                         self.DESTINATION_TYPE, self.MONITOR_PARTITION))
    # The watch-descriptor map returned here is not needed (auto_add keeps
    # new subdirectories covered), so it is no longer bound to an unused
    # local variable.
    wm.add_watch(self.MONITOR_PARTITION, mask, rec=True, auto_add=True)
    self.notifier.start()
    SettingDefault.defaults.update({PluginSettings.MONITOR: True})
    return {PluginSettings.MONITOR: settings.get(PluginSettings.MONITOR)}
def getCollectionCreationPolicyAccess(self):
    """Expand the collection-creation policy into user/group access lists.

    Stored ids are resolved to documents; unresolvable ids are pruned,
    and display fields are attached to the surviving entries.
    """
    cpp = Setting().get('core.collection_create_policy')
    users = [{'id': x} for x in cpp.get('users', [])]
    groups = [{'id': x} for x in cpp.get('groups', [])]
    acList = {'users': users, 'groups': groups}
    # Walk copies so removal while iterating is safe.
    for user in users[:]:
        userDoc = User().load(user['id'], force=True,
                              fields=['firstName', 'lastName', 'login'])
        if userDoc is not None:
            user['login'] = userDoc['login']
            user['name'] = ' '.join((userDoc['firstName'],
                                     userDoc['lastName']))
        else:
            users.remove(user)
    for grp in groups[:]:
        grpDoc = Group().load(grp['id'], force=True,
                              fields=['name', 'description'])
        if grpDoc is not None:
            grp['name'] = grpDoc['name']
            grp['description'] = grpDoc['description']
        else:
            groups.remove(grp)
    return acList
def testSettingsCache(db, enabledCache):
    """Check that setting reads hit the cache and that set()/unset()
    refresh or evict the cached entry."""
    settings = Setting()
    # Prime the cache with an initial brand name.
    settings.set(SettingKey.BRAND_NAME, 'foo')
    # Mutate the fetched document directly, sidestepping the cache.
    fetched = settings.findOne({'key': SettingKey.BRAND_NAME})
    fetched['value'] = 'bar'
    # The cache must still serve the original value.
    assert settings.get(SettingKey.BRAND_NAME) == 'foo'
    # Updating through .set refreshes the cached entry.
    settings.set(SettingKey.BRAND_NAME, 'bar')
    with mock.patch.object(settings, 'findOne') as mockedFindOne:
        assert settings.get(SettingKey.BRAND_NAME) == 'bar'
        # A cache hit means the database was never consulted.
        mockedFindOne.assert_not_called()
    # Unsetting evicts the entry from the cache...
    settings.unset(SettingKey.BRAND_NAME)
    # ...so the next read must go back to the database.
    with mock.patch.object(settings, 'findOne') as mockedFindOne:
        settings.get(SettingKey.BRAND_NAME)
        mockedFindOne.assert_called_once()
def getSettings(self):
    """Return the shared and crontab partition settings as a dict."""
    settings = Setting()
    keys = (PluginSettings.SHARED_PARTITION,
            PluginSettings.CRONTAB_PARTITION)
    return {key: settings.get(key) for key in keys}
def getSettings(self):
    """Return the homepage display settings as a dict keyed by setting."""
    settings = Setting()
    keys = (
        PluginSettings.MARKDOWN,
        PluginSettings.HEADER,
        PluginSettings.SUBHEADER,
        PluginSettings.WELCOME_TEXT,
        PluginSettings.LOGO,
    )
    return {key: settings.get(key) for key in keys}
def _storeEndpointData(self, key, endpointId, endpointName):
    """Persist a Globus endpoint id/name pair under *key*.

    The id and name maps are maintained as a pair in two settings; both
    are (re)initialized together when either is missing.

    :param key: lookup key for the endpoint entry.
    :param endpointId: the Globus endpoint id to store.
    :param endpointName: the Globus endpoint name to store.
    """
    settings = Setting()
    endpointIds = settings.get(PluginSettings.GLOBUS_ENDPOINT_ID, None)
    endpointNames = settings.get(PluginSettings.GLOBUS_ENDPOINT_NAME, None)
    # Fix: the original only checked endpointIds, so a missing name map
    # alone caused a TypeError on ``endpointNames[key]`` below.  Treat the
    # two settings as a unit and start both fresh if either is absent.
    if endpointIds is None or endpointNames is None:
        endpointIds = {}
        endpointNames = {}
    endpointIds[key] = endpointId
    endpointNames[key] = endpointName
    settings.set(PluginSettings.GLOBUS_ENDPOINT_ID, endpointIds)
    settings.set(PluginSettings.GLOBUS_ENDPOINT_NAME, endpointNames)
def _getEndpointDataByKey(self, key):
    """Look up the stored (endpointId, endpointName) pair for *key*.

    :returns: the ``(id, name)`` tuple, or ``(None, None)`` when no
        endpoint data is stored for *key*.
    """
    settings = Setting()
    endpointIds = settings.get(PluginSettings.GLOBUS_ENDPOINT_ID, None)
    endpointNames = settings.get(PluginSettings.GLOBUS_ENDPOINT_NAME, None)
    (endpointIds, endpointNames) = self._maybeConvertSettings(
        endpointIds, endpointNames)
    # Guard clauses replace the original nested if/else; short-circuit
    # keeps the membership test safe when the map is absent.
    if endpointIds is None or key not in endpointIds:
        return (None, None)
    return (endpointIds[key], endpointNames[key])
def getSettings(self):
    """Return the monitor plugin settings, substituting '' for unset values."""
    settings = Setting()
    keys = (
        PluginSettings.ASSETSTORE_ID,
        PluginSettings.MONITOR_PARTITION,
        PluginSettings.DESTINATION_TYPE,
        PluginSettings.DESTINATION_ID,
        PluginSettings.MONITOR,
    )
    # Falsy stored values (None, False, '') are normalized to ''.
    return {key: settings.get(key) or '' for key in keys}
def _submitEmail(msg, recipients):
    """Deliver *msg* to *recipients* through the configured SMTP server.

    :param msg: an email message object exposing ``['From']`` and
        ``as_string()``.
    :param recipients: iterable of recipient addresses.
    """
    from girder.models.setting import Setting
    setting = Setting()
    # Gather the SMTP connection parameters from core settings.
    connArgs = {
        'host': setting.get(SettingKey.SMTP_HOST),
        'port': setting.get(SettingKey.SMTP_PORT),
        'encryption': setting.get(SettingKey.SMTP_ENCRYPTION),
        'username': setting.get(SettingKey.SMTP_USERNAME),
        'password': setting.get(SettingKey.SMTP_PASSWORD),
    }
    smtp = _SMTPConnection(**connArgs)
    logger.info('Sending email to %s through %s',
                ', '.join(recipients), smtp.host)
    with smtp:
        smtp.send(msg['From'], recipients, msg.as_string())
def __init__(self):
    """REST resource for submitting and tracking slurm jobs.

    Captures the current user/token, seeds default sbatch parameters,
    resolves well-known subdirectories of the shared partition from
    settings, and registers the resource's routes.
    """
    super(Slurm, self).__init__()
    self.resourceName = 'slurm'
    self.user = self.getCurrentUser()
    self.token = self.getCurrentToken()
    # Default sbatch submission parameters.
    self.name = 'test'
    self.entry = None
    self.partition = 'norm'
    self.nodes = 1
    self.ntasks = 2
    self.gres = 'gpu:p100:1'
    self.mem_per_cpu = '32gb'
    settings = Setting()
    self.SHARED_PARTITION = settings.get(PluginSettings.SHARED_PARTITION)
    # Conventional layout under the shared partition.
    self._shared_partition_log = os.path.join(self.SHARED_PARTITION, 'logs')
    self._shared_partition_work_directory = os.path.join(
        self.SHARED_PARTITION, 'tmp')
    self._modulesPath = os.path.join(self.SHARED_PARTITION, 'modules')
    self._shellPath = os.path.join(self.SHARED_PARTITION, 'shells')
    self.route('GET', ('lists', ), self.listSlurmJobs)
    self.route('GET', ('slurmOption', ), self.getSlurmOption)
    self.route('PUT', ('slurmOption', ), self.setSlurmOption)
    self.route('GET', (), self.getSlurm)
    self.route('PUT', ('cancel', ':id'), self.cancelSlurm)
    self.route('POST', (), self.submitSlurmJob)
    self.route('GET', ('settings', ), self.getSettings)
    self.route('POST', ('update', ), self.update)
    self.route('PUT', ('updatestep', ), self.updateStep)
def _startContainer(self, container):
    """Mount the session filesystem for *container* and mark it running.

    :param container: the container document; its ``mountId`` and
        ``status`` fields are updated and the document is saved.
    """
    psRoot = Setting().get(PluginSettings.PRIVATE_STORAGE_PATH)
    apiUrl = rest.getApiUrl()
    tokenId = rest.getCurrentToken()['_id']
    sessionId = str(container['sessionId'])
    container['mountId'] = efs.mount(
        sessionId, '/tmp/' + sessionId, psRoot, apiUrl, tokenId)
    container['status'] = 'Running'
    self.save(container)
def _sendmail(event):
    """Event handler that emails ``event.info['message']`` to
    ``event.info['recipients']`` via the configured SMTP server."""
    from girder.models.setting import Setting
    message = event.info['message']
    recipients = event.info['recipients']
    setting = Setting()
    connection = _SMTPConnection(
        host=setting.get(SettingKey.SMTP_HOST, 'localhost'),
        port=setting.get(SettingKey.SMTP_PORT, None),
        encryption=setting.get(SettingKey.SMTP_ENCRYPTION, 'none'),
        username=setting.get(SettingKey.SMTP_USERNAME, None),
        password=setting.get(SettingKey.SMTP_PASSWORD, None)
    )
    logger.info('Sending email to %s through %s',
                ', '.join(recipients), connection.host)
    with connection:
        connection.send(message['From'], recipients, message.as_string())
def enablePlugins(self, plugins):
    """Persist *plugins* as the enabled set, pruning routes belonging to
    plugins that are being disabled.

    :param plugins: the new list of enabled plugin names.
    :returns: the saved PLUGINS_ENABLED setting document.
    """
    setting = Setting()
    routeTable = setting.get(SettingKey.ROUTE_TABLE)
    previouslyEnabled = setting.get(SettingKey.PLUGINS_ENABLED)
    # Core routes are never plugin-owned and must not be pruned.
    protected = {GIRDER_ROUTE_ID, GIRDER_STATIC_ROUTE_ID}
    staleRoutes = set(previouslyEnabled) - set(plugins) - protected
    removed = [route for route in staleRoutes if route in routeTable]
    for route in removed:
        del routeTable[route]
    # Only write the route table back when something actually changed.
    if removed:
        setting.set(SettingKey.ROUTE_TABLE, routeTable)
    # Route cleanup is done; update list of enabled plugins.
    return setting.set(SettingKey.PLUGINS_ENABLED, plugins)
def enablePlugins(self, plugins):
    """Save the enabled-plugin list, removing route-table entries for
    plugins that have just been disabled.

    :param plugins: the new list of enabled plugin names.
    :returns: the saved PLUGINS_ENABLED setting document.
    """
    setting = Setting()
    routeTable = setting.get(SettingKey.ROUTE_TABLE)
    oldPlugins = setting.get(SettingKey.PLUGINS_ENABLED)
    # Girder's own routes are reserved and never pruned.
    reserved = {GIRDER_ROUTE_ID, GIRDER_STATIC_ROUTE_ID}
    dirty = False
    for route in set(oldPlugins) - set(plugins) - reserved:
        if route in routeTable:
            routeTable.pop(route)
            dirty = True
    if dirty:
        setting.set(SettingKey.ROUTE_TABLE, routeTable)
    # Route cleanup is done; update list of enabled plugins.
    return setting.set(SettingKey.PLUGINS_ENABLED, plugins)
def cronWatch(event):
    """Install a once-a-minute crontab entry that watches a slurm job.

    The entry runs ``crontab.sh`` with the slurm job id and a random
    comment id (used later to locate and remove the entry), appending
    output to a per-job log under the crontab partition.
    """
    import random
    slurmJobId = str(event.info['slurmJobId'])
    crontabPartition = Setting().get(PluginSettings.CRONTAB_PARTITION)
    logPath = os.path.join(crontabPartition, slurmJobId)
    shellPath = os.path.join(os.path.dirname(__file__), 'crontab.sh')
    # Random 128-bit tag identifies this crontab entry.
    commentId = str(random.getrandbits(128))
    command = '{} {} {} >> {} 2>&1\n'.format(
        shellPath, slurmJobId, commentId, logPath)
    cron = CronTab(user=True)
    job = cron.new(command=command)
    job.set_comment(commentId)
    job.minute.every(1)
    job.enable()
    cron.write()
def tmpdir(cleanup=True):
    """Yield a fresh temporary directory under the shared partition's
    ``tmp`` root.

    :param cleanup: currently unused — removal of the directory is
        intentionally disabled (see the commented-out block below).
    """
    # Make the temp dir underneath tmp_root config setting
    settings = Setting()
    tmp = os.path.join(settings.get(PluginSettings.SHARED_PARTITION), 'tmp')
    root = os.path.abspath(tmp)
    # exist_ok replaces the racy try/except around makedirs; a pre-existing
    # non-directory at ``root`` still raises, matching the old behavior.
    os.makedirs(root, exist_ok=True)
    path = tempfile.mkdtemp(dir=root)
    try:
        yield path
    finally:
        # Cleanup the temp dir
        # Removal is deliberately disabled; honor ``cleanup`` if re-enabled.
        # if cleanup and os.path.isdir(path):
        #     shutil.rmtree(path)
        pass
def loopWatch(slurmJobId):
    """Poll ``squeue`` until *slurmJobId* disappears from the queue, then
    mark the matching girder job successful and attach its output.

    Runs in a background thread; polls once per second.

    :param slurmJobId: the slurm job id, as a string.
    """
    settings = Setting()
    SHARED_PARTITION = settings.get(PluginSettings.SHARED_PARTITION)
    shared_partition_log = os.path.join(SHARED_PARTITION, 'logs')
    shared_partition_work_directory = os.path.join(SHARED_PARTITION, 'tmp')
    while True:
        args = 'squeue -j {}'.format(slurmJobId)
        output = subprocess.Popen(args, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE, shell=True)
        out_put = output.communicate()[0]
        found = re.findall(slurmJobId, out_put.decode())
        if len(found) != 0:
            # Job still queued/running; poll again in a second.
            sleep(1)
            continue
        job = Job().findOne(
            {'otherFields.slurm_info.slurm_id': int(slurmJobId)})
        Job().updateJob(job, status=JobStatus.SUCCESS)
        name = job['otherFields']['slurm_info']['name']
        # Fix: the log files were previously opened without a context
        # manager, leaking the handle if a read or update raised.
        log_file_name = 'slurm-{}.{}.out'.format(name, slurmJobId)
        log_file_path = os.path.join(shared_partition_log, log_file_name)
        with open(log_file_path, 'r') as f:
            Job().updateJob(job, log=f.read())
        err_file_name = 'slurm-{}.{}.err'.format(name, slurmJobId)
        err_file_path = os.path.join(shared_partition_log, err_file_name)
        # NOTE(review): stderr content is submitted under the same ``log``
        # keyword as stdout above — confirm whether updateJob appends or
        # overwrites the previous log entry.
        with open(err_file_path, 'r') as f:
            Job().updateJob(job, log=f.read())
        # Job().save(job)
        # _send_to_girder
        slurm_output_name = 'slurm-{}.{}'.format(name, slurmJobId)
        data = os.path.join(shared_partition_work_directory,
                            slurm_output_name)
        girderOutput.sendOutputToGirder(job, data)
        break
def testLdapLogin(self):
    """End-to-end LDAP authentication test: setting validation, new-user
    registration, repeat login, bind/search failure handling, fallback to
    core auth, and name generation from cn-only records.
    """
    settings = Setting()
    self.assertEqual(settings.get(PluginSettings.SERVERS), [])
    # The server list must be a list; a dict is rejected by validation.
    with self.assertRaises(ValidationException):
        settings.set(PluginSettings.SERVERS, {})
    settings.set(PluginSettings.SERVERS, [{
        'baseDn': 'cn=Users,dc=foo,dc=bar,dc=org',
        'bindName': 'cn=foo,cn=Users,dc=foo,dc=bar,dc=org',
        'password': '******',
        'searchField': 'mail',
        'uri': 'foo.bar.org:389'
    }])
    with mock.patch('ldap.initialize', return_value=MockLdap()) as ldapInit:
        resp = self.request('/user/authentication',
                            basicAuth='hello:world')
        self.assertEqual(len(ldapInit.mock_calls), 1)
        self.assertStatusOk(resp)
        # Register a new user
        user = resp.json['user']
        self.assertEqual(user['email'], '*****@*****.**')
        self.assertEqual(user['firstName'], 'Foo')
        self.assertEqual(user['lastName'], 'Bar')
        self.assertEqual(user['login'], 'foobar')
        # Login as an existing user
        resp = self.request('/user/authentication',
                            basicAuth='hello:world')
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['user']['_id'], user['_id'])
    # A failed bind or a failed search both surface as 401s.
    with mock.patch('ldap.initialize',
                    return_value=MockLdap(bindFail=True)):
        resp = self.request('/user/authentication',
                            basicAuth='hello:world')
        self.assertStatus(resp, 401)
    with mock.patch('ldap.initialize',
                    return_value=MockLdap(searchFail=True)):
        resp = self.request('/user/authentication',
                            basicAuth='hello:world')
        self.assertStatus(resp, 401)
    # Test fallback to logging in with core auth
    normalUser = User().createUser(
        login='******', firstName='Normal', lastName='User',
        email='*****@*****.**', password='******')
    with mock.patch('ldap.initialize',
                    return_value=MockLdap(searchFail=True)):
        resp = self.request('/user/authentication',
                            basicAuth='normal:normaluser')
        self.assertStatusOk(resp)
        self.assertEqual(str(normalUser['_id']), resp.json['user']['_id'])
    # Test registering from a record that only has a cn, no sn/givenName
    record = {
        'cn': [b'Fizz Buzz'],
        'mail': [b'*****@*****.**'],
        'distinguishedName': [b'shouldbeignored']
    }
    with mock.patch('ldap.initialize',
                    return_value=MockLdap(record=record)):
        resp = self.request('/user/authentication',
                            basicAuth='fizzbuzz:foo')
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['user']['login'], 'fizz')
        self.assertEqual(resp.json['user']['firstName'], 'Fizz')
        self.assertEqual(resp.json['user']['lastName'], 'Buzz')
    # Test falling back to other name generation behavior (first+last name)
    record = {
        'cn': [b'Fizz Buzz'],
        'mail': [b'*****@*****.**'],
        'distinguishedName': [b'shouldbeignored']
    }
    with mock.patch('ldap.initialize',
                    return_value=MockLdap(record=record)):
        resp = self.request('/user/authentication',
                            basicAuth='fizzbuzz:foo')
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['user']['login'], 'fizzbuzz')
        self.assertEqual(resp.json['user']['firstName'], 'Fizz')
        self.assertEqual(resp.json['user']['lastName'], 'Buzz')
def schedule(event):
    """
    This is bound to the "jobs.schedule" event, and will be triggered any time
    a job is scheduled. This handler will process any job that has the handler
    field set to "worker_handler".
    """
    job = event.info
    slurm_info_new = job['otherFields']['slurm_info']
    # shellScript = job['shellScript']
    if job['handler'] == 'slurm_handler' and slurm_info_new[
            'entry'] is not None:
        # fetch_input(job)
        settings = Setting()
        SHARED_PARTITION = settings.get(PluginSettings.SHARED_PARTITION)
        shared_partition_log = os.path.join(SHARED_PARTITION, 'logs')
        shared_partition_work_directory = os.path.join(SHARED_PARTITION,
                                                       'tmp')
        modulesPath = os.path.join(SHARED_PARTITION, 'modules')
        pythonScriptPath = os.path.join(modulesPath, slurm_info_new['entry'])
        Job().updateJob(job, status=JobStatus.QUEUED)
        # sbatch script template; the {placeholders} are filled below via
        # str.format.  %x/%j are expanded by slurm to job name/id.
        batchscript = """#!/bin/bash
#SBATCH --partition={partition}
#SBATCH --job-name={name}
#SBATCH --nodes={nodes}
#SBATCH --ntasks={ntasks}
#SBATCH --gres={gres}
#SBATCH --mem-per-cpu={mem_per_cpu}
#SBATCH --output={shared_partition_log}/slurm-%x.%j.out
#SBATCH --error={shared_partition_log}/slurm-%x.%j.err
source /etc/profile.d/modules.sh
module load {modules}
mkdir -p {shared_partition_work_directory}/slurm-$SLURM_JOB_NAME.$SLURM_JOB_ID
"""
        execCommand = """python3.6 {pythonScriptPath} --directory {shared_partition_work_directory}/slurm-$SLURM_JOB_NAME.$SLURM_JOB_ID """
        # Append each job input as a --name value argument; list values are
        # individually double-quoted, scalars single-quoted.
        for name in job['kwargs']['inputs']:
            if isinstance(job['kwargs']['inputs'][name]['data'], list):
                arg = "--" + name + " " + ' '.join(
                    '"{0}"'.format(i)
                    for i in job['kwargs']['inputs'][name]['data']) + " "
            else:
                arg = "--" + name + " '" + str(
                    job['kwargs']['inputs'][name]['data']) + "' "
            execCommand += arg
        batchscript += execCommand
        script = batchscript.format(
            name=slurm_info_new['name'],
            partition=slurm_info_new['partition'],
            nodes=slurm_info_new['nodes'],
            ntasks=slurm_info_new['ntasks'],
            gres=slurm_info_new['gres'],
            mem_per_cpu=slurm_info_new['mem_per_cpu'],
            modules=" ".join(slurm_info_new['modules']),
            shared_partition_log=shared_partition_log,
            shared_partition_work_directory=shared_partition_work_directory,
            pythonScriptPath=pythonScriptPath)
        shellPath = os.path.join(SHARED_PARTITION, 'shells')
        shellScriptPath = os.path.join(shellPath,
                                       slurm_info_new['name'] + '.sh')
        with open(shellScriptPath, "w") as sh:
            sh.write(script)
        try:
            args = ['sbatch']
            args.append(sh.name)
            res = subprocess.check_output(args).strip()
            if not res.startswith(b"Submitted batch"):
                return None
            # sbatch prints "Submitted batch job <id>"; the id is last.
            slurmJobId = int(res.split()[-1])
            # crontab method
            # events.trigger('cron.watch', {'slurmJobId': slurmJobId})
            # thread method
            threading.Thread(target=loopWatch,
                             args=([str(slurmJobId)])).start()
            job['otherFields']['slurm_info']['slurm_id'] = slurmJobId
            Job().save(job)
            Job().updateJob(job, status=JobStatus.RUNNING)
        except Exception:
            # NOTE(review): broad catch returns a string instead of a job
            # id — callers cannot distinguish this from success; confirm
            # intended error handling.
            return 'something wrong during slurm start'
        return slurmJobId
def testLdapLogin(self):
    """End-to-end LDAP authentication test (legacy plugin-import variant):
    setting validation, registration, repeat login, bind/search failures,
    core-auth fallback, and name generation from cn-only records.
    """
    from girder.plugins.ldap.constants import PluginSettings
    settings = Setting()
    self.assertEqual(settings.get(PluginSettings.LDAP_SERVERS), [])
    # The server list must be a list; a dict is rejected by validation.
    with self.assertRaises(ValidationException):
        settings.set(PluginSettings.LDAP_SERVERS, {})
    settings.set(PluginSettings.LDAP_SERVERS, [{
        'baseDn': 'cn=Users,dc=foo,dc=bar,dc=org',
        'bindName': 'cn=foo,cn=Users,dc=foo,dc=bar,dc=org',
        'password': '******',
        'searchField': 'mail',
        'uri': 'foo.bar.org:389'
    }])
    with mock.patch('ldap.initialize', return_value=MockLdap()) as ldapInit:
        resp = self.request('/user/authentication',
                            basicAuth='hello:world')
        self.assertEqual(len(ldapInit.mock_calls), 1)
        self.assertStatusOk(resp)
        # Register a new user
        user = resp.json['user']
        self.assertEqual(user['email'], '*****@*****.**')
        self.assertEqual(user['firstName'], 'Foo')
        self.assertEqual(user['lastName'], 'Bar')
        self.assertEqual(user['login'], 'foobar')
        # Login as an existing user
        resp = self.request('/user/authentication',
                            basicAuth='hello:world')
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['user']['_id'], user['_id'])
    # A failed bind or a failed search both surface as 401s.
    with mock.patch('ldap.initialize',
                    return_value=MockLdap(bindFail=True)):
        resp = self.request('/user/authentication',
                            basicAuth='hello:world')
        self.assertStatus(resp, 401)
    with mock.patch('ldap.initialize',
                    return_value=MockLdap(searchFail=True)):
        resp = self.request('/user/authentication',
                            basicAuth='hello:world')
        self.assertStatus(resp, 401)
    # Test fallback to logging in with core auth
    normalUser = User().createUser(login='******', firstName='Normal',
                                   lastName='User',
                                   email='*****@*****.**',
                                   password='******')
    with mock.patch('ldap.initialize',
                    return_value=MockLdap(searchFail=True)):
        resp = self.request('/user/authentication',
                            basicAuth='normal:normaluser')
        self.assertStatusOk(resp)
        self.assertEqual(str(normalUser['_id']), resp.json['user']['_id'])
    # Test registering from a record that only has a cn, no sn/givenName
    record = {
        'cn': [b'Fizz Buzz'],
        'mail': [b'*****@*****.**'],
        'distinguishedName': [b'shouldbeignored']
    }
    with mock.patch('ldap.initialize',
                    return_value=MockLdap(record=record)):
        resp = self.request('/user/authentication',
                            basicAuth='fizzbuzz:foo')
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['user']['login'], 'fizz')
        self.assertEqual(resp.json['user']['firstName'], 'Fizz')
        self.assertEqual(resp.json['user']['lastName'], 'Buzz')
    # Test falling back to other name generation behavior (first+last name)
    record = {
        'cn': [b'Fizz Buzz'],
        'mail': [b'*****@*****.**'],
        'distinguishedName': [b'shouldbeignored']
    }
    with mock.patch('ldap.initialize',
                    return_value=MockLdap(record=record)):
        resp = self.request('/user/authentication',
                            basicAuth='fizzbuzz:foo')
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['user']['login'], 'fizzbuzz')
        self.assertEqual(resp.json['user']['firstName'], 'Fizz')
        self.assertEqual(resp.json['user']['lastName'], 'Buzz')
def stopMonitor(self):
    """Stop the inotify notifier and report the MONITOR setting.

    :returns: dict mapping the MONITOR setting key to its current value.
    """
    # Halt the filesystem watcher thread.
    self.notifier.stop()
    # Flip the default so future lookups see monitoring disabled.
    SettingDefault.defaults.update({PluginSettings.MONITOR: False})
    return {PluginSettings.MONITOR: Setting().get(PluginSettings.MONITOR)}
def getSettings(self):
    """Return the DEFAULT_BINS setting as a single-entry dict."""
    key = PluginSettings.DEFAULT_BINS
    return {key: Setting().get(key)}
def getTaleDirPath(tale: dict, rootProp: str) -> pathlib.Path:
    """Return the on-disk directory for *tale* under the root named by
    the *rootProp* setting.

    :param tale: the tale document (must have an ``_id``).
    :param rootProp: the setting key holding the root directory path.
    """
    taleId = str(tale['_id'])
    rootDir = pathlib.Path(Setting().get(rootProp))
    # Two-character fan-out keeps any single directory from growing huge.
    return rootDir / taleId[:2] / taleId
def getSettings(self):
    """Return the DEFAULT_BEHAVIOR setting as a single-entry dict."""
    behaviorKey = PluginSettings.DEFAULT_BEHAVIOR
    return {behaviorKey: Setting().get(behaviorKey)}