def recalculate():
    """Recalculate all the live synthesis counters."""
    livesynthesis = current_app.data.driver.db["livesynthesis"]
    live_current = livesynthesis.find_one()
    if live_current is None:
        # Seed the synthesis document with every counter zeroed.
        counter_names = [
            "hosts_total", "hosts_up_hard", "hosts_up_soft",
            "hosts_down_hard", "hosts_down_soft",
            "hosts_unreachable_hard", "hosts_unreachable_soft",
            "hosts_acknowledged", "hosts_in_downtime", "hosts_flapping",
            "hosts_business_impact",
            "services_total", "services_ok_hard", "services_ok_soft",
            "services_warning_hard", "services_warning_soft",
            "services_critical_hard", "services_critical_soft",
            "services_unknown_hard", "services_unknown_soft",
            "services_acknowledged", "services_in_downtime",
            "services_flapping", "services_business_impact",
        ]
        livesynthesis.insert({name: 0 for name in counter_names})
        live_current = livesynthesis.find_one()

    livestates = current_app.data.driver.db["livestate"]

    # Host counters: recompute only when the total number of registered
    # hosts changed.
    hosts = current_app.data.driver.db["host"]
    hosts_cnt = hosts.find({"register": True}).count()
    if live_current["hosts_total"] != hosts_cnt:
        data = {"hosts_total": hosts_cnt}
        # Host livestate rows are those with no service_description.
        for state, field in (("UP", "hosts_up_hard"),
                             ("DOWN", "hosts_down_hard"),
                             ("UNREACHABLE", "hosts_unreachable_hard")):
            data[field] = livestates.find(
                {"service_description": None, "state": state}).count()
        lookup = {"_id": live_current["_id"]}
        patch_internal("livesynthesis", data, False, False, **lookup)

    # Service counters: same total-changed shortcut as for hosts.
    services = current_app.data.driver.db["service"]
    services_cnt = services.find({"register": True}).count()
    if live_current["services_total"] != services_cnt:
        data = {"services_total": services_cnt}
        for state, field in (("OK", "services_ok_hard"),
                             ("WARNING", "services_warning_hard"),
                             ("CRITICAL", "services_critical_hard"),
                             ("UNKNOWN", "services_unknown_hard")):
            data[field] = livestates.find({"state": state}).count()
        lookup = {"_id": live_current["_id"]}
        patch_internal("livesynthesis", data, False, False, **lookup)
def on_updated_userservice_session_user(updates, original):
    """A session/user relation has been updated in the database.

    When the relation is closed: decrease the session users counter
    (closing the session itself when it drops to zero) and record a
    'session.left' event for the user.

    :param updates: modified fields
    :param original: original fields
    :return: None
    """
    if updates.get('status') != 'close':
        return

    # Decrease the session users counter
    sessions = app.data.driver.db['userservice_session']
    session = sessions.find_one({'_id': original['userservice_session']})
    if session and session['current_nb_users'] > 0:
        data = {"current_nb_users": session['current_nb_users'] - 1}
        # Close session if no more users in the session ...
        if data['current_nb_users'] == 0:
            data['status'] = 'close'
        patch_internal('userservice_session', data, False, False,
                       **{"_id": original['userservice_session']})

    # Record the 'session.left' event
    users = app.data.driver.db['user']
    user = users.find_one({'_id': original['user']})
    if user:
        post_internal("event", {
            'userservice_session': original['userservice_session'],
            'user': user['_id'],
            'date': datetime.utcnow(),
            'type': 'session.left',
            'message': 'User %s left the session' % user['name']
        })
def verify_password(user, plaintext):
    """Check password of user, rehash if necessary.

    The stored password may be None, e.g. when the user authenticates via
    LDAP; in that case the check fails.

    Args:
        user (dict): the user in question.
        plaintext (string): password to check

    Returns:
        bool: True if password matches. False if it doesn't or if there is
            no password set and/or provided.
    """
    context = app.config['PASSWORD_CONTEXT']
    stored = user['password']

    # No password provided or none stored: cannot verify.
    if plaintext is None or stored is None:
        return False

    matches = context.verify(plaintext, stored)
    if matches and context.needs_update(stored):
        # Re-hash with the current scheme; the patch hook handles hashing.
        with admin_permissions():
            patch_internal("users", payload={'password': plaintext},
                           _id=user['_id'])
    return matches
def hook_on_inserted(resource, items):
    """Called after an insert operation on a list of items.

    Marks the parent of every inserted item as a non-leaf node.
    """
    config = get_config(resource)
    seen_parents = set()
    for item in items:
        parent_id = item[config['PARENT']]
        # Root nodes have no parent to update.
        if not parent_id:
            continue
        # Bulk inserts may share a parent; patch each parent only once.
        if parent_id in seen_parents:
            continue
        patch_internal(
            resource,
            {config['LEAF']: False},
            concurrency_check=False,
            skip_validation=True,
            **{current_app.config['ID_FIELD']: parent_id}
        )
        seen_parents.add(parent_id)
def on_updated_service(updated, original):
    """Propagate business impact / display name changes to the livestate.

    :param updated: modified fields
    :param original: original fields
    :return: None
    """
    bi_changed = ('business_impact' in updated
                  and updated['business_impact'] != original['business_impact'])

    # Pick the first non-empty candidate name from the update; an empty
    # string means "nothing to propagate" (the original already had one).
    name = ''
    if updated.get('display_name', '') != '':
        name = updated['display_name']
    elif original.get('display_name', '') != '':
        name = ''
    elif updated.get('alias', '') != '':
        name = updated['alias']
    elif original.get('alias', '') != '':
        name = ''
    elif updated.get('service_description', '') != '':
        name = updated['service_description']

    if not bi_changed and name == '':
        return

    livestate_db = current_app.data.driver.db['livestate']
    live_current = livestate_db.find_one(
        {'service_description': original['_id']})
    data = {}
    if bi_changed:
        data['business_impact'] = updated['business_impact']
    if name != '':
        data['display_name_service'] = name
    patch_internal('livestate', data, False, False,
                   **{"_id": live_current['_id']})
def on_confirm_email(token):
    """Email confirmation endpoint.

    Confirms the signup referenced by the signed token, refreshes the
    event's waiting list, then redirects (or returns a confirmation text).
    """
    try:
        serializer = URLSafeSerializer(get_token_secret())
        signup_id = ObjectId(serializer.loads(token))
    except BadSignature:
        return "Unknown token"

    id_field = current_app.config['ID_FIELD']
    patch_internal('eventsignups', {'confirmed': True},
                   skip_validation=True,
                   concurrency_check=False,
                   **{id_field: signup_id})

    # Now the user may be able to get accepted, so update the event's
    # waiting list.
    signup = current_app.data.find_one('eventsignups', None,
                                       **{id_field: signup_id})
    update_waiting_list(signup['event'])

    redirect_url = current_app.config.get('EMAIL_CONFIRMED_REDIRECT')
    if redirect_url:
        return redirect(redirect_url)
    return current_app.config['CONFIRM_TEXT']
def update_service_use_template(service, fields):
    """This update (patch) service with values of template

    :param service: fields / values of the service
    :type service: dict
    :param fields: fields updated in the template service
    :type fields: dict
    :return: None
    """
    service_db = current_app.data.driver.db['service']

    # Merge all template values; later templates override earlier ones.
    inherited = {}
    for template_id in service['_templates']:
        template = service_db.find_one({'_id': template_id})
        for name, value in iteritems(template):
            inherited[name] = value

    # Only fields still bound to the template get refreshed.
    to_patch = {name: inherited[name]
                for name in fields
                if name in service['_template_fields']}
    if to_patch:
        # Keep the update hook from detaching these fields.
        g.ignore_hook_patch = True
        patch_internal('service', to_patch, False, False,
                       **{"_id": service['_id']})
def cascade_delete(resource, item):
    """Cascade DELETE.

    For every resource whose schema holds a data_relation pointing at
    `resource`, either delete the related items (when 'cascade_delete' is
    set on the relation) or null out the dangling reference field.
    """
    domain = current_app.config['DOMAIN']
    deleted_id = item[domain[resource]['id_field']]

    for res, res_domain in domain.items():
        # Walk the schema of `res` looking for references to `resource`.
        for field, field_def in res_domain['schema'].items():
            data_relation = field_def.get('data_relation')
            if not data_relation or data_relation.get('resource') != resource:
                continue
            # All items in `res` with reference to the deleted item
            lookup = {field: deleted_id}
            with admin_permissions():
                try:
                    if data_relation.get('cascade_delete'):
                        # Delete the dependent items as well
                        deleteitem_internal(res, concurrency_check=False,
                                            **lookup)
                    else:
                        # Don't delete, only remove the reference
                        patch_internal(res, payload={field: None},
                                       concurrency_check=False, **lookup)
                except NotFound:
                    pass
def on_inserted_userservice_session_user(items):
    """
    A user joined a session:
    - Add a sessionJoined event
    - Update last user activity in the session
    - Increase session users' number

    :param items: inserted session/user relation documents
    :return: None
    """
    for item in items:
        # Parenthesized print works under both Python 2 and Python 3;
        # the original Python-2-only print statement is a syntax error
        # on Python 3. The unused enumerate() index was also dropped.
        print("User: %s joined the session: %s"
              % (item['user'], item['userservice_session']))

        # New session event: sessionJoined
        _users = app.data.driver.db['user']
        user = _users.find_one({'_id': item['user']})
        if user:
            data = {
                'userservice_session': item['userservice_session'],
                # NOTE(review): the event 'user' is the current request's
                # user id while the message names the joined user --
                # confirm this asymmetry is intended (the sibling
                # 'session.left' hook uses user['_id'] instead).
                'user': g.get('users_id', None),
                'date': datetime.utcnow(),
                'type': 'session.joined',
                'message': 'User %s joined the session' % user['name']
            }
            post_internal("event", data)

        # Increase the session users counter
        _session = app.data.driver.db['userservice_session']
        session = _session.find_one({'_id': item['userservice_session']})
        if session:
            data = {"current_nb_users": session['current_nb_users'] + 1}
            lookup = {"_id": item['userservice_session']}
            patch_internal('userservice_session', data, False, False,
                           **lookup)
def cron_timeseries():
    """
    Cron used to push perfdata from retention to the timeseries databases.

    Retention items are deleted once every targeted backend accepted them;
    on partial success only the still-pending backend flag is kept.

    :return: None
    """
    with app.test_request_context():
        retention_db = current_app.data.driver.db['timeseriesretention']
        if retention_db.find().count() == 0:
            return

        # Items targeted at Graphite only
        for data in retention_db.find({'for_graphite': True,
                                       'for_influxdb': False}):
            if not Timeseries.send_to_timeseries_graphite([data]):
                break
            deleteitem_internal('timeseriesretention', False, False,
                                **{"_id": data['_id']})

        # Items targeted at InfluxDB only
        for data in retention_db.find({'for_graphite': False,
                                       'for_influxdb': True}):
            if not Timeseries.send_to_timeseries_influxdb([data]):
                break
            deleteitem_internal('timeseriesretention', False, False,
                                **{"_id": data['_id']})

        # Items targeted at both backends
        for data in retention_db.find({'for_graphite': True,
                                       'for_influxdb': True}):
            graphite_ok = Timeseries.send_to_timeseries_graphite([data])
            influxdb_ok = Timeseries.send_to_timeseries_influxdb([data])
            lookup = {"_id": data['_id']}
            if graphite_ok and influxdb_ok:
                deleteitem_internal('timeseriesretention', False, False,
                                    **lookup)
            elif graphite_ok:
                patch_internal('timeseriesretention',
                               {"for_graphite": False}, False, False,
                               **lookup)
            elif influxdb_ok:
                patch_internal('timeseriesretention',
                               {"for_influxdb": False}, False, False,
                               **lookup)
def push_review(self):
    """Patch the review summary onto each of this run's run elements.

    The lookup includes the barcode only for barcoded run elements.
    """
    # `run_element` instead of `re`, which shadows the regex module.
    for run_element in self.run_elements:
        lookup = {'run_id': self.run_id, 'lane': run_element.get('lane')}
        if run_element.get('barcode'):
            lookup['barcode'] = run_element.get('barcode')
        patch_internal('run_elements', payload=self._summary, **lookup)
def mark_as_paid(payments):
    """After successful payment, set status to `accepted`."""
    # A single payment document is accepted as well as a list.
    if not isinstance(payments, list):
        payments = [payments]

    for payment in payments:
        for signup in payment['signups']:
            patch_internal('signups',
                           _id=str(signup),
                           payload={'status': 'accepted'},
                           concurrency_check=False,
                           skip_validation=True)
def mark_as_unpaid(payments):
    """Before a payment is deleted, set status to `reserved`."""
    # Normalize: a single payment document becomes a one-element list.
    payment_list = payments if isinstance(payments, list) else [payments]

    for payment in payment_list:
        for signup in payment['signups']:
            patch_internal('signups',
                           payload={'status': 'reserved'},
                           _id=str(signup),
                           concurrency_check=False,
                           skip_validation=True)
def pre_realm_patch(updates, original):
    """
    Hook before updating an existing realm.

    Forbids direct edits of the computed tree fields; when the parent
    changes, moves the realm between the old and new parents' children
    lists and recomputes its level and tree parents.

    :param updates: modified fields
    :type updates: dict
    :param original: original fields
    :type original: dict
    :return: None
    """
    if not g.updateRealm:
        if '_tree_parents' in updates:
            abort(make_response("Updating _tree_parents is forbidden", 412))
        if '_children' in updates:
            abort(make_response("Updating _children is forbidden", 412))
        if '_all_children' in updates:
            abort(make_response("Updating _all_children is forbidden", 412))

    if '_parent' in updates and updates['_parent'] != original['_parent']:
        realmsdrv = current_app.data.driver.db['realm']

        # Add self reference in new parent children tree
        new_parent = realmsdrv.find_one({'_id': updates['_parent']})
        if original['_id'] not in new_parent['_children']:
            new_parent['_children'].append(original['_id'])
            lookup = {"_id": new_parent['_id']}
            g.updateRealm = True
            patch_internal('realm', {"_children": new_parent['_children']},
                           False, False, **lookup)
            g.updateRealm = False

        # Delete self reference in former parent children tree
        if len(original['_tree_parents']) > 0:
            former_parent = realmsdrv.find_one(
                {'_id': original['_tree_parents'][-1]})
            if original['_id'] in former_parent['_children']:
                former_parent['_children'].remove(original['_id'])
                lookup = {"_id": former_parent['_id']}
                g.updateRealm = True
                patch_internal('realm',
                               {"_children": former_parent['_children']},
                               False, False, **lookup)
                g.updateRealm = False

        # Bug fix: the level must derive from the NEW parent. The previous
        # code reused one `parent` variable, so when a former parent
        # existed the level was computed from the former parent instead.
        updates['_level'] = new_parent['_level'] + 1

        updates['_tree_parents'] = original['_tree_parents']
        if original['_parent'] in original['_tree_parents']:
            updates['_tree_parents'].remove(original['_parent'])
        if updates['_parent'] not in original['_tree_parents']:
            updates['_tree_parents'].append(updates['_parent'])
def forgot_password():
    """Reset a user's password and mail the new one to them.

    Expects 'username' (a username or an email address) and 'role' in the
    request data; generates a random 8-character password, patches the
    matching user record and sends the password by email.
    """
    mail.init_app(app)

    data = request.values or request.get_json()
    if not data:
        abort(422, description='username or email required')
    expected_username = data.get('username')
    if not expected_username:
        abort(422, description='username or email required')
    expected_role = data.get('role')
    if not expected_role:
        abort(422, description='role required')
    expected_role = json.loads(expected_role)

    # Try a username lookup first, then fall back to email.
    lookup = {'username': expected_username, 'role': expected_role[0]}
    user, *_ = getitem_internal(resource, **lookup)
    if not user:
        lookup = {'email': expected_username, 'role': expected_role[0]}
        user, *_ = getitem_internal(resource, **lookup)
    if not user:
        abort(404, description='username or email not found')

    new_password = str(uuid.uuid4())[:8]
    patch_internal(resource, {'pass_': new_password}, **{'id': user['id']})

    body = template % (expected_username, new_password)
    msg = Message("Reset Password", body=body, recipients=[user['email']])
    mail.send(msg)
    return jsonify({})
def on_competence_put(response, original=None):
    """Maintain the person's competences list after a competence PUT.

    When the competence passed, add it to the person's list (defaulting
    the expiry to end of year), drop stale entries, dedupe by id and
    patch the person document when the list changed.

    :param response: the competence document that was put
    :param original: the previous document, unused here
    :return: None
    """
    if response.get('passed', False) is not True:
        return

    expiry = response.get('valid_until', None)
    # Set expiry to end of year when none was given
    if expiry is None:
        expiry = _get_end_of_year()
    expiry = _fix_naive(expiry)

    person = _get_person(response.get('person_id', None))
    if '_id' not in person:
        return

    competence = person.get('competences', []).copy()

    # Add this competence if it is still valid
    if expiry is not None and isinstance(expiry, datetime) \
            and expiry >= _get_now():
        try:
            competence.append({
                'id': response.get('id'),
                '_code': response.get('_code', None),
                'issuer': response.get('approved_by_person_id', None),
                'expiry': expiry,
                # 'paid': response.get('paid_date', None)
            })
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed while keeping best-effort semantics.
            pass

    # Always remove stale competences
    # Note that _code is for removing old competences, should be removed
    competence[:] = [
        d for d in competence
        if _fix_naive(d.get('expiry')) >= _get_now()
        and d.get('_code', None) is not None
    ]
    # Always unique by id
    competence = list({v['id']: v for v in competence}.values())

    # Patch if difference
    # NOTE(review): this compares against person['competence'] (singular)
    # but reads/patches 'competences' everywhere else -- the singular key
    # looks like a typo that makes the comparison always run against [];
    # confirm against the persons schema before changing.
    if _compare_list_of_dicts(competence,
                              person.get('competence', [])) is True:
        lookup = {'_id': person['_id']}
        resp, _, _, status = patch_internal(
            RESOURCE_PERSONS_PROCESS, {'competences': competence},
            False, True, **lookup)
        if status != 200:
            app.logger.error(
                'Patch returned {} for competence'.format(status))
def on_update_host(updates, original):
    """Called by EVE HOOK (app.on_update_host)

    When a non-template host is updated, every updated field is detached
    from its template: it is removed from '_template_fields' so it no
    longer depends on the template value.

    :param updates: modified fields
    :type updates: dict
    :param original: original fields
    :type original: dict
    :return: None
    """
    if g.get('ignore_hook_patch', False):
        return
    if original['_is_template']:
        # Templates keep their field list untouched.
        return

    template_fields = original['_template_fields']
    detached = [field for field in updates if field in template_fields]
    if not detached:
        return
    for field_name in detached:
        template_fields.remove(field_name)

    lookup = {"_id": original['_id']}
    response = patch_internal('host', {"_template_fields": template_fields},
                              False, False, **lookup)
    # Keep both dicts' etags in sync with the document we just patched.
    updates['_etag'] = response[0]['_etag']
    original['_etag'] = response[0]['_etag']
def syndicate(service):
    '''
    Syndicate a post to the given service. Checks if the original post
    exists and if it hasn't been syndicated to the given service already.

    :param service: the service to syndicate to.
    '''
    service = service.lower()
    if service not in VALID_SERVICES:
        abort(400, 'Service not recognized.')
    if request.mimetype != 'application/json':
        abort(400, 'Syndication endpoint only accepts JSON data.')

    # Get the data and launch the handler for the appropriate service.
    # Security/idiom fix: look the handler up by name instead of eval() --
    # identical behavior for the validated service names, without
    # evaluating a constructed string as code.
    data = request.json
    meta_post = current_app.config.get('META_POST')
    handler = globals()['%s_handler' % service]
    post_id, links = handler(data, meta_post['entity'])

    # Patch the original post with the new links object
    response, _, _, status = patch_internal('posts', {'links': links},
                                            concurrency_check=False,
                                            **{'_id': post_id})
    if status not in (200, 201):
        abort(status)
    return render_response(response, 'item.html',
                           title="Syndicate to %s"
                                 % (service.lower().capitalize(),))
def execute_patch(resource: str, payload: dict, identifier) -> dict:
    """Executes PATCH to the same DeviceHub with a new connection.

    Raises InnerRequestError when the internal PATCH does not return a
    2xx status; otherwise returns the response document.
    """
    payload['_id'] = str(identifier)
    result = patch_internal(resource, payload, False, False,
                            **{'_id': str(identifier)})
    status_code = result[3]
    # Anything outside 2xx is surfaced as an inner-request failure.
    if not 200 <= status_code < 300:
        raise InnerRequestError(status_code, result[0])
    return result[0]
def _generate_all_links(response, now):
    """Generate a new link for the file and all its variations.

    :param response: the file document that should be updated.
    :param now: datetime that reflects 'now', for consistent expiry
        generation.
    """
    # TODO: add project id to all files
    project_id = str(response['project']) if 'project' in response else None
    backend = response['backend']

    response['link'] = generate_link(backend, response['file_path'],
                                     project_id)
    for variation in response.get('variations') or []:
        variation['link'] = generate_link(backend, variation['file_path'],
                                          project_id)

    # Construct the new expiry datetime.
    validity_secs = current_app.config['FILE_LINK_VALIDITY'][backend]
    response['link_expires'] = now + datetime.timedelta(seconds=validity_secs)

    patch_info = remove_private_keys(response)
    file_id = ObjectId(response['_id'])
    (patch_resp, _, _, _) = patch_internal('files', patch_info, _id=file_id)
    if patch_resp.get('_status') == 'ERR':
        log.warning('Unable to save new links for file %s: %r',
                    response['_id'], patch_resp)
        # TODO: raise a snag.
        response['_updated'] = now
    else:
        response['_updated'] = patch_resp['_updated']

    # Be silly and re-fetch the etag ourselves. TODO: handle this better.
    etag_doc = current_app.data.driver.db['files'].find_one(
        {'_id': file_id}, {'_etag': 1})
    response['_etag'] = etag_doc['_etag']
def get_files(data):
    """Backfill missing thumbnails on fetched file items.

    For every item with an empty ThumbnailUrl, generate thumbnail data via
    the Trimble API and persist it with an internal PATCH.

    :param data: Eve fetch payload containing '_items'
    :return: None
    """
    items = data['_items']
    if not items:
        return

    headers = None  # built lazily, once, on first need
    for item in items:
        if item['ThumbnailUrl'] != "":
            continue
        if headers is None:
            # Hoisted out of the per-item work: the token lookup does not
            # depend on the item, so fetch it only once per request.
            # NOTE(review): ['_items']['token'] assumes _items is a
            # mapping; if get_internal returns a list of items this should
            # be ['_items'][0]['token'] -- confirm the response shape.
            token = get_internal('lastToken')[0]['_items']['token']
            headers = {"Authorization": "Bearer " + token}

        img_data = process_thumbnail(item['TrimbleVersionID'], headers)
        if img_data == "":
            continue

        item['ThumbnailUrl'] = img_data
        payload = {"ThumbnailUrl": img_data}
        patch_internal('file', payload, **{'_id': item['_id']})
def task():
    """Task endpoint: PATCH updates tasks, any other method creates one.

    Returns the rendered Eve response for the internal operation.
    """
    # Leftover debug prints ("heelloooooo", request dump) removed.
    if request.method == "PATCH":
        return send_response("tasks",
                             patch_internal("tasks", payload=request.json))
    return send_response("tasks", post_internal("tasks", request.json))
def save_workflow(self, event):
    """Persist a workflow state transition.

    Only called when the state actually changed: appends an audit entry,
    stamps the transition time, transfers ownership/ACL to the acting
    user and patches the stored document. Validation is skipped so the
    read-only workflow fields can be written; concurrency is enforced via
    the _etag in the lookup.

    Returns True when the PATCH succeeded (200/201), False otherwise.
    """
    doc_id = self.db_wf.get('_id')
    doc_etag = self.db_wf.get('_etag')
    doc_version = self.db_wf.get('_version')

    self.action = event.event.name
    self.db_wf.get('workflow').update({'state': self.state})

    # Build a fresh payload without _id and the other meta fields.
    new = {'workflow': self.db_wf.get('workflow')}

    audit = {'a': event.event.name,
             'r': self._trigger_attrs.get(event.event.name).get('resource'),
             'u': self.user_id,
             's': self.initial_state,
             'd': self.state,
             'v': doc_version + 1,
             't': datetime.utcnow(),
             'c': self.comment}
    new['workflow']['audit'].insert(0, audit)
    new['workflow']['last_transition'] = datetime.utcnow()

    # The user performing the transition becomes the new owner.
    new['owner'] = app.globals['user_id']
    if self._trigger_attrs.get(event.event.name).get('comment', False):
        new.get('workflow').update({'comment': self.comment})

    # Always carried over on every transition.
    new['workflow']['settings'] = self.wf_settings
    new['acl'] = self.set_acl()

    response, last_modified, etag, status = patch_internal(
        RESOURCE_COLLECTION,
        payload=new,
        concurrency_check=False,
        skip_validation=True,
        **{'_id': "%s" % doc_id, '_etag': "%s" % doc_etag})

    if status in [200, 201]:
        self.notification()
        return True
    return False
def on_updated_userservice_session(updates, original):
    """
    Hook after updating a user service session.

    Records a status-change event; when the session is closed, also
    records a closing event and closes every still-open session/user
    relation.

    :param updates: list of fields to update
    :type updates: dict
    :param original: list of original fields
    :type original: dict
    :return: None
    """
    if 'status' not in updates:
        return

    # Event: session status changed
    post_internal("event", {
        'userservice_session': original['_id'],
        'user': g.get('users_id', None),
        'date': datetime.utcnow(),
        'type': 'session.status',
        'message': 'Session status changed to %s' % (updates['status'])
    })

    if updates['status'] != 'close':
        return

    # Event: session closed
    post_internal("event", {
        'userservice_session': original['_id'],
        'user': g.get('users_id', None),
        'date': datetime.utcnow(),
        'type': 'session.closed',
        'message': 'Session closed'
    })

    # Session / user relations should be closed
    relations_db = app.data.driver.db['userservice_session_user']
    open_relations = relations_db.find({
        'userservice_session': original['_id'],
        'status': 'open'
    })
    for relation in open_relations:
        patch_internal('userservice_session_user', {'status': 'close'},
                       False, False, **{"_id": relation['_id']})
def _labrequests_set_expire_after(labrequest_id, minutes=1):
    """Set a lab request's `expireAt` to `minutes` from now.

    :param labrequest_id: id of the lab request to patch
    :param minutes: minutes from now at which the request expires
    :return: the (response, last_modified, etag, status) tuple from the
        internal PATCH
    """
    expire_at = _datetime_to_rfc1123(
        datetime.now() + timedelta(minutes=minutes))
    return patch_internal(
        'labrequests',
        payload={'expireAt': expire_at},
        concurrency_check=False,
        skip_validation=True,
        _id=labrequest_id)
def test_patch_internal(self):
    """patch_internal should update the document and return 200."""
    field = 'ref'
    value = "9876543210987654321098765"
    with self.app.test_request_context(self.item_id_url):
        response, _, _, status = patch_internal(
            self.known_resource, {field: value},
            concurrency_check=False, **{'_id': self.item_id})
        stored = self.compare_patch_with_get(field, response)
        self.assertEqual(stored, value)
        self.assert200(status)
def publish(content_id):
    """Publish (POST) or unpublish (other methods) a content item.

    The lookup restricts the patch to documents the current user may
    'execute'; ownership and the full ACL are rewritten accordingly.
    """
    current_user = app.globals.get('user_id')
    lookup = {
        '_id': content_id,
        '$or': [
            {"acl.execute.roles": {'$in': app.globals['acl']['roles']}},
            {"acl.execute.users": {'$in': [current_user]}},
        ]
    }

    is_publish = request.method == 'POST'
    # Publishing opens read access; unpublishing closes everything down
    # to the acting user.
    acl = {
        'read': {
            'users': [current_user],
            'roles': ACL_CLOSED_ALL_LIST if is_publish else []
        },
        'write': {'users': [current_user], 'roles': []},
        'execute': {'users': [current_user], 'roles': []},
        'delete': {'users': [current_user], 'roles': []},
    }

    response, last_modified, etag, status = patch_internal(
        'content',
        {'acl': acl, 'published': is_publish, 'owner': current_user},
        False, True, **lookup)

    print(response, status)
    if status in [200, 201]:
        return eve_response(response, status)
    print(response, status)
    return eve_error_response('Error', 403)
def after_delete_realm(item):
    """
    Hook after realm deletion. Update tree children of parent realm

    :param item: fields of the deleted item / record
    :type item: dict
    :return: None
    """
    realmsdrv = current_app.data.driver.db['realm']
    if len(item['_tree_parents']) > 0:
        parent = realmsdrv.find_one({'_id': item['_tree_parents'][-1]})
        if item['_id'] in parent['_children']:
            parent['_children'].remove(item['_id'])
        if item['_id'] in parent['_all_children']:
            parent['_all_children'].remove(item['_id'])
        lookup = {"_id": parent['_id']}
        g.updateRealm = True
        # Bug fix: _all_children was previously patched with the
        # _children list, silently dropping all grand-children references
        # from the parent.
        patch_internal('realm', {
            "_children": parent['_children'],
            "_all_children": parent['_all_children']
        }, False, False, **lookup)
        g.updateRealm = False
def after_insert_realm(items):
    """
    Hook after realm insertion: register each new realm in its parent's
    children trees.

    :param items: inserted realm documents
    :type items: dict
    :return: None
    """
    realmsdrv = current_app.data.driver.db['realm']
    for item in items:
        # Update _children / _all_children fields on the parent.
        parent = realmsdrv.find_one({'_id': item['_parent']})
        parent['_children'].append(item['_id'])
        parent['_all_children'].append(item['_id'])
        g.updateRealm = True
        patch_internal('realm', {
            "_children": parent['_children'],
            "_all_children": parent['_all_children']
        }, False, False, **{"_id": parent['_id']})
        g.updateRealm = False
def test_patch_internal(self):
    """Verify patch_internal updates the stored value and returns 200."""
    payload = {"ref": "9876543210987654321098765"}
    with self.app.test_request_context(self.item_id_url):
        result, _, _, status = patch_internal(self.known_resource,
                                              payload,
                                              concurrency_check=False,
                                              **{"_id": self.item_id})
        db_value = self.compare_patch_with_get("ref", result)
        self.assertEqual(db_value, payload["ref"])
        self.assert200(status)
def cron_timeseries():
    """
    Cron used to add perfdata from retention to timeseries databases.

    Each retention entry is deleted once every backend it targets has
    accepted it; a partially delivered entry keeps only the flag of the
    backend that still needs it.

    :return: None
    """
    with app.test_request_context():
        db = current_app.data.driver.db['timeseriesretention']
        if db.find().count() > 0:
            # Graphite-only entries: stop at the first delivery failure.
            for entry in db.find({'for_graphite': True,
                                  'for_influxdb': False}):
                if not Timeseries.send_to_timeseries_graphite([entry]):
                    break
                deleteitem_internal('timeseriesretention', False, False,
                                    **{"_id": entry['_id']})

            # InfluxDB-only entries.
            for entry in db.find({'for_graphite': False,
                                  'for_influxdb': True}):
                if not Timeseries.send_to_timeseries_influxdb([entry]):
                    break
                deleteitem_internal('timeseriesretention', False, False,
                                    **{"_id": entry['_id']})

            # Entries targeting both backends.
            for entry in db.find({'for_graphite': True,
                                  'for_influxdb': True}):
                sent_graphite = Timeseries.send_to_timeseries_graphite(
                    [entry])
                sent_influxdb = Timeseries.send_to_timeseries_influxdb(
                    [entry])
                lookup = {"_id": entry['_id']}
                if sent_graphite and sent_influxdb:
                    deleteitem_internal('timeseriesretention', False,
                                        False, **lookup)
                elif sent_graphite and not sent_influxdb:
                    patch_internal('timeseriesretention',
                                   {"for_graphite": False},
                                   False, False, **lookup)
                elif sent_influxdb and not sent_graphite:
                    patch_internal('timeseriesretention',
                                   {"for_influxdb": False},
                                   False, False, **lookup)
def update_host_use_template(host, fields):
    """This update (patch) host with values of template

    :param host: fields / values of the host
    :type host: dict
    :param fields: fields updated in the template host
    :type fields: dict
    :return: None
    """
    host_db = current_app.data.driver.db['host']

    # Collect values from every template; later templates override.
    inherited = {}
    for template_id in host['_templates']:
        template = host_db.find_one({'_id': template_id})
        for name, value in iteritems(template):
            inherited[name] = value

    to_patch = {name: inherited[name]
                for name in fields
                if name in host['_template_fields']}
    if to_patch:
        # Keep the host-update hook from detaching these fields.
        g.ignore_hook_patch = True
        patch_internal('host', to_patch, False, False,
                       **{"_id": host['_id']})
def update_user_use_template(user, fields):
    """This update (patch) user with values of template

    :param user: fields / values of the user
    :type user: dict
    :param fields: fields updated in the template user
    :type fields: dict
    :return: None
    """
    user_db = current_app.data.driver.db['user']

    # Gather the template-provided values (later templates win).
    template_values = {}
    for template_id in user['_templates']:
        template_doc = user_db.find_one({'_id': template_id})
        for name, value in iteritems(template_doc):
            template_values[name] = value

    # Refresh only the fields still inherited from the template.
    to_patch = {}
    for name in fields:
        if name in user['_template_fields']:
            to_patch[name] = template_values[name]

    if to_patch:
        g.ignore_hook_patch = True
        patch_internal('user', to_patch, False, False,
                       **{"_id": user['_id']})
def after_insert_realm(items):
    """
    Hook after realm insertion. It updates the parent's tree children
    fields for every new realm.

    :param items: realm fields
    :type items: dict
    :return: None
    """
    realms = current_app.data.driver.db['realm']
    for new_realm in items:
        # Register the new realm in its parent's children lists.
        parent = realms.find_one({'_id': new_realm['_parent']})
        parent['_children'].append(new_realm['_id'])
        parent['_all_children'].append(new_realm['_id'])
        lookup = {"_id": parent['_id']}
        g.updateRealm = True
        patch_internal(
            'realm',
            {"_children": parent['_children'],
             "_all_children": parent['_all_children']},
            False, False, **lookup)
        g.updateRealm = False
def _perform_action(self):
    """Apply the review outcome to the sample's rapid_analysis subdict.

    Returns a small action-summary dict for the caller.
    """
    # Patch with the whole subdict, or it gets overwritten.
    # NOTE(review): assumes 'rapid_analysis' is present on the reviewable
    # data -- .get() yields None otherwise and the assignments below would
    # fail; confirm upstream guarantees.
    payload = self.reviewable_data.get('rapid_analysis')

    if self.failing_metrics:
        payload[ELEMENT_REVIEWED] = 'fail'
        payload[ELEMENT_REVIEW_COMMENTS] = self.failure_comment
    else:
        payload[ELEMENT_REVIEWED] = 'pass'
    payload[ELEMENT_REVIEW_DATE] = self.current_time

    patch_internal('samples', {'rapid_analysis': payload},
                   sample_id=self.sample_id)

    return {
        'action_id': self.sample_id + self.date_started,
        'date_finished': self.now(),
        'action_info': {'sample_id': self.sample_id}
    }
def after_update_realm(updated, original):
    """
    Hook propagating '_all_children' changes up to the parent realm after
    a realm tree update.

    :param updated: modified fields
    :type updated: dict
    :param original: original fields
    :type original: dict
    :return: None
    """
    if not g.updateRealm:
        return
    if '_all_children' not in updated:
        return
    if updated['_all_children'] == original['_all_children']:
        return

    # Work out which children were added; when none, which were removed.
    previous = set(original['_all_children'])
    diff = [child for child in updated['_all_children']
            if child not in previous]
    added_children = diff != []
    if not added_children:
        current = set(updated['_all_children'])
        diff = [child for child in original['_all_children']
                if child not in current]

    realmsdrv = current_app.data.driver.db['realm']
    parent = realmsdrv.find_one({'_id': original['_parent']})
    if not parent:
        return

    for child in diff:
        if added_children:
            if child not in parent['_all_children']:
                parent['_all_children'].append(child)
        elif child in parent['_all_children']:
            parent['_all_children'].remove(child)

    g.updateRealm = True
    patch_internal('realm', {"_all_children": parent['_all_children']},
                   False, False, **{"_id": parent['_id']})
    g.updateRealm = False
def after_update_realm(updated, original):
    """
    Hook update tree children on realm parent after update tree children realm

    NOTE(review): this function is defined twice in this file with the same
    name; at import time the later definition silently replaces the earlier
    one — confirm which copy is intended to win.

    :param updates: modified fields
    :type updates: dict
    :param original: original fields
    :type original: dict
    :return: None
    """
    # Only react to internal tree-maintenance patches (see g.updateRealm).
    if g.updateRealm:
        if '_all_children' in updated and updated['_all_children'] != original[
                '_all_children']:
            # Children added: ids present in updated but not in original.
            s = set(original['_all_children'])
            diff = [x for x in updated['_all_children'] if x not in s]
            added_children = (diff != [])
            if not added_children:
                # Nothing added, so children were removed: diff the other way.
                s = set(updated['_all_children'])
                diff = [x for x in original['_all_children'] if x not in s]
            realmsdrv = current_app.data.driver.db['realm']
            parent = realmsdrv.find_one({'_id': original['_parent']})
            if not parent:
                # No parent realm: reached the top of the tree.
                return
            for d in diff:
                if added_children:
                    if d not in parent['_all_children']:
                        parent['_all_children'].append(d)
                else:
                    if d in parent['_all_children']:
                        parent['_all_children'].remove(d)
            lookup = {"_id": parent['_id']}
            # Patching the parent re-enters this hook, propagating the change
            # up the realm tree.
            g.updateRealm = True
            patch_internal('realm', {"_all_children": parent['_all_children']},
                           False, False, **lookup)
            g.updateRealm = False
def update_service_use_template(service, fields):
    """
    This update (patch) service with values of template

    :param service: fields / values of the service
    :type service: dict
    :param fields: fields updated in the template service
    :type fields: dict
    :return: None
    """
    service_db = current_app.data.driver.db['service']
    # Merge all linked templates into a single field -> value mapping;
    # later templates overwrite earlier ones, as in the original order.
    merged = {}
    for tpl_id in service['_templates']:
        merged.update(service_db.find_one({'_id': tpl_id}))
    # Only fields still inherited from the template are pushed to the service.
    inherited = service['_template_fields']
    to_patch = {name: merged[name] for name in fields if name in inherited}
    if to_patch:
        g.ignore_hook_patch = True
        lookup = {"_id": service['_id']}
        patch_internal('service', to_patch, False, False, **lookup)
def update_host_use_template(host, fields):
    """
    This update (patch) host with values of template

    :param host: fields / values of the host
    :type host: dict
    :param fields: fields updated in the template host
    :type fields: dict
    :return: None
    """
    host_db = current_app.data.driver.db['host']
    # Merge all linked templates into a single field -> value mapping;
    # later templates overwrite earlier ones, as in the original order.
    merged = {}
    for tpl_id in host['_templates']:
        merged.update(host_db.find_one({'_id': tpl_id}))
    # Only fields still inherited from the template are pushed to the host.
    inherited = host['_template_fields']
    to_patch = {name: merged[name] for name in fields if name in inherited}
    if to_patch:
        g.ignore_hook_patch = True
        lookup = {"_id": host['_id']}
        patch_internal('host', to_patch, False, False, **lookup)
def patch_internal(self, resource: str, payload=None, concurrency_check=False,
                   skip_validation=False, **lookup):
    """Workaround for Eve issue https://github.com/nicolaiarocci/eve/issues/810"""
    # Local import deliberately shadows this method's name: we wrap Eve's own
    # patch_internal inside a faked request URL rule.
    from eve.methods.patch import patch_internal
    url = self.config['URLS'][resource]
    path = '%s/%s/%s' % (self.api_prefix, url, lookup['_id'])
    # Eve resolves the endpoint from the current request's URL rule, so fake
    # one that matches a PATCH on this document's path.
    with self.__fake_request_url_rule('PATCH', path):
        # Trim to the (response, last_modified, etag, status) 4-tuple.
        return patch_internal(resource,
                              payload=payload,
                              concurrency_check=concurrency_check,
                              skip_validation=skip_validation,
                              **lookup)[:4]
def _create_or_update_user(ldap_data):
    """Try to find user in database. Update if it exists, create otherwise."""
    existing = current_app.data.driver.db['users'].find_one(
        {'nethz': ldap_data['nethz']})
    with admin_permissions():
        if not existing:
            return post_internal('users', ldap_data)[0]
        # Membership will not be downgraded and email not be overwritten
        ldap_data.pop('email', None)
        if existing.get('membership') != u"none":
            ldap_data.pop('membership', None)
        return patch_internal('users', ldap_data, _id=existing['_id'])[0]
def _update_with_retry(resource, item_id, patch_data, max_retries=50,
                       skip_validation=False):
    """Patch ``item_id`` in ``resource``, retrying on etag conflicts.

    Re-reads the item and retries (up to ``max_retries`` times) whenever the
    concurrency check fails with "Precondition Failed"; any other exception
    stops the loop immediately.

    :return: (response, last_modified, etag, status), or an error document
             plus status 404 when the item does not exist.
    """
    item = _get_internal_item(resource, item_id)
    retries = 1
    response, last_modified, etag, status = None, None, None, None
    if item:
        while retries <= max_retries:
            item_etag = item.get('_etag')
            try:
                # NOTE(review): this mutates the *live* request environ to
                # inject the If-Match header for the nested request context —
                # the header leaks into the outer request; confirm intended.
                new_environ = flask.request.environ
                new_environ['HTTP_IF_MATCH'] = item_etag
                with app.request_context(new_environ):
                    logger.info("Updating item {} with {}".format(
                        item_id, patch_data))
                    response, last_modified, etag, status = patch_internal(
                        resource, payload=patch_data, concurrency_check=True,
                        skip_validation=skip_validation, _id=item_id)
                break
            except Exception as e:
                if "Precondition Failed" in str(e):
                    # Stale etag: brief backoff, then re-read and retry.
                    logger.warning(
                        "Retrying {0}/{1} due to {2}\netag: {3}".format(
                            retries, max_retries, e, item_etag))
                    time.sleep(0.01)
                else:
                    break
            retries += 1
            item = _get_internal_item(resource, item_id)
        return response, last_modified, etag, status
    else:
        return {
            "_status": "ERR",
            "_issues": "{} Not Found in {}".format(item_id, resource)
        }, None, None, 404
def _generate_all_links(response, now):
    """Generate a new link for the file and all its variations.

    :param response: the file document that should be updated.
    :param now: datetime that reflects 'now', for consistent expiry generation.
    """
    # TODO: add project id to all files
    project_id = str(response['project']) if 'project' in response else None
    backend = response['backend']
    response['link'] = generate_link(backend, response['file_path'], project_id)
    variations = response.get('variations')
    if variations:
        for variation in variations:
            variation['link'] = generate_link(backend, variation['file_path'],
                                              project_id)
    # Construct the new expiry datetime.
    validity_secs = current_app.config['FILE_LINK_VALIDITY'][backend]
    response['link_expires'] = now + datetime.timedelta(seconds=validity_secs)
    # Persist the refreshed links without Eve's private keys.
    patch_info = remove_private_keys(response)
    file_id = ObjectId(response['_id'])
    (patch_resp, _, _, _) = patch_internal('files', patch_info, _id=file_id)
    if patch_resp.get('_status') == 'ERR':
        log.warning('Unable to save new links for file %s: %r',
                    response['_id'], patch_resp)
        # TODO: raise a snag.
        response['_updated'] = now
    else:
        response['_updated'] = patch_resp['_updated']
    # Be silly and re-fetch the etag ourselves. TODO: handle this better.
    etag_doc = current_app.data.driver.db['files'].find_one({'_id': file_id},
                                                            {'_etag': 1})
    response['_etag'] = etag_doc['_etag']
def on_organizations_put(response, original=None):
    """Hook run on organization PUT: aggregate discipline activities
    into the organization, patch them back, then broadcast the change.

    :param response: the organization document just written
    :param original: previous version of the document (unused here)
    """
    # Only on NIF groups / NLF clubs
    if response.get('type_id', 0) == 6 or len(response.get('activities', [])) == 0:
        # NOTE(review): response.get('_down') may be None, which would raise
        # TypeError here — confirm '_down' is always present on these orgs.
        for v in response.get('_down'):
            if v.get('type') == 14:
                discipline = _get_org(v.get('id'))
                if 'activities' in discipline:
                    for a in discipline['activities']:
                        response['activities'].append(a)
                if 'main_activity' in discipline:
                    response['main_activity'] = discipline.get('main_activity')
        # De-duplicate activities by id (last occurrence wins).
        response['activities'] = list(
            {v['id']: v for v in response['activities']}.values())
        lookup = {'_id': response['_id']}
        resp, _, _, status = patch_internal(
            RESOURCE_ORGANIZATIONS_PROCESS, {
                'activities': response['activities'],
                'main_activity': response['main_activity']
            },
            False, True, **lookup)
        if status != 200:
            # Best-effort: log the failed patch and carry on to broadcast.
            app.logger.error('Patch returned {} for license'.format(status))
            pass

    # Broadcast to all activities and own org
    broadcast({
        'entity': 'organization',
        'entity_id': response['id'],
        'orgs': list(
            set([response['id']] +
                [x['id'] for x in response.get('activities', [])]))
    })
def _create_or_update_user(ldap_data):
    """Try to find user in database. Update if it exists, create otherwise.

    NOTE(review): this file contains two definitions of this function; the
    later one replaces the earlier at import time — confirm which is intended.
    """
    query = {'nethz': ldap_data['nethz']}
    db_data = current_app.data.driver.db['users'].find_one(query)
    with admin_permissions():
        if db_data:
            # Membership will not be downgraded and email not be overwritten
            # Newsletter settings will also not be adjusted
            ldap_data.pop('email', None)
            ldap_data.pop('send_newsletter', None)
            if db_data.get('membership') != u"none":
                ldap_data.pop('membership', None)
            user = patch_internal('users', ldap_data, _id=db_data['_id'])[0]
        else:
            # For new members,
            user = post_internal('users', ldap_data)[0]
    return user
def save_item(self, collection, specs, data, indexes=None):
    """
    Save or update item in collection:
    + collection - mongodb collection
    + specs - list parameters used for search
    + data - data to save (WARNING: on update, the spec fields are
      deleted from the caller's dict in place)
    + indexes - fields to index (specs will be used if empty)
    """
    # BUGFIX: the default used to be ``indexes=[]`` — a mutable default
    # argument shared across calls; use None as the sentinel instead.
    if indexes is None:
        indexes = []
    specs_params = {field: data[field] for field in specs}
    obj = self.db[collection].find_one(specs_params)
    if obj is None:
        # insert
        with api.test_request_context():
            api_response = post_internal(collection, data)
            logger.info("Item Saved: {}".format(api_response))
    else:
        # update: only send fields that are new or changed
        update_this = {}
        for field in specs:
            del data[field]
        for key, value in data.items():
            if key not in obj or value != obj[key]:
                update_this[key] = value
        with api.test_request_context():
            api_response = patch_internal(collection,
                                          payload=update_this,
                                          _id=obj['_id'])
            logger.info("Item Updated: {}".format(api_response))
    if not indexes:
        indexes = specs
    for index in indexes:
        # NOTE(review): ensure_index is deprecated in modern PyMongo in
        # favour of create_index — kept for driver compatibility.
        self.db[collection].ensure_index(index)
def on_update_service(updates, original):
    """Called by EVE HOOK (app.on_update_service)

    On update service, if not template, remove in '_template_fields' fields
    in updates because we update these fields, so they are now not dependant
    of template

    :param updates: modified fields
    :type updates: dict
    :param original: original fields
    :type original: dict
    :return: None
    """
    # Internal patches (template propagation) must not detach fields.
    if g.get('ignore_hook_patch', False):
        return
    # check if not modified the _is_template field
    if '_is_template' in updates and not updates[
            '_is_template'] and original['_is_template']:
        abort(
            make_response(
                "Change a service template to not template is forbidden", 412))
    # manage services not template
    if not original['_is_template']:
        # NOTE(review): this list is mutated in place on ``original`` while
        # building the patch — confirm that side effect is intended.
        template_fields = original['_template_fields']
        do_patch = False
        for (field_name, _) in iteritems(updates):
            if field_name in template_fields:
                template_fields.remove(field_name)
                do_patch = True
        if do_patch:
            lookup = {"_id": original['_id']}
            to_patch = {"_template_fields": template_fields}
            response = patch_internal('service', to_patch, False, False,
                                      **lookup)
            # Propagate the new etag so Eve's concurrency check still passes.
            updates['_etag'] = response[0]['_etag']
            original['_etag'] = response[0]['_etag']
def create_dashboard(self, host_id):
    # pylint: disable=too-many-locals
    """
    Create / update a dashboard in Grafana

    Builds one row per host check command and one row per service from the
    stored perfdata, flags each checked element with its Grafana panel id,
    then pushes the dashboard to the Grafana HTTP API.

    :param host_id: id of the host
    :type host_id: str
    :return: None
    """
    # No configured datasource: nothing to build.
    if not self.datasource:
        return
    headers = {"Authorization": "Bearer " + self.api_key}
    host_db = current_app.data.driver.db['host']
    service_db = current_app.data.driver.db['service']
    command_db = current_app.data.driver.db['command']
    host = host_db.find_one({'_id': host_id})
    hostname = host['name']
    command = command_db.find_one({'_id': host['check_command']})
    command_name = command['name']
    rows = []
    targets = []
    # One target per perfdata metric of the host check.
    perfdata = PerfDatas(host['ls_perf_data'])
    for measurement in perfdata.metrics:
        fields = perfdata.metrics[measurement].__dict__
        targets.append(self.generate_target(fields['name'],
                                            {"host": hostname}))
    if len(targets) > 0:
        rows.append(self.generate_row(command_name, targets))
    if host['ls_last_check'] > 0:
        # Update host live state: the host row is always panel 1.
        data = {
            "ls_grafana": True,
            "ls_grafana_panelid": 1
        }
        lookup = {"_id": host['_id']}
        patch_internal('host', data, False, False, **lookup)
    # now get services
    services = service_db.find({'host': host_id})
    for service in services:
        if service['ls_last_check'] > 0:
            perfdata = PerfDatas(service['ls_perf_data'])
            targets = []
            for measurement in perfdata.metrics:
                fields = perfdata.metrics[measurement].__dict__
                targets.append(self.generate_target(
                    fields['name'],
                    {"host": hostname, "service": service['name']}))
            if len(targets) > 0:
                rows.append(self.generate_row(service['name'], targets))
            # Update service live state; panel id is the row position.
            data = {
                "ls_grafana": True,
                "ls_grafana_panelid": len(rows)
            }
            lookup = {"_id": service['_id']}
            patch_internal('service', data, False, False, **lookup)
    # id=None + overwrite=True lets Grafana create-or-replace the dashboard.
    self.dashboard_template['id'] = None
    self.dashboard_template['title'] = "host_" + hostname
    self.dashboard_template['rows'] = rows
    data = {
        "dashboard": self.dashboard_template,
        "overwrite": True
    }
    requests.post('http://' + self.host + ':' + self.port +
                  '/api/dashboards/db', json=data, headers=headers)
def get_names(data):
    ''' Not used now '''
    # Build a name entry from a raw record's translation fields.
    name = {'downloads': 0, 'keyword': data.get('traduccion')}
    meaning = data.get('definicion_traduccion')
    if meaning:
        name['meaning'] = meaning
    return name

# Script body: upsert each record into the 'images' resource.
# keys_im maps urls already inserted during this run to their document ids.
keys_im = {}
with app.test_request_context():
    for d in data:
        payload = data_to_payload(d)
        url = payload.get('url')
        if url in keys_im:
            # Already inserted this run: merge the extra names into it.
            original = get_document('images', concurrency_check=False,
                                    **{'url': url})
            original['names'].extend(payload.get('names'))
            response = patch_internal('images', {'names': original['names']},
                                      **{'url': url})
            if response[-1] == 201 or response[-1] == 200:
                # print ('modificado ', url)
                pass
            else:
                print('Error modif.-> ', response, url)
        else:
            response = post_internal('images', payload)
            if response[-1] == 201:
                # print ('añadido ', url)
                keys_im[url] = get_id(response)
            else:
                print('Error -> ', response, url)
def on_inserted_event(items):
    """
    A user has submitted an event:
    - Update the service session CDR depending on the event type
    - Update last user activity in the session
    - Increase the session events counter

    :param items: list of inserted event documents
    :return: None
    """
    # BUGFIX: the old body used a Python-2-only ``print`` statement (a syntax
    # error under Python 3) and an unused enumerate() index.
    for item in items:
        print("*** inserted event: %s" % item['type'])

        # Update service session CDR
        _userservice_cdr = app.data.driver.db['userservice_cdr']
        userservice_cdr = _userservice_cdr.find_one({
            'userservice_session': item['userservice_session']
        })
        if userservice_cdr:
            # Update service CDR depending upon event type
            data = {}
            if item['type'] == 'session.joined':
                # A new user joined the session ... increase nb_users
                data["nb_users"] = userservice_cdr['nb_users'] + 1
                # First joiner is the creator, second the participant.
                if userservice_cdr['nb_users'] == 0:
                    data["user_creator"] = item['user']
                if userservice_cdr['nb_users'] == 1:
                    data["user_participant"] = item['user']
            if item['type'] == 'session.closed':
                # Session got closed ... update closing date
                data["closing_date"] = datetime.utcnow()
                data["status"] = 'close'
            if item['type'] == 'attachment.document':
                # A new document has been transfered ... increase nb_documents
                data["nb_documents"] = userservice_cdr['nb_documents'] + 1
            if item['type'].startswith('video.'):
                # Track the latest videoconference event type.
                data["videoconference"] = item['type']
            if data:
                lookup = {"_id": userservice_cdr['_id']}
                patch_internal('userservice_cdr', data, False, False, **lookup)

        # TODO: Really necessary?
        # Update user activity in the session
        _userservice_session_user = app.data.driver.db['userservice_session_user']
        userservice_session_user = _userservice_session_user.find_one({
            'userservice_session': item['userservice_session'],
            'user': g.get('users_id', None)
        })
        if userservice_session_user:
            # Update last user activity in the session
            data = {
                "last_ping": datetime.utcnow()
            }
            lookup = {"_id": userservice_session_user['_id']}
            patch_internal('userservice_session_user', data, False, False,
                           **lookup)

        # Increase session events counter
        _session = app.data.driver.db['userservice_session']
        session = _session.find_one({'_id': item['userservice_session']})
        if session:
            data = {
                "current_nb_events": session['current_nb_events'] + 1
            }
            lookup = {"_id": item['userservice_session']}
            patch_internal('userservice_session', data, False, False, **lookup)
def after_inserting_project(project, db_user):
    """Finish setting up a freshly inserted project.

    Creates a project-admin group, adds the creating user to it, writes a
    permission structure onto the project and its node types, initialises
    cloud storage, and commits the result directly to MongoDB.

    :param project: the project document just inserted
    :param db_user: the creating user's document (mutated: group appended)
    """
    project_id = project['_id']
    user_id = db_user['_id']

    # Create a project-specific admin group (with name matching the project id)
    result, _, _, status = post_internal('groups', {'name': str(project_id)})
    if status != 201:
        log.error('Unable to create admin group for new project %s: %s',
                  project_id, result)
        return abort_with_error(status)
    admin_group_id = result['_id']
    log.debug('Created admin group %s for project %s',
              admin_group_id, project_id)

    # Assign the current user to the group
    db_user.setdefault('groups', []).append(admin_group_id)
    result, _, _, status = patch_internal('users',
                                          {'groups': db_user['groups']},
                                          _id=user_id)
    if status != 200:
        log.error('Unable to add user %s as member of admin group %s '
                  'for new project %s: %s',
                  user_id, admin_group_id, project_id, result)
        return abort_with_error(status)
    log.debug('Made user %s member of group %s', user_id, admin_group_id)

    # Assign the group to the project with admin rights
    is_admin = authorization.is_admin(db_user)
    # NOTE(review): world GET is granted when the *creating* user is an
    # admin — confirm that is the intended default visibility.
    world_permissions = ['GET'] if is_admin else []
    permissions = {
        'world': world_permissions,
        'users': [],
        'groups': [
            {'group': admin_group_id,
             'methods': ['GET', 'PUT', 'POST', 'DELETE']},
        ]
    }

    def with_permissions(node_type):
        # Deep-copy so every node type gets its own permission dict.
        copied = copy.deepcopy(node_type)
        copied['permissions'] = permissions
        return copied

    # Assign permissions to the project itself, as well as to the node_types
    project['permissions'] = permissions
    project['node_types'] = [
        with_permissions(node_type_group),
        with_permissions(node_type_asset),
        with_permissions(node_type_comment),
        with_permissions(node_type_texture),
        with_permissions(node_type_group_texture),
    ]

    # Allow admin users to use whatever url they want.
    if not is_admin or not project.get('url'):
        project['url'] = "p-{!s}".format(project_id)

    # Initialize storage page (defaults to GCS)
    if current_app.config.get('TESTING'):
        log.warning('Not creating Google Cloud Storage bucket '
                    'while running unit tests!')
    else:
        gcs_storage = GoogleCloudStorageBucket(str(project_id))
        if gcs_storage.bucket.exists():
            log.info('Created CGS instance for project %s', project_id)
        else:
            log.warning('Unable to create CGS instance for project %s',
                        project_id)

    # Commit the changes directly to the MongoDB; a PUT is not allowed yet,
    # as the project doesn't have a valid permission structure.
    projects_collection = current_app.data.driver.db['projects']
    result = projects_collection.update_one(
        {'_id': project_id},
        {'$set': remove_private_keys(project)})
    if result.matched_count != 1:
        log.warning('Unable to update project %s: %s',
                    project_id, result.raw_result)
        abort_with_error(500)
def recalculate():
    """
        Recalculate all the live synthesis counters

    For every realm: make sure a livesynthesis document exists (seeded with
    zeroed counters), then recount host and service states whenever the
    stored totals no longer match the database.
    """
    livesynthesis = current_app.data.driver.db['livesynthesis']
    realmsdrv = current_app.data.driver.db['realm']
    allrealms = realmsdrv.find()
    for _, realm in enumerate(allrealms):
        live_current = livesynthesis.find_one({'_realm': realm['_id']})
        if live_current is None:
            # No synthesis document for this realm yet: seed a zeroed one.
            data = {
                'hosts_total': 0,
                'hosts_up_hard': 0,
                'hosts_up_soft': 0,
                'hosts_down_hard': 0,
                'hosts_down_soft': 0,
                'hosts_unreachable_hard': 0,
                'hosts_unreachable_soft': 0,
                'hosts_acknowledged': 0,
                'hosts_in_downtime': 0,
                'hosts_flapping': 0,
                'hosts_business_impact': 0,
                'services_total': 0,
                'services_ok_hard': 0,
                'services_ok_soft': 0,
                'services_warning_hard': 0,
                'services_warning_soft': 0,
                'services_critical_hard': 0,
                'services_critical_soft': 0,
                'services_unknown_hard': 0,
                'services_unknown_soft': 0,
                'services_acknowledged': 0,
                'services_in_downtime': 0,
                'services_flapping': 0,
                'services_business_impact': 0,
                '_realm': realm['_id']
            }
            livesynthesis.insert(data)
            live_current = livesynthesis.find_one({'_realm': realm['_id']})

        # Update hosts live synthesis
        hosts = current_app.data.driver.db['host']
        hosts_count = hosts.find({'_is_template': False,
                                  '_realm': realm['_id']}).count()
        # Only recount when the stored total drifted from the database.
        if live_current['hosts_total'] != hosts_count:
            data = {"hosts_total": hosts_count}
            # Acknowledged hosts are excluded from the per-state counters
            # and counted separately below.
            data['hosts_up_hard'] = hosts.find({
                "ls_state": "UP", "ls_state_type": "HARD",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['hosts_down_hard'] = hosts.find({
                "ls_state": "DOWN", "ls_state_type": "HARD",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['hosts_unreachable_hard'] = hosts.find({
                "ls_state": "UNREACHABLE", "ls_state_type": "HARD",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['hosts_up_soft'] = hosts.find({
                "ls_state": "UP", "ls_state_type": "SOFT",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['hosts_down_soft'] = hosts.find({
                "ls_state": "DOWN", "ls_state_type": "SOFT",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['hosts_unreachable_soft'] = hosts.find({
                "ls_state": "UNREACHABLE", "ls_state_type": "SOFT",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['hosts_acknowledged'] = hosts.find(
                {'ls_acknowledged': True, "_realm": realm["_id"]}).count()
            data['hosts_in_downtime'] = hosts.find(
                {'ls_downtimed': True, "_realm": realm["_id"]}).count()
            lookup = {"_id": live_current['_id']}
            patch_internal('livesynthesis', data, False, False, **lookup)

        # Update services live synthesis
        services = current_app.data.driver.db['service']
        services_count = services.find({'_is_template': False,
                                        '_realm': realm['_id']}).count()
        if live_current['services_total'] != services_count:
            data = {"services_total": services_count}
            data['services_ok_hard'] = services.find({
                "ls_state": "OK", "ls_state_type": "HARD",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['services_warning_hard'] = services.find({
                "ls_state": "WARNING", "ls_state_type": "HARD",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['services_critical_hard'] = services.find({
                "ls_state": "CRITICAL", "ls_state_type": "HARD",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['services_unknown_hard'] = services.find({
                "ls_state": "UNKNOWN", "ls_state_type": "HARD",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['services_ok_soft'] = services.find({
                "ls_state": "OK", "ls_state_type": "SOFT",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['services_warning_soft'] = services.find({
                "ls_state": "WARNING", "ls_state_type": "SOFT",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['services_critical_soft'] = services.find({
                "ls_state": "CRITICAL", "ls_state_type": "SOFT",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['services_unknown_soft'] = services.find({
                "ls_state": "UNKNOWN", "ls_state_type": "SOFT",
                "ls_acknowledged": False, "_realm": realm["_id"]}).count()
            data['services_acknowledged'] = services.find(
                {"ls_acknowledged": True, "_realm": realm["_id"]}).count()
            data['services_in_downtime'] = services.find(
                {"ls_downtimed": True, "_realm": realm["_id"]}).count()
            lookup = {"_id": live_current['_id']}
            patch_internal('livesynthesis', data, False, False, **lookup)
def login():
    """Authenticate a user and issue a session token.

    Expects a JSON body with 'username' and 'password'. When username is the
    literal 'access_token', the password is treated as a JWT and validated
    against the public key; otherwise the credentials are checked against
    Melwin. On success a token is stored on the user and returned.
    """
    username = None
    password = None
    logged_in = False

    m = Melwin()
    if m is None:
        app.logger.critical("Melwin service unavailable")
        eve_abort('503', 'Melwin service is unavailable')

    # Request via json
    rq = request.get_json()
    try:
        username = rq['username']
        password = rq['password']
    except:
        # Now it will fail in the next if
        pass

    if username == 'access_token':
        try:
            public_key = _get_public_key()
            # BUGFIX: PyJWT's decode() takes ``algorithms`` (a list); the old
            # ``algorithm='HS256'`` keyword was not a decode parameter and was
            # silently ignored, leaving the accepted algorithm unpinned.
            decoded_token = jwt.decode(password, public_key, issuer=ISSUER,
                                       algorithms=['HS256'])
            logged_in = True
            username = decoded_token.get('melwin_id', None)
            if username is None:
                eve_abort(401, 'Could not validate the token, could not find username')
            else:
                username = int(username)
        # BUGFIX: the specific PyJWT errors all subclass InvalidTokenError,
        # so the generic handler must come last or these are unreachable.
        except jwt.exceptions.InvalidSignatureError:
            logged_in = False
            eve_abort(401, 'Could not validate the token, InvalidSignatureError')
        except jwt.exceptions.InvalidIssuerError:
            logged_in = False
            eve_abort(401, 'Could not validate the token, InvalidIssuerError')
        except jwt.exceptions.ExpiredSignatureError:
            logged_in = False
            eve_abort(401, 'Could not validate the token, ExpiredSignatureError')
        except jwt.exceptions.InvalidTokenError:
            logged_in = False
            eve_abort(401, 'Could not validate the token, InvalidTokenError')
        except Exception as e:
            logged_in = False
            eve_abort(401, 'Could not validate your token {}'.format(e))
    else:
        try:
            username = int(username)
            logged_in = m.login(username, password)
        except:
            logged_in = False
            eve_abort(503, 'Could not log you into Melwin')

    # isinstance(username, int) and len(password) == 9 and
    # Now process user and successful authentication
    if logged_in is True:
        try:
            user, last_modified, etag, status = getitem_internal(
                resource='users', **{'id': username})
        except:
            user = None
            if not is_mongo_alive():
                eve_abort(502, 'Network problems')

        # If not existing, make from melwin!
        if user is None or status != 200:
            if not create_user(username):
                app.logger.error("502: Could not create user %i from Melwin" % username)
                eve_abort(502, 'Could not create user from Melwin')
            else:
                app.logger.info("Created user %i" % username)

        # token = uuid5(uuid4(),rq['username'])
        token = uuid4().hex
        # valid = utc.replace(hours=+2)  # @bug: utc and cet!!!
        utc = arrow.utcnow()
        valid = utc.replace(seconds=+app.config['AUTH_SESSION_LENGHT'])

        try:
            response, last_modified, etag, status = patch_internal(
                'users/auth',
                payload={'auth': {'token': token, 'valid': valid.datetime}},
                concurrency_check=False,
                **{'id': username})
            if status != 200:
                app.logger.error("Could not insert token for %i" % username)
        except:
            app.logger.exception("Could not update user %i auth token" % username)
            eve_abort(500, "Could not update user %i auth token" % username)

        t = '%s:' % token
        b64 = b64encode(t.encode('utf-8'))
        return eve_response(data={'success': True,
                                  'username': username,
                                  'token': token,
                                  'token64': b64.decode('utf-8'),
                                  'valid': valid.datetime},
                            status=200)

    # On error sleep a little against brute force
    sleep(1)
    return eve_response({'success': False,
                         'username': None,
                         'token': None,
                         'token64': None,
                         'valid': None,
                         'message': 'Wrong username or password'})
def remove_file(item):
    """Blank out a stored file's URL by patching it to the removed marker."""
    patch_internal('file', {"Url": 'removed'}, _id=item['_id'])
def remove_project(item):
    """Detach a project from its owner by rewriting the UserID marker."""
    marker = 'removed-by-' + item['UserID']
    patch_internal('project', {"UserID": marker}, _id=item['_id'])