def closure(request, *args, **kwargs):
    # Inner wrapper of an API-key-checking decorator. NOTE(review): `func`,
    # `func_name` and `error_on_invalidkey` are free variables from the
    # enclosing (not visible) decorator scope — confirm against the outer def.
    api_key = request.GET.get('key', None)
    heka_client = get_heka_client()
    if api_key is None:
        # No key supplied: count it, and optionally reject the request.
        heka_client.incr('%s.no_api_key' % func_name)
        if error_on_invalidkey:
            result = HTTPBadRequest()
            result.content_type = 'application/json'
            result.body = NO_API_KEY
            return result
    else:
        # Key supplied: look it up on the read slave and record whether it
        # is known. An unknown key is only counted, not rejected.
        session = request.db_slave_session
        found_key_filter = session.query(ApiKey)
        found_key_filter = found_key_filter.filter(ApiKey.valid_key == api_key)
        if found_key_filter.count():
            # Dots are replaced because they would nest the metric name.
            heka_client.incr('%s.api_key.%s' % (func_name,
                                                api_key.replace('.', '__')))
        else:
            heka_client.incr('%s.unknown_api_key' % func_name)
    # Always fall through to the wrapped view (unless rejected above).
    return func(request, *args, **kwargs)
def getDataPath(request):
    """
    Open a native file-picker dialog and return the selected file's path.

    Reads 'fileType' from the request JSON body to choose the dialog's
    file-type filter. For 'xlsx' selections, additionally opens the workbook
    and returns, per sheet, the header row (row 1) and the Python type name
    of each value in row 2.

    Returns:
        {'dataPath': ...} — plus a 'sheets' mapping for xlsx files — or an
        HTTPBadRequest response if the dialog/file access fails.

    NOTE(review): this opens a Tk dialog in the server process, so it only
    works when the server runs on the user's own machine.
    """
    try:
        fileOptions = {
            'txt': [('all files', '.*'), ('txt files', '.txt')],
            'csv': [('all files', '.*'), ('csv files', '.csv')],
            'json': [('all files', '.*'), ('json files', '.json')],
            'xlsx': [('all files', '.*'), ('excel files', '.xlsx')],
            'zip': [('all files', '.*'), ('zip files', '.zip')],
            'npz': [('all files', '.*'), ('numpy files', '.npz')]
        }
        fileType = request.json['fileType']
        options = {
            'filetypes': fileOptions[fileType],
            # 'initialdir': 'c:\\',
            'title': 'Select ' + fileType + ' file'
        }
        Tk().withdraw()  # hide the empty Tk root window behind the dialog
        dataPath = askopenfilename(**options)
    except IOError as err:
        response = HTTPBadRequest()
        # str(err) is safe even when err.args[0] is an errno int
        # (the old 'getDataPath Error: ' + err.args[0] could raise TypeError).
        response.text = 'getDataPath Error: ' + str(err)
        return response
    else:
        if fileType != 'xlsx':
            return {'dataPath': dataPath}
        wb = openpyxl.load_workbook(dataPath, read_only=True)
        sheets = {}
        # wb.sheetnames / wb[name] replace the deprecated
        # get_sheet_names() / get_sheet_by_name() openpyxl APIs.
        for sheet_name in wb.sheetnames:
            sheets[sheet_name] = []
            ws = wb[sheet_name]
            for icol in range(1, ws.max_column + 1):
                header_cell = ws.cell(row=1, column=icol)
                sample_cell = ws.cell(row=2, column=icol)
                # Infer a column dtype from the first data row; an empty
                # cell defaults to 'str'.
                dtype = type(sample_cell.value).__name__
                if sample_cell.value is None:
                    dtype = 'str'
                # Guard against empty header cells: .replace on None would
                # raise AttributeError in the original code.
                name = (header_cell.value.replace('.', '_')
                        if header_cell.value is not None else '')
                sheets[sheet_name].append({'name': name, 'dtype': dtype})
        return {'dataPath': dataPath, 'sheets': sheets}
def geolocate_post(request):
    """
    Geolocation lookup endpoint.

    Requires a 'key' query parameter (400 with a JSON body otherwise).
    Searches by WiFi access points when any were submitted, by cell tower
    otherwise; returns 404 when nothing matched, else a dict with the
    located position and its accuracy.
    """
    api_key = request.GET.get('key', None)
    if api_key is None:
        # Reject un-keyed requests straight away.
        error = HTTPBadRequest()
        error.content_type = 'application/json'
        error.body = NO_API_KEY
        return error

    data = request.validated
    session = request.db_slave_session

    # Prefer WiFi data over cell data when access points were submitted.
    if data['wifiAccessPoints']:
        match = search_wifi_ap(session, data)
    else:
        match = search_cell_tower(session, data)

    if match is None:
        missing = HTTPNotFound()
        missing.content_type = 'application/json'
        missing.body = NOT_FOUND
        return missing

    return {
        "location": {
            "lat": match['lat'],
            "lng": match['lon'],
        },
        "accuracy": float(match['accuracy']),
    }
def getDataFields(request):
    """
    Return the column/field names (and, for JSON, dtypes) of a data file.

    Reads 'fileType' and 'dataPath' from the request JSON body. For JSON
    files only the first array item is inspected (streamed via ijson) to
    derive field names and dtypes. For CSV files, 'delimiter' and
    'headersIdx' are also read from the body; 'headersIdx' selects which
    row is treated as the header row.

    Returns:
        {'fields': [...]} (plus 'dtypes' for JSON), or an HTTPBadRequest
        response on I/O failure. Returns None for unknown file types
        (preserved from the original behaviour).
    """
    try:
        fields = []
        dtypes = []
        fileType = request.json['fileType']
        dataPath = request.json['dataPath']
        if fileType == 'json':
            with open(dataPath, 'rb') as datafile:
                for item in ijson.items(datafile, 'item'):
                    flatdoc = {}
                    ProcessJSON.flattenDoc(item, None, flatdoc)
                    # fields = sorted(list(flatdoc.keys()))
                    fields = list(flatdoc.keys())
                    # ijson parses JSON numbers as Decimal; report as float.
                    dtypes = ['float'
                              if type(flatdoc[field]).__name__ == 'Decimal'
                              else type(flatdoc[field]).__name__
                              for field in fields]
                    break  # only the first item is needed
                # no explicit close(): the `with` block already closes it
            return {'fields': fields, 'dtypes': dtypes}
        elif fileType == 'csv':
            delimiter = request.json['delimiter']
            headersIdx = request.json['headersIdx']
            with open(dataPath) as datafile:
                csvreader = csv.reader(datafile, delimiter=delimiter)
                if headersIdx is not None:  # was `!= None`
                    # Advance so that row number `headersIdx` is the header.
                    for _ in range(int(headersIdx)):
                        fields = next(csvreader)
                else:
                    fields = next(csvreader)
            return {'fields': fields}
    except IOError as err:
        response = HTTPBadRequest()
        # str(err) is safe even when err.args[0] is an errno int
        # ('...' + err.args[0] could raise TypeError in the original).
        response.text = 'getDataFields Error: ' + str(err)
        return response
def geolocate_view(request):
    """
    Geolocation lookup endpoint with heka metrics instrumentation.

    Requires a 'key' query parameter (400 otherwise). Tries a WiFi-AP
    search when access points were submitted, else a cell-tower search,
    then falls back to GeoIP on the client address; a counter is emitted
    for each outcome. Returns 404 when everything missed, otherwise a
    dict with the located position and accuracy.
    """
    heka_client = get_heka_client()
    api_key = request.GET.get('key', None)

    # Reject requests without an API key straight away.
    if api_key is None:
        heka_client.incr('geolocate.no_api_key')
        bad_request = HTTPBadRequest()
        bad_request.content_type = 'application/json'
        bad_request.body = NO_API_KEY
        return bad_request

    # Dots would nest the metric name, so encode them.
    heka_client.incr('geolocate.api_key.%s' % api_key.replace('.', '__'))

    data, errors = preprocess_request(
        request,
        schema=GeoLocateSchema(),
        extra_checks=(geolocate_validator, ),
        response=JSONError,
    )

    session = request.db_slave_session

    # WiFi access points take precedence over cell towers.
    if data['wifiAccessPoints']:
        position = search_wifi_ap(session, data)
        if position is not None:
            heka_client.incr('geolocate.wifi_hit')
    else:
        position = search_cell_tower(session, data)
        if position is not None:
            heka_client.incr('geolocate.cell_hit')

    # GeoIP fallback on the client address.
    if position is None and request.client_addr:
        position = search_geoip(request.registry.geoip_db,
                                request.client_addr)
        if position is not None:
            heka_client.incr('geolocate.geoip_hit')

    if position is None:
        heka_client.incr('geolocate.miss')
        not_found = HTTPNotFound()
        not_found.content_type = 'application/json'
        not_found.body = NOT_FOUND
        return not_found

    return {
        "location": {
            "lat": position['lat'],
            "lng": position['lon'],
        },
        "accuracy": float(position['accuracy']),
    }
def _decode_cookie(self):
    """
    Extract and verify the signed "userid" cookie.

    The cookie format is "<digest>-<timestamp>-<userid>". Returns the
    userid on success, None when no cookie is present. Raises an
    HTTPBadRequest (with the cookie deleted on the response) when the
    cookie is malformed or the digest does not verify.
    """
    import hmac  # local import: only needed for the signature check

    cookie = self.request.cookies.get("userid", None)
    if not cookie:
        return None
    # try to extract a userid and timestamp from the cookie
    try:
        (digest, ts, userid) = cookie.split("-", 2)
        logging.info("cookie splitted up:%s-%s-%s" % (digest, ts, userid, ))
    except ValueError:
        # was a bare `except:`, which would also swallow unrelated errors
        # such as KeyboardInterrupt; unpacking a short split raises
        # ValueError, which is the only failure mode here.
        logging.error("BAD COOKIE FORMAT:%s|" % cookie)
        response = HTTPBadRequest()
        response.delete_cookie("userid")
        raise response
    ip = ""
    d2 = calculate_digest(self.SECRET, userid, ts, ip)
    # Constant-time comparison: `d2 == digest` on attacker-controlled input
    # leaks timing information about the expected digest.
    if hmac.compare_digest(d2, digest):
        return userid
    logging.error("bad digest")
    response = HTTPBadRequest()
    response.delete_cookie("userid")
    raise response
def regression_data_delete(request):
    """
    Delete one regression data record and redirect back to the listing.

    Returns 400 when the request fails validation or carries no '_id';
    otherwise deletes the record and redirects to the data page for the
    requested type.
    """
    deletable = regression_request_isvalid(request) and "_id" in request.POST
    if not deletable:
        return HTTPBadRequest()
    record_id = request.POST["_id"]
    request.regressiondb.delete(record_id)
    redirect_to = "/regression/data?type=" + request.GET["type"]
    return HTTPFound(location=redirect_to)
def jquery_livesearch_view(context, request):
    """
    Serve JSON results for the jQuery livesearch widget.

    Reads the search term from the 'val' request parameter and an optional
    search 'kind'; runs each matching IGroupSearchFactory utility and
    returns the adapted results JSON-encoded.
    """
    request.unicode_errors = 'ignore'
    try:
        searchterm = request.params.get('val', None)
    except UnicodeDecodeError:
        # Probably windows client didn't set request encoding. Try again.
        request.charset = 'ISO-8859-1'
        searchterm = request.params.get('val', None)

    if searchterm is None:
        # The request forgot to send the key we use to do a search, so
        # make a friendly error message. Important for the unit test.
        msg = "Client failed to send a 'val' parameter as the searchterm"
        return HTTPBadRequest(msg)

    # maybe do some * checking to verify that we don't have a
    # prefix search < 3 chars

    records = []

    # we return back 5 results for each type of search
    results_per_type = 5

    kind = request.params.get('kind', '')
    if not kind:
        # No kind requested: search every livesearch-enabled factory.
        listitems = [
            item for item in get_listitems(IGroupSearchFactory)
            if item['component'].livesearch
        ]
    else:
        search_utility = queryUtility(IGroupSearchFactory, kind)
        if search_utility is None:
            msg = "The LiveSearch kind %s is not known" % kind
            return HTTPBadRequest(msg)
        else:
            # simulate a list item for the loop below
            listitems = (dict(component=search_utility), )
            # we'll just have on type of results, so we return back 20 results
            results_per_type = 20

    start_time = time.time()
    for listitem in listitems:
        utility = listitem['component']
        factory = utility(context, request, searchterm)
        if factory is None:
            # This factory does not apply to the term; skip it.
            continue
        factory.limit = results_per_type
        try:
            num, docids, resolver = factory()
        except ParseError:
            # Unparsable query for this factory; skip rather than fail.
            continue
        for result in (resolver(x) for x in docids):
            if result is None:
                continue
            record = queryMultiAdapter((result, request), ILiveSearchEntry)
            # NOTE(review): assert is stripped under `python -O`; consider
            # raising explicitly if this invariant matters in production.
            assert record is not None, ("Unexpected livesearch result: "
                                        + result.__class__.__name__)
            records.append(record)
    end_time = time.time()
    log.debug('livesearch: %0.3fs for "%s", kind=%s',
              end_time - start_time, searchterm, kind)

    result = JSONEncoder().encode(records)
    return Response(result, content_type="application/json")
def report_printers(context, request, file_ext):
    '''
    Generate a report with all the printers and their related computers.

    Args:
        ou_id (string) : ID of the OU (resolved from the request via
            check_visibility_of_ou).

    Returns:
        headers (list) : The headers of the table to export
        rows (list) : Rows with the report data
        widths (list) : The widths of the columns of the table to export
        page : Translation of the word "page" to the current language
        of : Translation of the word "of" to the current language
        report_type : Type of report (html, csv or pdf)
    '''
    # Check current user permissions
    ou_id = check_visibility_of_ou(request)
    if ou_id is None:
        raise HTTPBadRequest()

    # Get printers policy
    policy = request.db.policies.find_one({'slug': 'printer_can_view'})
    property_name = 'policies.' + str(policy['_id']) + '.object_related_list'

    # Get all printers
    query = request.db.nodes.find({
        'type': 'printer',
        'path': get_filter_nodes_belonging_ou(ou_id)
    })

    task = ChefTask()
    rows = []
    if file_ext == 'pdf':
        for item in query:
            row = []
            # No path in PDF because it's too long
            row.append('--')
            row.append(item['name'])
            row.append(treatment_string_to_pdf(item, 'manufacturer', 15))
            row.append(treatment_string_to_pdf(item, 'model', 15))
            row.append(treatment_string_to_pdf(item, 'serial', 15))
            row.append(treatment_string_to_pdf(item, 'registry', 15))

            # Get all nodes related with this printer
            nodes_query = request.db.nodes.find(
                {property_name: str(item['_id'])})

            related_computers = []
            related_objects = []
            for node in nodes_query:
                related_computers = task.get_related_computers(
                    node, related_computers, related_objects)

            # Remove duplicated computers (keyed by "<path>.<name>")
            computer_paths = []
            computers = []
            for computer in related_computers:
                full_path = computer['path'] + '.' + computer['name']
                if not full_path in computer_paths:
                    computer_paths.append(full_path)
                    computers.append(computer)

            if len(computers) == 0:
                # Printer with no computers: one row with a placeholder.
                row.append('--')
                rows.append(row)
            else:
                # One output row per related computer.
                for computer in computers:
                    computer_row = list(row)
                    computer_row.append(
                        treatment_string_to_pdf(computer, 'name', 15))
                    # No path in PDF because it's too long
                    rows.append(computer_row)
    else:
        # CSV / HTML branch: same structure as the PDF branch but with the
        # full path column and csv/html-specific cell formatting.
        for item in query:
            row = []
            item['complete_path'] = get_complete_path(request.db,
                                                      item['path'])
            row.append(treatment_string_to_csv(item, 'complete_path'))
            row.append(treatment_string_to_csv(item, 'name') \
                if file_ext == 'csv' else get_html_node_link(item))
            row.append(treatment_string_to_csv(item, 'manufacturer'))
            row.append(treatment_string_to_csv(item, 'model'))
            row.append(treatment_string_to_csv(item, 'serial'))
            row.append(treatment_string_to_csv(item, 'registry'))

            # Get all nodes related with this printer
            nodes_query = request.db.nodes.find(
                {property_name: str(item['_id'])})

            related_computers = []
            related_objects = []
            for node in nodes_query:
                related_computers = task.get_related_computers(
                    node, related_computers, related_objects)

            # Remove duplicated computers
            computer_paths = []
            computers = []
            for computer in related_computers:
                full_path = computer['path'] + '.' + computer['name']
                if not full_path in computer_paths:
                    computer_paths.append(full_path)
                    computers.append(computer)

            if len(computers) == 0:
                row.append('--')
                rows.append(row)
            else:
                for computer in computers:
                    computer_row = list(row)
                    computer_row.append(treatment_string_to_csv(
                        computer, 'name') \
                        if file_ext == 'csv' \
                        else get_html_node_link(computer))
                    # NOTE(review): this uses item['path'], not
                    # computer['path'] — looks like a copy/paste slip,
                    # and the value is never appended to the row; confirm.
                    computer['complete_path'] = get_complete_path(
                        request.db, item['path'])
                    rows.append(computer_row)

    header = (_(u'Path'),
              _(u'Name'),
              _(u'Manufacturer'),
              _(u'Model'),
              _(u'Serial number'),
              _(u'Registry number'),
              _(u'Computer'))

    # Column widths in percentage
    if file_ext == 'pdf':
        widths = (0, 25, 15, 15, 15, 15, 15)
    else:
        widths = (0, 20, 10, 10, 10, 10, 20)
    title = _(u'Printers and related computers report')
    now = datetime.datetime.now().strftime("%d/%m/%Y %H:%M")

    # Sort rows by path, then name, then computer (case-insensitive)
    rows = sorted(rows, key=lambda i: (i[0].lower(),
                                       i[1].lower(),
                                       i[6].lower()))

    return {'headers': header,
            'rows': rows,
            'default_order': [[0, 'asc'], [1, 'asc'], [6, 'asc']],
            'widths': widths,
            'report_title': title,
            'page': _(u'Page'),
            'of': _(u'of'),
            'report_type': file_ext,
            'now': now}
def new_colleague(request):
    """
    Create a new colleague entry (in triage) from a JSON submission.

    Validates CSRF, required fields, email and ORCID, rejects duplicate
    ORCIDs, then inserts a Colleague row plus a Colleaguetriage entry and
    commits. Returns {'colleague_id': ...} on success, HTTPBadRequest with
    a JSON message body on any validation or database failure.
    """
    if not check_csrf_token(request, raises=False):
        return HTTPBadRequest(body=json.dumps({'error': 'Bad CSRF Token'}))
    params = request.json_body
    required_fields = ['first_name', 'last_name', 'email', 'orcid']
    for x in required_fields:
        # .get() instead of params[x]: a field that is absent entirely
        # previously raised an unhandled KeyError instead of a 400.
        if not params.get(x):
            msg = x + ' is a required field.'
            return HTTPBadRequest(body=json.dumps({'message': msg}),
                                  content_type='text/json')

    is_email_valid = validate_email(params['email'], verify=False)
    if not is_email_valid:
        msg = params['email'] + ' is not a valid email.'
        return HTTPBadRequest(body=json.dumps({'message': msg}),
                              content_type='text/json')

    is_orcid_valid = validate_orcid(params['orcid'])
    if not is_orcid_valid:
        msg = params['orcid'] + ' is not a valid orcid.'
        return HTTPBadRequest(body=json.dumps({'message': msg}),
                              content_type='text/json')

    colleague_orcid_exists = DBSession.query(Colleague).filter(
        Colleague.orcid == params.get('orcid')).one_or_none()
    if colleague_orcid_exists:
        msg = 'You entered an ORCID which is already being used by an SGD colleague. Try to find your entry or contact [email protected] if you think this is a mistake.'
        return HTTPBadRequest(body=json.dumps({'message': msg}),
                              content_type='text/json')

    try:
        full_name = params['first_name'] + ' ' + params['last_name']
        format_name = params['first_name'] + '_' + params['last_name'] + str(
            randint(1, 100))  # add a random number to be sure it's unique
        created_by = get_username_from_db_uri()
        new_colleague = Colleague(
            format_name=format_name,
            display_name=full_name,
            obj_url='/colleague/' + format_name,
            source_id=759,  # direct submission
            orcid=params['orcid'],
            first_name=params['first_name'],
            last_name=params['last_name'],
            email=params['email'],
            is_contact=False,
            is_beta_tester=False,
            display_email=False,
            is_in_triage=True,
            is_pi=False,
            created_by=created_by)
        DBSession.add(new_colleague)
        # flush so the generated primary key is available before commit
        DBSession.flush()
        new_colleague_id = new_colleague.colleague_id
        new_colleague = DBSession.query(Colleague).filter(
            Colleague.format_name == format_name).one_or_none()
        new_c_triage = Colleaguetriage(
            colleague_id=new_colleague_id,
            json=json.dumps(params),
            triage_type='New',
        )
        DBSession.add(new_c_triage)
        transaction.commit()
        return {'colleague_id': new_colleague_id}
    except Exception as e:
        transaction.abort()
        log.error(e)
        return HTTPBadRequest(body=json.dumps({'message': str(e)}),
                              content_type='text/json')
def colleague_triage_update(request):
    """
    Stub endpoint for updating a colleague triage entry.

    Currently only enforces the CSRF check: returns True when the token
    is valid, HTTPBadRequest with a JSON error body otherwise.
    """
    token_ok = check_csrf_token(request, raises=False)
    if token_ok:
        return True
    return HTTPBadRequest(body=json.dumps({'error': 'Bad CSRF Token'}))
def search(request):
    """
    Package search view backed by Elasticsearch.

    Builds a query from the 'q' parameter (with a name suggestion), applies
    optional ordering ('o', '-' prefix for descending) and classifier
    filters ('c'), paginates, and assembles the available classifier
    filters as trees for the sidebar.

    Raises:
        HTTPBadRequest: when 'page' is not an integer.
        HTTPServiceUnavailable: when Elasticsearch errors out.
        HTTPNotFound: when the requested page is past the last one.
    """
    # Single service lookup — the original fetched the metrics service a
    # second time near the bottom, which was redundant.
    metrics = request.find_service(IMetricsService, context=None)

    q = request.params.get("q", "")
    q = q.replace("'", '"')

    if q:
        bool_query = gather_es_queries(q)
        query = request.es.query(bool_query)
        query = query.suggest("name_suggestion", q, term={"field": "name"})
    else:
        query = request.es.query()

    if request.params.get("o"):
        sort_key = request.params["o"]
        if sort_key.startswith("-"):
            sort = {sort_key[1:]: {"order": "desc", "unmapped_type": "long"}}
        else:
            sort = {sort_key: {"unmapped_type": "long"}}
        query = query.sort(sort)

    # Require match to all specified classifiers
    for classifier in request.params.getall("c"):
        query = query.query("prefix", classifiers=classifier)

    try:
        page_num = int(request.params.get("page", 1))
    except ValueError:
        raise HTTPBadRequest("'page' must be an integer.")

    try:
        page = ElasticsearchPage(query,
                                 page=page_num,
                                 url_maker=paginate_url_factory(request))
    except elasticsearch.TransportError:
        metrics.increment("warehouse.views.search.error")
        raise HTTPServiceUnavailable

    if page.page_count and page_num > page.page_count:
        raise HTTPNotFound

    # Non-deprecated classifiers that are actually used by some release,
    # grouped by their top-level segment.
    available_filters = collections.defaultdict(list)
    classifiers_q = (request.db.query(Classifier).with_entities(
        Classifier.classifier).filter(Classifier.deprecated.is_(False)).filter(
            exists([release_classifiers.c.trove_id]).where(
                release_classifiers.c.trove_id == Classifier.id)).order_by(
                    Classifier.classifier))
    for cls in classifiers_q:
        first, *_ = cls.classifier.split(" :: ")
        available_filters[first].append(cls.classifier)

    def filter_key(item):
        # Sort known top-level filters in their canonical order, then the
        # rest alphabetically.
        try:
            return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]
        except ValueError:
            return 1, 0, item[0]

    def form_filters_tree(split_list):
        """
        Takes a list of lists, each of them containing a filter and one of
        its children.
        Returns a dictionary, each key being a filter and each value being
        the filter's children.
        """
        d = {}
        for path in split_list:
            current_level = d
            for part in path:
                if part not in current_level:
                    current_level[part] = {}
                current_level = current_level[part]
        return d

    def process_available_filters():
        """
        Processes available filters and returns a list of dictionaries.
        The value of a key in the dictionary represents its children
        """
        sorted_filters = sorted(available_filters.items(), key=filter_key)
        output = []
        for f in sorted_filters:
            classifier_list = f[1]
            split_list = [i.split(" :: ") for i in classifier_list]
            tree = form_filters_tree(split_list)
            output.append(tree)
        return output

    metrics.histogram("warehouse.views.search.results", page.item_count)

    return {
        "page": page,
        "term": q,
        "order": request.params.get("o", ""),
        "available_filters": process_available_filters(),
        "applied_filters": request.params.getall("c"),
    }
def get_response(self, req: "CamcopsRequest") -> Response:
    """
    Return the report content itself, as an HTTP :class:`Response`.

    Reads report id, rows-per-page, page number and view type from the
    request; runs the report either via :meth:`get_query` (a SQL
    statement) or :meth:`get_rows_colnames` (a plain rows/columns
    object); and serves the result as paginated HTML or as a TSV
    download.

    Raises:
        HTTPBadRequest: wrong report id, or an unsupported view type.
        NotImplementedError: the report implements neither data method.
    """
    # Check the basic parameters
    report_id = req.get_str_param(ViewParam.REPORT_ID)
    rows_per_page = req.get_int_param(ViewParam.ROWS_PER_PAGE,
                                      DEFAULT_ROWS_PER_PAGE)
    page_num = req.get_int_param(ViewParam.PAGE, 1)

    if report_id != self.report_id:
        raise HTTPBadRequest("Error - request directed to wrong report!")

    # viewtype = req.get_str_param(ViewParam.VIEWTYPE, ViewArg.HTML,
    #                              lower=True)
    # ... NO; for a Deform radio button, the request contains parameters
    # like
    #   ('__start__', 'viewtype:rename'),
    #   ('deformField2', 'tsv'),
    #   ('__end__', 'viewtype:rename')
    # ... so we need to ask the appstruct instead.
    # This is a bit different from how we manage trackers/CTVs, where we
    # recode the appstruct to a URL.
    #
    # viewtype = appstruct.get(ViewParam.VIEWTYPE)  # type: str
    #
    # Ah, no... that fails with pagination of reports. Let's redirect
    # things to the HTTP query, as for trackers/audit!
    viewtype = req.get_str_param(ViewParam.VIEWTYPE, ViewArg.HTML,
                                 lower=True)
    if viewtype not in [ViewArg.HTML, ViewArg.TSV]:
        raise HTTPBadRequest("Bad viewtype")

    # Run the report (which may take additional parameters from the
    # request)
    statement = self.get_query(req)
    if statement is not None:
        rp = req.dbsession.execute(statement)  # type: ResultProxy
        column_names = rp.keys()
        rows = rp.fetchall()
    else:
        # Fallback: the report supplies rows/column names directly.
        plain_report = self.get_rows_colnames(req)
        if plain_report is None:
            raise NotImplementedError(
                "Report did not implement either of get_select_statement()"
                " or get_rows_colnames()")
        column_names = plain_report.column_names
        rows = plain_report.rows

    # Serve the result
    if viewtype == ViewArg.HTML:
        page = CamcopsPage(collection=rows,
                           page=page_num,
                           items_per_page=rows_per_page,
                           url_maker=PageUrl(req))
        return self.render_html(req=req,
                                column_names=column_names,
                                page=page)
    else:  # TSV
        filename = (
            "CamCOPS_" +
            self.report_id +
            "_" +
            format_datetime(req.now, DateFormat.FILENAME) +
            ".tsv"
        )
        content = tsv_from_query(rows, column_names)
        return TsvResponse(body=content, filename=filename)
def renderForm(request, itemType, **kwargs):
    """
    Render the form for either Activity or Stakeholder.

    Kwargs:
        itemJson: existing item data to show in an edit form (None for a
            create form).
        inv: the name of an Involvement when the form is reopened after
            creating a new Involvement.

    Returns either a dict containing the rendered form HTML and its
    CSS/JS resources, or an HTTPFound redirect when the submission leads
    to another form.
    """
    # Get the kwargs
    itemJson = kwargs.get('itemJson', None)
    newInvolvement = kwargs.get('inv', None)

    emptyTitle = _('Empty Form')
    emptyText = _('You submitted an empty form or did not make any changes.')
    errorTitle = _('Error')

    # Activity or Stakeholder
    if itemType == 'activities':
        # The initial category of the form
        formid = 'activityform'
        otherItemType = 'stakeholders'
    elif itemType == 'stakeholders':
        formid = 'stakeholderform'
        otherItemType = 'activities'
    else:
        raise HTTPBadRequest(
            'Unknown itemType (neither "activities" nor "stakeholders")')

    session = request.session
    oldCategory = None

    log.debug('Session before processing the form: %s' % session)

    # Use a different template rendering engine (mako instead of chameleon)
    deform.Form.set_default_renderer(mako_renderer)

    # Check if anything was submitted at all. If so, remember which category
    # was the one submitted.
    formSubmit = False
    if request.POST != {}:
        formSubmit = True
        for p in request.POST:
            if p == 'category':
                oldCategory = request.POST[p]
                break

    # Get the configuration of the categories (as defined in the config yaml)
    configCategoryList = getCategoryList(request, itemType)

    # Determine which category to show. If reopening the form after creating a
    # new Involvement, use the category of the Involvement. Else use the first
    # category of the configuration.
    if newInvolvement is not None:
        newInvCat = configCategoryList.findCategoryByInvolvementName(
            newInvolvement)
        if newInvCat is not None:
            newCategory = newInvCat.getId()
        else:
            newCategory = configCategoryList.getFirstCategoryId()
    else:
        newCategory = configCategoryList.getFirstCategoryId()

    # Collect a list with id and names of all available categories which will
    # be used to create the buttons based cat
    categoryListButtons = []
    for cat in sorted(configCategoryList.getCategories(),
                      key=lambda cat: cat.order):
        displayName = (cat.getTranslation()
                       if cat.getTranslation() is not None else cat.getName())
        categoryListButtons.append((cat.getId(), displayName))

    captured = None
    formHasErrors = False
    # Some sort of data used for feedback. Can be Javascript or something else
    feedbackData = None

    # Handle form submission: This can also be just the "submission" of a
    # single category which does not submit the item but stores the
    # information of the submitted category in the session.
    for p in request.POST:
        if (not (p.startswith('step_') or p in ['submit', 'delete']
                 or p.startswith('createinvolvement_'))):
            continue

        createInvolvement = False
        if p.startswith('createinvolvement_'):
            # If the form was "submitted" because a new involvement is to be
            # created, we need to remove the 'create_involvement' POST value,
            # otherwise Deform cannot validate the form.
            x = p.split('_')
            createInvolvement = x[1]
            request.POST.pop(p)

        # Do a validation of the submitted form data. To do this, it is
        # necessary to recreate a form with the same category that was
        # submitted.
        buttons = []

        # Prepare a form with the submitted category
        oldschema = addHiddenFields(colander.SchemaNode(colander.Mapping()),
                                    itemType)
        oldCat = configCategoryList.findCategoryById(oldCategory)
        if oldCat is not None:
            oldschema.add(oldCat.getForm(request))
        showSessionCategories = None
        if (itemJson is None
                or (itemType in session and 'form' in session[itemType]
                    and 'id' in session[itemType]['form']
                    and session[itemType]['form']['id'] == itemJson['id'])):
            showSessionCategories = itemType
        buttons = getFormButtons(
            request, categoryListButtons, oldCategory,
            showSessionCategories=showSessionCategories)
        # creates form?
        form = deform.Form(oldschema, buttons=buttons, formid=formid)

        if p == 'delete':
            captured = {}
        else:
            try:
                # Try to validate the form
                captured = form.validate(
                    request.POST.items())  # captured contains input values
            except deform.ValidationFailure as e:
                # The submitted values contains errors. Render the same form
                # again with error messages. It will be returned later.
                html = e.render()
                formHasErrors = True

        if formHasErrors is False:
            # The form is valid, store the captured data in the session.
            log.debug('Data captured by the form: %s' % captured)

            # If there is already some data in the session.
            if itemType in session and 'form' in session[itemType]:
                sessionItem = session[itemType][
                    'form']  # sessionItem contains values saved in session
                if (captured.get('id') == sessionItem.get('id')
                        and captured.get('version')
                        == sessionItem.get('version')
                        and oldCategory in captured):
                    # It is the same item as already in the session, add or
                    # overwrite the form data.
                    updatedCategory = captured[oldCategory]
                    sessionItem[oldCategory] = updatedCategory
                    log.debug('Updated session item: Category %s'
                              % oldCategory)
                else:
                    # A different item is already in the session. It will be
                    # overwriten.
                    if 'category' in captured:
                        del (captured['category'])
                    session[itemType]['form'] = captured
                    log.debug('Replaced session item')
            else:
                # No data is in the session yet. Store the captured data
                # there.
                if 'category' in captured:
                    del (captured['category'])
                if itemType not in session:
                    session[itemType] = {}
                session[itemType][
                    'form'] = captured  # write session data to form of itemType (can be activity etc.)
                log.debug('Added session item')

            if p.startswith('step_'):
                # A button with a next category was clicked, set a new
                # current category to show in the form
                c = p.split('_')
                newCategory = c[1]

            if createInvolvement is not False:
                # A new form is opened to create an Involvement. Store the
                # current form information in the session (camefrom).
                if itemType in session and 'camefrom' in session[itemType]:
                    # TODO
                    print("*************************")
                    print("*************************")
                    print("*************************")
                    print("there is already an activity in the session")
                    print("*************************")
                    print("*************************")
                    print("*************************")
                itemId = '' if itemJson is None or 'id' not in itemJson \
                    else itemJson['id']
                session[itemType]['camefrom'] = {
                    'id': itemId,
                    'timestamp': datetime.datetime.now(),
                    'inv': createInvolvement
                }
                if itemType == 'activities':
                    msg = render(
                        get_customized_template_path(
                            request,
                            'parts/messages/stakeholder_form_through_'
                            'involvement.mak'),
                        {
                            'url': request.route_url(
                                'activities_read_many', output='form',
                                _query={'inv': createInvolvement})
                        }, request)
                    session.flash(msg)
                    url = request.route_url('stakeholders_read_many',
                                            output='form')
                else:
                    url = request.route_url('activities_read_many',
                                            output='form')
                # Redirect to the other form.
                return HTTPFound(url)

            if p in ['submit', 'delete']:
                # The final submit button was clicked. Calculate the diff,
                # delete the session data and redirect to a confirm page.
                success = False

                posted_formid = request.POST['__formid__']
                if posted_formid not in ['activityform', 'stakeholderform']:
                    # TODO: Is this the correct way to return an error
                    # message?
                    feedbackMessage = \
                        '<span class="text-error">{}</span>: Unknown form'.format(
                            errorTitle)
                    return {
                        'form': feedbackMessage,
                        'css_links': [],
                        'js_links': [],
                        'js': None,
                        'success': False
                    }

                if p == 'delete':
                    # The Item is to be deleted. Calculate the diff to delete
                    # all tags
                    diff = calculate_deletion_diff(request, itemType)
                else:
                    if (itemType not in session
                            or 'form' not in session[itemType]):
                        # TODO: Is this the correct way to return an error
                        # message?
                        feedbackMessage = 'Session not active'
                        return {
                            'form': feedbackMessage,
                            'css_links': [],
                            'js_links': [],
                            'js': None,
                            'success': False
                        }
                    formdata = copy.copy(session[itemType]['form'])
                    log.debug('The complete formdata as in the session: %s'
                              % formdata)
                    # check
                    diff = formdataToDiff(request, formdata, itemType)
                    log.debug(
                        'The uncleaned diff to create/update the activity: %s'
                        % diff)

                if diff is None:
                    # TODO: Is this the correct way to return an error
                    # message?
                    return {
                        'form': '<h3 class="text-info">%s</h3><p>%s</p>'
                                % (emptyTitle, emptyText),
                        'css_links': [],
                        'js_links': [],
                        'js': None,
                        'success': False
                    }

                # Create or update the Item
                success, returnValues = doUpdate(request, itemType, diff)

                if success is True:
                    # Clear the session
                    doClearFormSessionData(request, itemType, 'form')

                    if (otherItemType in session
                            and 'camefrom' in session[otherItemType]):
                        # The form was submitted "indirectly"
                        camefrom = session[otherItemType]['camefrom']
                        # Clear the camefrom flag
                        doClearFormSessionData(request, otherItemType,
                                               'camefrom')
                        addToSession = addCreatedInvolvementToSession(
                            request, session, otherItemType, camefrom['inv'],
                            returnValues)
                        if addToSession is True:
                            msg = render(
                                get_customized_template_path(
                                    request,
                                    'parts/messages/stakeholder_created_'
                                    'through_involvement.mak'), {}, request)
                            session.flash(msg, 'success')
                        # Route to the other form again.
                        if itemType == 'activities':
                            url = request.route_url(
                                'stakeholders_read_many', output='form',
                                _query={'inv': camefrom['inv']})
                        else:
                            activity_id = camefrom.get('id')
                            if activity_id is not None and activity_id != '':
                                url = request.route_url(
                                    'activities_read_one', output='form',
                                    uid=activity_id,
                                    _query={'inv': camefrom['inv']})
                            else:
                                url = request.route_url(
                                    'activities_read_many', output='form',
                                    _query={'inv': camefrom['inv']})
                        return HTTPFound(url)
                    else:
                        if itemType == 'activities':
                            feedbackMessage = render(
                                get_customized_template_path(
                                    request,
                                    'parts/messages/activity_created_'
                                    'success.mak'),
                                {
                                    'url': request.route_url(
                                        'activities_read_one',
                                        output='html',
                                        uid=returnValues['id'])
                                }, request)
                        else:
                            feedbackMessage = render(
                                get_customized_template_path(
                                    request,
                                    'parts/messages/stakeholder_created_'
                                    'success.mak'),
                                {
                                    'url': request.route_url(
                                        'stakeholders_read_one',
                                        output='html',
                                        uid=returnValues['id'])
                                }, request)
                else:
                    feedbackMessage = '<h3 class="text-error">%s</h3>%s' % (
                        errorTitle, returnValues)

                return {
                    'form': feedbackMessage,
                    'css_links': [],
                    'js_links': [],
                    'js': feedbackData,
                    'success': success
                }
    # END Post-request

    if formHasErrors is False:
        # If nothing was submitted or the captured form data was stored
        # correctly, create a form with the (new) current category.
        newschema = addHiddenFields(colander.SchemaNode(colander.Mapping()),
                                    itemType)
        newCat = configCategoryList.findCategoryById(newCategory)
        if newCat is not None:
            newschema.add(
                newCat.getForm(request))  # send get request to config/form.py
        showSessionCategories = None
        if (itemJson is None
                or (itemType in session and 'id' in session[itemType]
                    and session[itemType]['id'] == itemJson['id'])):
            showSessionCategories = itemType
        buttons = getFormButtons(request, categoryListButtons, newCategory,
                                 showSessionCategories=showSessionCategories)
        form = deform.Form(newschema, buttons=buttons, formid=formid)

        # The form contains empty data by default
        data = {'category': newCategory}

        # Decide which data to show in the form
        sessionItem = None
        if itemType in session and 'form' in session[itemType]:
            sessionItem = copy.copy(session[itemType]['form'])

        if itemJson is not None and itemType not in session:
            # An item was provided to show in the form (edit form) and no
            # values are in the session yet.
            # Simply show the data of the provided item in the form.
            data = getFormdataFromItemjson(request, itemJson, itemType,
                                           newCategory)
        elif itemJson is not None and sessionItem is not None:
            # An item was provided to show in the form (edit form) and there
            # are some values in the session.
            if (itemJson['id'] == sessionItem['id']
                    and itemJson['version'] == sessionItem['version']):
                # The item in the session and the item provided are the same.
                if str(newCategory) in sessionItem:
                    # The current category of the form is already in the
                    # session so we display this data.
                    sessionItem['category'] = newCategory
                    data = sessionItem
                else:
                    # The current category of the form is not yet in the
                    # session so we use the data of the itemjson to populate
                    # the form.
                    data = getFormdataFromItemjson(request, itemJson,
                                                   itemType, newCategory)
                if formSubmit is False and request.params.get('inv') is None:
                    # If the form is rendered for the first time, inform the
                    # user that session was used.
                    url = request.route_url('form_clear_session',
                                            item=itemType, attr='form',
                                            _query={'url': request.url})
                    msg = render(
                        get_customized_template_path(
                            request,
                            'parts/messages/unsaved_data_same_form.mak'),
                        {'url': url}, request)
                    session.flash(msg)
            else:
                # The item in the session is not the same as the item
                # provided. Use the itemjson to populate the form
                data = getFormdataFromItemjson(request, itemJson, itemType,
                                               newCategory)
                # Inform the user that there is data in the session.
                item_name = sessionItem['id'][:6] \
                    if sessionItem['id'] != colander.null else ''
                if sessionItem['id'] != colander.null:
                    if itemType == 'activities':
                        item_url = request.route_url('activities_read_one',
                                                     output='form',
                                                     uid=sessionItem['id'])
                    elif itemType == 'stakeholders':
                        item_url = request.route_url('stakeholders_read_one',
                                                     output='form',
                                                     uid=sessionItem['id'])
                else:
                    if itemType == 'activities':
                        item_url = request.route_url('activities_read_many',
                                                     output='form')
                    elif itemType == 'stakeholders':
                        item_url = request.route_url('stakeholders_read_many',
                                                     output='form')
                msg = render(
                    get_customized_template_path(
                        request,
                        'parts/messages/unsaved_data_different_form.mak'),
                    {
                        'url': item_url,
                        'name': item_name,
                        'type': itemType
                    }, request)
                session.flash(msg)
        elif itemJson is None and sessionItem is not None:
            # No item was provided (create form) but some data was found in
            # the session.
            if (sessionItem['id'] != colander.null
                    and sessionItem['version'] != colander.null):
                # The item in the session is not new. Show empty form data
                # (already defined) and inform the user.
                item_name = sessionItem['id'][:6] \
                    if sessionItem['id'] != colander.null \
                    else _('Unknown Item')
                if sessionItem['id'] != colander.null:
                    if itemType == 'activities':
                        item_url = request.route_url('activities_read_one',
                                                     output='form',
                                                     uid=sessionItem['id'])
                    elif itemType == 'stakeholders':
                        item_url = request.route_url('stakeholders_read_one',
                                                     output='form',
                                                     uid=sessionItem['id'])
                else:
                    if itemType == 'activities':
                        item_url = request.route_url('activities_read_many',
                                                     output='form')
                    elif itemType == 'stakeholders':
                        item_url = request.route_url('stakeholders_read_many',
                                                     output='form')
                msg = render(
                    get_customized_template_path(
                        request,
                        'parts/messages/unsaved_data_different_form.mak'),
                    {
                        'url': item_url,
                        'name': item_name,
                        'type': itemType
                    }, request)
                session.flash(msg)
            else:
                # The item in the session is new.
                # If the form is rendered for the first time, inform the
                # user that session was used.
                sessionItem['category'] = newCategory
                data = sessionItem
                if formSubmit is False and newInvolvement is None:
                    # Inform the user that data from the session is used.
                    url = request.route_url('form_clear_session',
                                            item=itemType, attr='form',
                                            _query={'url': request.url})
                    msg = render(
                        get_customized_template_path(
                            request,
                            'parts/messages/unsaved_data_same_form.mak'),
                        {'url': url}, request)
                    session.flash(msg)
        elif itemJson is not None:
            # An item was provided to show in the form (edit form)
            # Simply show the data of the provided item in the form.
            data = getFormdataFromItemjson(request, itemJson, itemType,
                                           newCategory)
        else:
            # No itemjson and no sessionitem, do nothing (empty data already
            # defined above).
            pass

        # log.debug('Data used to populate the form: %s' % data)
        html = form.render(data)

    # If the current category contains involvements (eg. to add Stakeholders
    # to an Activity), show a (initially empty) div which will contain the
    # form for Stakeholders.
    if str(newCategory) in configCategoryList.getInvolvementCategoryIds():
        html += '<div id="stakeholderformcontainer"></div>'

    # Add JS and CSS requirements (for widgets)
    resources = form.get_widget_resources()

    log.debug('Session after processing the form: %s' % session)

    return {
        'form': html,
        'css_links': resources['css'],
        'js_links': resources['js'],
        'success': not formHasErrors
    }
def POST(self):
    """
    OAuth2-style token endpoint.

    Supports two grant types read from the JSON body:
    * ``code`` — exchanges a short-lived code for an access + refresh token.
    * ``refresh_token`` — exchanges a refresh token for a new access token.

    Returns the request's response with a JSON token payload on success,
    or an :class:`HTTPBadRequest` on any failure.
    """
    reqParams = self.__parser__(
        args=tokenSchema(),
        location='json'
    )
    policy = my_get_authentication_policy(self.request)
    if reqParams.get('grant_type') == 'code':
        secret = getattr(policy, 'codeTokenSecret')
        payloadInCode = myDecode(
            token=reqParams.get('code'),
            secret=secret
        )
        # NOTE(review): naive local time compared against the token's 'exp'
        # timestamp — assumes both are in the server's local timezone.
        now = datetime.datetime.now()
        dateExpCode = datetime.datetime.fromtimestamp(
            payloadInCode.get('exp')
        )
        if now < dateExpCode:
            accessToken = getAccessToken(
                idUser=payloadInCode.get('sub'),
                request=self.request
            )
            refreshToken = getRefreshToken(
                idUser=payloadInCode.get('sub'),
                request=self.request
            )
            self.request.response.json_body = {
                'access_token': accessToken.decode('utf-8'),
                'token_type': 'Bearer',
                "expires_in": 300,
                "refresh_token": refreshToken.decode('utf-8')
            }
            return self.request.response
        else:
            return HTTPBadRequest("Code no more valid")
    elif reqParams.get('grant_type') == 'refresh_token':
        secret = getattr(policy, 'refreshTokenSecret')
        payloadInRefreshToken = myDecode(
            token=reqParams.get('refresh_token'),
            secret=secret
        )
        now = datetime.datetime.now()
        dateExpCode = datetime.datetime.fromtimestamp(
            payloadInRefreshToken.get('exp')
        )
        if now < dateExpCode:
            accessToken = getAccessToken(
                idUser=payloadInRefreshToken.get('sub'),
                request=self.request
            )
            self.request.response.json_body = {
                'access_token': accessToken.decode('utf-8'),
                'token_type': 'Bearer',
                "expires_in": 300
            }
            return self.request.response
        else:
            # FIX: was `return "refresh token no more valid"` — a bare string,
            # inconsistent with every other failure path (and not a response).
            return HTTPBadRequest("refresh token no more valid")
    else:
        # FIX: this branch means the grant_type itself is unsupported; the
        # original message ("Code no more valid") was a copy-paste error.
        return HTTPBadRequest("Unsupported grant_type")
def upload_randomization_json(context, request):
    """
    Handles RANDID file uploads.

    The file is expected to be a CSV with the following columns:

    * ARM
    * STRATA
    * BLOCKID
    * RANDID

    In addition, the CSV file must have the columns as the form it is
    using for randomization.

    :raises HTTPBadRequest: study not randomized, not a CSV, missing
        columns, or duplicate reference numbers.
    """
    check_csrf_token(request)
    db_session = request.db_session

    if not context.is_randomized:
        # No form check required as its checked via database constraint
        raise HTTPBadRequest(body=_(u'This study is not randomized'))

    input_file = request.POST['upload'].file
    input_file.seek(0)

    # Ensure we can read the CSV
    try:
        csv.Sniffer().sniff(input_file.read(1024))
    except csv.Error:
        raise HTTPBadRequest(body=_(u'Invalid file-type, must be CSV'))
    else:
        input_file.seek(0)

    reader = csv.DictReader(input_file)

    # Case-insensitive lookup of the actual CSV header names
    fieldnames = dict((name.upper(), name) for name in reader.fieldnames)

    stratumkeys = ['ARM', 'BLOCKID', 'RANDID']
    # FIX: materialize as a list — on Python 3 ``dict.keys()`` is a view and
    # ``list + view`` raises TypeError below.
    formkeys = list(context.randomization_schema.attributes.keys())

    # Ensure the CSV defines all required columns
    required = stratumkeys + formkeys
    missing = [name for name in required if name.upper() not in fieldnames]

    if missing:
        raise HTTPBadRequest(
            body=_(u'File upload is missing the following columns ${columns}',
                   mapping={'columns': ', '.join(missing)}))

    # We'll be using this to create new arms as needed
    arms = dict([(arm.name, arm) for arm in context.arms])

    # Default to comple state since they're generated by a statistician
    complete = (db_session.query(
        datastore.State).filter_by(name=u'complete').one())

    for row in reader:
        arm_name = row[fieldnames['ARM']]
        if arm_name not in arms:
            arms[arm_name] = models.Arm(study=context,
                                        name=arm_name,
                                        title=arm_name)
        stratum = models.Stratum(study=context,
                                 arm=arms[arm_name],
                                 block_number=int(row[fieldnames['BLOCKID']]),
                                 randid=row[fieldnames['RANDID']])
        if 'STRATA' in fieldnames:
            stratum.label = row[fieldnames['STRATA']]
        db_session.add(stratum)
        entity = datastore.Entity(schema=context.randomization_schema,
                                  state=complete)
        for key in formkeys:
            entity[key] = row[fieldnames[key.upper()]]
        stratum.entities.add(entity)

    try:
        db_session.flush()
    except sa.exc.IntegrityError as e:
        # FIX: use str(e) instead of the Python-2-only ``e.message``.
        if 'uq_stratum_reference_number' in str(e):
            raise HTTPBadRequest(body=_(
                u'The submitted file contains existing reference numbers. '
                u'Please upload a file with new reference numbers.'))
        # FIX: previously any other integrity error was silently swallowed
        # and HTTPOk returned despite the failed flush.
        raise

    return HTTPOk()
def add_schema_json(context, request):
    """
    Attach a set of published schema versions to the study (and its cycles).

    Validates the posted form name and versions, replaces the study's
    currently-attached versions of that schema with the selected ones, and
    mirrors the change onto every cycle that uses the schema.

    :raises HTTPBadRequest: JSON body with per-field errors when validation fails.
    """
    check_csrf_token(request)
    db_session = request.db_session

    def check_not_patient_schema(form, field):
        # Reject forms that are already registered as patient forms.
        (exists, ) = (db_session.query(
            db_session.query(datastore.Schema).join(
                models.patient_schema_table).filter(
                    datastore.Schema.name == field.data).exists()).one())
        if exists:
            raise wtforms.ValidationError(
                request.localizer.translate(_(u'Already a patient form')))

    def check_not_randomization_schema(form, field):
        if (context.randomization_schema
                and context.randomization_schema.name == field.data):
            raise wtforms.ValidationError(
                request.localizer.translate(
                    _(u'Already a randomization form')))

    def check_not_termination_schema(form, field):
        if (context.termination_schema is not None
                and context.termination_schema.name == field.data):
            raise wtforms.ValidationError(
                request.localizer.translate(_(u'Already a termination form')))

    def check_same_schema(form, field):
        # All selected versions must belong to the named schema.
        versions = form.versions.data
        schema = form.schema.data
        invalid = [i.publish_date for i in versions if i.name != schema]
        if invalid:
            # FIX: the message was double-wrapped as ``_(_(u'...'),
            # mapping=...)`` which attached the mapping to the outer call;
            # a single translation string carries the mapping directly.
            raise wtforms.ValidationError(
                request.localizer.translate(
                    _(u'Incorrect versions: ${versions}',
                      mapping={'versions': ', '.join(map(str, invalid))})))

    def check_published(form, field):
        if field.data.publish_date is None:
            raise wtforms.ValidationError(
                request.localizer.translate(
                    _(u'Selected version is not published')))

    class SchemaManagementForm(Form):
        schema = wtforms.StringField(validators=[
            wtforms.validators.InputRequired(), check_not_patient_schema,
            check_not_randomization_schema, check_not_termination_schema
        ])
        versions = wtforms.FieldList(
            ModelField(db_session=db_session,
                       class_=datastore.Schema,
                       validators=[
                           wtforms.validators.InputRequired(), check_published
                       ]),
            validators=[wtforms.validators.DataRequired(), check_same_schema])

    form = SchemaManagementForm.from_json(request.json_body)

    if not form.validate():
        raise HTTPBadRequest(json={'errors': wtferrors(form)})

    old_items = set(i for i in context.schemata if i.name == form.schema.data)
    new_items = set(form.versions.data)

    # Remove unselected
    context.schemata.difference_update(old_items - new_items)

    # Add newly selected
    context.schemata.update(new_items)

    # Get a list of cycles to update
    cycles = (db_session.query(models.Cycle).options(
        orm.joinedload(models.Cycle.schemata)).filter(
            models.Cycle.study == context).filter(
                models.Cycle.schemata.any(name=form.schema.data)))

    # Also update available cycle schemata versions
    for cycle in cycles:
        cycle.schemata.difference_update(old_items - new_items)
        cycle.schemata.update(new_items)

    return form2json(new_items)[0]
def invalid_url_encoding(exc, request):
    """
    Handler when the URL contains malformed encoded strings (i.e. %c5, %80).

    :param exc: the decoding exception that triggered this view.
    :param request: the current request.
    :returns: a plain 400 response with the framework's default body.
    """
    return HTTPBadRequest()
def get_feature_info(id, srid, translations):
    """The function gets the geometry of a parcel by its ID and does an
    overlay with other administrative layers to get the basic parcelInfo and
    attribute information of the parcel: municipality, local names, and so on.

    hint:
    for debugging the query use str(query) in the console/browser window
    to visualize geom.wkt use session.scalar(geom.wkt)
    """
    # NOTE(review): this try/except can never fall back — ``srid`` is always
    # bound as a parameter, so SRS == srid in all cases (2056 is never used).
    try:
        SRS = srid
    except:
        SRS = 2056

    parcelInfo = {}
    parcelInfo['featureid'] = None
    Y = None
    X = None

    if id:
        parcelInfo['featureid'] = id
    # elif request.params.get('X') and request.params.get('Y') :
    #     X = int(request.params.get('X'))
    #     Y = int(request.params.get('Y'))
    else:
        # NOTE(review): raises with an empty translation key; presumably a
        # "missing id" message was intended — confirm against translations.
        raise Exception(translations[''])

    if parcelInfo['featureid'] is not None:
        queryresult = DBSession.query(Property).filter_by(
            id=parcelInfo['featureid']).first()
        # We should check unicity of the property id and raise an exception if there are multiple results
    elif (X > 0 and Y > 0):
        # NOTE(review): unreachable while the X/Y request-parameter block
        # above stays commented out (X and Y are always None here).
        if Y > X:
            pointYX = WKTElement('POINT(' + str(Y) + ' ' + str(X) + ')', SRS)
        else:
            pointYX = WKTElement('POINT(' + str(X) + ' ' + str(Y) + ')', SRS)
        queryresult = DBSession.query(Property).filter(
            Property.geom.ST_Contains(pointYX)).first()
        parcelInfo['featureid'] = queryresult.id
    else:
        # to define
        return HTTPBadRequest(translations['HTTPBadRequestMsg'])

    parcelInfo['geom'] = queryresult.geom
    parcelInfo['area'] = int(
        round(DBSession.scalar(queryresult.geom.ST_Area()), 0))

    # NOTE(review): ``types.ClassType`` is Python-2-only (old-style classes);
    # this check would need updating under Python 3 — confirm runtime.
    if isinstance(LocalName, (types.ClassType)) is False:
        queryresult1 = DBSession.query(LocalName).filter(
            LocalName.geom.ST_Intersects(parcelInfo['geom'])).first()
        parcelInfo['lieu_dit'] = queryresult1.nomloc  # Flurname

    queryresult2 = DBSession.query(Town).filter(
        Town.geom.ST_Buffer(1).ST_Contains(parcelInfo['geom'])).first()

    parcelInfo['nummai'] = queryresult.nummai  # Parcel number
    parcelInfo['type'] = queryresult.typimm  # Parcel type
    if 'no_egrid' in queryresult.__table__.columns.keys():
        parcelInfo['no_egrid'] = queryresult.no_egrid
    else:
        parcelInfo['no_egrid'] = translations['noEGRIDtext']

    if parcelInfo['type'] is None:
        parcelInfo['type'] = translations['UndefinedPropertyType']

    if 'numcad' in queryresult2.__table__.columns.keys():
        parcelInfo['nomcad'] = queryresult2.cadnom

    parcelInfo['numcom'] = queryresult.numcom
    parcelInfo['nomcom'] = queryresult2.comnom
    parcelInfo['nufeco'] = queryresult2.nufeco
    parcelInfo['centerX'] = DBSession.scalar(
        functions.ST_X(queryresult.geom.ST_Centroid()))
    parcelInfo['centerY'] = DBSession.scalar(
        functions.ST_Y(queryresult.geom.ST_Centroid()))
    parcelInfo['BBOX'] = get_bbox_from_geometry(
        DBSession.scalar(functions.ST_AsText(queryresult.geom.ST_Envelope())))

    # the get_print_format function is not needed any longer as the paper size
    # has been fixed to A4 by the cantons but we keep the code because the
    # decision will be revoked
    # parcelInfo['printFormat'] = get_print_format(parcelInfo['BBOX'])

    return parcelInfo
def invalid_api_key_response():
    """Build the canonical 400 response for a request with an invalid API key.

    The body is the module-level ``INVALID_API_KEY`` JSON payload.
    """
    response = HTTPBadRequest()
    response.content_type = 'application/json'
    response.body = INVALID_API_KEY
    return response
def tween(request):
    """Rate-limiting tween: throttle authenticated write requests per user.

    Maintains a fixed window (``ratelimit_reset`` / ``ratelimit_remaining``)
    on the user row; repeated limited windows eventually block the account
    and alert the moderators by email.
    """
    log.debug('RATE LIMITING FOR METHOD ' + request.method)

    # Only write requests are considered for rate limiting.
    if request.method not in ['POST', 'PUT', 'DELETE']:
        return handler(request)

    if request.authorization is None:
        # See comment of similar block in jwt_database_validation tween
        return handler(request)

    user = DBSession.query(User).get(request.authenticated_userid)
    if user is None:
        return http_error_handler(HTTPBadRequest('Unknown user'), request)

    now = datetime.datetime.now(pytz.utc)
    if user.ratelimit_reset is None or user.ratelimit_reset < now:
        # No window exists or it is expired: create a new one.
        span = int(registry.settings.get('rate_limiting.window_span'))
        # Pick the limit by role: robots > moderators > regular users.
        limit = int(
            registry.settings.get(
                'rate_limiting.limit_robot' if user.robot else
                'rate_limiting.limit_moderator' if user.moderator else
                'rate_limiting.limit'))
        user.ratelimit_reset = now + datetime.timedelta(seconds=span)
        # This request counts against the fresh window.
        user.ratelimit_remaining = limit - 1
        log.debug('RATE LIMITING, CREATE WINDOW SPAN : {}'.format(
            user.ratelimit_reset))
    elif user.ratelimit_remaining:
        user.ratelimit_remaining -= 1
        log.info('RATE LIMITING, REQUESTS REMAINING FOR {} : {}'.format(
            user.id, user.ratelimit_remaining))
    else:
        # User is rate limited
        log.warning('RATE LIMIT REACHED FOR USER {}'.format(user.id))

        # Count how many windows the user has been rate limited
        # and block them is too many.
        current_window = user.ratelimit_reset
        # Only count each window once, however many requests hit the limit.
        if user.ratelimit_last_blocked_window != current_window:
            user.ratelimit_last_blocked_window = current_window
            user.ratelimit_times += 1

            max_times = int(
                registry.settings.get('rate_limiting.max_times'))
            if user.ratelimit_times > max_times:
                log.warning('RATE LIMIT BLOCK USER {}'.format(user.id))
                user.blocked = True

                # An alert message is sent to the moderators
                email_service = get_email_service(request)
                try:
                    email_service.send_rate_limiting_alert(user)
                except SMTPAuthenticationError:
                    # Best-effort: a mail failure must not mask the 429.
                    log.error('RATE LIMIT ALERT MAIL : AUTHENTICATION ERROR')

        return http_error_handler(
            HTTPTooManyRequests('Rate limit reached'), request)
    return handler(request)
def get_report(self) -> pyramid.response.Response:
    """Print a report (e.g. PDF) for one or more features of a configured layer.

    Reads ``layername`` and ``ids`` from the route, optionally checks layer
    credentials, builds a WFS GetFeature URL for the features, fills the
    configured (or default) print spec and delegates to ``_do_print``.

    :raises HTTPBadRequest: when the layer is not configured.
    :raises HTTPForbidden: when the user may not access a private layer.
    """
    self.layername = self.request.matchdict["layername"]
    layer_config = self.config["layers"].get(self.layername)
    # FIX: validate before dereferencing — previously ``layer_config.get(...)``
    # ran first and an unknown layer crashed with AttributeError instead of 400.
    if layer_config is None:
        raise HTTPBadRequest("Layer not found")

    multiple = layer_config.get("multiple", False)
    ids = self.request.matchdict["ids"]
    if multiple:
        ids = ids.split(",")

    features_ids = (
        [self.layername + "." + id_ for id_ in ids] if multiple else [self.layername + "." + ids]
    )

    if layer_config["check_credentials"]:
        # FIXME: support of mapserver groups
        ogc_server = (
            models.DBSession.query(main.OGCServer)
            .filter(main.OGCServer.name == layer_config["ogc_server"])
            .one()
        )
        ogc_server_ids = [ogc_server]

        private_layers_object = get_private_layers(ogc_server_ids)
        private_layers_names = [private_layers_object[oid].name for oid in private_layers_object]

        protected_layers_object = get_protected_layers(self.request.user, ogc_server_ids)
        protected_layers_names = [protected_layers_object[oid].name for oid in protected_layers_object]

        # Private and not granted to this user -> forbidden.
        if self.layername in private_layers_names and self.layername not in protected_layers_names:
            raise HTTPForbidden

    srs = layer_config["srs"]

    mapserv_url = self.request.route_url(
        "mapserverproxy", _query={"ogcserver": layer_config["ogc_server"]}
    )
    url = Url(mapserv_url)
    url.add_query(
        {
            "service": "WFS",
            "version": "1.1.0",
            "outputformat": "gml3",
            "request": "GetFeature",
            "typeName": self.layername,
            "featureid": ",".join(features_ids),
            "srsName": srs,
        }
    )
    vector_request_url = url.url()

    spec = layer_config["spec"]
    if spec is None:
        spec = {
            "layout": self.layername,
            "outputFormat": "pdf",
            # FIX: the single-feature default used the builtin ``id`` function
            # instead of the request's ``ids`` value.
            "attributes": {"ids": [{"id": id_} for id_ in ids]} if multiple else {"id": ids},
        }
        map_config = layer_config.get("map")
        if map_config is not None:
            spec["attributes"]["map"] = self._build_map(mapserv_url, vector_request_url, srs, map_config)

        maps_config = layer_config.get("maps")
        if maps_config is not None:
            spec["attributes"]["maps"] = []
            for map_config in maps_config:
                spec["attributes"]["maps"].append(
                    self._build_map(mapserv_url, vector_request_url, srs, map_config)
                )
    else:
        datasource = layer_config.get("datasource", True)
        if multiple and datasource:
            # Expand the configured "data" template once per feature id.
            data = dumps(layer_config["data"])
            data_list = [
                loads(
                    data
                    % {
                        "layername": self.layername,
                        "id": id_,
                        "srs": srs,
                        "mapserv_url": mapserv_url,
                        "vector_request_url": vector_request_url,
                    }
                )
                for id_ in ids
            ]
            self.walker(spec, "%(datasource)s", data_list)
            spec = loads(
                dumps(spec)
                % {
                    "layername": self.layername,
                    "srs": srs,
                    "mapserv_url": mapserv_url,
                    "vector_request_url": vector_request_url,
                }
            )
        elif multiple:
            spec = loads(
                dumps(spec)
                % {
                    "layername": self.layername,
                    "ids": ",".join(ids),
                    "srs": srs,
                    "mapserv_url": mapserv_url,
                    "vector_request_url": vector_request_url,
                }
            )
        else:
            spec = loads(
                dumps(spec)
                % {
                    "layername": self.layername,
                    "id": ids,
                    "srs": srs,
                    "mapserv_url": mapserv_url,
                    "vector_request_url": vector_request_url,
                }
            )

    return self._do_print(spec)
def deploy_process_from_payload(payload, container, overwrite=False):
    # type: (JSON, AnyContainer, bool) -> HTTPException
    """
    Deploy the process after resolution of all references and validation of the parameters from payload definition.

    Adds a :class:`weaver.datatype.Process` instance to storage using the provided JSON ``payload``
    matching :class:`weaver.wps_restapi.swagger_definitions.ProcessDescription`.

    :param payload: JSON payload that was specified during the process deployment request.
    :param container: container to retrieve application settings.
    :param overwrite: whether to allow override of an existing process definition if conflict occurs.
    :returns: HTTPOk if the process registration was successful.
    :raises HTTPException: for any invalid process deployment step.
    """
    # NOTE(review): the success path below actually returns HTTPCreated, not
    # HTTPOk as stated above — confirm which the API contract documents.

    # use deepcopy of to remove any circular dependencies before writing to mongodb or any updates to the payload
    payload_copy = deepcopy(payload)
    payload = _check_deploy(payload)

    # validate identifier naming for unsupported characters
    process_description = payload.get("processDescription")
    process_info = process_description.get("process", {})
    process_href = process_description.pop("href", None)

    # retrieve CWL package definition, either via "href" (WPS-1/2), "owsContext" or "executionUnit" (package/reference)
    deployment_profile_name = payload.get("deploymentProfileName", "").lower()
    ows_context = process_info.pop("owsContext", None)
    reference = None
    package = None
    if process_href:
        reference = process_href  # reference type handled downstream
    elif isinstance(ows_context, dict):
        offering = ows_context.get("offering")
        if not isinstance(offering, dict):
            raise HTTPUnprocessableEntity(
                "Invalid parameter 'processDescription.process.owsContext.offering'."
            )
        content = offering.get("content")
        if not isinstance(content, dict):
            raise HTTPUnprocessableEntity(
                "Invalid parameter 'processDescription.process.owsContext.offering.content'."
            )
        package = None
        reference = content.get("href")
    elif deployment_profile_name:
        if not any(
                deployment_profile_name.endswith(typ)
                for typ in [PROCESS_APPLICATION, PROCESS_WORKFLOW]):
            raise HTTPBadRequest(
                "Invalid value for parameter 'deploymentProfileName'.")
        execution_units = payload.get("executionUnit")
        if not isinstance(execution_units, list):
            raise HTTPUnprocessableEntity("Invalid parameter 'executionUnit'.")
        for execution_unit in execution_units:
            if not isinstance(execution_unit, dict):
                raise HTTPUnprocessableEntity(
                    "Invalid parameter 'executionUnit'.")
            package = execution_unit.get("unit")
            reference = execution_unit.get("href")
            # stop on first package/reference found, simultaneous usage will raise during package retrieval
            if package or reference:
                break
    else:
        raise HTTPBadRequest(
            "Missing one of required parameters [href, owsContext, deploymentProfileName]."
        )

    # builtin processes are registered internally and may not be deployed by users
    if process_info.get("type", "") == PROCESS_BUILTIN:
        raise HTTPBadRequest(
            "Invalid process type resolved from package: [{0}]. Deployment of {0} process is not allowed."
            .format(PROCESS_BUILTIN))

    # update and validate process information using WPS process offering, CWL/WPS reference or CWL package definition
    settings = get_settings(container)
    headers = getattr(
        container, "headers", {}
    )  # container is any request (as when called from API Deploy request)
    process_info = _validate_deploy_process_info(process_info, reference,
                                                 package, settings, headers)

    restapi_url = get_wps_restapi_base_url(settings)
    description_url = "/".join(
        [restapi_url, "processes", process_info["identifier"]])
    execute_endpoint = "/".join([description_url, "jobs"])

    # ensure that required "processEndpointWPS1" in db is added,
    # will be auto-fixed to localhost if not specified in body
    process_info["processEndpointWPS1"] = process_description.get(
        "processEndpointWPS1")
    process_info["executeEndpoint"] = execute_endpoint
    process_info["payload"] = payload_copy
    process_info["jobControlOptions"] = process_description.get(
        "jobControlOptions", [])
    process_info["outputTransmission"] = process_description.get(
        "outputTransmission", [])
    process_info["processDescriptionURL"] = description_url
    # insert the "resolved" context using details retrieved from "executionUnit"/"href" or directly with "owsContext"
    if "owsContext" not in process_info and reference:
        process_info["owsContext"] = {
            "offering": {
                "content": {
                    "href": str(reference)
                }
            }
        }
    elif isinstance(ows_context, dict):
        process_info["owsContext"] = ows_context
    # bw-compat abstract/description (see: ProcessDeployment schema)
    if "description" not in process_info or not process_info["description"]:
        process_info["description"] = process_info.get("abstract", "")
    # FIXME: handle colander invalid directly in tween (https://github.com/crim-ca/weaver/issues/112)
    try:
        store = get_db(container).get_store(StoreProcesses)
        process = Process(process_info)
        sd.ProcessSummary().deserialize(
            process)  # make if fail before save if invalid
        store.save_process(process, overwrite=overwrite)
        process_summary = process.summary()
    except ProcessRegistrationError as ex:
        # duplicate process without overwrite -> conflict
        raise HTTPConflict(detail=str(ex))
    except (ValueError, colander.Invalid) as ex:
        # raised on invalid process name
        raise HTTPBadRequest(detail=str(ex))
    return HTTPCreated(
        json={
            "description": sd.OkPostProcessesResponse.description,
            "processSummary": process_summary,
            "deploymentDone": True
        })
def store_image(request):
    """Handle an authenticated image upload.

    Validates the mandatory metadata parameters, registers the upload and a
    new map record, persists the file, generates thumbnails and zoomify
    tiles, stores the metadata record and redirects to the upload profile.

    :returns: :class:`HTTPFound` redirect on success; an HTTP error response
        on failure (bad parameters -> 400, database/other errors -> 500).
    """
    try:
        log.debug('Receive a upload request.')
        username = checkIsUser(request)
        user = Users.by_username(username, request.db)

        # check if need metadata is send
        log.debug('Check if mandatory metadata is send ...')
        params = request.params
        if not 'title' in params or not 'titleshort' in params or \
                not 'imagelicence' in params or not 'imageowner' in params:
            raise MissingQueryParameterError('Missing query parameter ...')

        # register upload process in database
        log.debug('Register upload process to database ...')
        uploadObj = Uploads(userid=user.id,
                            time=getTimestampAsPGStr(),
                            params='%s' % request.params)
        request.db.add(uploadObj)

        log.debug('Create and add mapObj ...')
        mapObj = Map(istaktiv=False,
                     isttransformiert=False,
                     maptype='A',
                     hasgeorefparams=0)
        request.db.add(mapObj)
        request.db.flush()

        # check if image allowed extensions
        # ``filename`` contains the name of the file in string format.
        log.debug('Create filename for persistent saving ...')
        filename = request.POST['file'].filename
        if not allowed_file(filename):
            raise WrongParameterException(
                'Format of the image is not supported through the upload API.')

        # ``input_file`` contains the actual file data which needs to be
        # stored somewhere.
        inputFile = request.POST['file'].file

        # Note that we are generating our own filename instead of trusting
        # the incoming filename since that might result in insecure paths.
        # Please note that in a real application you would not use /tmp,
        # and if you write to an untrusted location you will need to do
        # some extra work to prevent symlink attacks.
        newFilename = '%s.%s' % ('df_dk_%s' % mapObj.id,
                                 filename.rsplit('.', 1)[1])
        filePath = os.path.join(UPLOAD_DIR, newFilename)

        # save file to disk
        log.debug('Save file to datastore ...')
        saveFile(inputFile, filePath)

        # process thumbnails
        log.debug('Create thumbnails ...')
        thumbSmall = createSmallThumbnail(filePath, UPLOAD_THUMBS_SMALL_DIR)
        thumbMid = createMidThumbnail(filePath, UPLOAD_THUMBS_MID_DIR)

        log.debug('Create zoomify tiles')
        zoomifyTiles = processZoomifyTiles(filePath, UPLOAD_ZOOMIFY_DIR, log)

        # parse boundinbBox
        pgBoundingBoxStr = parseBoundingBoxFromRequest(request.params)

        # add geometry to map object and update other attributes
        # work around --> should be replaced through adding the geomtry on initial adding
        log.debug('Update mapObj and create metadataObj ...')
        mapObj.apsdateiname = newFilename
        mapObj.originalimage = filePath
        Map.updateGeometry(mapObj.id, pgBoundingBoxStr, request.db)
        request.db.flush()

        # parse and create metadataObj
        # FIX: read every value with .get() so optional parameters are always
        # bound; previously a missing optional parameter (e.g. 'description')
        # raised NameError when building the Metadata object below.
        title = request.params.get('title')
        titleshort = request.params.get('titleshort')
        serientitle = request.params.get('serientitle')
        description = request.params.get('description')
        timepublish = request.params.get('timepublish')
        imagelicence = request.params.get('imagelicence')
        scale = request.params.get('scale')
        imageowner = request.params.get('imageowner')

        # create metadata obj
        # the creating of the paths are right now quite verbose
        imagezoomify = UPLOAD_SERVICE_URL_ZOOMIFY + os.path.basename(
            filePath).split('.')[0] + '/ImageProperties.xml'
        thumbssmall = UPLOAD_SERVICE_URL_THUMBS_SMALL + os.path.basename(
            thumbSmall)
        thumbsmid = UPLOAD_SERVICE_URL_THUMBS_MID + os.path.basename(thumbMid)
        metadataObj = Metadata(mapid=mapObj.id,
                               title=title,
                               titleshort=titleshort,
                               serientitle=serientitle,
                               description=description,
                               timepublish="%s-01-01 00:00:00" % (timepublish),
                               imagelicence=imagelicence,
                               imageowner=imageowner,
                               scale=scale,
                               imagezoomify=imagezoomify,
                               thumbssmall=thumbssmall,
                               thumbsmid=thumbsmid)
        request.db.add(metadataObj)

        # update uploadObj and create response
        uploadObj.mapid = mapObj.id
        log.debug('Create response ...')
        target_url = request.route_url('upload-profile')
        return HTTPFound(location=target_url)

    # Exception handling
    except NotFoundException as e:
        log.exception(e)
        ERR_MSG = GENERAL_ERROR_MESSAGE + "We're sorry, but something went wrong. Please be sure that your file respects the upload conditions."
        return HTTPBadRequest(ERR_MSG)
    except DBAPIError as e:
        log.error('Database error within a upload process')
        log.exception(e)
        return HTTPInternalServerError(GENERAL_ERROR_MESSAGE)
    # FIX: ``except A or B`` only ever caught A (the ``or`` evaluates to the
    # first class); a tuple catches both exception types.
    except (MissingQueryParameterError, WrongParameterException) as e:
        log.exception(e)
        raise HTTPBadRequest(GENERAL_ERROR_MESSAGE)
    except Exception as e:
        log.exception(e)
        raise HTTPInternalServerError(GENERAL_ERROR_MESSAGE)
# Fragment of a view that promotes a triaged reference (by curation id) into
# the database; the enclosing ``def`` is not visible in this chunk.
tags = request.json['tags']
username = request.session['username']
# validate tags before doing anything else
# NOTE(review): Python-2 ``except Exception, e`` syntax here, while the block
# below uses py3 ``as e`` — the file's target Python version is inconsistent.
try:
    validate_tags(tags)
except Exception, e:
    return HTTPBadRequest(body=json.dumps({'error': str(e)}))
id = request.matchdict['id'].upper()
triage = DBSession.query(Referencetriage).filter_by(
    curation_id=id).one_or_none()
new_reference_id = None
# NOTE(review): ``triage.pmid`` is dereferenced here before the ``if triage:``
# guard below — a missing triage row raises AttributeError; verify intent.
existing_ref = DBSession.query(Referencedbentity).filter_by(
    pmid=triage.pmid).one_or_none()
if existing_ref:
    return HTTPBadRequest(body=json.dumps({
        'error': 'The reference already exists in the database. You may need to discard from triage after verifying.'
    }))
if triage:
    # promote
    try:
        new_reference = add_paper(triage.pmid, request.json['data']['assignee'])
        new_reference_id = new_reference.dbentity_id
        DBSession.delete(triage)
        transaction.commit()
    except Exception as e:
        # roll back both the transaction manager and the session on failure
        traceback.print_exc()
        log.error(e)
        transaction.abort()
        DBSession.rollback()
        return HTTPBadRequest(body=json.dumps({'error': str(e)}))
def activate(self):
    """Activate a freshly registered user account from an emailed link.

    Expects ``uuid`` and ``username`` request parameters, validates the
    activation UUID and its 48-hour expiry, marks the user active, then
    emails the profile's moderators (or administrators as fallback) asking
    for approval. Renders the activation-successful page.
    """
    activation_uuid = self.request.params.get("uuid")
    username = self.request.params.get("username")

    if validate_uuid(activation_uuid) is False:
        raise HTTPBadRequest('Invalid UUID')

    # Get the user
    user = DBSession.query(User).filter(
        and_(User.activation_uuid == activation_uuid,
             User.username == username, User.is_active == False)).first()

    # Raise a BadRequest if no user is found
    if user is None:
        raise HTTPBadRequest('User not found or already active.')

    # A timedelta of 48 hours equals 2 days
    delta = timedelta(hours=48)
    # Create a timezone info
    tz = psycopg2.tz.FixedOffsetTimezone(offset=0, name="UTC")
    # Check if the registration timestamp is not older than 48 hours
    if (datetime.now(tz) - delta) > user.registration_timestamp:
        raise HTTPBadRequest("Activation link has been expired.")

    # Set the user active and set the activation uuid to NULL
    user.is_active = True
    user.activation_uuid = None

    approval_dict = {
        "username": user.username,
        "firstname": user.firstname,
        "lastname": user.lastname,
        "email": user.email,
        "profiles": ",".join([p.code for p in user.profiles]),
        "approval_link": "http://%s/users/approve?user=%s&name=%s" %
        (self.request.environ['HTTP_HOST'], user.uuid, user.username)
    }

    # Send an email to all moderators of the profile in which the user
    # registered.
    email_text = render(get_customized_template_path(
        self.request, 'emails/account_approval_request.mak'),
        approval_dict,
        request=self.request)

    # Determine profile. Each user should only have one profile when
    # registering!
    profiles = [p.code for p in user.profiles]
    if len(profiles) == 0:
        profile = get_default_profile(self.request)
    else:
        profile = profiles[0]

    # Find moderators of this profile
    moderators = DBSession.query(User). \
        join(users_groups). \
        join(Group). \
        join(users_profiles). \
        join(Profile). \
        filter(Group.name == 'moderators'). \
        filter(Profile.code == profile)

    # A list with email addresses the email is sent to
    email_addresses = []
    for m in moderators.all():
        email_addresses.append(m.email)

    if len(email_addresses) == 0:
        # If no moderator, try to contact the administrators
        for admin_user in DBSession.query(User).join(users_groups).join(
                Group).filter(func.lower(Group.name) == 'administrators'):
            email_addresses.append(admin_user.email)
        log.debug(
            "No moderator found for profile %s. Approval emails will be "
            "sent to administrators: %s" % (profile, email_addresses))
    else:
        log.debug(
            "Approval emails will be sent to moderators of %s profile: %s"
            % (profile, email_addresses))

    # Send the email
    self._send_email(email_addresses,
                     "User %s requests approval" % user.username, email_text)

    return render_to_response(
        get_customized_template_path(self.request,
                                     'users/activation_successful.mak'),
        {'username': user.username}, self.request)
def new_gene_name_reservation(request):
    """Validate and queue gene-name reservation requests for triage.

    Validates the top-level fields (colleague, year, status, author names)
    and each entry in ``reservations`` (name format, not already reserved or
    in use, optional systematic name), then stores one
    ``ReservednameTriage`` row per reservation.

    :returns: ``True`` on success, otherwise an :class:`HTTPBadRequest`
        carrying a JSON ``message``/``error`` body.
    """
    if not check_csrf_token(request, raises=False):
        return HTTPBadRequest(body=json.dumps({'error': 'Bad CSRF Token'}))
    data = request.json_body
    required_fields = ['colleague_id', 'year', 'status']
    # validate fields outside of reservation
    for x in required_fields:
        # FIX: use .get() so an absent key produces the clean "required field"
        # message instead of an unhandled KeyError (500).
        if not data.get(x):
            field_name = x.replace('_', ' ')
            field_name = field_name.replace('new', 'proposed')
            msg = field_name + ' is a required field.'
            return HTTPBadRequest(body=json.dumps({'message': msg}),
                                  content_type='text/json')
        if x == 'year':
            try:
                iy = int(data[x])
                if iy < 1950 or iy > 2050:
                    raise ValueError('Not a valid year')
            except ValueError:
                msg = 'Please enter a valid year.'
                return HTTPBadRequest(body=json.dumps({'message': msg}),
                                      content_type='text/json')
    # make sure author names have only letters
    if 'authors' in data.keys():
        authors = data['authors']
        for a in authors:
            if a['first_name'] and a['last_name']:
                first_name = a['first_name']
                last_name = a['last_name']
                if not (first_name.isalpha() and last_name.isalpha()):
                    return HTTPBadRequest(body=json.dumps(
                        {'message': 'Author names must contain only letters.'}),
                        content_type='text/json')
    res_required_fields = ['new_gene_name']
    # validate reservations themselves
    for res in data['reservations']:
        for x in res_required_fields:
            # FIX: .get() for the same KeyError-safety as above.
            if not res.get(x):
                field_name = x.replace('_', ' ')
                field_name = field_name.replace('new', 'proposed')
                msg = field_name + ' is a required field.'
                return HTTPBadRequest(body=json.dumps({'message': msg}),
                                      content_type='text/json')
        proposed_name = res['new_gene_name'].strip().upper()
        is_already_res = DBSession.query(Reservedname).filter(
            Reservedname.display_name == proposed_name).one_or_none()
        if is_already_res:
            msg = 'The proposed name ' + proposed_name + ' is already reserved. Please contact [email protected] for more information.'
            return HTTPBadRequest(body=json.dumps({'message': msg}),
                                  content_type='text/json')
        is_already_gene = DBSession.query(Locusdbentity).filter(
            Locusdbentity.gene_name == proposed_name).one_or_none()
        if is_already_gene:
            msg = 'The proposed name ' + proposed_name + ' is a standard gene name. Please contact [email protected] for more information.'
            return HTTPBadRequest(body=json.dumps({'message': msg}),
                                  content_type='text/json')
        # make sure is proper format
        if not Locusdbentity.is_valid_gene_name(proposed_name):
            msg = 'Proposed gene name does not meet standards for gene names. Must be 3 letters followed by a number.'
            return HTTPBadRequest(body=json.dumps({'message': msg}),
                                  content_type='text/json')
        # validate ORF as valid systematic name
        if res.get('systematic_name'):
            proposed_systematic_name = res['systematic_name'].strip()
            systematic_locus = DBSession.query(Locusdbentity).filter(
                Locusdbentity.systematic_name ==
                proposed_systematic_name).one_or_none()
            if not systematic_locus:
                msg = proposed_systematic_name + ' is not a recognized locus systematic name.'
                return HTTPBadRequest(body=json.dumps({'message': msg}),
                                      content_type='text/json')
            # see if there is already a res for that locus, or if already named
            is_systematic_res = DBSession.query(Reservedname).filter(
                Reservedname.locus_id ==
                systematic_locus.dbentity_id).one_or_none()
            if is_systematic_res:
                msg = proposed_systematic_name + ' has already been reserved. Please contact [email protected] for more information.'
                return HTTPBadRequest(body=json.dumps({'message': msg}),
                                      content_type='text/json')
            is_already_named = DBSession.query(
                Locusdbentity.gene_name).filter(
                    Locusdbentity.dbentity_id ==
                    systematic_locus.dbentity_id).scalar()
            if is_already_named:
                msg = proposed_systematic_name + ' has already been named. Please contact [email protected] for more information.'
                return HTTPBadRequest(body=json.dumps({'message': msg}),
                                      content_type='text/json')
            existing_name = systematic_locus.gene_name
            if existing_name:
                msg = proposed_systematic_name + ' already has a standard name: ' + existing_name + '. Please contact [email protected] for more information.'
                return HTTPBadRequest(body=json.dumps({'message': msg}),
                                      content_type='text/json')
    # input is valid, add entry or entries to reservednametriage
    try:
        colleague_id = data['colleague_id']
        for res in data['reservations']:
            proposed_gene_name = res['new_gene_name'].upper()
            # FIX: build a fresh payload per reservation. The original
            # ``res_data = data`` aliased (and mutated) the shared request
            # dict, so every reservation after the first inherited the
            # previous reservation's fields.
            res_data = {k: v for k, v in data.items() if k != 'reservations'}
            res_data.update(res)
            res_json = json.dumps(res_data)
            new_res = ReservednameTriage(
                proposed_gene_name=proposed_gene_name,
                colleague_id=colleague_id,
                json=res_json)
            DBSession.add(new_res)
        transaction.commit()
        return True
    except Exception as e:
        traceback.print_exc()
        transaction.abort()
        log.error(e)
        return HTTPBadRequest(body=json.dumps({'message': str(e)}),
                              content_type='text/json')
def addresses(request):
    """Address autocomplete endpoint.

    Requires a ``term`` query parameter; responds with 400 when it is
    missing.  Otherwise returns a list of ``{'id': ..., 'label': ...}``
    dicts for every Address whose label contains the term
    (case-insensitive substring match).
    """
    params = request.params
    if 'term' in params:
        pattern = '%%%s%%' % params['term']
        matches = DBSession.query(Address).filter(Address.label.ilike(pattern))
        results = []
        for address in matches:
            results.append({'id': address.id, 'label': address.label})
        return results
    return HTTPBadRequest()
def upload_vehicle(request):
    """Ingest a batch of vehicle readings posted as base64-encoded gzipped
    JSON lines.

    POST parameters:
        keys: comma-separated device secrets; every device referenced by the
            payload must have its secret included, else 403.
        data: base64 of gzip of newline-separated JSON objects, each with at
            least ``vid`` and ``trip_id`` and (to be kept) a ``timestamp``.

    Existing readings for the affected trips are removed before the new rows
    are inserted.  Returns ``{'inserted': <count>}``.
    """
    # take in some gzipped data that is base64 encoded
    keys = request.POST.get('keys')
    tz_local = timezone('Australia/Adelaide')
    tz_utc = utc
    if keys is None:
        return HTTPBadRequest("Please specify the device keys")
    keys = keys.split(",")
    # if key != request.db['webcan_devices'].find()
    # we receive the data as base 64 encoded gzipped json object rows
    data = request.POST.get('data')
    if data is None:
        return HTTPBadRequest("Please provide a data value")
    data64 = bytes(data, 'ascii')
    device_ids = set([])
    trips = set()
    rows = []
    # create a new trip_id based off the first GPS time reading
    # maps original trip_id -> renamed trip_id derived from the first
    # timestamp seen for that trip (local time) plus the original suffix
    timestamps = {}
    for row in gzip.decompress(
            base64.b64decode(data64)).decode('ascii').splitlines():
        try:
            js = json.loads(row)
        except json.JSONDecodeError as e:
            # silently skip lines that are not valid JSON
            # print("Could not decode '{}': {}".format(row, e))
            continue
        if 'vid' not in js or 'trip_id' not in js:
            return HTTPBadRequest(
                "Please provide a vid and trip_id on every row")
        if 'timestamp' in js and js['timestamp'] is not None:
            js['timestamp'] = dateutil.parser.parse(js['timestamp'])
            if js['trip_id'] not in timestamps:
                try:
                    # first reading of this trip defines the renamed trip_id:
                    # local-time stamp + everything after the second '_' of
                    # the original id
                    timestamps[js['trip_id']] = js['timestamp'].astimezone(
                        tz_local).strftime("%Y%m%d_%H%M%S_{}".format('_'.join(
                            js['trip_id'].split('_')[2:])))
                    print("Changing {} to {}".format(
                        js['trip_id'], timestamps[js['trip_id']]))
                except:
                    # NOTE(review): bare except silently keeps the trip
                    # un-renamed; if this fires, the lookup in the rename
                    # loop below would raise KeyError — TODO confirm intent
                    pass
        else:
            # rows without a timestamp are dropped entirely
            continue
        device_ids.add(js['vid'])
        trips.add(js['trip_id'])
        rows.append(js)
    # check if we have all the appropriate keys for the devices we want to add
    for device in request.db.webcan_devices.find(
            {'name': {
                '$in': list(device_ids)
            }}):
        if device['secret'] not in keys:
            return HTTPForbidden(
                'You must provide a valid API key for all Vehicle IDs used')
    # rewrite every row's trip_id to the timestamp-derived name
    for row in rows:
        row['trip_id'] = timestamps[row['trip_id']]
    # purge any previous readings for these trips (old and new names)
    request.db.rpi_readings.remove(
        {'trip_id': {
            '$in': list(trips) + list(timestamps.values())
        }})
    if not rows:
        return {'inserted': 0}
    res = request.db.rpi_readings.insert_many(rows)
    return {'inserted': len(res.inserted_ids)}
def wait_for_new_players(request):
    """
    This returns a list of players when the player count increases.

    Long-polls for up to ~10 seconds: the request blocks while the game's
    player count stays equal to the client-reported count, then responds
    with the current player list either way.

    This implements requirements:
        3.2.7
        3.2.8
        3.2.9
        3.2.10
        3.2.11

    Parameters
    ----------
    request: Request - required JSON parameters:
        "game_id": String
        "current_player_count": Int

    Returns
    -------
    Same object as in get_players_in_game(), plus "players_added", the
    string "True"/"False" (note: strings, not booleans) indicating whether
    a new player arrived before the timeout.
    """
    json_body = request.json_body
    if 'game_id' in request.session:
        game_id = request.session['game_id']
        game = request.registry.games.games[game_id]
    else:
        raise HTTPBadRequest(
            json_body={
                'error': "Requested game not found. Session may have expired"
            })
    if 'current_player_count' in json_body:
        player_count = json_body['current_player_count']
    else:
        raise HTTPBadRequest(
            json_body={
                'error':
                "Required parameter 'current_player_count' not found in request"
            })
    timeout = 0
    players_added = "True"
    # Poll once a second while: the game isn't full (< 4), no new player has
    # joined since the client's reported count, and the game hasn't started.
    # Give up after 10 seconds.
    while (player_count < 4) and (player_count == len(
            game.turn_order)) and not game.game_started:
        time.sleep(1)
        timeout += 1
        if timeout > 10:
            players_added = "False"
            break
    players = []
    if game.players:
        # NOTE: iteritems() — this code targets Python 2.
        for _, player in game.players.iteritems():
            players.append({
                'Player':
                player.get_dictionary(player_age=True,
                                      player_color=True,
                                      owned_settlements=True)
            })
    else:
        players.append("None")
    return_data = {
        'players_added': players_added,
        'Players': players,
        'Game': game.get_dictionary(has_started=True, player_count=True)
    }
    json_return = json.dumps(return_data)
    return Response(content_type='application/json', body=json_return)
def search(request):
    """Serve the package search page.

    Builds an Elasticsearch ``dis_max`` query from the ``q`` parameter
    (a boosted per-field match clause for each search field, plus a prefix
    clause for terms longer than one character), applies optional ordering
    (``o``) and classifier filtering (``c``), paginates the hits and
    gathers the available classifier filters for the sidebar.

    Raises HTTPBadRequest when ``page`` is not an integer; returns
    HTTPNotFound when the requested page is past the last one.
    """
    term = request.params.get("q", '')
    if term:
        clauses = []
        for field in SEARCH_FIELDS:
            match_kw = {"query": term}
            if field in SEARCH_BOOSTS:
                match_kw["boost"] = SEARCH_BOOSTS[field]
            clauses.append(Q("match", **{field: match_kw}))
        # Single-character terms get no prefix clause.
        if len(term) > 1:
            clauses.append(Q('prefix', normalized_name=term))
        query = request.es.query("dis_max", queries=clauses)
        query = query.suggest("name_suggestion", term, term={"field": "name"})
    else:
        query = request.es.query()

    order = request.params.get("o")
    if order:
        if order.startswith("-"):
            sort_spec = {order[1:]: {"order": "desc", "unmapped_type": "long"}}
        else:
            sort_spec = {order: {"unmapped_type": "long"}}
        query = query.sort(sort_spec)

    selected_classifiers = request.params.getall("c")
    if selected_classifiers:
        query = query.filter("terms", classifiers=selected_classifiers)

    try:
        page_num = int(request.params.get("page", 1))
    except ValueError:
        raise HTTPBadRequest("'page' must be an integer.")

    page = ElasticsearchPage(
        query,
        page=page_num,
        url_maker=paginate_url_factory(request),
    )
    if page.page_count and page_num > page.page_count:
        return HTTPNotFound()

    # Group every known classifier under its top-level component for the
    # filter sidebar.
    available_filters = collections.defaultdict(list)
    classifiers_q = (
        request.db.query(Classifier)
        .with_entities(Classifier.classifier)
        .filter(
            exists([release_classifiers.c.trove_id]).where(
                release_classifiers.c.trove_id == Classifier.id))
        .order_by(Classifier.classifier))
    for cls in classifiers_q:
        top_level = cls.classifier.split(' :: ')[0]
        available_filters[top_level].append(cls.classifier)

    def filter_key(item):
        # Known filters come first, in SEARCH_FILTER_ORDER; everything
        # else follows, alphabetically by name.
        name = item[0]
        if name in SEARCH_FILTER_ORDER:
            return 0, SEARCH_FILTER_ORDER.index(name), name
        return 1, 0, name

    return {
        "page": page,
        "term": term,
        "order": request.params.get("o", ''),
        "available_filters": sorted(available_filters.items(), key=filter_key),
        "applied_filters": request.params.getall("c"),
    }
def add_player_to_game(request):
    """
    Creates a player and then adds it to the game's list of players

    This implements requirements:
        3.2.2
        3.2.3
        3.2.4
        3.2.5
        3.2.6

    Parameters
    ----------
    request: Request - required JSON parameters:
        "player_name": String
        "player_age": Int

    Returns
    -------
    Json object containing:
        "game": {
            "player_count": Int
            "game_is_full": Bool
        }
        "Player": {
            "player_id": String
            "player_name": String
            "player_color": String
            "player_age: Int
        }
    """
    body = request.json_body

    # Resolve the target game: session first, JSON override second.
    if 'game_id' not in request.session:
        raise HTTPBadRequest(
            json_body={
                'error': 'Requested game not found. Session may have expired'
            })
    game = request.registry.games.games[request.session['game_id']]
    if 'game_id' in body:
        game = request.registry.games.games[body['game_id']]

    if game.game_started:
        raise HTTPBadRequest(json_body={'error': 'Game has already started'})

    if 'player_name' not in body:
        raise HTTPBadRequest(json_body={
            'error': "'player_name' is a required parameter for this request"
        })
    player_name = body['player_name']

    if 'player_age' not in body:
        raise HTTPBadRequest(json_body={
            'error': "'player_age' is a required parameter for this request"
        })
    player_age = body['player_age']

    player = game.add_player(player_name, player_age)
    if player is None:
        # add_player signals a full game by returning None.
        raise HTTPBadRequest(
            json_body={'error': "Player not created, game is full"})

    request.session['player_id'] = player.id
    payload = {
        'game': game.get_dictionary(player_count=True, is_full=True),
        'player': player.get_dictionary(player_age=True, player_color=True)
    }
    return Response(content_type='application/json', body=json.dumps(payload))
def make_query(context, request):
    """Given a search request, return a catalog query and a list of terms.

    Reads the search parameters from ``request.params``:

    - ``body``: free-text search term
    - ``kind``: LiveSearch group name; raises HTTPBadRequest if unknown
    - ``creator``: restrict to documents created by matching users
    - ``tags``: match any of the given tags
    - ``year``: restrict creation_date to that calendar year
    - ``since``: restrict creation_date to a recent period (open-ended)
    - ``sort``: named sort option

    Returns a ``(query, terms)`` tuple where ``terms`` lists the
    human-readable search terms that were applied.
    """
    params = request.params
    query = {}
    terms = []

    term = params.get('body')
    if term:
        terms.append(term)

    kind = params.get('kind')
    if kind:
        searcher = queryUtility(IGroupSearchFactory, kind)
        if searcher is None:
            # If the 'kind' we got is not known, return an error
            fmt = "The LiveSearch group %s is not known"
            raise HTTPBadRequest(fmt % kind)
        terms.append(kind)
    else:
        searcher = default_group_search
    searcher = searcher(context, request, term)
    query.update(searcher.criteria)

    creator = params.get('creator')
    if creator:
        userids = list(_iter_userids(context, request, creator))
        query['creator'] = {
            'query': userids,
            'operator': 'or',
        }
        terms.append(creator)

    # Materialize the filter result so the truthiness test below also works
    # on Python 3, where filter() returns a lazy (always-truthy) iterator.
    tags = list(filter(None, params.getall('tags')))
    if tags:
        query['tags'] = {
            'query': tags,
            'operator': 'or',
        }
        terms.extend(tags)

    year = params.get('year')
    if year:
        year = int(year)
        begin = coarse_datetime_repr(datetime.datetime(year, 1, 1))
        # Bug fix: the upper bound previously used hour 12, silently
        # excluding documents created during the afternoon of Dec 31.
        end = coarse_datetime_repr(
            datetime.datetime(year, 12, 31, 23, 59, 59))
        query['creation_date'] = (begin, end)
        terms.append(year)

    since = params.get('since')
    if since:
        option = since_options[since]
        since = datetime.datetime.now() - option['delta']
        # Open-ended range: everything newer than the cutoff.
        query['creation_date'] = (coarse_datetime_repr(since), None)
        terms.append(option['name'])

    sort = params.get('sort')
    if sort:
        option = sort_options[sort]
        query['sort_index'] = option['sort_index']
        query['reverse'] = option['reverse']
        terms.append(option['name'])

    return query, terms
def response_from_error(error):
    """Build a 400 Bad Request whose text describes the client error.

    The ``error.error`` detail is appended to a fixed prefix and stored
    as the response text (decoded as UTF-8 when needed).
    """
    prefix = 'Evil client is unable to send a proper request. Error is: '
    bad_request = HTTPBadRequest()
    bad_request.text = to_unicode(prefix + error.error, 'utf-8')
    return bad_request
def __call__(self, value, system):
    """
    Implements a subclass of pyramid_oereb.lib.renderer.extract.json_.Renderer to create a print result
    out of a json. The json extract is reformatted to fit the structure of mapfish print.

    Args:
        value (tuple): A tuple containing the generated extract record and the params
            dictionary.
        system (dict): The available system properties.

    Returns:
        buffer: The pdf content as received from configured mapfish print instance url.

    Raises:
        ConfigurationError: If the print config section, its base_url or its token is missing.
        HTTPBadRequest: If the webservice parameters request images in the extract.
    """
    print_config = Config.get('print', {})
    if not print_config:
        raise ConfigurationError(
            'No print config section in config file was found.')
    print_service_url = print_config.get('base_url', '')
    if not print_service_url:
        raise ConfigurationError(
            'No print service url ("base_url") was found in the config.')
    print_service_token = print_config.get('token', '')
    if not print_service_token:
        raise ConfigurationError(
            'No print service token ("token") was found in the config.')
    verify_certificate = print_config.get('verify_certificate', True)
    self.headers = {'token': print_service_token}
    self.parameters = {
        'validate': print_config.get('validate', 'false'),
        'usewms': print_config.get('use_wms', 'false'),
    }
    log.debug("Parameter webservice is {}".format(value[1]))
    if value[1].images:
        raise HTTPBadRequest('With image is not allowed in the print')
    self._request = self.get_request(system)
    # If language present in request, use that. Otherwise, keep language
    # from base class.
    if 'lang' in self._request.GET:
        self._language = self._request.GET.get('lang')
    self.parameters['language'] = self._language
    self.parameters['flavour'] = self._request.matchdict['flavour']
    # Based on extract record and webservice parameter, render the extract
    # data (the renderer produces XML for the print service).
    extract_record = value[0]
    extract_as_xml = self._render(extract_record, value[1])
    response = self.get_response(system)
    # 'getspec' requests get the raw rendered XML back instead of a PDF.
    if self._request.GET.get('getspec', 'no') != 'no':
        response.headers['Content-Type'] = 'application/xml; charset=UTF-8'
        return extract_as_xml
    prepared_extraxt_as_xml = self.prepare_xml(extract_as_xml)
    print_result = self.request_pdf(print_service_url,
                                    prepared_extraxt_as_xml, self.headers,
                                    self.parameters, verify_certificate)
    # Mirror the print service's status and headers onto our response,
    # stripping the hop-by-hop headers that must not be relayed verbatim.
    response.status_code = print_result.status_code
    response.headers = print_result.headers
    if 'Transfer-Encoding' in response.headers:
        del response.headers['Transfer-Encoding']
    if 'Connection' in response.headers:
        del response.headers['Connection']
    return print_result
def _do_update_from_json(self, json, parse_def, aliases, ctx, permissions,
                         user_id, duplicate_handling=None, jsonld=None):
    """Update this notification subscription from its JSON representation.

    Validates and assigns the owning user and discussion (raising
    HTTPUnauthorized on permission failures and HTTPBadRequest on
    inconsistent ids), re-dispatches to a new class instance when '@type'
    changes, then applies creation_origin, parent_subscription and status.
    Returns the result of handle_duplication (or the new instance when the
    type changed).
    """
    from ..auth.util import user_has_permission
    target_user_id = user_id
    user = ctx.get_instance_of_class(User)
    if user:
        target_user_id = user.id
    if self.user_id:
        # Subscription already owned: only a discussion admin may act on
        # behalf of another user.
        if target_user_id != self.user_id:
            if not user_has_permission(self.discussion_id, user_id,
                                       P_ADMIN_DISC):
                raise HTTPUnauthorized()
        # For now, do not allow changing user, it's way too complicated.
        if 'user' in json and User.get_database_id(
                json['user']) != self.user_id:
            raise HTTPBadRequest()
    else:
        # No owner yet: take it from the JSON, defaulting to the target
        # user; naming a *different* user requires admin permission.
        json_user_id = json.get('user', None)
        if json_user_id is None:
            json_user_id = target_user_id
        else:
            json_user_id = User.get_database_id(json_user_id)
            if json_user_id != user_id and not user_has_permission(
                    self.discussion_id, user_id, P_ADMIN_DISC):
                raise HTTPUnauthorized()
        self.user_id = json_user_id
    if self.discussion_id:
        # Discussion already set: reject a JSON body naming a different one.
        if 'discussion_id' in json and Discussion.get_database_id(
                json['discussion_id']) != self.discussion_id:
            raise HTTPBadRequest()
    else:
        discussion_id = json.get('discussion',
                                 None) or ctx.get_discussion_id()
        if discussion_id is None:
            raise HTTPBadRequest()
        self.discussion_id = Discussion.get_database_id(discussion_id)
    new_type = json.get('@type', self.type)
    if self.external_typename() != new_type:
        # '@type' changed: swap this row's polymorphic class and let the
        # new instance finish the update (duplicate_handling forced True).
        polymap = inspect(self.__class__).polymorphic_identity
        if new_type not in polymap:
            raise HTTPBadRequest()
        new_type = polymap[new_type].class_
        new_instance = self.change_class(new_type)
        return new_instance._do_update_from_json(json, parse_def, aliases,
                                                 ctx, permissions, user_id,
                                                 True, jsonld)
    creation_origin = json.get('creation_origin', "USER_REQUESTED")
    if creation_origin is not None:
        self.creation_origin = NotificationCreationOrigin.from_string(
            creation_origin)
    if json.get('parent_subscription', None) is not None:
        self.parent_subscription_id = self.get_database_id(
            json['parent_subscription'])
    status = json.get('status', None)
    if status:
        status = NotificationSubscriptionStatus.from_string(status)
        # Record when the status actually changes.
        if status != self.status:
            self.status = status
            self.last_status_change_date = datetime.utcnow()
    return self.handle_duplication(json, parse_def, aliases, ctx,
                                   permissions, user_id, duplicate_handling,
                                   jsonld)
def invalid_api_key(self):
    """Return a 400 response carrying the canned INVALID_API_KEY JSON body."""
    result = HTTPBadRequest()
    result.body = INVALID_API_KEY
    result.content_type = 'application/json'
    return result
def _check_deploy(payload):
    """
    Validate minimum deploy payload field requirements with exception handling.

    :param payload: raw process deployment payload (JSON body) to validate.
    :returns: the deserialized payload when validation succeeds.
    :raises HTTPBadRequest: when the payload, its process inputs, or its
        execution unit fail schema validation or silently differ from the
        submitted values after deserialization.
    """
    # FIXME: handle colander invalid directly in tween (https://github.com/crim-ca/weaver/issues/112)
    message = "Process deployment definition is invalid."
    try:
        results = sd.Deploy().deserialize(payload)
        # Because many fields are optional during deployment to allow flexibility between compatible WPS/CWL
        # definitions, any invalid field at lower-level could make a full higher-level definition to be dropped.
        # Verify the result to ensure this was not the case for known cases to attempt early detection.
        p_inputs = payload.get("processDescription", {}).get("process",
                                                             {}).get("inputs")
        r_inputs = results.get("processDescription", {}).get("process",
                                                             {}).get("inputs")
        if p_inputs and p_inputs != r_inputs:
            message = "Process deployment inputs definition is invalid."
            # try raising sub-schema to have specific reason
            d_inputs = sd.DeployInputTypeAny().deserialize(p_inputs)
            # Raise directly if we where not able to detect the cause, but there is something incorrectly dropped.
            # Only raise if indirect vs direct inputs deserialize differ such that auto-resolved defaults omitted from
            # submitted process inputs or unknowns fields that were correctly ignored don't cause false-positive diffs.
            if r_inputs != d_inputs:
                message = (
                    "Process deployment inputs definition resolved as valid schema but differ from submitted values. "
                    "Validate provided inputs against resolved inputs with schemas to avoid mismatching definitions."
                )
                raise HTTPBadRequest(
                    json={
                        "description": message,
                        "cause": "unknown",
                        "error": "Invalid",
                        "value": d_inputs
                    })
        # Execution Unit is optional since process reference (e.g.: WPS-1 href) can be provided in processDescription
        # Cannot validate as CWL yet, since execution unit can also be an href that is not yet fetched (it will later)
        p_exec_unit = payload.get("executionUnit", [{}])
        r_exec_unit = results.get("executionUnit", [{}])
        if p_exec_unit and p_exec_unit != r_exec_unit:
            message = "Process deployment execution unit is invalid."
            d_exec_unit = sd.ExecutionUnit().deserialize(
                p_exec_unit)  # raises directly if caused by invalid schema
            if r_exec_unit != d_exec_unit:
                # otherwise raise a generic error, don't allow differing definitions
                message = (
                    "Process deployment execution unit resolved as valid definition but differs from submitted "
                    "package. Aborting deployment to avoid mismatching package definitions."
                )
                raise HTTPBadRequest(
                    json={
                        "description": message,
                        "cause": "unknown",
                        "error": PackageRegistrationError.__name__,
                        "value": d_exec_unit
                    })
        return results
    except colander.Invalid as exc:
        # Schema validation failed outright; report the most specific
        # message accumulated above together with the colander detail.
        LOGGER.debug("Failed deploy body schema validation:\n%s", exc)
        raise HTTPBadRequest(
            json={
                "description": message,
                "cause": "Invalid schema: [{!s}]".format(exc.msg),
                "error": exc.__class__.__name__,
                "value": exc.value
            })
def search(request):
    """Serve the package search page.

    Normalizes single quotes in ``q`` to double quotes, builds the
    Elasticsearch boolean query via ``gather_es_queries``, applies optional
    ordering (``o``), requires a match on *every* selected classifier
    (``c``), paginates the hits, gathers the non-deprecated classifier
    filters for the sidebar, and reports the result count to Datadog.

    Raises HTTPBadRequest when ``page`` is not an integer; returns
    HTTPNotFound when the requested page is past the last one.
    """
    term = request.params.get("q", "").replace("'", '"')
    if term:
        query = request.es.query(gather_es_queries(term))
        query = query.suggest("name_suggestion", term, term={"field": "name"})
    else:
        query = request.es.query()

    order = request.params.get("o")
    if order:
        descending = order.startswith("-")
        field = order[1:] if descending else order
        if descending:
            spec = {"order": "desc", "unmapped_type": "long"}
        else:
            spec = {"unmapped_type": "long"}
        query = query.sort({field: spec})

    # Require match to all specified classifiers
    for selected in request.params.getall("c"):
        query = query.filter("terms", classifiers=[selected])

    try:
        page_num = int(request.params.get("page", 1))
    except ValueError:
        raise HTTPBadRequest("'page' must be an integer.")

    page = ElasticsearchPage(
        query, page=page_num, url_maker=paginate_url_factory(request))
    if page.page_count and page_num > page.page_count:
        return HTTPNotFound()

    # Group non-deprecated classifiers under their top-level component
    # for the filter sidebar.
    available_filters = collections.defaultdict(list)
    non_deprecated = (
        request.db.query(Classifier)
        .with_entities(Classifier.classifier)
        .filter(Classifier.deprecated.is_(False))
        .filter(
            exists([release_classifiers.c.trove_id]).where(
                release_classifiers.c.trove_id == Classifier.id))
        .order_by(Classifier.classifier))
    for cls in non_deprecated:
        top_level = cls.classifier.split(" :: ")[0]
        available_filters[top_level].append(cls.classifier)

    def filter_key(item):
        # Known filters come first, in SEARCH_FILTER_ORDER; everything
        # else follows, alphabetically by name.
        name = item[0]
        if name in SEARCH_FILTER_ORDER:
            return 0, SEARCH_FILTER_ORDER.index(name), name
        return 1, 0, name

    request.registry.datadog.histogram("warehouse.views.search.results",
                                       page.item_count)

    return {
        "page": page,
        "term": term,
        "order": request.params.get("o", ""),
        "available_filters": sorted(available_filters.items(), key=filter_key),
        "applied_filters": request.params.getall("c"),
    }
def __init__(self, errors=''):
    """Initialize the 400 response with a JSON error body of the form
    ``{"code": 400, "status": "error", "message": <errors>}``."""
    payload = {'code': 400, 'status': 'error', 'message': unicode(errors)}
    HTTPBadRequest.__init__(self, body=json.dumps(payload))