def adding_view(request):
    """Create a License row from the submitted form and redirect home.

    Reads the waste id, company id, seven date fields (c1..c7) and seven
    companion fields (c11..c71) from ``request.params``; empty strings are
    stored as NULL. After inserting the new License, a best-effort delete of
    the License referenced by the optional ``id`` parameter is attempted
    (used when the form replaces an existing license). Any database or
    missing-parameter error yields a plain-text 500 response.
    """
    def _blank_to_none(value):
        # Form sends '' for untouched date inputs; persist those as NULL.
        return None if value == '' else value

    try:
        DBSession = Session(bind=engine)
        params = request.params
        waste_id = params['waste']
        company_id = params['company']
        other = params['other']
        # Date fields are named c1..c7, companion fields c11..c71.
        dates = [_blank_to_none(params['c%d' % i]) for i in range(1, 8)]
        finals = [_blank_to_none(params['c%d1' % i]) for i in range(1, 8)]
        w = DBSession.query(Waste).filter(Waste.id == waste_id).first()
        compani = DBSession.query(Company).filter(
            Company.id == company_id).first()
        new_license = License(
            company=compani.id, waste=w.id,
            collection=dates[0], transportation=dates[1], defusing=dates[2],
            using=dates[3], treatment=dates[4], recovery=dates[5],
            placement=dates[6],
            collectionf=finals[0], transportationf=finals[1],
            defusingf=finals[2], usingf=finals[3], treatmentf=finals[4],
            recoveryf=finals[5], placementf=finals[6],
            other=other)
        DBSession.add(new_license)
        DBSession.commit()
        try:
            # Best-effort: remove the license being replaced, if an id was
            # posted. Failure (e.g. no 'id' param) is deliberately ignored.
            DBSession.query(License).filter(
                License.id == params['id']).delete()
        except Exception:
            pass
        DBSession.commit()
    except Exception:
        # Boundary handler: surface any DB/param failure as a 500.
        return Response(db_err_msg, content_type='text/plain', status=500)
    return HTTPFound('/')
def hello_world(request):
    """Debug view: echo this module's file/package and the matched route dict."""
    body = """Hello world from %s (%s)!<br> request.matchdict = %s""" % (
        __file__, __package__, request.matchdict)
    return Response(body)
def index(request):
    """Serve the index placeholder body."""
    payload = 'idx'
    return Response(payload)
def hello_world(request):
    """Return a bare greeting response."""
    greeting = "Hello"
    return Response(greeting)
# Module header: docstring, the pyramid Response import, and the shared
# Intercooler sentinel responses (IC_NOOP / IC_NOOP_404 / IC_EMPTY) that the
# views below return when they want the client-side library to do nothing
# (empty body) or to blank out an element (two-space body).
"""Contains the application's views.""" from pyramid.response import Response # Intercooler uses an empty response as a no-op and won't replace anything. # 204 would probably be more correct than 200, but Intercooler errors on it IC_NOOP = Response(status_int=200) IC_NOOP_404 = Response(status_int=404) # Because of the above, in order to deliberately cause Intercooler to replace # an element with whitespace, the response needs to contain at least two spaces IC_EMPTY = Response(' ')
# View backing the "update metadata source" page.
# With an ?id= parameter: returns a dict carrying the source row (JSON), its
# HLA typings (JSON), and per-column autocomplete lists built from grouped
# distinct values of each allowed column.
# Without an id: builds source_id autocomplete data, renders the update
# template explicitly, and answers with a plain-text 500 on any error.
# NOTE(review): dict.iteritems() means this module targets Python 2; the
# trailing bare `except:` also swallows template errors, not just DB ones.
def update_metadata_source(request): if "id" in request.params: result_dict = dict() result_dict["id"] = request.params['id'] query = DBSession.query( Source.patient_id, Source.organ, Source.organism, Source.histology, Source.dignity, Source.celltype, Source.location, Source.metastatis, Source.treatment, Source.person, func.cast(Source.prep_date, String).label("prep_date")).filter( Source.source_id == request.params["id"]) source = json.dumps(query.all()) result_dict['source'] = source query = DBSession.query( Source.source_id, HlaType.hla_string).join(t_hla_map).join(HlaType).filter( Source.source_id == request.params["id"]) hla = json.dumps(query.all()) result_dict['hla'] = hla # getting autocomplete items allowed_elements = { "patient_id": Source.patient_id, "organ": Source.organ, "organism": Source.organism, "histology": Source.histology, "dignity": Source.dignity, "celltype": Source.celltype, "location": Source.location, "metastatis": Source.metastatis, "treatment": Source.treatment, "person": Source.person, "comment": Source.comment, "typing": HlaType.hla_string } for k, v in allowed_elements.iteritems(): query = DBSession.query(v) query = query.group_by(v) query_result = js_list_creator(query.all()) result_dict[k] = query_result #result_dict['original'] = source return result_dict else: try: # query data for autocomplete result_dict = dict() allowed_elements = {"source_id": Source.source_id} for k, v in allowed_elements.iteritems(): query = DBSession.query(v) query = query.group_by(v) query_result = js_list_creator(query.all()) result_dict[k] = query_result # setting a different renderer result = render( '../templates/upload_templates/update_metadata_source.pt', result_dict, request=request) response = Response(result) return response except: return Response(conn_err_msg, content_type='text/plain', status_int=500)
# Bulk upload of source metadata, posted as a Python-literal list in
# request.params["sources"] (parsed with ast.literal_eval). First pass:
# aborts the WHOLE upload with HTTP 500 if any posted source already exists
# (matched on all metadata columns). Second pass: builds a sample_id from
# the concatenated columns, inserts each Source, and for a non-empty
# 'typing' string splits the HLA alleles on ';' and each allele on ':',
# inserting every sub-type prefix into HlaType when unknown and linking it
# to the source. Inserts are flushed, not committed, here.
# NOTE(review): the lines below are a single collapsed definition; the
# response string split across source lines is part of the original text.
def upload_metadata_source_post(request): source_upload = ast.literal_eval(request.params["sources"]) # Check if source already in DB for source in source_upload: try: query = DBSession.query(Source.source_id) \ .filter(Source.patient_id == source['patient_id']) \ .filter(Source.organ == source['organ']) \ .filter(Source.organism == source['organism']) \ .filter(Source.histology == source['histology']) \ .filter(Source.dignity == source['dignity']) \ .filter(Source.location == source['location']) \ .filter(Source.treatment == source['treatment']) \ .filter(Source.metastatis == source['metastatis']) \ .filter(Source.celltype == source['celltype']) \ .filter(Source.comment == source['comment']) \ .filter(Source.person == source['person']) \ .filter(Source.prep_date == source['prep_date']) test_source = query.all() except DBAPIError: return Response(conn_err_msg, content_type='text/plain', status_int=500) # if in DB abort whole upload if len(test_source) > 0: log_writer("source_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + source) log_writer( "source_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "The source is already in the Database. Aborted whole upload!") return Response( "The source is already in the Database. 
Aborted whole upload!", content_type='text/plain', status_int=500) # upload each source for source in source_upload: # #################################################### # Source: # # #################################################### try: sample_id = source["patient_id"]+"_"+ source["organ"]+"_"+source['dignity']+"_"+ source['histology']+"_"+\ source['celltype'] +"_"+ source['location'] +"_"+ source['treatment'] +"_"+ source['prep_date'] source_insert = Source(patient_id=source['patient_id'], organ=source['organ'], organism=source['organism'], histology=source['histology'], dignity=source['dignity'], location=source['location'], treatment=source['treatment'], metastatis=source['metastatis'], celltype=source['celltype'], comment=source['comment'], prep_date=source['prep_date'], person=source['person'], sample_id=sample_id) #DBSession.add(source_insert) #DBSession.flush() #source_id = source_insert.source_id except DBAPIError: return Response(conn_err_msg + "\n Insert into Source failed!", content_type='text/plain', status_int=500) if source['typing'] is not "": # ############### # hla_types # # ############### hla_alleles = source['typing'].split(";") for hla_typing in hla_alleles: hla_typing_split = hla_typing.strip().split(":") for i in range(0, len(hla_typing_split)): sub_type = ":".join(hla_typing_split[0:i + 1]) try: query = DBSession.query(HlaType.hla_types_id).filter( HlaType.hla_string == sub_type) hla_types_id = query.all() except DBAPIError: return Response(conn_err_msg, content_type='text/plain', status_int=500) # unknown hla_lookup if len(hla_types_id) == 0: try: hla_type = HlaType( hla_string=sub_type, digits=hla_digits_extractor(sub_type)) DBSession.add(hla_type) DBSession.flush() hla_types_id = hla_type.hla_types_id except DBAPIError: return Response(conn_err_msg + "\n Insert into Hla-Types failed!", content_type='text/plain', status_int=500) else: hla_types_id = hla_types_id[0] hla_type = query = DBSession.query(HlaType).filter( 
HlaType.hla_string == sub_type).all()[0] # ############### # hla_map # # ############### try: source_insert.append(hla_type) except DBAPIError: return Response(conn_err_msg + "\n Insert into Hla-Map failed!", content_type='text/plain', status_int=500) try: log_writer( "source_metadata", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + source) log_writer( "source_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + source) DBSession.add(source_insert) DBSession.flush() except DBAPIError: return Response(conn_err_msg + "\n Insert into Source failed!", content_type='text/plain', status_int=500) else: log_writer("source_metadata", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + source) log_writer("source_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + source) DBSession.add(source_insert) DBSession.flush() hla_lookup_id = "NULL" return dict()
# Firebase/Google login endpoint. Flow visible below:
#  1. Read auth params from the JSON body; verify the Firebase token unless
#     this is the non-production 'oice-dev' shortcut.
#  2. Look up the user by email (or firebase_user_id for anonymous logins);
#     create the user on first login and emit a 'createUser' Kafka log.
#  3. For returning non-anonymous users, ensure a fork of the sample story
#     exists and expire trials whose expire_date has passed.
#  4. Handle account redemption (prev_firebase_user_id differs), avatar
#     import from photoURL, language normalisation, and — for first-time
#     Google-token logins — sample story/oice forks, a public library, and
#     default library pre-selection plus Mailchimp/Elasticsearch updates.
#  5. Serialize the user (with an Intercom HMAC) and return a JSON response
#     whose headers come from remember(), i.e. the auth cookie.
# NOTE(review): collapsed single definition; statement order matters, so
# code is left untouched.
def login_user(request): try: email = request.json_body.get('email') firebase_token = request.json_body.get('firebaseToken') is_anonymous = request.json_body.get('isAnonymous') firebase_user_id = request.json_body.get('firebaseUserId') google_token = request.json_body.get('googleToken') branch_data = request.json_body.get('branchData') prev_firebase_user_id = request.json_body.get('prevFirebaseUserId') except ValueError: raise ValidationError('ERR_INVALID_AUTH_PARAM') if get_is_production() or email != 'oice-dev': try: auth.verify_id_token(firebase_token) except ValueError: raise ValidationError('ERR_FIREBASE_AUTH_ERROR') except AppIdentityError: raise ValidationError('ERR_INVALID_FIREBASE_TOKEN') old_auth_id = authenticated_userid(request) fetch_username = email if is_anonymous and firebase_user_id: fetch_username = firebase_user_id # Init these bool here to avoid scope issue is_first_login = False is_trial_ended = False log_dict = { 'topic': 'actionUser', 'isAnonymous': 'true' if is_anonymous else 'false', 'isDeeplink': 'false', } if branch_data: log_dict.update({ 'channel': dict_get_value(branch_data, ['~channel'], 'direct'), 'isDeeplink': 'true', }) log_dict = set_basic_info_referrer_log( dict_get_value(branch_data, ['+referrer'], 'none'), dict_get_value(branch_data, ['referrer2'], 'none'), log_dict) oice_source = OiceQuery(DBSession).get_by_uuid( dict_get_value(branch_data, ['uuid'])) if oice_source: log_dict = set_basic_info_oice_source_log( oice_source.story.users[0], oice_source, log_dict) try: user = UserQuery(DBSession).fetch_user_by_email( email=fetch_username).one() except NoResultFound: user = User(email=fetch_username, is_anonymous=is_anonymous) if firebase_user_id: user.display_name = firebase_user_id DBSession.add(user) DBSession.flush() is_first_login = True is_trial_ended = False # log log_dict.update({'action': 'createUser'}) log_dict = set_basic_info_user_log(user, log_dict) log_dict = set_basic_info_log(request, log_dict) 
log_message(KAFKA_TOPIC_USER, log_dict) else: user.last_login_at = datetime.datetime.utcnow() if not user.is_anonymous: sample_story = StoryQuery(DBSession).get_sample_story( user.language) story = next((user_story for user_story in user.stories if sample_story.id == user_story.fork_of), None) if not story: story = fork_story(DBSession, sample_story) sample_oice = OiceQuery(DBSession).get_sample_oice( language=user.language) oice = fork_oice(DBSession, story, sample_oice) user.stories.append(story) if user.is_trial: if user.is_paid( ) and user.expire_date < datetime.datetime.utcnow(): user.role = 'user' update_user_mailchimp_stage(user=user, stage=5) if user.is_free(): user.is_trial = False is_trial_ended = True else: # if user.is_free() and not user.expire_date: # Disabled trial due to busines request # UserOperations.start_trial(user) is_trial_ended = False is_first_login = False if not old_auth_id or request.headers.get('x-oice-app-version'): # log is_redeem_account = prev_firebase_user_id and firebase_user_id != prev_firebase_user_id log_dict.update({ 'action': 'redeemAccount' if is_redeem_account else 'login', }) log_dict = set_basic_info_user_log(user, log_dict) log_dict = set_basic_info_log(request, log_dict) log_message(KAFKA_TOPIC_USER, log_dict) if is_redeem_account: handle_anonymous_user_app_story_progress(is_existing_user=True, \ prev_user_email=prev_firebase_user_id, \ new_user=user) photo_url = request.json_body.get('photoURL', None) if photo_url and user.avatar_storage is None: r = requests.get(photo_url) avatar = BytesIO(r.content) factory = pyramid_safile.get_factory() handle = factory.create_handle('avatar.png', avatar) user.import_handle(handle) language = request.json_body.get('language', None) normalized_language = None if language and user.language is None: normalized_language = normalize_language(language) if normalized_language: user.language = normalized_language # derive ui_language when creating user user.ui_language = 
normalize_ui_language(normalized_language) if (is_first_login or user.is_anonymous) and google_token: display_name = request.json_body.get('displayName', None) if email: user.email = email if not display_name: display_name = email.split('@')[0] if display_name: user.display_name = display_name sample_story = StoryQuery(DBSession).get_sample_story( normalized_language) story = fork_story(DBSession, sample_story) sample_oice = OiceQuery(DBSession).get_sample_oice( language=normalized_language) oice = fork_oice(DBSession, story, sample_oice) # open a public library for new user library = create_user_public_library(DBSession, user.display_name) user.stories.append(story) user.libraries.append(library) user.libraries_selected.append(library) # pre-select default libraries for new user default_libs = LibraryQuery(DBSession).fetch_default_libs() user.libraries_purchased.extend(default_libs) user.libraries_selected.extend(default_libs) # Disabled trial due to busines request # UserOperations.start_trial(user) user.last_login_at = datetime.datetime.utcnow() subscribe_mailchimp(google_token, user, language=language) # update elastic search when create user update_elastic_search_user(user.display_name, email) if is_first_login and request.headers.get('x-oice-app-version'): # log log_dict.update({'action': 'bindAccount'}) log_dict = set_basic_info_user_log(user, log_dict) log_dict = set_basic_info_log(request, log_dict) log_message(KAFKA_TOPIC_USER, log_dict) handle_anonymous_user_app_story_progress(is_existing_user=False, \ prev_user_email=prev_firebase_user_id, \ new_user=user) user.is_anonymous = False serialize_user = user.serialize() serialize_user['isFirstLogin'] = is_first_login serialize_user['isTrialEnded'] = is_trial_ended serialize_user['intercomUserHash'] = hmac.new( bytes(get_intercom_secret_key().encode('utf-8')), bytes(str(user.id).encode('utf-8')), digestmod=hashlib.sha256).hexdigest() response = Response() response.status_code = 200 response.headers = 
remember(request, user.email) response.content_type = 'application/json' response.charset = 'UTF-8' response.text = json.dumps({'code': 200, 'user': serialize_user}) return response
# NOTE(review): fragment — the enclosing view (its `def` and the opening
# `try:` around form.validate) is not visible in this chunk. What is shown:
# deform validation with XHR-aware error re-render, content creation under
# the parent AgendaItem with a slug name, then an X-Relocate/redirect on
# success or an inline form render otherwise. Left byte-identical.
#appstruct is deforms convention. It will be the submitted data in a dict. appstruct = form.validate(controls) except ValidationFailure, e: self.response['form'] = e.render() if self.request.is_xhr: return Response(render("templates/ajax_edit.pt", self.response, request = self.request)) return self.response kwargs = {} kwargs['text'] = appstruct['text'] if self.api.userid: kwargs['creators'] = [self.api.userid] ai = find_interface(self.context, IAgendaItem) obj = createContent(content_type, **kwargs) name = generate_slug(ai, obj.title) ai[name] = obj #Success, redirect url = self.request.resource_url(ai, anchor=obj.uid) if self.request.is_xhr: return Response(headers = [('X-Relocate', url)]) return HTTPFound(location=url) self.response['form'] = form.render() if self.request.is_xhr: return Response(render('templates/snippets/inline_form.pt', self.response, request=self.request)) return self.response
def metrics(request):
    """Expose Prometheus metrics aggregated across worker processes."""
    log.info('Serving metrics')
    collector_registry = CollectorRegistry()
    MultiProcessCollector(collector_registry)
    payload = generate_latest(collector_registry)
    return Response(payload, content_type='text/plain')
def logout_user(request):
    """Expire the auth session (forget headers) and answer with a bare 200."""
    return Response(status_code=200, headers=forget(request))
def hello_world(request):
    """Health-check style view; logs the hit and bumps the hello counter."""
    log.info('Serving hello world')
    app_hello_count.inc()
    return Response('Hello World!')
# Collection GET over a feature resource. Builds a feature query from the
# request: optional srs reprojection, limit/offset paging, fld_* attribute
# filters (a `__op` suffix selects the operator, default 'eq'), comma-listed
# order_by (leading '-'/'+'/%2B selects direction), an 'intersects' WKT
# extent filter, and a 'fields' projection restricted to real field keynames.
# Each resulting feature is serialised and the list returned as GeoJSON.
def cget(resource, request): request.resource_permission(PERM_READ) geom_format = request.GET.get("geom_format") srs = request.GET.get("srs") query = resource.feature_query() if srs is not None: query.srs(SRS.filter_by(id=int(srs)).one()) # Paging limit = request.GET.get('limit') offset = request.GET.get('offset', 0) if limit is not None: query.limit(int(limit), int(offset)) # Filtering by attributes filter_ = [] keys = [fld.keyname for fld in resource.fields] for key in filter(lambda k: k.startswith('fld_'), request.GET.keys()): try: fld_key, operator = key.rsplit('__', 1) except ValueError: fld_key, operator = (key, 'eq') if fld_key in ['fld_%s' % k for k in keys]: filter_.append((re.sub('^fld_', '', fld_key), operator, request.GET[key])) if filter_: query.filter(*filter_) # Ordering order_by = request.GET.get('order_by') order_by_ = [] if order_by is not None: for order_def in list(order_by.split(',')): order, colname = re.match(r'^(\-|\+|%2B)?(.*)$', order_def).groups() if colname is not None: order = ['asc', 'desc'][order == '-'] order_by_.append([order, colname]) if order_by_: query.order_by(*order_by_) # Filtering by extent wkt = request.GET.get('intersects') if wkt is not None: geom = geom_from_wkt(wkt, srid=resource.srs.id) query.intersects(geom) # Selected fields fields = request.GET.get('fields') if fields is not None: field_list = fields.split(',') fields = [key for key in keys if key in field_list] if fields: query.fields(*fields) query.geom() result = [ serialize(feature, fields, geom_format=geom_format) for feature in query() ] return Response(json.dumps(result, cls=geojson.Encoder), content_type='application/json', charset='utf-8')
# Mapbox Vector Tile endpoint. Renders the comma-separated resource ids into
# one z/x/y .pbf tile via an in-memory OGR MVT datasource: computes the web
# mercator tile extent with padding (5% default), queries each resource's
# features intersecting that bbox (with optional clip-by-box and
# zoom-dependent simplification when the query supports them), flushes the
# dataset, then reads the produced tile through the GDAL VSI layer. Returns
# 204 when no tile file was produced; always unlinks the VSI buffer.
def mvt(request): z = int(request.GET["z"]) x = int(request.GET["x"]) y = int(request.GET["y"]) extent = int(request.GET.get('extent', 4096)) simplification = float(request.GET.get("simplification", extent / 512)) resids = map( int, filter(None, request.GET["resource"].split(",")), ) # web mercator merc = SRS.filter_by(id=3857).one() minx, miny, maxx, maxy = merc.tile_extent((z, x, y)) # 5% padding by default padding = float(request.GET.get("padding", 0.05)) bbox = ( minx - (maxx - minx) * padding, miny - (maxy - miny) * padding, maxx + (maxx - minx) * padding, maxy + (maxy - miny) * padding, ) bbox = box(*bbox, srid=merc.id) options = [ "FORMAT=DIRECTORY", "TILE_EXTENSION=pbf", "MINZOOM=%d" % z, "MAXZOOM=%d" % z, "EXTENT=%d" % extent, "COMPRESS=NO", ] ds = _ogr_ds(b"MVT", options) vsibuf = ds.GetName() for resid in resids: obj = Resource.filter_by(id=resid).one() request.resource_permission(PERM_READ, obj) query = obj.feature_query() query.intersects(bbox) query.geom() if IFeatureQueryClipByBox.providedBy(query): query.clip_by_box(bbox) if IFeatureQuerySimplify.providedBy(query): tolerance = ((obj.srs.maxx - obj.srs.minx) / (1 << z)) / extent query.simplify(tolerance * simplification) _ogr_layer_from_features(obj, query(), name=b"ngw:%d" % obj.id, ds=ds) # flush changes ds = None filepath = os.path.join("%s" % vsibuf, "%d" % z, "%d" % x, "%d.pbf" % y) try: f = gdal.VSIFOpenL(b"%s" % (filepath, ), b"rb") if f is not None: # SEEK_END = 2 gdal.VSIFSeekL(f, 0, 2) size = gdal.VSIFTellL(f) # SEEK_SET = 0 gdal.VSIFSeekL(f, 0, 0) content = gdal.VSIFReadL(1, size, f) gdal.VSIFCloseL(f) return Response( content, content_type="application/vnd.mapbox-vector-tile", ) else: return HTTPNoContent() finally: gdal.Unlink(b"%s" % (vsibuf, ))
def view_with_params(request: Request) -> Response:
    """Echo the matchdict ids and raw query dict for inspection."""
    primary_key = int(request.matchdict[ViewParams.PK])
    patient = int(request.matchdict[ViewParams.PATIENT_ID])
    query = request.GET
    body = "View for pk={}, patient_id={}, request.GET={}".format(
        primary_key, patient, repr(query))
    return Response(body)
def discussions(self):
    """XHR: render the discussions listing component inline.

    Otherwise redirect to the context URL anchored at #discussions,
    preserving the current query string.
    """
    request = self.request
    if request.is_xhr:
        component = self.api.render_single_view_component(
            self.context, request, 'discussions', 'listing', api=self.api)
        return Response(component)
    url = request.resource_url(
        self.context, query=request.GET, anchor="discussions")
    return HTTPFound(location=url)
# Bulk upload of MS-run metadata, posted as a Python-literal list in
# request.params["ms_runs"]. First pass rejects the whole upload with 500 if
# any filename already has a source attached (metadata present). Second pass
# updates runs that are registered without metadata: fills date/share/
# comment/masses/volume/antibody fields when non-blank, then propagates the
# new source id to the run's SpectrumHit and PeptideRun rows, rolling back
# and answering 500 on failure; commits via the transaction manager.
# NOTE(review): runs not already registered are rejected by design (see the
# final else branch). Bare `except:` blocks are kept as-is.
def upload_metadata_ms_run_post(request): ms_run_upload = ast.literal_eval(request.params["ms_runs"]) # Check if MS run is already in database with METADATA for ms_run in ms_run_upload: try: ms_runs = DBSession.query(MsRun.ms_run_id).filter( MsRun.filename == ms_run['filename']).filter( MsRun.source_source_id != None).all() except DBAPIError: return Response(conn_err_msg, content_type='text/plain', status_int=500) if len(ms_runs) > 0: # if in MS run with Metadata in DB, abort whole Upload return Response( "The source " + ms_run['source'] + " is already in the Database. Aborted whole upload!", content_type='text/plain', status_int=500) # upload the each MS run for ms_run in ms_run_upload: source_id = ms_run["source_id"] # update if already in DB (without metadata included) try: ms_run_update = DBSession.query(MsRun).filter( MsRun.filename == ms_run["filename"]).filter( MsRun.source_source_id == None).all() except: return Response(conn_err_msg + " \n MsRun insert failed", content_type='text/plain', status_int=500) if len(ms_run_update) > 0: ms_run_update[0].source_source_id = source_id if ms_run['date'] != "": ms_run_update[0].ms_run_date = ms_run['date'] if ms_run['used_share'] != "" and ms_run['used_share'] != "None": ms_run_update[0].used_share = ms_run['used_share'] if ms_run['comment'] != "": ms_run_update[0].comment = ms_run['comment'] if ms_run['sample_mass'] != "" and ms_run['sample_mass'] != "None": ms_run_update[0].sample_mass = ms_run['sample_mass'] if ms_run['sample_volume'] != "" and ms_run[ 'sample_volume'] != "None": ms_run_update[0].sample_volume = ms_run['sample_volume'] ms_run_update[0].antibody_set = ms_run['antibody_set'].replace( " ", "") if ms_run['antibody_mass'] != "" and ms_run[ 'antibody_mass'] != "None": ms_run_update[0].antibody_mass = ms_run['antibody_mass'] # Updating all crossreferences for the source_id try: spectrum_hits = DBSession.query(SpectrumHit) \ .filter(SpectrumHit.ms_run_ms_run_id == ms_run_update[0].ms_run_id).update( 
{'source_source_id': ms_run['source_source_id']}) except: log_writer( "ms_run_upload_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "SpectrumHit update failed!") DBSession.rollback() return Response("SpectrumHit update failed!", content_type='text/plain', status_int=500) try: peptide_runs = DBSession.query(PeptideRun) \ .filter(PeptideRun.ms_run_ms_run_id == ms_run_update[0].ms_run_id).update( {'source_source_id': ms_run['source_source_id']}) except: log_writer("ms_run_update_complete", "Peptide Run update failed!") DBSession.rollback() return Response("Peptide Run update failed!", content_type='text/plain', status_int=500) transaction.commit() DBSession.flush() log_writer("ms_run_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + ms_run) log_writer("ms_run_metadata", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + ms_run) else: log_writer( "ms_run_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "MsRun insert failed! Only already registered MS Runs can be uploaded." ) DBSession.rollback() return Response( conn_err_msg + " \n MsRun insert failed! Only already registered MS Runs can be uploaded.", content_type='text/plain', status_int=500) return dict()
# View class for agenda items. Contains:
#  - agenda_item_view: main overview; reads configurable left/right widget
#    column choices off the meeting, renders the column component, and adds
#    next/previous agenda-item navigation.
#  - next_ai / previous_ai: catalog queries for the adjacent agenda item
#    within the same workflow state, ordered by the 'order' field.
#  - process_inline_add_form: GET builds an inline deform add form for the
#    requested content_type (after an explicit add-permission check); POST
#    validates it, creates the content with a slug name, and answers with a
#    javascript snippet that reloads the proposals/discussions listing, or
#    re-renders the form with a flash message on validation failure.
# NOTE(review): `except ValidationFailure, e` is Python 2 syntax; code kept
# byte-identical because the control flow is order-sensitive.
class AgendaItemView(BaseView): """ View for agenda items. """ @view_config(context=IAgendaItem, renderer="templates/agenda_item.pt", permission=VIEW) def agenda_item_view(self): """ Main overview of Agenda item. """ _marker = object() rwidget = self.api.meeting.get_field_value('ai_right_widget', _marker) if rwidget is _marker: rwidget = 'discussions' colkwargs = dict(group_name = 'ai_widgets', col_one = self.api.meeting.get_field_value('ai_left_widget', 'proposals'), col_two = rwidget, ) self.response['ai_columns'] = self.api.render_single_view_component(self.context, self.request, 'main', 'columns', **colkwargs) self.response['next_ai'] = self.next_ai() self.response['previous_ai'] = self.previous_ai() if self.request.is_xhr: Response(render('templates/ajax_tag_filter.pt', self.response, request=self.request)) return self.response def next_ai(self): """ Return next qgenda item within this workflow category, if there is one. """ query = u"path == '%s' and content_type == 'AgendaItem'" % resource_path(self.context.__parent__) query += u" and order > %s" % self.context.get_field_value('order') query += u" and workflow_state == '%s'" % self.context.get_workflow_state() #Note that docids might be a generator here count, docids = self.api.query_catalog(query , limit = 1, sort_index='order') if not count: return return self.api.resolve_catalog_docid(tuple(docids)[0]) def previous_ai(self): """ Return previous agenda item within this workflow category, if there is one. 
""" query = u"path == '%s' and content_type == 'AgendaItem'" % resource_path(self.context.__parent__) query += u" and order < %s" % self.context.get_field_value('order') query += u" and workflow_state == '%s'" % self.context.get_workflow_state() #Note that docids might be a generator here count, docids = self.api.query_catalog(query , limit = 1, sort_index='order', reverse = True) if not count: return return self.api.resolve_catalog_docid(tuple(docids)[0]) @view_config(context=IAgendaItem, name='_inline_form', permission=VIEW) def process_inline_add_form(self): """ Inline add form. Note the somewhat odd permissions on the view configuration. The actual permission check for each content type is preformed later. """ content_type = self.request.GET['content_type'] add_permission = self.api.content_types_add_perm(content_type) if not has_permission(add_permission, self.context, self.request): raise HTTPForbidden("You're not allowed to add '%s' in this context." % content_type) bind_data = dict(context = self.context, request = self.request, api = self.api) form = inline_add_form(self.api, content_type, bind_data) post = self.request.POST if 'add' in post: controls = post.items() try: #appstruct is deforms convention. It will be the submitted data in a dict. 
appstruct = form.validate(controls) except ValidationFailure, e: msg = self.api.translate(_(u"There were errors so your post hasn't been submitted yet.")) html = u""" <script type="text/javascript"> flash_message("%s", 'error', true, 3, true); </script> """ % msg html += e.render() return Response(html) kwargs = {} kwargs.update(appstruct) if self.api.userid: kwargs['creators'] = [self.api.userid] obj = createContent(content_type, **kwargs) name = generate_slug(self.context, obj.title) self.context[name] = obj #Prep js response tag = self.request.GET.get('tag', '') url = self.request.resource_url(self.context, query = {'tag': tag}) if content_type == 'Proposal': area = 'proposals' else: area = 'discussions' txt = self.api.translate(_(u"Posting...")) response = '<div><img src="/static/images/spinner.gif" />%s</div>' % txt response += '<script type="text/javascript">' response += "reload_ai_listings('%s', ['%s']);" % (url, area) response += "mark_as_read();" response += '</script>' return Response(response) #Note! Registration of form resources has to be in the view that has the javascript #that will include this! self.response['form'] = form.render() self.response['user_image_tag'] = self.api.user_profile.get_image_tag(request = self.request) self.response['content_type'] = content_type return Response(render('templates/snippets/inline_form.pt', self.response, request=self.request))
# Update a single source's metadata from the Python-literal dict posted in
# params["sources"]. Blank fields keep their stored values; sample_id is
# rebuilt from the (possibly updated) columns; a non-empty 'typing' string
# clears all existing HLA mappings and rebuilds them, creating unknown HLA
# sub-types on the fly. Commits via the transaction manager, rolling back
# and answering 500 on failure, then redirects back to the edit page.
# NOTE(review): `source_update[0].orgnaism = source['organism']` looks like
# a typo for `.organism` — confirm against the Source model before fixing,
# since a misspelled attribute would silently set a new instance attribute.
def update_metadata_source_post(request): source = ast.literal_eval(request.params["sources"]) try: log_writer("source_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + source) source_update = DBSession.query(Source).join(t_hla_map).join( HlaType).filter(Source.source_id == source["source_id"]).all() except: log_writer( "source_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + " Source update failed!") return Response(conn_err_msg + " \n Source update failed", content_type='text/plain', status_int=500) if len(source_update) > 0: if source['patient_id'] != "": source_update[0].patient_id = source['patient_id'] if source['organ'] != "": source_update[0].organ = source['organ'] if source['organism'] != "": source_update[0].orgnaism = source['organism'] if source['comment'] != "": source_update[0].comment = source['comment'] if source['histology'] != "": source_update[0].histology = source['histology'] if source['dignity'] != "": source_update[0].dignity = source['dignity'] if source['celltype'] != "": source_update[0].celltype = source['celltype'] if source['person'] != "": source_update[0].person = source['person'] if source['location'] != "": source_update[0].location = source['location'] if source['metastatis'] != "": source_update[0].metastatis = source['metastatis'] if source['treatment'] != "": source_update[0].treatment = source['treatment'] if source['prep_date'] != "": source_update[0].prep_date = source['prep_date'] source_update[0].sample_id = source_update[0].patient_id + "_" + source_update[0].organ + "_" + source_update[0].dignity\ + "_" + source_update[0].histology + "_" + \ source_update[0].celltype + "_" + source_update[0].location + "_" + source_update[0].treatment\ + "_" + source_update[0].prep_date if source['typing'] != "": # remove all mappings source_update[0].hlatypes[:] = [] # create all hla links hla_split = source['typing'].split(";") for hla_typing in hla_split: hla_typing_split = 
hla_typing.strip().split(":") for i in range(0, len(hla_typing_split)): sub_type = ":".join(hla_typing_split[0:i + 1]) try: query = DBSession.query(HlaType.hla_types_id).filter( HlaType.hla_string == sub_type) hla_types_id = query.all() except DBAPIError: return Response(conn_err_msg, content_type='text/plain', status_int=500) # unknown hla_lookup if len(hla_types_id) == 0: try: hla_type = HlaType( hla_string=sub_type, digits=hla_digits_extractor(sub_type)) DBSession.add(hla_type) DBSession.flush() hla_types_id = hla_type.hla_types_id except DBAPIError: return Response(conn_err_msg + "\n Insert into Hla-Types failed!", content_type='text/plain', status_int=500) else: hla_types_id = hla_types_id[0] hla_type = query = DBSession.query(HlaType).filter( HlaType.hla_string == sub_type).all()[0] try: # add the hla type source_update[0].hlatypes.append(hla_type) except DBAPIError: return Response(conn_err_msg + "\n Insert into Hla-Map failed!", content_type='text/plain', status_int=500) try: transaction.commit() DBSession.flush() log_writer("source_update", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + source) except: log_writer( "source_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "Source update failed!") DBSession.rollback() return Response("Source update failed!", content_type='text/plain', status_int=500) return HTTPFound(location="/update_metadata_source?id=%s" % source["source_id"])
def forbidden_view(request):
    """403 handler with a plain 'Forbidden' body."""
    resp = Response(body='Forbidden', status='403 Forbidden')
    return resp
def update_metadata_msrun_post(request):
    """Apply edited MS-run metadata posted by the update form.

    Reads a dict literal from ``request.params["ms_runs"]``, updates the
    ``MsRun`` row matched by filename (an empty string means "leave
    unchanged") and, when the source changed, repoints the dependent
    ``SpectrumHit`` and ``PeptideRun`` rows before committing.

    Returns ``HTTPFound`` back to the edit page on success, or a 500
    plain-text ``Response`` on any database failure.
    """
    ms_run = ast.literal_eval(request.params["ms_runs"])
    try:
        # BUG FIX: `ms_run` is a dict; concatenating it to a str raised
        # TypeError, which the broad except reported as a DB failure.
        log_writer("ms_run_update_complete",
                   strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(ms_run))
        ms_run_update = DBSession.query(MsRun).filter(
            MsRun.filename == ms_run["filename"]).all()
    except Exception:
        log_writer(
            "ms_run_update_complete",
            strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
            "MS Run update failed!")
        return Response(conn_err_msg + " \n MsRun insert failed",
                        content_type='text/plain', status_int=500)
    if len(ms_run_update) > 0:
        target = ms_run_update[0]
        # Copy every non-empty field from the submitted dict.
        if ms_run['ms_run_date'] != "":
            target.ms_run_date = ms_run['ms_run_date']
        if ms_run['used_share'] != "":
            target.used_share = ms_run['used_share']
        if ms_run['comment'] != "":
            target.comment = ms_run['comment']
        if ms_run['sample_mass'] != "":
            target.sample_mass = ms_run['sample_mass']
        if ms_run['antibody_set'] != "":
            target.antibody_set = ms_run['antibody_set']
        if ms_run['antibody_mass'] != "":
            target.antibody_mass = ms_run['antibody_mass']
        if ms_run['sample_volume'] != "":
            target.sample_volume = ms_run['sample_volume']
        if ms_run['source_source_id'] != "":
            # Repoint dependent rows at the new source first so they stay
            # consistent with the run itself.
            try:
                DBSession.query(SpectrumHit)\
                    .filter(SpectrumHit.source_source_id ==
                            target.source_source_id)\
                    .update({'source_source_id': ms_run['source_source_id']})
            except Exception:
                log_writer(
                    "ms_run_update_complete",
                    strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                    "SpectrumHit update failed!")
                DBSession.rollback()
                return Response("SpectrumHit update failed!",
                                content_type='text/plain', status_int=500)
            try:
                DBSession.query(PeptideRun)\
                    .filter(PeptideRun.source_source_id ==
                            target.source_source_id)\
                    .update({'source_source_id': ms_run['source_source_id']})
            except Exception:
                log_writer(
                    "ms_run_update_complete",
                    strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                    "Peptide Run update failed!")
                DBSession.rollback()
                return Response("Peptide Run update failed!",
                                content_type='text/plain', status_int=500)
            target.source_source_id = ms_run['source_source_id']
    try:
        transaction.commit()
        DBSession.flush()
        # BUG FIX: str(ms_run) — same dict-concatenation TypeError as above;
        # it forced a rollback path after an otherwise successful update.
        log_writer("ms_run_update",
                   strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(ms_run))
    except Exception:
        log_writer(
            "ms_run_update_complete",
            strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
            "MS Run update failed!")
        DBSession.rollback()
        return Response("MS Run update failed!", content_type='text/plain',
                        status_int=500)
    return HTTPFound(location="/update_metadata_ms_run?filename=%s"
                     % ms_run["filename"])
"This Linkedin user is already registered. Please click the Login with Linkedin button." ), "generic_messages") elif e.message == 'USER_SLUG_TAKEN': request.session.flash( GenericErrorMessage( "This username is already registered. Please choose another one." ), "generic_messages") else: request.session.flash(GenericErrorMessage(e.message), "generic_messages") return Response("Resource Found!", 302, headerlist=[('location', request.fwd_url(ctxt))]) else: return Response("Resource Found!", 302, headerlist=[('location', request.root.signup_url('getstarted'))]) def login_failure(exc, request): ctxt = request.context.__parent__ request.session.flash(GenericErrorMessage(exc.message), "generic_messages") return Response("Resource Found!", 302, headerlist=[('location', request.fwd_url(ctxt))]) #=================================================== ROLE SELECT ======================================================= def role_select_on_success(cls, request, values):
def legend(request):
    """Render the resource's legend and serve it as a PNG image."""
    # Authorization first: raises if the caller lacks read permission.
    request.resource_permission(PD_READ)
    legend_png = request.context.render_legend()
    return Response(body_file=legend_png, content_type='image/png')
def login_failure(exc, request):
    """Flash the login error and redirect back to the parent context."""
    parent_ctx = request.context.__parent__
    flash_msg = GenericErrorMessage(exc.message)
    request.session.flash(flash_msg, "generic_messages")
    redirect_headers = [('location', request.fwd_url(parent_ctx))]
    return Response("Resource Found!", 302, headerlist=redirect_headers)
def xml_response(body):
    """Wrap *body* in an HTTP response declared as UTF-8 XML."""
    return Response(content_type='text/xml', charset='utf-8', body=body)
def hora_local(request):
    """Return the server's local time, formatted HH:MM:SS, as the body."""
    now_text = strftime('%H:%M:%S')
    return Response(now_text)
def hello_world(request):
    """Log the hit to stdout and answer with a static HTML greeting."""
    print('Incoming request')
    html_body = '<body><h1>Hello World!)!)</h1></body>'
    return Response(html_body)
def hello_world(request):
    """Minimal greeting view."""
    greeting = 'Hello World!'
    return Response(greeting)
def host_1_view_delete(self, host_name):
    """Delete a host.

    :param host_name: name of the host record to remove.
    """
    host_dao = self.dao.host_dao
    host_dao.delete_host(name=host_name)
    return Response(status='200 Ok', content_type='application/json')
def my_view(request):
    """Search licenses by waste name/code/class, city and company name.

    All form parameters are optional; when waste, city and code are all
    empty the search short-circuits to an empty result list.  Returns the
    dict consumed by the search-results template.
    """
    complist = []
    log = 0
    # Missing form fields simply mean "no filter".  (The original wrapped
    # each lookup in try/bare-except; params.get is equivalent and explicit.
    # A dead `message` accumulator — returned as '' regardless — and a dead
    # `l = 1 / if l == 0` branch were removed, along with a debug print.)
    s_waste = request.params.get('Warse', '')  # NOTE(review): 'Warse' looks
    # like a typo for 'Waste' but must match the form field name — confirm.
    s_comp = request.params.get('Company', '').capitalize()
    s_code = request.params.get('Code', '').replace(' ', '')
    s_class = request.params.get('Class', 0)
    s_city = request.params.get('City', '').capitalize()

    if s_waste == '' and s_city == '' and s_code == '':
        # Nothing to search for.
        return {'list': complist, 'project': 'Licenses', 'message': '',
                'log': log}

    DBSession = Session(bind=engine)

    # Case-insensitive waste-name match plus code substring match.
    # (s_waste.lower() replaces func.lower(s_waste): lowercasing a Python
    # literal on the Python side is equivalent for the LIKE pattern.)
    wastes = DBSession.query(Waste).filter(
        func.lower(Waste.name).like("%" + s_waste.lower() + "%"))
    wastes = wastes.filter(Waste.code.like("%" + s_code + "%"))
    if s_class == '':
        wastes = wastes.all()
    else:
        # NOTE(review): when 'Class' is absent this filters danger == 0 —
        # confirm that is the intended default.
        wastes = wastes.filter(Waste.danger == s_class).all()

    cities = DBSession.query(City).filter(
        City.name.like("%" + s_city + "%")).all()

    companies = []
    for city in cities:
        # BUG FIX: the original rebuilt the query from scratch on the second
        # line, discarding the city filter, so every matched city contributed
        # the full name-matched company list (wrong cities + duplicates).
        companis = DBSession.query(Company).filter(Company.city == city.id)
        companis = companis.filter(
            Company.name.like("%" + s_comp + "%")).all()
        companies.extend(companis)

    # Optional license-kind checkboxes "t1".."t7" (value "1" when ticked).
    t1 = request.params.get('t1', 0)
    t2 = request.params.get('t2', 0)
    t3 = request.params.get('t3', 0)
    t4 = request.params.get('t4', 0)
    t5 = request.params.get('t5', 0)
    t6 = request.params.get('t6', 0)
    t7 = request.params.get('t7', 0)

    comps = []
    for c in companies:
        for w in wastes:
            lic = DBSession.query(License).filter(License.waste == w.id)
            lic = lic.filter(License.company == c.id)
            # Each ticked checkbox requires the matching license column
            # to be non-NULL.
            if t1 == "1":
                lic = lic.filter(License.collection != None)
            if t2 == "1":
                lic = lic.filter(License.transportation != None)
            if t3 == "1":
                lic = lic.filter(License.defusing != None)
            if t4 == "1":
                lic = lic.filter(License.using != None)
            if t5 == "1":
                lic = lic.filter(License.treatment != None)
            if t6 == "1":
                lic = lic.filter(License.placement != None) if False else lic.filter(License.recovery != None)
            if t7 == "1":
                lic = lic.filter(License.placement != None)
            lic = lic.first()
            if lic is not None:
                comps.append(lic)

    for comp in comps:
        company = DBSession.query(Company).filter(
            comp.company == Company.id).first()
        city = DBSession.query(City).filter(
            company.city == City.id).first()
        w = DBSession.query(Waste).filter(comp.waste == Waste.id).first()
        complist.append({
            'waste': w.name, 'code': w.code, 'class': w.danger,
            'c1': comp.collection, 'c2': comp.transportation,
            'c3': comp.defusing, 'c4': comp.using, 'c5': comp.treatment,
            'c6': comp.recovery, 'c7': comp.placement,
            'c1f': comp.collectionf, 'c2f': comp.transportationf,
            'c3f': comp.defusingf, 'c4f': comp.usingf,
            'c5f': comp.treatmentf, 'c6f': comp.recoveryf,
            'c7f': comp.placementf,
            'company': company.name, 'city': city.name,
            'other': comp.other, 'id': comp.id,
        })

    return {'list': complist, 'project': 'Licenses', 'log': log,
            'message': '', 'b1': s_waste, 'b2': s_code, 'b3': s_class,
            'b4': s_city, 'comp1': s_comp, 'tt1': t1, 'tt2': t2, 'tt3': t3,
            'tt4': t4, 'tt5': t5, 'tt6': t6, 'tt7': t7}