def get_profile_required(request):
    """Fetch the authenticated user's Google+ profile and relay it verbatim.

    Uses the OAuth-wrapped HTTP client attached to the request; the raw
    response body is returned unmodified.
    """
    profile_url = 'https://www.googleapis.com/plus/v1/people/me'
    headers, body = request.oauth.http.request(profile_url)
    return http.HttpResponse(body)
def get_json_response(self, content, **httpresponse_kwargs):
    """Wrap *content* in an `HttpResponse` tagged as JSON."""
    response = http.HttpResponse(
        content,
        content_type='application/json',
        **httpresponse_kwargs
    )
    return response
def model_wrapper(request, model_name):
    """Dispatch an API request to the middleware model named *model_name*.

    Looks the model up in MODELS_MODULES, enforces per-model permissions
    and the PII whitelist, validates parameters via FormWrapper, and
    returns either an HttpResponse (errors, binary payloads) or a
    ``(result, headers)`` tuple for the caller to serialize.

    Raises Http404 for unknown/blacklisted models and APIWhitelistError
    when a model defines no API_WHITELIST at all.
    """
    if model_name in BLACKLIST:
        raise http.Http404("Don't know what you're talking about!")
    # First module in MODELS_MODULES that has the attribute wins.
    for source in MODELS_MODULES:
        try:
            model = getattr(source, model_name)
            break
        except AttributeError:
            pass
    else:
        raise http.Http404('no service called `%s`' % model_name)
    if not is_valid_model_class(model):
        raise http.Http404('no service called `%s`' % model_name)
    # A single permission string is normalized to a one-element list.
    required_permissions = getattr(model(), 'API_REQUIRED_PERMISSIONS', None)
    if isinstance(required_permissions, basestring):
        required_permissions = [required_permissions]
    if (
        required_permissions and
        (
            not request.user.is_active or
            not has_permissions(request.user, required_permissions)
        )
    ):
        # Resolve codenames to human-readable permission names for the
        # error message; fall back to the codename when unknown.
        permission_names = []
        for permission in required_permissions:
            codename = permission.split('.', 1)[1]
            try:
                permission_names.append(
                    Permission.objects.get(
                        codename=codename
                    ).name
                )
            except Permission.DoesNotExist:
                permission_names.append(codename)
        # you're not allowed to use this model
        return http.JsonResponse({
            'error': "Use of this endpoint requires the '%s' permission" % (
                ', '.join(permission_names),
            )
        }, status=403)
    # It being set to None means it's been deliberately disabled;
    # absent entirely (False here) is a programming error.
    if getattr(model, 'API_WHITELIST', False) is False:
        raise APIWhitelistError('No API_WHITELIST defined for %r' % model)
    instance = model()
    # Any additional headers we intend to set on the response
    headers = {}
    # Certain models need to know who the user is to be able to
    # internally use that to determine its output.
    instance.api_user = request.user
    if request.method == 'POST':
        function = instance.post
    else:
        function = instance.get
    if not function:
        return http.HttpResponseNotAllowed([request.method])
    # assume first that it won't need a binary response
    binary_response = False
    request_data = request.method == 'GET' and request.GET or request.POST
    form = FormWrapper(model, request_data)
    if form.is_valid():
        try:
            result = function(**form.cleaned_data)
        except models.BadStatusCodeError as e:
            error_code = e.status
            message = e.message
            if error_code >= 400 and error_code < 500:
                # if the error message looks like JSON,
                # carry that forward in the response
                try:
                    json.loads(message)
                    return http.HttpResponse(
                        message,
                        status=error_code,
                        content_type='application/json; charset=UTF-8'
                    )
                except ValueError:
                    # The error from the middleware was not a JSON error.
                    # Not much more we can do.
                    reason = REASON_PHRASES.get(
                        error_code,
                        'UNKNOWN STATUS CODE'
                    )
                    return http.HttpResponse(
                        reason,
                        status=error_code,
                        content_type='text/plain; charset=UTF-8'
                    )
            if error_code >= 500:
                # special case: mask upstream 5xx as 424 Failed Dependency
                reason = REASON_PHRASES[424]
                return http.HttpResponse(
                    reason,
                    status=424,
                    content_type='text/plain'
                )
            raise
        except ValueError as e:
            # NOTE(review): `'...' in e` tests membership against the
            # exception object itself (Py2 BaseException.__getitem__),
            # which matches whole args rather than substrings — confirm
            # whether `in str(e)` was intended.
            if (
                # built in json module ValueError
                'No JSON object could be decoded' in e or
                # ujson module ValueError
                'Expected object or value' in e
            ):
                return http.HttpResponseBadRequest(
                    json.dumps({'error': 'Not a valid JSON response'}),
                    content_type='application/json; charset=UTF-8'
                )
            raise
        except NOT_FOUND_EXCEPTIONS as exception:
            return http.HttpResponseNotFound(
                json.dumps({'error': str(exception)}),
                content_type='application/json; charset=UTF-8'
            )
        except BAD_REQUEST_EXCEPTIONS as exception:
            return http.HttpResponseBadRequest(
                json.dumps({'error': str(exception)}),
                content_type='application/json; charset=UTF-8'
            )
        # Some models allows to return a binary reponse. It does so based on
        # the models `BINARY_RESPONSE` dict in which all keys and values
        # need to be in the valid query. For example, if the query is
        # `?foo=bar&other=thing&bar=baz` and the `BINARY_RESPONSE` dict is
        # exactly: {'foo': 'bar', 'bar': 'baz'} it will return a binary
        # response with content type `application/octet-stream`.
        for key, value in model.API_BINARY_RESPONSE.items():
            if form.cleaned_data.get(key) == value:
                binary_response = True
            else:
                binary_response = False
                break
        if binary_response:
            # if you don't have all required permissions, you'll get a 403
            required_permissions = model.API_BINARY_PERMISSIONS
            if isinstance(required_permissions, basestring):
                required_permissions = [required_permissions]
            if (
                required_permissions and
                not has_permissions(request.user, required_permissions)
            ):
                permission_names = []
                for permission in required_permissions:
                    codename = permission.split('.', 1)[1]
                    try:
                        permission_names.append(
                            Permission.objects.get(
                                codename=codename
                            ).name
                        )
                    except Permission.DoesNotExist:
                        permission_names.append(codename)
                # you're not allowed to get the binary response
                return http.HttpResponseForbidden(
                    "Binary response requires the '%s' permission\n" %
                    (', '.join(permission_names))
                )
        elif not request.user.has_perm('crashstats.view_pii'):
            # Non-privileged users get the result scrubbed through the
            # model's whitelist before it leaves the server.
            clean_scrub = getattr(model, 'API_CLEAN_SCRUB', None)
            if isinstance(model.API_WHITELIST, models.Lazy):
                # This is necessary because in Cleaner() we're going to
                # rely on asking `isinstance(whitelist, dict)` and there's
                # no easy or convenient way to be lazy about that.
                model.API_WHITELIST = model.API_WHITELIST.materialize()
            if result and model.API_WHITELIST:
                cleaner = Cleaner(
                    model.API_WHITELIST,
                    clean_scrub=clean_scrub,
                    # if True, uses warnings.warn() to show fields
                    # not whitelisted
                    debug=settings.DEBUG,
                )
                cleaner.start(result)
    else:
        # custom override of the status code
        return {'errors': dict(form.errors)}, 400
    if binary_response:
        assert model.API_BINARY_FILENAME, 'No API_BINARY_FILENAME set on model'
        response = http.HttpResponse(
            result,
            content_type='application/octet-stream'
        )
        filename = model.API_BINARY_FILENAME % form.cleaned_data
        response['Content-Disposition'] = (
            'attachment; filename="%s"' % filename
        )
        return response
    if (
        getattr(model, 'deprecation_warning', False)
    ):
        if isinstance(result, dict):
            result['DEPRECATION_WARNING'] = model.deprecation_warning
        # If you return a tuple of two dicts, the second one becomes
        # the extra headers.
        # return result, {
        headers['DEPRECATION-WARNING'] = (
            model.deprecation_warning.replace('\n', ' ')
        )
    if model.cache_seconds:
        # We can set a Cache-Control header.
        # We say 'private' because the content can depend on the user
        # and we don't want the response to be collected in HTTP proxies
        # by mistake.
        headers['Cache-Control'] = 'private, max-age={}'.format(
            model.cache_seconds,
        )
    return result, headers
def contribute(request, addon):
    """Start a PayPal contribution for *addon*.

    Works out the amount (suggested vs. one-time), requests a PayPal
    paykey, records a Contribution row when one is obtained, and replies
    with JSON (for Ajax callers) or a redirect into the PayPal flow.
    """
    webapp = addon.is_webapp()
    contrib_type = request.POST.get('type', 'suggested')
    is_suggested = contrib_type == 'suggested'
    source = request.POST.get('source', '')
    comment = request.POST.get('comment', '')
    # Unknown types fall through to '' and pick up the default below.
    amount = {
        'suggested': addon.suggested_amount,
        'onetime': request.POST.get('onetime-amount', '')
    }.get(contrib_type, '')
    if not amount:
        amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION
    contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest()
    if addon.charity:
        # TODO(andym): Figure out how to get this in the addon authors
        # locale, rather than the contributors locale.
        name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name),
                           addon.charity.paypal)
    else:
        name, paypal_id = addon.name, addon.paypal_id
    # l10n: {0} is the addon name
    contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name))
    preapproval = None
    if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user:
        preapproval = request.amo_user.get_preapproval()
    paykey, error, status = '', '', ''
    try:
        paykey, status = paypal.get_paykey(
            dict(amount=amount, email=paypal_id,
                 ip=request.META.get('REMOTE_ADDR'), memo=contrib_for,
                 pattern='%s.paypal' % ('apps' if webapp else 'addons'),
                 preapproval=preapproval, slug=addon.slug,
                 uuid=contribution_uuid))
    except paypal.PaypalError as error:
        # Log the failure (CEF + app log) but keep going: the response
        # below reports the error to the client.
        paypal.paypal_log_cef(request, addon, contribution_uuid,
                              'PayKey Failure', 'PAYKEYFAIL',
                              'There was an error getting the paykey')
        log.error('Error getting paykey, contribution for addon: %s'
                  % addon.pk, exc_info=True)
    if paykey:
        contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id,
                               amount=amount, source=source,
                               source_locale=request.LANG,
                               annoying=addon.annoying,
                               uuid=str(contribution_uuid),
                               is_suggested=is_suggested,
                               suggested_amount=addon.suggested_amount,
                               comment=comment, paykey=paykey)
        contrib.save()
    url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)
    if request.GET.get('result_type') == 'json' or request.is_ajax():
        # If there was an error getting the paykey, then JSON will
        # not have a paykey and the JS can cope appropriately.
        return http.HttpResponse(json.dumps({
            'url': url,
            'paykey': paykey,
            'error': str(error),
            'status': status
        }), content_type='application/json')
    return http.HttpResponseRedirect(url)
def post(self, request):
    """Parse the JSON request body, run the way-finding call, and
    return its result rendered back as JSON."""
    payload = parsers.JSONParser().parse(request)
    result = api.func.findWay(payload)
    rendered = renderers.JSONRenderer().render(result)
    return http.HttpResponse(rendered)
def get_response(self):
    """Return a new, empty HttpResponse."""
    response = http.HttpResponse()
    return response
def load_external_source(request, source_name):
    """Load an external linked-data source's CSV export into the database.

    Supported *source_name* values: 'dbpedia', 'drugbank', 'dailymed',
    'diseasome'. Existing External_resource rows for that source are
    deleted first, then rows are re-created from CSV files found under
    CONFIG['SOURCES_FILE_LOC']. Returns an HttpResponse whose body is a
    JSON-ish status string.

    NOTE(review): Python 2 idioms throughout (`file()`, `.next()`,
    `unicode()`, bare `except:`); `if not csv_reader:` can never be true
    because the reader object is always truthy — confirm intent.
    """
    ## Loading DBpedia
    if source_name == 'dbpedia':
        # Wipe previously imported dbpedia resources before reloading.
        for m in External_resource.objects.filter(source_name='dbpedia'):
            m.delete()
        ## Loading diseases
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'dbpedia_disease.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        # First row is the header; used only to locate columns.
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            try:
                id_index = row.index('id')
            except:
                return http.HttpResponse("Error finding the right column in the csv file")
            # Iterate data rows until StopIteration ends the loop.
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    resource_url = 'http://dbpedia.org/resource/' + id
                    resource_label = id.replace('_',' ')
                    resource_format = 'RDF_HTML'
                    related_model_name = 'Condition'
                    label = id + ' (dbpedia disease resource)'
                    # Labels longer than the 127-char column are hashed.
                    if len(label)>127:
                        label = hashlib.md5(label).hexdigest()
                    external_resource, created = models.External_resource.objects.get_or_create(
                        label = label,
                        source_id = id,
                        source_label = resource_label,
                        source_name = source_name,
                        source_url = resource_url,
                        source_format = resource_format,
                        related_model_name =related_model_name,
                    )
                except StopIteration:
                    row = None
        ## Loading drugs
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'dbpedia_drugs.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            try:
                id_index = row.index('id')
            except:
                return http.HttpResponse("Error finding the right column in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    resource_url = 'http://dbpedia.org/resource/' + id
                    resource_label = id.replace('_',' ')
                    resource_format = 'RDF_HTML'
                    related_model_name = 'Intervention'
                    label = id + ' (dbpedia drug resource)'
                    if len(label)>127:
                        label = hashlib.md5(label).hexdigest()
                    external_resource, created = models.External_resource.objects.get_or_create(
                        label = label,
                        source_label = resource_label,
                        source_id = id,
                        source_name = source_name,
                        source_url = resource_url,
                        source_format = resource_format,
                        related_model_name =related_model_name,
                    )
                except StopIteration:
                    row = None
        return http.HttpResponse("{'status':'OK'}")
    ## Loading Drugbank
    elif source_name == 'drugbank':
        for m in External_resource.objects.filter(source_name='drugbank'):
            m.delete()
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'drugbank_drugs.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            name_index = 1
            try:
                id_index = row.index('id')
                name_index = row.index('name')
            except:
                return http.HttpResponse("Error finding the right column(s) in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    name = row[name_index]
                    resource_url = 'http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugs/' + id
                    resource_label = name
                    resource_format = 'RDF_HTML'
                    related_model_name = 'Intervention'
                    label = id + ' (drugbank drug resource)'
                    if len(label)>127:
                        label = hashlib.md5(label).hexdigest()
                    # NOTE(review): unlike the dbpedia branch this
                    # get_or_create omits source_id — confirm intentional.
                    external_resource, created = models.External_resource.objects.get_or_create(
                        label = label,
                        source_label = resource_label,
                        source_url = resource_url,
                        source_format = resource_format,
                        source_name = source_name,
                        related_model_name =related_model_name,
                    )
                except StopIteration:
                    row = None
        # alternative names
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'drugbank_drug_brandnames.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            name_index = 1
            try:
                id_index = row.index('id')
                name_index = row.index('name')
            except:
                return http.HttpResponse("Error finding the right column(s) in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    # Brand names can contain non-ASCII bytes; decode
                    # leniently, dropping undecodable characters.
                    altname = unicode(row[name_index],errors='ignore')
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                except StopIteration:
                    row = None
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'drugbank_drug_synonyms.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            name_index = 1
            try:
                id_index = row.index('id')
                name_index = row.index('name')
            except:
                return http.HttpResponse("Error finding the right column(s) in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    altname = unicode(row[name_index],errors='ignore')
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                except StopIteration:
                    row = None
        return http.HttpResponse("{'status':'OK'}")
    ## Loading Dailymed
    elif source_name == 'dailymed':
        for m in External_resource.objects.filter(source_name='dailymed'):
            m.delete()
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'dailymed_drugs.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            name_index = 1
            try:
                id_index = row.index('id')
                name_index = row.index('name')
                fullName_index = row.index('fullName')
                activeIngridient_index = row.index('activeIngridient')
                drugbank_id_index = row.index('drugbank_id')
                genericMedicine_index = row.index('genericMedicine')
            except:
                return http.HttpResponse("Error finding the right column(s) in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    name = row[name_index]
                    resource_url = 'http://www4.wiwiss.fu-berlin.de/dailymed/resource/drugs/' + id
                    resource_label = name
                    resource_format = 'RDF_HTML'
                    related_model_name = 'Intervention'
                    label = id + ' (dailymed drug resource)'
                    if len(label)>127:
                        label = hashlib.md5(label).hexdigest()
                    external_resource, created = models.External_resource.objects.get_or_create(
                        label = label,
                        source_label = resource_label,
                        source_url = resource_url,
                        source_format = resource_format,
                        source_name = source_name,
                        related_model_name =related_model_name,
                    )
                    # alternative names: one Alt_name row per name-ish
                    # column of the dailymed export.
                    altname = row[genericMedicine_index]
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                    altname = row[fullName_index]
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                    altname = row[activeIngridient_index]
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                    altname = row[drugbank_id_index]
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                except StopIteration:
                    row = None
        return http.HttpResponse("{'status':'OK'}")
    ## Loading diseasome
    elif source_name == 'diseasome':
        for m in External_resource.objects.filter(source_name='diseasome'):
            m.delete()
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'diseasome_disease.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            name_index = 1
            try:
                id_index = row.index('id')
                name_index = row.index('name')
            except:
                return http.HttpResponse("Error finding the right column(s) in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    name = row[name_index]
                    resource_url = 'http://www4.wiwiss.fu-berlin.de/diseasome/resource/diseases/' + id
                    resource_label = name
                    resource_format = 'RDF_HTML'
                    related_model_name = 'Condition'
                    label = id + ' (diseasome disease resource)'
                    if len(label)>127:
                        label = hashlib.md5(label).hexdigest()
                    external_resource, created = models.External_resource.objects.get_or_create(
                        label = label,
                        source_label = resource_label,
                        source_url = resource_url,
                        source_format = resource_format,
                        source_name = source_name,
                        related_model_name =related_model_name,
                    )
                except StopIteration:
                    row = None
        return http.HttpResponse("{'status':'OK'}")
    ## Other sources
    else:
        return http.HttpResponse("{'status':'FAIL', 'reason':'Source %s not found'}" % source_name)
def form_invalid(self, form):
    """Reply with the form's validation errors rendered as plain text."""
    errors_text = form.errors.as_text()
    return http.HttpResponse(errors_text)
def process_request(self, request):
    """Short-circuit CORS preflight requests.

    When the Access-Control-Request-Method header is present, answer
    immediately with a populated empty response; otherwise return None
    so normal request processing continues.
    """
    if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' not in request.META:
        return None
    response = http.HttpResponse()
    self._populate_response(response)
    return response
def contribute(request, addon):
    """Start a PayPal contribution for *addon* (comment-validating variant).

    Validates the amount and comment length up front, requests a PayPal
    paykey, stores a Contribution when one is obtained, and replies with
    JSON (for Ajax callers) or a redirect into the PayPal flow.
    """
    # Enforce paypal-imposed comment length limit
    commentlimit = PAYPAL_MAX_COMMENT_LENGTH
    contrib_type = request.POST.get('type', 'suggested')
    is_suggested = contrib_type == 'suggested'
    source = request.POST.get('source', '')
    comment = request.POST.get('comment', '')
    # Unknown types fall through to '' and pick up the default below.
    amount = {
        'suggested': addon.suggested_amount,
        'onetime': request.POST.get('onetime-amount', '')
    }.get(contrib_type, '')
    if not amount:
        amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION
    form = ContributionForm({'amount': amount})
    if len(comment) > commentlimit or not form.is_valid():
        return http.HttpResponse(json.dumps({'error': 'Invalid data.',
                                             'status': '', 'url': '',
                                             'paykey': ''}),
                                 content_type='application/json')
    contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest()
    if addon.charity:
        # TODO(andym): Figure out how to get this in the addon authors
        # locale, rather than the contributors locale.
        name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name),
                           addon.charity.paypal)
    else:
        name, paypal_id = addon.name, addon.paypal_id
    # l10n: {0} is the addon name
    contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name))
    paykey, error, status = '', '', ''
    try:
        paykey, status = paypal.get_paykey(
            dict(amount=amount, email=paypal_id,
                 ip=request.META.get('REMOTE_ADDR'), memo=contrib_for,
                 pattern='addons.paypal', slug=addon.slug,
                 uuid=contribution_uuid))
    except paypal.PaypalError as error:
        # Log and continue: the error is reported to the client below.
        log.error(
            'Error getting paykey, contribution for addon '
            '(addon: %s, contribution: %s)' % (addon.pk, contribution_uuid),
            exc_info=True)
    if paykey:
        contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id,
                               amount=amount, source=source,
                               source_locale=request.LANG,
                               annoying=addon.annoying,
                               uuid=str(contribution_uuid),
                               is_suggested=is_suggested,
                               suggested_amount=addon.suggested_amount,
                               comment=comment, paykey=paykey)
        contrib.save()
    url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)
    if request.GET.get('result_type') == 'json' or request.is_ajax():
        # If there was an error getting the paykey, then JSON will
        # not have a paykey and the JS can cope appropriately.
        return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey,
                                             'error': str(error),
                                             'status': status}),
                                 content_type='application/json')
    return http.HttpResponseRedirect(url)
def done(self, request, cleaned_data):
    """Finish the flow by returning the module-level success message."""
    body = success_string
    return http.HttpResponse(body)
def env(request):
    """Debug view: echo the (HTML-escaped) request object in a <pre> block."""
    escaped = jinja2.escape(request)
    return http.HttpResponse(u'<pre>%s</pre>' % (escaped))
def inner(request, *args, **kwargs):
    """Gate the wrapped view behind authentication; anonymous users get 403."""
    if request.user.is_authenticated():
        return view_func(request, *args, **kwargs)
    return http.HttpResponse('Forbidden', status=403)
def graphs(request, id):
    """CRUD endpoint for per-user Graph records, keyed by the Hatohol user.

    POST creates (201 + Location header), PUT updates, DELETE removes,
    anything else reads — one graph when *id* is given, otherwise the
    user's full list. All bodies are JSON; access to another user's
    graph yields 403.
    """
    content_type = 'application/json'
    try:
        user_id = get_user_id_from_hatohol_server(request)
    except (NoHatoholUser, NoHatoholSession):
        return http.HttpResponseForbidden(content_type=content_type)
    if request.method == 'POST':
        # Create: validate before saving; 400 carries the messages.
        graph = Graph(user_id=user_id, settings_json=request.body)
        try:
            graph.full_clean()
        except ValidationError as e:
            return http.HttpResponseBadRequest(json.dumps(e.messages),
                                               content_type=content_type)
        graph.save()
        response = http.HttpResponse(to_json(graph),
                                     content_type=content_type,
                                     status=201)
        response['Location'] = reverse('hatohol.views.graphs',
                                       args=[graph.id])
        return response
    elif request.method == 'PUT':
        if id is None:
            message = 'id is required'
            return http.HttpResponseBadRequest(to_json(message),
                                               content_type=content_type)
        try:
            graph = Graph.objects.get(id=id)
            # Only the owner may update.
            if graph.user_id != user_id:
                return http.HttpResponseForbidden(content_type=content_type)
            graph.settings_json = request.body
            graph.full_clean()
            graph.save()
            return http.HttpResponse(to_json(graph),
                                     content_type=content_type)
        except Graph.DoesNotExist:
            return http.HttpResponseNotFound(content_type=content_type)
        except ValidationError as e:
            return http.HttpResponseBadRequest(json.dumps(e.messages),
                                               content_type=content_type)
    elif request.method == 'DELETE':
        if id is None:
            message = 'id is required'
            return http.HttpResponseBadRequest(to_json(message),
                                               content_type=content_type)
        try:
            graph = Graph.objects.get(id=id)
        except Graph.DoesNotExist:
            return http.HttpResponseNotFound()
        else:
            # Only the owner may delete.
            if graph.user_id != user_id:
                return http.HttpResponseForbidden(content_type=content_type)
            graph.delete()
            return http.HttpResponse()
    else:
        # Read: single graph when id is given, otherwise the user's list.
        if id:
            try:
                graph = Graph.objects.get(id=id)
            except Graph.DoesNotExist:
                return http.HttpResponseNotFound()
            if graph.user_id != user_id:
                return http.HttpResponseForbidden(content_type=content_type)
            response = graph
        else:
            graphs = Graph.objects.filter(user_id=user_id).order_by('id')
            response = graphs
        return http.HttpResponse(to_json(response),
                                 content_type=content_type)
def form_invalid(self, form):
    """Return the form's errors serialized as JSON with a 422 status."""
    body = get_errors_json(form.errors)
    return http.HttpResponse(body,
                             content_type="application/json",
                             status=422)
def loaded(request):
    """Report the WSGI environ's 'wsgi.loaded' value as plain text."""
    flag = request.META['wsgi.loaded']
    return http.HttpResponse('%s' % flag, content_type='text/plain')
def paypal_ignore(request, post):
    """Log and acknowledge an IPN transaction that is deliberately skipped."""
    txn_id = post.get('txn_id', '')
    paypal_log.info('Ignoring: %s' % txn_id)
    return http.HttpResponse('Ignoring %s' % txn_id)
def get_json_response(self, content, **httpresponse_kwargs):
    """Build an HttpResponse carrying *content* with a JSON content type."""
    kwargs = dict(httpresponse_kwargs)
    kwargs['content_type'] = 'application/json'
    return http.HttpResponse(content, **kwargs)
def search(request):
    """Full-text search over blog items and comments.

    Supports a plain query (`?q=...`), `or`/`and` operators, and
    `keyword:` / `category:` style filters (split out by split_search).
    Builds highlighted excerpt snippets, counts matches per model, and
    renders homepage/search.html. Postgres tsvector/tsquery does the
    matching; Redis supplies keyword→id sets.
    """
    data = {}
    search = request.GET.get('q', '')
    if len(search) > 100:
        return http.HttpResponse("Search too long")
    documents = []
    data['base_url'] = 'http://%s' % RequestSite(request).domain
    tag_strip = re.compile('<[^>]+>')

    def append_match(item, words):
        # Build highlighted snippet sentences around each regex hit and
        # append a result dict to `documents` (closure state).
        text = item.rendered
        text = tag_strip.sub(' ', text)
        sentences = []

        def matcher(match):
            return '<b>%s</b>' % match.group()

        if regex:
            for each in regex.finditer(text):
                # ~35 chars of left context, ~40 of right.
                sentence = text[max(each.start() - 35, 0):each.end() + 40]
                sentence = regex_ext.sub(matcher, sentence)
                sentence = sentence.strip()
                if each.start() > 0 and not sentence[0].isupper():
                    sentence = '...%s' % sentence
                if each.end() < len(text):
                    sentence = '%s...' % sentence
                sentences.append(sentence.strip())
                if len(sentences) > 3:
                    break
        if isinstance(item, BlogItem):
            title = html_escape(item.title)
            if regex_ext:
                title = regex_ext.sub(matcher, title)
            date = item.pub_date
            type_ = 'blog'
        else:
            if not item.blogitem:
                item.correct_blogitem_parent()
            title = ("Comment on <em>%s</em>" %
                     html_escape(item.blogitem.title))
            date = item.add_date
            type_ = 'comment'
        documents.append({
            'title': title,
            'summary': '<br>'.join(sentences),
            'date': date,
            'url': item.get_absolute_url(),
            'type': type_,
        })

    def create_search(s):
        # Turn the raw query into a tsquery expression and a word list.
        # A single infix `or` becomes `a | b`; `and` tokens are dropped
        # (terms are joined with ` & ` anyway).
        words = re.findall('\w+', s)
        words_orig = words[:]
        if 'or' in words:
            which = words.index('or')
            words_orig.remove('or')
            if (which + 1) < len(words) and which > 0:
                before = words.pop(which - 1)
                words.pop(which - 1)
                after = words.pop(which - 1)
                words.insert(which - 1, '%s | %s' % (before, after))
        while 'and' in words_orig:
            words_orig.remove('and')
        while 'and' in words:
            words.remove('and')
        escaped = ' & '.join(words)
        return escaped, words_orig

    data['q'] = search
    keyword_search = {}
    if len(search) > 1:
        _keyword_keys = ('keyword', 'keywords', 'category', 'categories')
        search, keyword_search = split_search(search, _keyword_keys)
    redis = get_redis_connection(reconnection_wrapped=True)
    not_ids = defaultdict(set)
    times = []
    count_documents = []
    regex = regex_ext = None

    def append_queryset_search(queryset, order_by, words, model_name):
        # NOTE(review): this reads the closure variable `items`, not the
        # `queryset` parameter — callers always assign `items` first so
        # behavior matches, but confirm which was intended.
        count = items.count()
        count_documents.append(count)
        for item in items.order_by(order_by)[:20]:
            append_match(item, words)
            not_ids[model_name].add(item.pk)
        return count

    if len(search) > 1:
        search_escaped, words = create_search(search)
        # `regex` finds word starts; `regex_ext` also swallows suffixes
        # so highlighting covers e.g. plural forms.
        regex = re.compile(
            r'\b(%s)' % '|'.join(
                re.escape(word) for word in words
                if word.lower() not in STOPWORDS),
            re.I | re.U)
        regex_ext = re.compile(
            r'\b(%s\w*)\b' % '|'.join(
                re.escape(word) for word in words
                if word.lower() not in STOPWORDS),
            re.I | re.U)
        for model in (BlogItem, BlogComment):
            qs = model.objects
            model_name = model._meta.object_name
            if model == BlogItem:
                fields = ('title', 'text')
                order_by = '-pub_date'
                if keyword_search.get('keyword'):
                    # use Redis!
                    ids = redis.smembers('kw:%s' % keyword_search['keyword'])
                    if ids:
                        qs = qs.filter(pk__in=ids)
                if keyword_search.get('keywords'):
                    # use Redis!
                    ids = []
                    for each in [
                        x.strip()
                        for x in keyword_search['keywords'].split(',')
                        if x.strip()
                    ]:
                        ids.extend(redis.smembers('kw:%s' % each))
                    if ids:
                        qs = qs.filter(pk__in=ids)
            elif model == BlogComment:
                fields = ('comment', )
                order_by = '-add_date'
                if any(
                        keyword_search.get(k)
                        for k in ('keyword', 'keywords', 'category',
                                  'categories')):
                    # BlogComments don't have this keyword so it can
                    # never match
                    continue
            for field in fields:
                # Don't re-match items already found via a prior field.
                if not_ids[model_name]:
                    qs = qs.exclude(pk__in=not_ids[model_name])
                _sql = "to_tsvector('english'," + field + ") "
                if ' | ' in search_escaped or ' & ' in search_escaped:
                    _sql += "@@ to_tsquery('english', %s)"
                else:
                    _sql += "@@ plainto_tsquery('english', %s)"
                items = (qs.extra(where=[_sql], params=[search_escaped]))
                t0 = time.time()
                count = append_queryset_search(items, order_by, words,
                                               model_name)
                t1 = time.time()
                times.append('%s to find %s %ss by field %s' %
                             (t1 - t0, count, model_name, field))
        #print 'Searchin for %r:\n%s' % (search, '\n'.join(times))
        logging.info('Searchin for %r:\n%s' % (search, '\n'.join(times)))
    elif keyword_search and any(keyword_search.values()):
        # Query text was exhausted by the keyword filters; search purely
        # by keyword/category sets.
        if keyword_search.get('keyword') or keyword_search.get('keywords'):
            if keyword_search.get('keyword'):
                ids = redis.smembers('kw:%s' % keyword_search['keyword'])
            else:
                ids = []
                for each in [
                    x.strip()
                    for x in keyword_search.get('keywords').split(',')
                    if x.strip()
                ]:
                    ids.extend(redis.smembers('kw:%s' % each))
            if ids:
                items = BlogItem.objects.filter(pk__in=ids)
                model_name = BlogItem._meta.object_name
                append_queryset_search(items, '-pub_date', [], model_name)
        if keyword_search.get('category') or keyword_search.get('categories'):
            if keyword_search.get('category'):
                categories = Category.objects.filter(
                    name=keyword_search.get('category'))
            else:
                cats = [
                    x.strip()
                    for x in keyword_search.get('categories').split(',')
                    if x.strip()
                ]
                categories = Category.objects.filter(name__in=cats)
            if categories:
                cat_q = make_categories_q(categories)
                items = BlogItem.objects.filter(cat_q)
                model_name = BlogItem._meta.object_name
                append_queryset_search(items, '-pub_date', [], model_name)
    count_documents_shown = len(documents)
    data['documents'] = documents
    data['count_documents'] = sum(count_documents)
    data['count_documents_shown'] = count_documents_shown
    data['better'] = None
    # When nothing matched, suggest an OR-ified version of the query.
    if not data['count_documents']:
        if ' or ' not in data['q'] and len(data['q'].split()) > 1:
            data['better'] = data['q'].replace(' ', ' or ')
    if data['better']:
        data['better_url'] = (
            reverse('search') + '?' +
            urllib.urlencode({'q': data['better'].encode('utf-8')}))
    if not data['q']:
        page_title = ''
    elif data['count_documents'] == 1:
        page_title = '1 thing found'
    else:
        page_title = '%s things found' % data['count_documents']
    if count_documents_shown < data['count_documents']:
        if count_documents_shown == 1:
            page_title += ' (but only 1 thing shown)'
        else:
            page_title += ' (but only %s things shown)' % count_documents_shown
    data['page_title'] = page_title
    if data['documents']:
        data['first_document_url'] = data['documents'][0]['url']
    else:
        data['first_document_url'] = None
    # Single-word miss that happens to be a known keyword: redirect to
    # the explicit keyword search instead.
    if not data['count_documents'] and len(
            search.split()) == 1 and not keyword_search:
        if redis.smembers('kw:%s' % search):
            url = reverse('search')
            url += '?' + urllib.urlencode({'q': 'keyword:%s' % search})
            return redirect(url)
    return render(request, 'homepage/search.html', data)
def get(self, request, *args, **kwargs):
    """Export the filtered request list as a semicolon-separated CSV.

    One header row (French column labels) followed by one row per
    object, with Y/N flags, French-formatted dates, and comma decimal
    separators for Excel. Returned as an attachment named MRS.csv.
    """
    # Pull related rows eagerly; distinct() guards against the joins
    # duplicating objects.
    qs = self.object_list.annotate(
        transport_count=Count('transport'),
    ).select_related(
        'insured',
    ).prefetch_related('rating_set', ).prefetch_related(
        'logentries__emailtemplate',
    ).distinct()
    content = [
        ';'.join([
            'N°demande',
            'libellé de la CPAM',
            'date de naissance bénéficiaire',
            'statut de la demande',
            'demande "en cours d\'étude"',
            'date de demande',
            'délai de traitement',
            'n° finess',
            'n° adeli',
            'signalement assuré',
            'signalement technicien',
            'Nombre de trajet',
            'VP',
            'ATP',
            'Assurée a basculé',
            'nombre Km',
            'montant frais de parking',
            'montant total des frais de VP',
            'montant total remboursement VP',
            'montant total ATP',
            'cout théorique taxi',
            'motif de rejet',
            'motif de contact',
            'note par l\'utilisateur',
            'commentaire'
        ])
    ]

    def yn(val):
        # Boolean → 'Y'/'N' cell.
        return 'Y' if val else 'N'

    def mystr(val):
        # None → empty cell instead of the string 'None'.
        return '' if val is None else str(val)

    def excel_number(val):
        # French Excel expects ',' as the decimal separator.
        return mystr(val).replace('.', ',')

    for obj in qs:
        contact_reason = ''
        reject_reason = ''
        # Last matching log entry wins for each reason.
        for entry in obj.logentries.all():
            if entry.action == entry.ACTION_CONTACT:
                contact_reason = entry.emailtemplate
            if entry.action == obj.STATUS_REJECTED:
                reject_reason = entry.emailtemplate
        rating = obj.rating_set.first()
        content.append(';'.join(
            map(mystr, [
                obj.display_id,
                obj.caisse,
                obj.insured.birth_date.strftime(DATE_FORMAT_FR),
                obj.get_status_display(),
                yn(obj.suspended),
                obj.creation_day.strftime(DATE_FORMAT_FR),
                excel_number(obj.delay),
                obj.institution.finess if obj.institution else '',
                mystr(obj.adeli),
                yn(obj.conflicts_resolved or obj.conflicts_accepted),
                yn(obj.conflicts_accepted),
                obj.transport_count,
                yn(obj.modevp),
                yn(obj.modeatp),
                yn(obj.insured.shifted),
                excel_number(obj.distancevp),
                excel_number(obj.expensevp_parking),
                excel_number(obj.expensevp),
                excel_number(obj.payment_amount),
                excel_number(obj.expenseatp),
                excel_number(obj.taxi_cost),
                reject_reason,
                contact_reason,
                mystr(rating.score) if rating else '',
                mystr(rating.comment) if rating else ''
            ])))
    response = http.HttpResponse(
        '\n'.join(content),
        content_type='text/csv; charset=utf-8-sig')
    response['Content-Disposition'] = (f'attachment; filename="MRS.csv"')
    return response
def purchase(request, addon):
    """Start a PayPal purchase of ``addon`` for the current user.

    Requests a paykey from PayPal (optionally using a stored pre-approval),
    records a pending ``Contribution``, and returns either a JSON payload
    (Ajax / ``result_type=json``) or a redirect into the PayPal flow.
    NOTE(review): Python 2 era code — ``hashlib.md5(str(uuid.uuid4()))``
    would need ``.encode()`` on Python 3.
    """
    log.debug('Starting purchase of addon: %s by user: %s'
              % (addon.pk, request.amo_user.pk))
    amount = addon.premium.get_price()
    source = request.POST.get('source', '')
    # Random transaction id; md5 here is just a compact hex token, not a
    # security measure.
    uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest()
    # l10n: {0} is the addon name
    contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name))
    paykey, status, error = '', '', ''
    preapproval = None
    if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user:
        preapproval = request.amo_user.get_preapproval()
    try:
        # Webapps use a different return-URL pattern and slug field.
        pattern = 'addons.purchase.finished'
        slug = addon.slug
        if addon.is_webapp():
            pattern = 'apps.purchase.finished'
            slug = addon.app_slug
        paykey, status = paypal.get_paykey(dict(
            amount=amount,
            chains=settings.PAYPAL_CHAINS,
            email=addon.paypal_id,
            ip=request.META.get('REMOTE_ADDR'),
            memo=contrib_for,
            pattern=pattern,
            preapproval=preapproval,
            qs={'realurl': request.POST.get('realurl')},
            slug=slug,
            uuid=uuid_))
    except paypal.PaypalError as error:
        # Log to CEF for security auditing, then fall through with an
        # empty paykey so the error is reported to the caller below.
        paypal.paypal_log_cef(request, addon, uuid_,
                              'PayKey Failure', 'PAYKEYFAIL',
                              'There was an error getting the paykey')
        log.error('Error getting paykey, purchase of addon: %s' % addon.pk,
                  exc_info=True)
    if paykey:
        contrib = Contribution(addon_id=addon.id, amount=amount,
                               source=source, source_locale=request.LANG,
                               uuid=str(uuid_), type=amo.CONTRIB_PENDING,
                               paykey=paykey, user=request.amo_user)
        log.debug('Storing contrib for uuid: %s' % uuid_)
        # If this was a pre-approval, it's completed already, we'll
        # double check this with PayPal, just to be sure nothing went wrong.
        if status == 'COMPLETED':
            paypal.paypal_log_cef(request, addon, uuid_,
                                  'Purchase', 'PURCHASE',
                                  'A user purchased using pre-approval')
            log.debug('Status is completed for uuid: %s' % uuid_)
            if paypal.check_purchase(paykey) == 'COMPLETED':
                log.debug('Check purchase is completed for uuid: %s' % uuid_)
                contrib.type = amo.CONTRIB_PURCHASE
            else:
                # In this case PayPal disagreed, we should not be trusting
                # what get_paykey said. Which is a worry.
                log.error('Check purchase failed on uuid: %s' % uuid_)
                status = 'NOT-COMPLETED'
        contrib.save()
    else:
        log.error('No paykey present for uuid: %s' % uuid_)
    log.debug('Got paykey for addon: %s by user: %s'
              % (addon.pk, request.amo_user.pk))
    url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)
    if request.POST.get('result_type') == 'json' or request.is_ajax():
        return http.HttpResponse(json.dumps({'url': url,
                                             'paykey': paykey,
                                             'error': str(error),
                                             'status': status}),
                                 content_type='application/json')
    # This is the non-Ajax fallback.
    if status != 'COMPLETED':
        return redirect(url)
    messages.success(request, _('Purchase complete'))
    return redirect(shared_url('addons.detail', addon))
def serve_xml(content, basename): resp = http.HttpResponse(content, mimetype='application/xml') resp['Content-Disposition'] = 'attachment; filename=%s.xml' % (basename, ) return resp
def post(self, request): data = parsers.JSONParser().parse(request) lst = api.func.findStop(data, bs) return http.HttpResponse(renderers.JSONRenderer().render( {'busStop': lst[min(lst.keys())]}))
def vidly_media_webhook(request): if not request.POST.get('xml'): return http.HttpResponseBadRequest("no 'xml'") xml_string = request.POST['xml'].strip() try: struct = xmltodict.parse(xml_string) except ExpatError: return http.HttpResponseBadRequest("Bad 'xml'") try: task = struct['Response']['Result']['Task'] try: vidly_submission = VidlySubmission.objects.get( url=task['SourceFile'], tag=task['MediaShortLink'] ) if task['Status'] == 'Finished': if not vidly_submission.finished: vidly_submission.finished = timezone.now() vidly_submission.save() event = vidly_submission.event if ( task['Private'] == 'false' and event.privacy != Event.PRIVACY_PUBLIC ): # the event is private but the video is not vidly.update_media_protection( vidly_submission.tag, True # make it private ) if not vidly_submission.token_protection: vidly_submission.token_protection = True vidly_submission.save() # Awesome! # This event now has a fully working transcoded piece of # media. if event.status == Event.STATUS_PENDING: event.status = Event.STATUS_SCHEDULED event.archive_time = timezone.now() event.save() # More awesome! We can start processing the transcoded media. if not event.duration: videoinfo.fetch_duration( event, save=True, verbose=settings.DEBUG ) event = Event.objects.get(id=event.id) if event.duration: if not Picture.objects.filter(event=event): videoinfo.fetch_screencapture( event, save=True, verbose=settings.DEBUG, set_first_available=True, ) elif task['Status'] == 'Error': if not vidly_submission.errored: vidly_submission.errored = timezone.now() vidly_submission.save() except VidlySubmission.DoesNotExist: # remember, we can't trust the XML since it's publicly # available and exposed as a webhook pass except KeyError: # If it doesn't have a "Result" or "Task", it was just a notification # that the media was added. pass return http.HttpResponse('OK\n')
def normal_view(request): return http.HttpResponse('OK')
def events_json_group(request):
    """Return the calendar events of a group as a JSON list.

    For the group given by the ``group_id`` GET parameter, collects:
    - one all-day event per published Relationship whose parcours is
      followed by at least one student of the group (regular tasks), and
    - one timed event per evaluation parcours of those students.
    """
    user = User.objects.get(pk=request.user.id)
    teacher = Teacher.objects.get(user=user)
    idg = request.GET.get("group_id")
    group = Group.objects.get(pk=idg)
    students = group.students.all()

    # --- Tasks ---
    # Build the list of (non-evaluation, non-trashed) parcours followed by
    # at least one student of the group, without duplicates.
    parcours_tab, evaluation_tab = [], []
    for student in students:
        parcours = Parcours.objects.filter(
            students=student, teacher=teacher, is_evaluation=0, is_trash=0)
        for p in parcours:
            if p not in parcours_tab:
                parcours_tab.append(p)

    relationships = Relationship.objects.filter(
        is_publish=1, parcours__in=parcours_tab
    ).exclude(date_limit=None, students=None)
    event_list = []
    for relationship in relationships:
        # Convert the deadline into the user's timezone; entries with
        # incomplete related data are skipped (best-effort).
        try:
            relationship_start = dt_naive_to_timezone(
                relationship.date_limit, user.time_zone)
            if relationship.exercise.supportfile.annoncement:
                title = cleanhtml(unescape_html(
                    relationship.exercise.supportfile.annoncement))
            else:
                title = unescape_html(relationship.exercise.knowledge.name)
            event_list.append({
                'id': relationship.id,
                'start': relationship_start.strftime('%Y-%m-%d %H:%M:%S'),
                'end': relationship_start.strftime('%Y-%m-%d %H:%M:%S'),
                'title': title,
                'allDay': True,
                'description': title,
                'color': relationship.parcours.color,
            })
        except Exception:
            # Was a bare ``except:``; narrowed to Exception so SystemExit
            # and KeyboardInterrupt are no longer swallowed.  Skipping the
            # broken entry keeps the calendar best-effort, as before.
            pass

    # --- Evaluation parcours ---
    for student in students:
        evaluations = Parcours.objects.filter(
            students=student, teacher=teacher, is_evaluation=1, is_trash=0)
        for e in evaluations:
            if e not in evaluation_tab:
                evaluation_tab.append(e)
    for evaluation in evaluation_tab:
        # ``evaluation`` is a Parcours: combine its start/stop dates with
        # the corresponding start/stop times into full datetimes.
        evaluation_start = datetime.combine(evaluation.start,
                                            evaluation.starter)
        evaluation_stop = datetime.combine(evaluation.stop,
                                           evaluation.stopper)
        event_list.append({
            'id': evaluation.id,
            'start': evaluation_start.strftime('%Y-%m-%d %H:%M:%S'),
            'end': evaluation_stop.strftime('%Y-%m-%d %H:%M:%S'),
            'title': evaluation.title,
            'allDay': False,
            'description': evaluation.title,
            'color': evaluation.color,
        })
    return http.HttpResponse(json.dumps(event_list),
                             content_type='application/json')
def mini_manifest(request, addon, version_id): token = request.GET.get('token') return http.HttpResponse( _mini_manifest(addon, version_id, token), content_type='application/x-web-app-manifest+json; charset=utf-8')
def javascript_catalog(request, domain='djangojs', packages=None):
    """
    Returns the selected language catalog as a javascript library.

    Receives the list of packages to check for translations in the
    packages parameter either from an infodict or as a +-delimited
    string from the request. Default is 'django.conf'.

    Additionally you can override the gettext domain for this view,
    but usually you don't want to do that, as JavaScript messages
    go to the djangojs domain. But this might be needed if you
    deliver your JavaScript source from Django templates.
    """
    # Allow overriding the active language via ?language=xx.
    if request.GET:
        if 'language' in request.GET:
            if check_for_language(request.GET['language']):
                activate(request.GET['language'])
    if packages is None:
        packages = ['django.conf']
    if isinstance(packages, basestring):
        packages = packages.split('+')
    # Only installed apps (plus django.conf itself) may contribute catalogs.
    packages = [p for p in packages
                if p == 'django.conf' or p in settings.INSTALLED_APPS]
    default_locale = to_locale(settings.LANGUAGE_CODE)
    locale = to_locale(get_language())
    t = {}       # merged translation catalog (msgid -> msgstr)
    paths = []   # locale directories of every requested package
    en_selected = locale.startswith('en')
    en_catalog_missing = True
    # first load all english languages files for defaults
    for package in packages:
        p = importlib.import_module(package)
        path = os.path.join(os.path.dirname(p.__file__), 'locale')
        paths.append(path)
        try:
            catalog = gettext_module.translation(domain, path, ['en'])
            t.update(catalog._catalog)
        except IOError:
            pass
        else:
            # 'en' is the selected language and at least one of the packages
            # listed in `packages` has an 'en' catalog
            if en_selected:
                en_catalog_missing = False
    # next load the settings.LANGUAGE_CODE translations if it isn't english
    if default_locale != 'en':
        for path in paths:
            try:
                catalog = gettext_module.translation(domain, path,
                                                     [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
                t.update(catalog._catalog)
    # last load the currently selected language, if it isn't identical to
    # the default.
    if locale != default_locale:
        # If the currently selected language is English but it doesn't have a
        # translation catalog (presumably due to being the language translated
        # from) then a wrong language catalog might have been loaded in the
        # previous step. It needs to be discarded.
        if en_selected and en_catalog_missing:
            t = {}
        else:
            locale_t = {}
            for path in paths:
                try:
                    catalog = gettext_module.translation(domain, path,
                                                         [locale])
                except IOError:
                    catalog = None
                if catalog is not None:
                    locale_t.update(catalog._catalog)
            if locale_t:
                t = locale_t
    src = [LibHead]
    # Extract the Plural-Forms expression from the catalog metadata
    # (the entry with the empty-string key).
    plural = None
    if '' in t:
        for l in t[''].split('\n'):
            if l.startswith('Plural-Forms:'):
                plural = l.split(':', 1)[1].strip()
    if plural is not None:
        # this should actually be a compiled function of a typical plural-form:
        # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
        plural = [el.strip() for el in plural.split(';')
                  if el.strip().startswith('plural=')][0].split('=', 1)[1]
        src.append(PluralIdx % plural)
    else:
        src.append(SimplePlural)
    csrc = []
    pdict = {}   # plural msgid -> highest plural index seen
    for k, v in t.items():
        if k == '':
            continue
        if isinstance(k, basestring):
            # Singular entry.
            csrc.append("catalog['%s'] = '%s';\n"
                        % (javascript_quote(k), javascript_quote(v)))
        elif isinstance(k, tuple):
            # Plural entry: key is (msgid, plural_index).
            if k[0] not in pdict:
                pdict[k[0]] = k[1]
            else:
                pdict[k[0]] = max(k[1], pdict[k[0]])
            csrc.append("catalog['%s'][%d] = '%s';\n"
                        % (javascript_quote(k[0]), k[1],
                           javascript_quote(v)))
        else:
            raise TypeError(k)
    csrc.sort()
    # Pre-declare the plural arrays before the indexed assignments run.
    for k, v in pdict.items():
        src.append("catalog['%s'] = [%s];\n"
                   % (javascript_quote(k), ','.join(["''"] * (v + 1))))
    src.extend(csrc)
    src.append(LibFoot)
    src.append(InterPolate)
    src.append(LibFormatHead)
    src.append(get_formats())
    src.append(LibFormatFoot)
    src = ''.join(src)
    return http.HttpResponse(src, 'text/javascript')
def __call__(self, environ, start_response):
    """
    Hijack the main loop from the original thread and listen on events on
    the Redis and the Websocket filedescriptors.
    """
    websocket = None
    subscriber = self.Subscriber(self._redis_connection)
    try:
        self.assure_protocol_requirements(environ)
        request = WSGIRequest(environ)
        # Optional hooks let the project pre-process the request and
        # filter the subscribed channels.
        if callable(private_settings.WS4REDIS_PROCESS_REQUEST):
            private_settings.WS4REDIS_PROCESS_REQUEST(request)
        else:
            self.process_request(request)
        channels, echo_message = self.process_subscriptions(request)
        if callable(private_settings.WS4REDIS_ALLOWED_CHANNELS):
            channels = list(
                private_settings.WS4REDIS_ALLOWED_CHANNELS(
                    request, channels))
        websocket = self.upgrade_websocket(environ, start_response)
        logger.debug('Subscribed to channels: {0}'.format(
            ', '.join(channels)))
        subscriber.set_pubsub_channels(request, channels)
        # Multiplex over the websocket fd and (if any) the Redis fd.
        websocket_fd = websocket.get_file_descriptor()
        listening_fds = [websocket_fd]
        redis_fd = subscriber.get_file_descriptor()
        if redis_fd:
            listening_fds.append(redis_fd)
        subscriber.send_persited_messages(websocket)
        recvmsg = None
        while websocket and not websocket.closed:
            # 4-second select timeout doubles as the heartbeat cadence.
            ready = self.select(listening_fds, [], [], 4.0)[0]
            if not ready:
                # flush empty socket
                websocket.flush()
            for fd in ready:
                if fd == websocket_fd:
                    # Client -> Redis direction.
                    recvmsg = RedisMessage(websocket.receive())
                    if recvmsg:
                        subscriber.publish_message(recvmsg)
                elif fd == redis_fd:
                    # Redis -> client direction; suppress echoing our own
                    # last message back unless echo is requested.
                    sendmsg = RedisMessage(subscriber.parse_response())
                    if sendmsg and (echo_message or sendmsg != recvmsg):
                        websocket.send(sendmsg)
                else:
                    logger.error('Invalid file descriptor: {0}'.format(fd))
            if private_settings.WS4REDIS_HEARTBEAT:
                websocket.send(private_settings.WS4REDIS_HEARTBEAT)
    except WebSocketError as excpt:
        logger.warning('WebSocketError: {}'.format(excpt),
                       exc_info=sys.exc_info())
        response = http.HttpResponse(status=1001, content='Websocket Closed')
    except UpgradeRequiredError as excpt:
        logger.info('Websocket upgrade required')
        response = http.HttpResponseBadRequest(status=426, content=excpt)
    except HandshakeError as excpt:
        logger.warning('HandshakeError: {}'.format(excpt),
                       exc_info=sys.exc_info())
        response = http.HttpResponseBadRequest(content=excpt)
    except PermissionDenied as excpt:
        logger.warning('PermissionDenied: {}'.format(excpt),
                       exc_info=sys.exc_info())
        response = http.HttpResponseForbidden(content=excpt)
    except Exception as excpt:
        logger.error('Other Exception: {}'.format(excpt),
                     exc_info=sys.exc_info())
        response = http.HttpResponseServerError(content=excpt)
    else:
        # Normal loop exit (client closed the websocket cleanly).
        response = http.HttpResponse()
    finally:
        subscriber.release()
        if websocket:
            websocket.close(code=1001, message='Websocket Closed')
        else:
            # The handshake never completed, so emit a regular WSGI
            # response describing the failure.
            logger.warning('Starting late response on websocket')
            status_text = STATUS_CODE_TEXT.get(response.status_code,
                                               'UNKNOWN STATUS CODE')
            status = '{0} {1}'.format(response.status_code, status_text)
            headers = list(response._headers.values())
            start_response(force_str(status), headers)
            logger.info(
                'Finish non-websocket response with status code: {}'.
                format(response.status_code))
    return response
def index(request): return http.HttpResponse("Hello world!")