def e404(req):
    """Render the project's 404 template and wrap it in an HTTP 404 response."""
    body = render_to_string('pages/e404.html')
    return HttpResponseNotFound(body)
def run_route(start_node_id, end_node_id, route_type):
    """
    Compute the shortest indoor route between two network nodes.

    :param start_node_id: id of the start node in geodata.networklines_3857
    :param end_node_id: id of the destination node
    :param route_type: "1" requests a barrier-free route (stairs excluded)
    :return: a GeoJSON FeatureCollection of route segments augmented with
        distance/walk-time info, or an HttpResponseNotFound on bad input
    """
    # TODO add a route_options dictionary parameter, e.g.
    # route_options = {'route_types': {'standard_route': 1,
    #                                  'barrierfree route': 2,
    #                                  'indoor_only_prefered': 3,
    #                                  'fastest': 4},
    #                  'route_logic': {'force_route_through_location': True}}
    cur = connection.cursor()

    # Edge set fed to pgr_dijkstra; cost is the pre-computed segment cost.
    base_route_q = """SELECT id, source, target,
                     total_cost:: DOUBLE PRECISION AS cost,
                     floor, network_type
                     FROM geodata.networklines_3857"""

    # Default: no extra filter.  route_type "1" excludes network_type 1 and 3
    # (stairs) to produce a barrier-free route.
    barrierfree_q = "WHERE 1=1"
    if route_type == "1":
        barrierfree_q = "WHERE network_type not in (1,3)"

    # The inner edge query is assembled only from the constants above; the
    # start/end node ids are bound as query parameters (no SQL injection).
    routing_query = '''
        SELECT seq, id1 AS node, id2 AS edge,
               ST_Length(geom) AS cost, floor,
               network_type, ST_AsGeoJSON(geom) AS geoj
          FROM pgr_dijkstra('
            {normal} {type}', %s, %s, FALSE, FALSE
          ) AS dij_route
          JOIN geodata.networklines_3857 AS input_network
            ON dij_route.id2 = input_network.id ;
      '''.format(normal=base_route_q, type=barrierfree_q)

    # BUGFIX: the original guard used "or" (so a single missing id slipped
    # through) and the start == end case fell through to fetchall() on a
    # cursor that never executed.  Require both ids and distinct endpoints.
    if not start_node_id or not end_node_id or start_node_id == end_node_id:
        logger.error("start or end node is None or is the same node "
                     + str(start_node_id))
        return HttpResponseNotFound('<h1>Sorry NO start or end node'
                                    ' found within 200m</h1>')

    cur.execute(routing_query, (start_node_id, end_node_id))

    # Entire result set: (seq, node, edge, cost, floor, network_type, geojson)
    route_segments = cur.fetchall()
    route_info = calc_distance_walktime(route_segments)

    # Build one GeoJSON Feature per segment.
    route_result = []
    for segment in route_segments:
        seg_length = segment[3]    # segment length (cost column)
        layer_level = segment[4]   # floor number
        seg_type = segment[5]      # network_type code
        seg_node_id = segment[1]
        seq_sequence = segment[0]
        geojs = segment[6]         # GeoJSON geometry as a string
        geojs_geom = loads(geojs)  # parse string into a geometry object
        geojs_feat = Feature(geometry=geojs_geom,
                             properties={'floor': layer_level,
                                         'length': seg_length,
                                         'network_type': seg_type,
                                         'seg_node_id': seg_node_id,
                                         'sequence': seq_sequence})
        route_result.append(geojs_feat)

    # Assemble the FeatureCollection and merge in the distance/walk-time info.
    geojs_fc = FeatureCollection(route_result)
    geojs_fc.update(route_info)
    return geojs_fc
def post(self, request, node_id):
    """
    Record traffic reported by an SS node.

    This endpoint does heavy writes, so to avoid firing per-row signals all
    writes are done in bulk:
      1. update node traffic
      2. update per-user traffic
      3. record the node's online IPs
      4. disable users over their traffic quota
      5. disable the node when it is over its traffic quota
    """
    ss_node = SSNode.get_or_none_by_node_id(node_id)
    if not ss_node:
        return HttpResponseNotFound()

    data = request.json["data"]
    node_total_traffic = 0
    log_time = pendulum.now()
    trafficlog_model_list = []
    active_tcp_connections = 0
    user_traffic_model_list = []
    online_ip_log_model_list = []
    user_ss_config_model_list = []

    for user_data in data:
        user_id = user_data["user_id"]
        # Scale the raw counters by the node's billing multiplier.
        u = int(user_data["upload_traffic"] * ss_node.enlarge_scale)
        d = int(user_data["download_traffic"] * ss_node.enlarge_scale)

        # Per-user traffic increment.
        user_traffic = UserTraffic.get_by_user_id(user_id)
        user_traffic.download_traffic += d
        user_traffic.upload_traffic += u
        user_traffic.last_use_time = log_time
        user_traffic_model_list.append(user_traffic)
        if user_traffic.overflow:
            # User exceeded quota: queue the config switch-off.
            user_ss_config = UserSSConfig.get_by_user_id(user_id)
            user_ss_config.enable = False
            user_ss_config_model_list.append(user_ss_config)

        # Per-user traffic log entry.
        # BUGFIX: the original passed download_traffic=u, upload_traffic=d,
        # swapping the two directions relative to the UserTraffic update above.
        trafficlog_model_list.append(
            UserTrafficLog(
                node_type=UserTrafficLog.NODE_TYPE_SS,
                node_id=node_id,
                user_id=user_id,
                download_traffic=d,
                upload_traffic=u,
            )
        )
        # Node-level traffic increment.
        node_total_traffic += u + d
        active_tcp_connections += user_data["tcp_conn_num"]
        # Online-IP log entries.
        for ip in user_data.get("ip_list", []):
            online_ip_log_model_list.append(
                UserOnLineIpLog(user_id=user_id, node_id=node_id, ip=ip)
            )

    # Node traffic counter.
    SSNode.increase_used_traffic(node_id, node_total_traffic)
    # Traffic logs.
    UserTrafficLog.objects.bulk_create(trafficlog_model_list)
    # Online IPs.
    UserOnLineIpLog.objects.bulk_create(online_ip_log_model_list)
    # Per-user counters.
    UserTraffic.objects.bulk_update(
        user_traffic_model_list,
        ["download_traffic", "upload_traffic", "last_use_time"],
    )
    # User on/off switches.
    UserSSConfig.objects.bulk_update(user_ss_config_model_list, ["enable"])
    # Node online-user count.
    NodeOnlineLog.add_log(
        NodeOnlineLog.NODE_TYPE_SS, node_id, len(data), active_tcp_connections
    )
    # Check node && user traffic.
    if ss_node.overflow:
        ss_node.enable = False
    if user_ss_config_model_list or ss_node.overflow:
        # NOTE save to clear the cache even when only users changed.
        ss_node.save()
    return JsonResponse(data={})
def dispatch(self, request, room):
    """Route a participant into a room: validate the room name, the
    participant label and (when secure URLs are on) its hash, then either
    show a wait page or forward to the participant start page."""
    self.room_name = room
    try:
        room = ROOM_DICT[self.room_name]
    except KeyError:
        return HttpResponseNotFound('Invalid room specified in url')

    label = self.request.GET.get('participant_label', '')

    if room.has_participant_labels():
        if label:
            missing_label = False
            invalid_label = label not in room.get_participant_labels()
        else:
            missing_label = True
            invalid_label = False

        # Parentheses spell out the original operator precedence: a missing
        # label always re-prompts; an invalid one re-prompts only when
        # secure URLs are off (needs to be easy to re-enter a label, in
        # case we are in kiosk mode).
        if missing_label or (invalid_label and not room.use_secure_urls):
            return render(
                request,
                "otree/RoomInputLabel.html",
                {'invalid_label': invalid_label},
            )

        if room.use_secure_urls:
            hash_param = self.request.GET.get('hash')
            if hash_param != make_hash(label):
                return HttpResponseNotFound(
                    'Invalid hash parameter. use_secure_urls is True, '
                    'so you must use the participant-specific URL.')

    session = room.get_session()
    if session is None:
        self.tab_unique_id = otree.common.random_chars_10()
        self._socket_url = channel_utils.room_participant_path(
            room_name=self.room_name,
            participant_label=label,
            # random chars in case the participant has multiple tabs open
            tab_unique_id=self.tab_unique_id,
        )
        return render(
            request,
            "otree/WaitPageRoom.html",
            {
                'view': self,
                'title_text': _('Please wait'),
                'body_text': _('Waiting for your session to begin'),
            },
        )

    # With a label, identity comes from the label itself; otherwise fall
    # back to session cookies so repeated requests hit the same participant.
    cookies = None if label else request.session

    # 2017-08-02: even in a room without participant_label_file, 2 requests
    # for the same start URL with the same label return the same participant.
    return participant_start_page_or_404(session, label=label, cookies=cookies)
def stats_for_realm(request: HttpRequest, realm_str: str) -> HttpResponse:
    """Render the stats page for *realm_str*, or 404 when no such realm exists."""
    realm = get_realm(realm_str)
    if realm is not None:
        return render_stats(request, realm)
    return HttpResponseNotFound("Realm %s does not exist" % (realm_str, ))
def project(request, code):
    """Project settings page: shows/rotates API keys, manages team members
    and renames the project, depending on which POST action was submitted.

    Superusers may open any project; other users only projects from their
    own profile.  Returns 404 for unknown codes, 403 for non-owner team
    actions, 400 for a remove request naming a non-member.
    """
    # Superusers see every project; everyone else only their own.
    if request.user.is_superuser:
        q = Project.objects
    else:
        q = request.profile.projects()

    try:
        project = q.get(code=code)
    except Project.DoesNotExist:
        return HttpResponseNotFound()

    is_owner = project.owner_id == request.user.id
    # Template context; *_status values drive the alert styling.
    ctx = {
        "page": "project",
        "project": project,
        "is_owner": is_owner,
        "show_api_keys": "show_api_keys" in request.GET,
        "project_name_status": "default",
        "api_status": "default",
        "team_status": "default",
    }

    if request.method == "POST":
        if "create_api_keys" in request.POST:
            # Generate fresh keys and reveal them once.
            project.set_api_keys()
            project.save()
            ctx["show_api_keys"] = True
            ctx["api_keys_created"] = True
            ctx["api_status"] = "success"
        elif "revoke_api_keys" in request.POST:
            # Blank keys disable API access until new ones are created.
            project.api_key = ""
            project.api_key_readonly = ""
            project.save()
            ctx["api_keys_revoked"] = True
            ctx["api_status"] = "info"
        elif "show_api_keys" in request.POST:
            ctx["show_api_keys"] = True
        elif "invite_team_member" in request.POST:
            # Only the owner may invite, and only below the member limit.
            if not is_owner or not project.can_invite():
                return HttpResponseForbidden()

            form = InviteTeamMemberForm(request.POST)
            if form.is_valid():
                # Rate-limit invites per inviting user.
                if not TokenBucket.authorize_invite(request.user):
                    return render(request, "try_later.html")

                email = form.cleaned_data["email"]
                try:
                    user = User.objects.get(email=email)
                except User.DoesNotExist:
                    # Invitee has no account yet: create a bare user.
                    user = _make_user(email, with_project=False)

                project.invite(user)
                ctx["team_member_invited"] = email
                ctx["team_status"] = "success"
        elif "remove_team_member" in request.POST:
            if not is_owner:
                return HttpResponseForbidden()

            form = RemoveTeamMemberForm(request.POST)
            if form.is_valid():
                # Look the member up by email *within this project* so an
                # owner cannot remove arbitrary users.
                q = User.objects
                q = q.filter(email=form.cleaned_data["email"])
                q = q.filter(memberships__project=project)
                farewell_user = q.first()
                if farewell_user is None:
                    return HttpResponseBadRequest()

                # Clear their current-project pointer before dropping the
                # membership so they don't point at a project they left.
                farewell_user.profile.current_project = None
                farewell_user.profile.save()

                Member.objects.filter(project=project, user=farewell_user).delete()

                ctx["team_member_removed"] = form.cleaned_data["email"]
                ctx["team_status"] = "info"
        elif "set_project_name" in request.POST:
            form = ProjectNameForm(request.POST)
            if form.is_valid():
                project.name = form.cleaned_data["name"]
                project.save()

                # Keep the cached current_project object in sync so the
                # page header shows the new name immediately.
                if request.profile.current_project == project:
                    request.profile.current_project.name = project.name

                ctx["project_name_updated"] = True
                ctx["project_name_status"] = "success"

    # Count members right before rendering the template, in case
    # we just invited or removed someone
    ctx["num_members"] = project.member_set.count()
    return render(request, "accounts/project.html", ctx)
def post(self, request):
    """Redirect to the reverse-search result for the posted query; 404 when
    nothing matches."""
    query = request.POST['search']
    target = get_search_result_reverse(query)
    if target is None:
        return HttpResponseNotFound("Nothing was found")
    return redirect(target)
def wrapper(*args, **kwargs):
    # Debug-only endpoint: pretend it doesn't exist outside DEBUG mode.
    if settings.DEBUG:
        return func(*args, **kwargs)
    return HttpResponseNotFound()
def get_covid_numbers(request, story_id):
    """
    Takes a story_id as a parameter and makes a call to Covid API to get the
    covid metrics for the location of the Story instance with given story_id.
    Returns the country, current day, new Covid cases, new Covid deaths and
    total active cases for that country in JSON format.
    """
    # Load API keys from the environment.
    env = environ.Env()
    environ.Env.read_env('.env')
    COVID_API_KEY = env('COVID_API_KEY')
    # NOTE(review): this deliberately(?) reads COVID_API_KEY rather than a
    # separate CITY_API_KEY variable — RapidAPI keys are account-wide so one
    # key can serve both APIs, but confirm a dedicated key wasn't intended.
    CITY_API_KEY = env('COVID_API_KEY')

    # Get the story object.
    try:
        story = Story.objects.get(pk=story_id)
    except Story.DoesNotExist:
        return HttpResponseNotFound(f"Story object with story_id: {story_id} does not exist!")

    # Resolve the story's coordinates to a country via the GeoDB cities API.
    # BUGFIX: the bare "except:" clauses below also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception (still catching request errors
    # and missing keys/indices in the API payload, as before).
    try:
        url = "https://wft-geo-db.p.rapidapi.com/v1/geo/cities"
        locationinISOform = true_location_from(story.latitude, story.longitude)
        querystring = {"location": locationinISOform, "radius": "100",
                       "minPopulation": "10"}
        headers = {
            'x-rapidapi-key': CITY_API_KEY,
            'x-rapidapi-host': "wft-geo-db.p.rapidapi.com"
        }
        response = requests.request("GET", url, headers=headers, params=querystring)
        querystring = {"country": response.json()["data"][0]["country"]}
    except Exception:
        return HttpResponseServerError("Could not establish connection with API")

    # Fetch the country's covid statistics.
    try:
        url = "https://covid-193.p.rapidapi.com/statistics"
        headers = {
            'x-rapidapi-key': COVID_API_KEY,
            'x-rapidapi-host': "covid-193.p.rapidapi.com"
        }
        response = requests.request("GET", url, headers=headers, params=querystring)
        json_data = json.loads(response.text)
    except Exception:
        return HttpResponseServerError("Could not establish connection with Covid API")

    # Extract the metrics we report; any missing field means "no data".
    try:
        final_data = {
            "country": json_data["parameters"]["country"],
            "day": json_data["response"][0]["day"],
            "new cases": json_data["response"][0]["cases"]["new"],
            "new deaths": json_data["response"][0]["deaths"]["new"],
            "active cases": json_data["response"][0]["cases"]["active"],
        }
        return JsonResponse(final_data)
    except Exception:
        # BUGFIX: was HttpResponseServerError("No data available.", status=404)
        # — a 5xx response class forced to a 404 status.  Same status code and
        # body, now with the matching response class.
        return HttpResponseNotFound("No data available.")
def get(self, request, *args, **kwargs):
    """Serve the manage view when it is enabled in settings (default: on) or
    the wtm_debug cookie is set; otherwise pretend the page doesn't exist."""
    manage_enabled = getattr(settings, "WTM_MANAGE_VIEW", True)
    debug_cookie_on = request.COOKIES.get("wtm_debug", "false") == "true"
    if not (manage_enabled or debug_cookie_on):
        return HttpResponseNotFound()
    return super().get(request, *args, **kwargs)
def dispatch(self, request, *args, **kwargs):
    """Hide this view from everyone but authenticated staff (404, not 403,
    so the URL is not discoverable)."""
    user = request.user
    if not (user.is_authenticated and user.is_staff):
        return HttpResponseNotFound()
    return super().dispatch(request, *args, **kwargs)
def get(self, request, zipped_filename, embedded_filepath):
    """
    Handles GET requests and serves a static file from within the zip file.
    """
    # Reject names that are not valid content-storage filenames outright.
    assert VALID_STORAGE_FILENAME.match(
        zipped_filename
    ), "'{}' is not a valid content storage filename".format(
        zipped_filename)

    # calculate the local file path to the zip file
    filename, ext = os.path.splitext(zipped_filename)
    zipped_path = generate_file_on_disk_name(filename, zipped_filename)

    # file size (0 means "unknown", header is skipped below)
    file_size = 0

    # if the zipfile does not exist on disk, return a 404
    if not os.path.exists(zipped_path):
        return HttpResponseNotFound(
            '"%(filename)s" does not exist locally' % {'filename': zipped_path})

    # if client has a cached version, use that (we can safely assume
    # nothing has changed, due to MD5)
    if request.META.get('HTTP_IF_MODIFIED_SINCE'):
        return HttpResponseNotModified()

    with zipfile.ZipFile(zipped_path) as zf:
        # if no path, or a directory, is being referenced, look for an
        # index.html file
        if not embedded_filepath or embedded_filepath.endswith("/"):
            embedded_filepath += "index.html"

        # get the details about the embedded file, and ensure it exists
        try:
            info = zf.getinfo(embedded_filepath)
        except KeyError:
            return HttpResponseNotFound(
                '"{}" does not exist inside "{}"'.format(
                    embedded_filepath, zipped_filename))

        # try to guess the MIME type of the embedded file being referenced
        content_type = mimetypes.guess_type(
            embedded_filepath)[0] or 'application/octet-stream'

        if not os.path.splitext(embedded_filepath)[1] == '.json':
            # generate a streaming response object, pulling data from
            # within the zip file
            # NOTE(review): the FileResponse is consumed after this method
            # returns, i.e. after the "with" closes zf — confirm the
            # framework reads the stream before the ZipFile is closed.
            response = FileResponse(zf.open(info), content_type=content_type)
            file_size = info.file_size
        else:
            # load the stream from json file into memory, replace the
            # path_place_holder.
            content = zf.open(info).read()
            str_to_be_replaced = ('$' + exercises.IMG_PLACEHOLDER).encode()
            zipcontent = ('/' + request.resolver_match.url_name +
                          "/" + zipped_filename).encode()
            content_with_path = content.replace(str_to_be_replaced, zipcontent)
            response = HttpResponse(content_with_path,
                                    content_type=content_type)
            file_size = len(content_with_path)

        # set the last-modified header to the date marked on the embedded file
        if info.date_time:
            response["Last-Modified"] = http_date(
                time.mktime(datetime.datetime(*info.date_time).timetuple()))

        # cache these resources forever; this is safe due to the MD5-naming
        # used on content files
        response["Expires"] = "Sun, 17-Jan-2038 19:14:07 GMT"

        # set the content-length header to the size of the embedded file
        if file_size:
            response["Content-Length"] = file_size

        # ensure the browser knows not to try byte-range requests, as we
        # don't support them here
        response["Accept-Ranges"] = "none"

        # allow all origins so that content can be read from within zips
        # within sandboxed iframes
        response["Access-Control-Allow-Origin"] = "*"

        return response
def webhook(self, request: WSGIRequest, path: str, previous_value) -> HttpResponse:
    """Stub webhook endpoint: acknowledge paid/failed notifications with a
    JSON payload, 404 for any other path."""
    payloads = {
        "/webhook/paid": {"received": True, "paid": True},
        "/webhook/failed": {"received": True, "paid": False},
    }
    payload = payloads.get(path)
    if payload is None:
        return HttpResponseNotFound()
    return JsonResponse(data=payload)
def app_view(request):
    """Placeholder view: always answers 404."""
    return HttpResponseNotFound()
def custom_handler404(request, exception):
    """Project-wide 404 handler (message is user-facing, in Russian)."""
    return HttpResponseNotFound('Ресурс не найден!')
def get(self, request, resourceid=None, formid=None):
    """Return the requested form as JSON; 404 when no formid was supplied."""
    if formid is None:
        return HttpResponseNotFound()
    return JSONResponse(Form(resourceid=resourceid, formid=formid))
def get_response(_response):
    """Test helper: simulate an exception passing through the middleware,
    then hand back a 404."""
    middleware.process_exception(request, exception)
    return HttpResponseNotFound()
def delete(self, request, resourceid=None):
    """Delete the resource instance with the given id and return the deletion
    summary as JSON; 404 when no id was supplied."""
    if resourceid is None:
        return HttpResponseNotFound()
    deletion_summary = models.ResourceInstance.objects.get(pk=resourceid).delete()
    return JSONResponse(deletion_summary)
def custom_handler404(request, exception):
    """Project-wide 404 handler (message is user-facing, in Russian)."""
    return HttpResponseNotFound('Ой, данная страница не найдена((')
def validate_remote_path(request, remote_host_id):
    """Validate a push destination path on a remote SSH host.

    Query params: ``path`` (candidate destination), ``object_type`` +
    ``object_id`` (optional MyTardis object whose size is checked against
    free space).  Returns a JSON document describing the default push
    location and, when ``path`` is given, which leading portion of it
    exists, its subdirectories, free space and whether the object fits.
    404 for unknown objects/hosts, 403 when no usable credential exists.
    """
    response = {}
    path = request.GET.get('path', None)
    tardis_object_type = request.GET.get('object_type', None)
    tardis_object_id = request.GET.get('object_id', None)

    # If an object was named, record its size so space checks can run below.
    if tardis_object_type and tardis_object_id:
        try:
            response['object_size'] = get_object_size(tardis_object_type,
                                                      tardis_object_id)
        except (Experiment.DoesNotExist, Dataset.DoesNotExist, TypeError):
            response['message'] = "Experiment/dataset/datafile does not exist."
            return HttpResponseNotFound(json.dumps(response),
                                        content_type='application/json')

    try:
        remote_host = RemoteHost.objects.get(pk=remote_host_id)
        credential = get_credential(request, remote_host)
        ssh = credential.get_client_for_host(remote_host)
        sftp_client = ssh.open_sftp()

        # Always report the host's default push location with its free
        # space and immediate subdirectories.
        response['default'] = {}
        response['default']['path'] = get_default_push_location(sftp_client)
        response['default']['free_space'] = bytes_available(
            ssh, response['default']['path'])
        response['default']['valid_children'] = list_subdirectories(
            sftp_client, response['default']['path'])
        if 'object_size' in response:
            response['default']['sufficient_space'] = \
                response['default']['free_space'] > response['object_size']

        if path is not None:
            # Walk the requested path component by component; once one
            # component fails to chdir, everything after it is invalid too.
            path_parts = path.split('/')
            valid_parts = []
            invalid_parts = []
            for part in path_parts:
                if not invalid_parts:
                    test_path = '/'.join(valid_parts + [part])
                    try:
                        sftp_client.chdir(test_path)
                        valid_parts.append(part)
                    except IOError:
                        invalid_parts.append(part)
                else:
                    invalid_parts.append(part)

            # Report the split, plus children/space of the valid prefix.
            response[path] = {}
            response[path]['valid_parts'] = '/'.join(valid_parts)
            response[path]['invalid_parts'] = '/'.join(invalid_parts)
            response[path]['valid_children'] = list_subdirectories(
                sftp_client, response[path]['valid_parts'])
            response[path]['free_space'] = bytes_available(
                ssh, response[path]['valid_parts'])
            if 'object_size' in response:
                response[path]['sufficient_space'] = \
                    response[path]['free_space'] > response['object_size']
    except NoSuitableCredential:
        response['message'] = "You don't have access to this host."
        return HttpResponseForbidden(json.dumps(response),
                                     content_type='application/json')
    except RemoteHost.DoesNotExist:
        response['message'] = "Remote host does not exist."
        return HttpResponseNotFound(json.dumps(response),
                                    content_type='application/json')

    return HttpResponse(json.dumps(response),
                        content_type='application/json')
def show(request):
    """Demo view: the hard-coded flag means the 404 branch is always taken."""
    a = 1
    if not a:
        return HttpResponse("<h1>page is found</h1>")
    return HttpResponseNotFound("<h1>page is not found</h1>")
def not_found(request, *args, **kwargs):
    """Catch-all 404 view."""
    return HttpResponseNotFound("<h1>Page not Found</h1>")
def handler(request, index):
    """Render the quote at *index*, or 404 for an out-of-range index.

    BUGFIX: the original only checked ``index < len(quotes)``, so a negative
    index slipped through and Python's negative indexing silently served a
    quote from the end of the list (e.g. -1 -> last quote).
    """
    if 0 <= index < len(quotes):
        return render(request, 'zen.html', {'quote': quotes[index]})
    return HttpResponseNotFound(f'Quote {index} is not found')
def _toggle_boolean(self, request):
    """ Handle an AJAX toggle_boolean request """
    # NOTE: Python 2 code ("except Exception, e", dict.has_key) — kept as-is.
    # NOTE(review): no success-path return is visible in this chunk; the
    # snippet built in `data` appears to be returned further down, outside
    # this view — confirm against the full file.
    # Parse and sanity-check the two required POST fields.
    try:
        item_id = int(request.POST.get('item_id', None))
        attr = str(request.POST.get('attr', None))
    except Exception:
        return HttpResponseBadRequest("Malformed request")

    # Staff-only endpoint; log denied attempts for auditing.
    if not request.user.is_staff:
        logging.warning(
            "Denied AJAX request by non-staff %s to toggle boolean %s for object #%s",
            request.user, attr, item_id)
        return HttpResponseForbidden(
            "You do not have permission to access this object")

    # Refresh the whitelist of attributes that may be toggled via AJAX.
    self._collect_editable_booleans()
    if not self._ajax_editable_booleans.has_key(attr):
        return HttpResponseBadRequest("not a valid attribute %s" % attr)

    try:
        obj = self.model._default_manager.get(pk=item_id)
    except self.model.DoesNotExist:
        return HttpResponseNotFound("Object does not exist")

    # Per-object permission: prefer the object's own user_can hook, else
    # fall back to the admin's has_change_permission.
    can_change = False
    if hasattr(obj, "user_can") and obj.user_can(request.user,
                                                 change_page=True):
        # Was added in c7f04dfb5d, but I've no idea what user_can is about.
        can_change = True
    else:
        can_change = self.has_change_permission(request, obj=obj)

    if not can_change:
        logging.warning(
            "Denied AJAX request by %s to toggle boolean %s for object %s",
            request.user, attr, item_id)
        return HttpResponseForbidden(
            "You do not have permission to access this object")

    logging.info("Processing request by %s to toggle %s on %s",
                 request.user, attr, obj)
    try:
        # Snapshot, flip the boolean, persist, then re-render the snippet.
        before_data = self._ajax_editable_booleans[attr](self, obj)
        setattr(obj, attr, not getattr(obj, attr))
        obj.save()
        self._refresh_changelist_caches(
        )  # ???: Perhaps better a post_save signal?
        # Construct html snippets to send back to client for status update
        data = self._ajax_editable_booleans[attr](self, obj)
    except Exception, e:
        logging.exception("Unhandled exception while toggling %s on %s",
                          attr, obj)
        return HttpResponseServerError("Unable to toggle %s on %s" %
                                       (attr, obj))
def video_encodings_download(request, course_key_string):
    """
    Returns a CSV report containing the encoded video URLs for video uploads
    in the following format:

    Video ID,Name,Status,Profile1 URL,Profile2 URL
    aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa,video.mp4,Complete,http://example.com/prof1.mp4,http://example.com/prof2.mp4
    """
    # 404 when the course doesn't exist or the user may not see it.
    course = _get_and_validate_course(course_key_string, request.user)
    if not course:
        return HttpResponseNotFound()

    def get_profile_header(profile):
        """Returns the column header string for the given profile's URLs"""
        # Translators: This is the header for a CSV file column
        # containing URLs for video encodings for the named profile
        # (e.g. desktop, mobile high quality, mobile low quality)
        return _(u"{profile_name} URL").format(profile_name=profile)

    profile_whitelist = VideoUploadConfig.get_profile_whitelist()
    videos, __ = _get_videos(course)
    videos = list(videos)

    # Localized column headers.
    name_col = _("Name")
    duration_col = _("Duration")
    added_col = _("Date Added")
    video_id_col = _("Video ID")
    status_col = _("Status")
    profile_cols = [get_profile_header(profile)
                    for profile in profile_whitelist]

    def make_csv_dict(video):
        """
        Makes a dictionary suitable for writing CSV output.  This involves
        extracting the required items from the original video dict and
        converting all keys and values to UTF-8 encoded string objects,
        because the CSV module doesn't play well with unicode objects.
        """
        # Translators: This is listed as the duration for a video that has
        # not yet reached the point in its processing by the servers where
        # its duration is determined.
        duration_val = str(video["duration"]) if video["duration"] > 0 else _("Pending")
        # Fixed columns plus one URL column per whitelisted profile.
        ret = dict(
            [
                (name_col, video["client_video_id"]),
                (duration_col, duration_val),
                (added_col, video["created"].isoformat()),
                (video_id_col, video["edx_video_id"]),
                (status_col, video["status"]),
            ]
            + [
                (get_profile_header(encoded_video["profile"]), encoded_video["url"])
                for encoded_video in video["encoded_videos"]
                if encoded_video["profile"] in profile_whitelist
            ]
        )
        # On Python 2 the csv module needs byte strings; on 3, text is fine.
        return {
            key.encode("utf-8") if six.PY2 else key:
                value.encode("utf-8") if six.PY2 else value
            for key, value in ret.items()
        }

    # Write csv to bytes-like object. We need a separate writer and buffer
    # as the csv writer writes str and the FileResponse expects a bytes
    # files.
    buffer = io.BytesIO()
    buffer_writer = codecs.getwriter("utf-8")(buffer)
    writer = csv.DictWriter(
        buffer_writer,
        [
            col_name.encode("utf-8") if six.PY2 else col_name
            for col_name in [name_col, duration_col, added_col,
                             video_id_col, status_col] + profile_cols
        ],
        dialect=csv.excel
    )
    writer.writeheader()
    for video in videos:
        writer.writerow(make_csv_dict(video))
    buffer.seek(0)

    # Translators: This is the suggested filename when downloading the URL
    # listing for videos uploaded through Studio
    filename = _("{course}_video_urls").format(course=course.id.course) + ".csv"
    return FileResponse(buffer, as_attachment=True, filename=filename,
                        content_type="text/csv")
def handler404(*args, **kwargs):
    """404 handler that reports the miss to Sentry before responding."""
    sentry_sdk.capture_message("not found", level="error")
    return HttpResponseNotFound("404")
def top_level(self, request, api_name=None, **kwargs):
    """Top-level API index is disabled: always 404."""
    return HttpResponseNotFound()
rendering_engine = rf.get_primary_tiles_rendering_engine(image_id, conn) try: dzi_metadata = rendering_engine.get_dzi_description( fetch_original_file, file_mimetype, tile_size) except Exception, e: rendering_engine = rf.get_secondary_tiles_rendering_engine( image_id, conn) if rendering_engine: dzi_metadata = rendering_engine.get_dzi_description( fetch_original_file, file_mimetype, tile_size) else: raise e if dzi_metadata: return HttpResponse(dzi_metadata, content_type='application/xml') else: return HttpResponseNotFound('No image with ID %s' % image_id) @login_required() def get_image_json(request, image_id, fetch_original_file=False, file_mimetype=None, conn=None, **kwargs): try: tile_size = int(request.GET.get('tile_size')) except TypeError: tile_size = None rf = RenderingEngineFactory() rendering_engine = rf.get_primary_tiles_rendering_engine(image_id, conn)
def get(self, request, node_id):
    """Return the relay config of the given vmess node as JSON; 404 when the
    node id is unknown."""
    node = VmessNode.get_or_none_by_node_id(node_id)
    if node is None:
        return HttpResponseNotFound()
    return JsonResponse(node.relay_config)
def nfe_emitida(modeladmin, request, queryset):
    """Admin action: mark the selected sales as having their NF-e issued."""
    if not request.user.has_perm('vendas.setar_nfe'):
        # NOTE(review): 404 for a permission failure is unusual — 403
        # (HttpResponseForbidden) would match the message; kept as-is to
        # preserve behavior.
        return HttpResponseNotFound('<h1>Não possui permissao</h1>')
    queryset.update(nfe_emitida=True)