def wrapped( self, url, comp, http_method, request ):
    # Decorator inner: call the wrapped handler ``f`` (closure variable),
    # then translate its result dict into a JSON-bodied webob HTTP
    # exception which is *raised*, not returned.
    #
    # NOTE(review): assumes ``result`` is a dict carrying the
    # Cloudmgrws._EXISTS flag and (on success) Cloudmgrws._CHILDS —
    # confirm against the decorated handlers.
    e = None
    result = f( self, url, comp, http_method, request )
    if not result.get( Cloudmgrws._EXISTS ) :
        # Resource missing -> 404 with a JSON error envelope.
        e = HTTPNotFound()
        e.content_type = 'application/json'
        e.body = format_response( is_ok = False,
                                  information_message = u'''/%s does not exist''' % '/'.join( url ),
                                  has_been_executed = False,
                                  steps = [],
                                  next = [],
                                  datas = [],
                                  accepted_commands = [], )
    else:
        # Resource exists -> 200 with child links and allowed commands.
        e = HTTPOk()
        e.content_type = 'application/json'
        e.body = format_response( is_ok = True,
                                  information_message = '',
                                  has_been_executed = False,
                                  steps = [],
                                  next = result[ Cloudmgrws._CHILDS ],
                                  datas = [],
                                  accepted_commands = Cloudmgrws.get_accepted_commands_for( url ).keys(), )
    # Raising the response object lets upstream WSGI machinery serve it.
    raise e
def __call__(self, env, start_response):
    """WSGI entry point: reload relay rules, pick a location prefix for
    the request, and dispatch it in merge or normal mode.

    Returns the response body (PUT/POST and error paths) or the
    response ``app_iter`` when one is available.
    """
    req = Request(env)
    # Refresh routing rules on every request; age == 0 signals the
    # reload produced nothing usable, so stale rules stay in effect.
    self.loc.reload()
    if self.loc.age == 0:
        self.logger.warn('dispatcher relay rule is invalid, using old rules now.')
    loc_prefix = self.location_check(req)
    if not self.loc.has_location(loc_prefix):
        # Unknown prefix: answer 404 directly without dispatching.
        resp = HTTPNotFound(request=req)
        start_response(resp.status, resp.headerlist)
        return resp.body
    if self.loc.is_merged(loc_prefix):
        self.logger.debug('enter merge mode')
        resp = self.dispatch_in_merge(req, loc_prefix)
    else:
        self.logger.debug('enter normal mode')
        resp = self.dispatch_in_normal(req, loc_prefix)
    # Tag every dispatched response so it can be traced back here.
    resp.headers['x-colony-dispatcher'] = 'dispatcher processed'
    start_response(resp.status, resp.headerlist)
    if req.method in ('PUT', 'POST'):
        return resp.body
    # Prefer the streaming iterator when the backend supplied one.
    return resp.app_iter \
        if resp.app_iter is not None \
        else resp.body
def _get_from_store(context, where):
    # Fetch image data/size from the backing store identified by
    # ``where``; a missing image surfaces to the caller as an HTTP 404
    # whose explanation carries the backend error text.
    # (Python 2 ``except X, e`` syntax — file predates Python 3.)
    try:
        image_data, image_size = get_from_backend(context, where)
    except exception.NotFound, e:
        raise HTTPNotFound(explanation="%s" % e)
def _check_not_found(self, item):
    """Guard helper: translate a missing (``None``) item into a 404."""
    if item is not None:
        return
    raise HTTPNotFound()
def __call__(self, request: Request):
    """Delegate *request* to the first router whose ``match`` succeeds.

    :raises HTTPNotFound: when every registered router declines.
    """
    for candidate in self._ROUTERS:
        outcome = candidate.match(request)
        if outcome:
            return outcome
    raise HTTPNotFound('<h1>Error Page</h1>')
def __call__(self, request:Request):
    """Try each registered router in order; the first truthy match wins.

    :raises HTTPNotFound: when no router matches the request.
    """
    for candidate in self.ROUTERS:
        matched = candidate.match(request)
        if matched:
            return matched
    raise HTTPNotFound("<h1>该网页被外星人劫持</h1>")
def entry_list_post(self, request):
    '''
    Mark feed|all entries as read.

    GET renders a confirmation template; POST expects ``before`` (a
    Unix timestamp) and marks every unread entry fetched before that
    moment as read, scoped to one feed when ``feed`` is given.
    '''
    feed_id = int(request.GET.get('feed', 0))
    if request.method == 'GET':
        # ``now`` is intentionally unused here — it is handed to the
        # template through locals().
        now = datetime.utcnow()
        return self.respond_with_template(
            '_entries_mark_%s_read.html' % (
                'feed' if feed_id else 'all'), locals())

    # Handle postback
    try:
        before = datetime.utcfromtimestamp(int(request.POST['before']))
    except (KeyError, ValueError):
        raise HTTPBadRequest('Missing parameter before=time')

    if feed_id:
        try:
            feed = Feed.get((Feed.id == feed_id))
        except Feed.DoesNotExist:
            raise HTTPNotFound('No such feed %s' % feed_id)
        q = Entry.select(Entry).join(Feed).join(Subscription).where(
            (Subscription.user == self.user) &
            # Exclude entries already marked as read
            ~(Entry.id << Read.select(
                Read.entry).where(Read.user == self.user)) &
            # Filter by current feed
            (Entry.feed == feed) &
            # Exclude entries fetched after the page load
            (Feed.last_checked_on < before)
        ).distinct()
        message = 'SUCCESS Feed has been marked as read'
        redirect_url = '%s/entries/?feed=%s' % (
            request.application_url, feed_id)
    else:
        q = Entry.select(Entry).join(Feed).join(Subscription).where(
            (Subscription.user == self.user) &
            # Exclude entries already marked as read
            ~(Entry.id << Read.select(
                Read.entry).where(Read.user == self.user)) &
            # Exclude entries fetched after the page load
            (Feed.last_checked_on < before)
        ).distinct()
        message = 'SUCCESS All entries have been marked as read'
        redirect_url = '%s/entries/?unread' % request.application_url

    # @@TODO: Use insert_many()
    with transaction():
        for entry in q:
            try:
                Read.create(user=self.user, entry=entry)
            except IntegrityError:
                # Row already exists (concurrent mark): harmless, skip.
                logger.debug(
                    'entry %d already marked as read, ignored' % entry.id)
                continue

    self.alert_message = message
    return self.respond_with_script(
        '_modal_done.js', {'location': redirect_url})
def name_not_found(self, request):
    # Fallback view: the requested name could not be resolved,
    # so answer with HTTP 404.
    raise HTTPNotFound()
def _handle_request(self, environ, start_response):
    """Authenticate and authorize a VCS (pull/push) request, then hand
    it to the SCM backend.

    Flow: SSL check -> repo-name extraction -> repo existence check ->
    anonymous permission check -> (if needed) container/basic auth and
    per-repo permission check -> generate the VCS response.
    """
    if not self._check_ssl(environ, start_response):
        reason = ('SSL required, while RhodeCode was unable '
                  'to detect this as SSL request')
        log.debug('User not allowed to proceed, %s', reason)
        return HTTPNotAcceptable(reason)(environ, start_response)

    ip_addr = get_ip_addr(environ)
    username = None

    # skip passing error to error controller
    environ['pylons.status_code_redirect'] = True

    # ======================================================================
    # EXTRACT REPOSITORY NAME FROM ENV
    # ======================================================================
    environ['PATH_INFO'] = self._get_by_id(environ['PATH_INFO'])
    repo_name = self._get_repository_name(environ)
    environ['REPO_NAME'] = repo_name
    log.debug('Extracted repo name is %s', repo_name)

    # check for type, presence in database and on filesystem
    if not self.is_valid_and_existing_repo(repo_name, self.basepath,
                                           self.SCM):
        return HTTPNotFound()(environ, start_response)

    # ======================================================================
    # GET ACTION PULL or PUSH
    # ======================================================================
    action = self._get_action(environ)

    # ======================================================================
    # CHECK ANONYMOUS PERMISSION
    # ======================================================================
    if action in ['pull', 'push']:
        anonymous_user = User.get_default_user()
        username = anonymous_user.username
        if anonymous_user.active:
            # ONLY check permissions if the user is activated
            anonymous_perm = self._check_permission(
                action, anonymous_user, repo_name, ip_addr)
        else:
            anonymous_perm = False

        if not anonymous_user.active or not anonymous_perm:
            if not anonymous_user.active:
                log.debug('Anonymous access is disabled, running '
                          'authentication')
            if not anonymous_perm:
                log.debug('Not enough credentials to access this '
                          'repository as anonymous user')

            username = None
            # ==============================================================
            # DEFAULT PERM FAILED OR ANONYMOUS ACCESS IS DISABLED SO WE
            # NEED TO AUTHENTICATE AND ASK FOR AUTH USER PERMISSIONS
            # ==============================================================

            # try to auth based on environ, container auth methods
            log.debug(
                'Running PRE-AUTH for container based authentication')
            pre_auth = authenticate('', '', environ, VCS_TYPE)
            if pre_auth and pre_auth.get('username'):
                username = pre_auth['username']
            log.debug('PRE-AUTH got %s as username', username)

            # If not authenticated by the container, running basic auth
            if not username:
                self.authenticate.realm = \
                    safe_str(self.config['rhodecode_realm'])
                try:
                    result = self.authenticate(environ)
                except (UserCreationError,
                        NotAllowedToCreateUserError) as e:
                    log.error(e)
                    reason = safe_str(e)
                    return HTTPNotAcceptable(reason)(environ,
                                                     start_response)

                if isinstance(result, str):
                    # Basic auth succeeded: record auth type and user.
                    AUTH_TYPE.update(environ, 'basic')
                    REMOTE_USER.update(environ, result)
                    username = result
                else:
                    # ``result`` is a challenge/denial WSGI app.
                    return result.wsgi_application(environ,
                                                   start_response)

            # ==============================================================
            # CHECK PERMISSIONS FOR THIS REQUEST USING GIVEN USERNAME
            # ==============================================================
            user = User.get_by_username(username)
            if not self.valid_and_active_user(user):
                return HTTPForbidden()(environ, start_response)
            username = user.username
            user.update_lastactivity()
            meta.Session().commit()

            # check user attributes for password change flag
            user_obj = user
            if user_obj and user_obj.username != User.DEFAULT_USER and \
                    user_obj.user_data.get('force_password_change'):
                reason = 'password change required'
                log.debug('User not allowed to authenticate, %s', reason)
                return HTTPNotAcceptable(reason)(environ, start_response)

            # check permissions for this repository
            perm = self._check_permission(action, user, repo_name,
                                          ip_addr)
            if not perm:
                return HTTPForbidden()(environ, start_response)

    # extras are injected into UI object and later available
    # in hooks executed by rhodecode
    check_locking = _should_check_locking(environ.get('QUERY_STRING'))
    extras = vcs_operation_context(environ, repo_name=repo_name,
                                   username=username, action=action,
                                   scm=self.SCM,
                                   check_locking=check_locking)

    # ======================================================================
    # REQUEST HANDLING
    # ======================================================================
    str_repo_name = safe_str(repo_name)
    repo_path = os.path.join(safe_str(self.basepath), str_repo_name)
    log.debug('Repository path is %s', repo_path)

    fix_PATH()
    log.info('%s action on %s repo "%s" by "%s" from %s',
             action, self.SCM, str_repo_name, safe_str(username), ip_addr)
    return self._generate_vcs_response(environ, start_response,
                                       repo_path, repo_name, extras,
                                       action)
def __before__(self):
    """Run the parent pre-request hook, then answer 404 unless the
    current repository is a Mercurial one."""
    super(BookmarksController, self).__before__()
    is_mercurial = h.is_hg(c.rhodecode_repo)
    if not is_mercurial:
        raise HTTPNotFound()
def handle_request(self, req):
    """
    Entry point for proxy server.
    Should return a WSGI-style callable (such as webob.Response).

    :param req: webob.Request object
    """
    try:
        self.logger.set_statsd_prefix('proxy-server')
        # Reject bogus negative Content-Length up front.
        if req.content_length and req.content_length < 0:
            self.logger.increment('errors')
            return HTTPBadRequest(request=req,
                                  body='Invalid Content-Length')
        try:
            if not check_utf8(req.path_info):
                self.logger.increment('errors')
                return HTTPPreconditionFailed(request=req,
                                              body='Invalid UTF8')
        except UnicodeError:
            self.logger.increment('errors')
            return HTTPPreconditionFailed(request=req,
                                          body='Invalid UTF8')
        try:
            controller, path_parts = self.get_controller(req.path)
            p = req.path_info
            if isinstance(p, unicode):
                p = p.encode('utf-8')
        except ValueError:
            # Path did not split into a known controller route.
            self.logger.increment('errors')
            return HTTPNotFound(request=req)
        if not controller:
            self.logger.increment('errors')
            return HTTPPreconditionFailed(request=req, body='Bad URL')
        if self.deny_host_headers and \
                req.host.split(':')[0] in self.deny_host_headers:
            return HTTPForbidden(request=req, body='Invalid host header')
        self.logger.set_statsd_prefix('proxy-server.' +
                                      controller.server_type.lower())
        controller = controller(self, **path_parts)
        if 'swift.trans_id' not in req.environ:
            # if this wasn't set by an earlier middleware, set it now
            trans_id = 'tx' + uuid.uuid4().hex
            req.environ['swift.trans_id'] = trans_id
            self.logger.txn_id = trans_id
        req.headers['x-trans-id'] = req.environ['swift.trans_id']
        controller.trans_id = req.environ['swift.trans_id']
        self.logger.client_ip = get_remote_client(req)
        try:
            # Only methods explicitly marked publicly_accessible may be
            # reached from the outside; anything else is 405.
            handler = getattr(controller, req.method)
            getattr(handler, 'publicly_accessible')
        except AttributeError:
            return HTTPMethodNotAllowed(request=req)
        if path_parts['version']:
            req.path_info_pop()
        if 'swift.authorize' in req.environ:
            # We call authorize before the handler, always. If
            # authorized, we remove the swift.authorize hook so isn't
            # ever called again. If not authorized, we return the denial
            # unless the controller's method indicates it'd like to
            # gather more information and try again later.
            resp = req.environ['swift.authorize'](req)
            if not resp:
                # No resp means authorized, no delayed recheck required.
                del req.environ['swift.authorize']
            else:
                # Response indicates denial, but we might delay the
                # denial and recheck later. If not delayed, return the
                # error now.
                if not getattr(handler, 'delay_denial', None):
                    return resp
        # Save off original request method (GET, POST, etc.) in case it
        # gets mutated during handling. This way logging can display
        # the method the client actually sent.
        req.environ['swift.orig_req_method'] = req.method
        return handler(req)
    except (Exception, Timeout):
        # Last-resort catch-all: never let an exception escape the
        # proxy entry point; log it and answer 500.
        self.logger.exception(_('ERROR Unhandled exception in request'))
        return HTTPServerError(request=req)
def __call__(self, request):
    """Find a handler for *request* and invoke it; 404 when none match."""
    for candidate in self.routers:
        found = candidate.match(request)
        if found:
            return found(self, request)
    raise HTTPNotFound(detail='no handler match')
def get(self):
    """Serve the regex test page for paths ending in ``/test/``.

    :raises HTTPNotFound: for any other path.
    """
    requested = self.request.path_info
    if requested.endswith('/test/'):
        return self.test_regex()
    raise HTTPNotFound('Not Implemented Yet')
def __call__(self, request: Request):
    """Route *request* by exact path lookup in the route table.

    :param request: incoming request; ``request.path`` is the key.
    :returns: whatever the matched handler returns.
    :raises HTTPNotFound: when no route is registered for the path.
    """
    try:
        handler = self._ROUTE.ROUTETABLE[request.path]
    except KeyError:
        # Only a missing route is a 404. The previous bare ``except``
        # also swallowed exceptions raised *inside* handlers and turned
        # them into 404s, hiding real bugs from the caller.
        raise HTTPNotFound('<h1>ERROR PAGE</h1>')
    return handler(request)
def view(self, request):
    """Raise a 404 after registering an after-hook that stamps a
    ``Foo: FOO`` header onto the outgoing response."""
    @request.after
    def add_foo(response):
        response.headers.add('Foo', 'FOO')
    raise HTTPNotFound()
def serve(self, id, download=False, **kwargs):
    """Serve a :class:`~mediacore.model.media.MediaFile` binary.

    :param id: File ID
    :type id: ``int``
    :param bool download: If true, serve with an Content-Disposition that
        makes the file download to the users computer instead of playing
        in the browser.
    :raises webob.exc.HTTPNotFound: If no file exists with this ID.
    :raises webob.exc.HTTPNotAcceptable: If an Accept header field is
        present, and if the mimetype of the requested file doesn't match,
        then a 406 (not acceptable) response is returned.
    """
    file = fetch_row(MediaFile, id=id)
    file_type = file.mimetype.encode('utf-8')
    file_name = file.display_name.encode('utf-8')
    file_path = helpers.file_path(file)
    if file_path is None:
        log.warn('No path exists for requested media file: %r', file)
        raise HTTPNotFound().exception
    file_path = file_path.encode('utf-8')
    if not os.path.exists(file_path):
        log.warn('No such file or directory: %r', file_path)
        raise HTTPNotFound().exception

    # Ensure the request accepts files with this container
    accept = request.environ.get('HTTP_ACCEPT', '*/*')
    if not mimeparse.best_match([file_type], accept):
        raise HTTPNotAcceptable().exception  # 406

    method = config.get('file_serve_method', None)
    headers = []

    # Serving files with this header breaks playback on iPhone
    if download:
        headers.append(('Content-Disposition',
                        'attachment; filename="%s"' % file_name))

    if method == 'apache_xsendfile':
        # Requires mod_xsendfile for Apache 2.x
        # XXX: Don't send Content-Length or Etag headers,
        #      Apache handles them for you.
        response.headers['X-Sendfile'] = file_path
        response.body = ''
    elif method == 'nginx_redirect':
        # Requires NGINX server configuration:
        # NGINX must have a location block configured that matches
        # the __mediacore_serve__ path below. It should also be
        # configured as an "internal" location to prevent people from
        # surfing directly to it.
        # For more information see: http://wiki.nginx.org/XSendfile
        redirect_filename = '/__mediacore_serve__/%s' % os.path.basename(
            file_path)
        response.headers['X-Accel-Redirect'] = redirect_filename
    else:
        # Fallback: stream the file through the app itself.
        app = FileApp(file_path, headers, content_type=file_type)
        return forward(app)

    response.headers['Content-Type'] = file_type
    for header, value in headers:
        response.headers[header] = value
    return None
def model_not_found(self, request):
    # Fallback view: the requested model could not be found,
    # so answer with HTTP 404.
    raise HTTPNotFound()
def __call__(self, environ, start_response):
    """The main call handler that is called to return a response.

    Dispatches the routed action, then normalizes whatever the
    controller returned (str, unicode, legacy WSGIResponse, None, or an
    iterable) into the global pylons.response / a WSGI result.
    """
    log_debug = self._pylons_log_debug

    # Keep a local reference to the req/response objects
    self._py_object = environ['pylons.pylons']

    # Keep private methods private
    try:
        if environ['pylons.routes_dict']['action'][:1] in ('_', '-'):
            if log_debug:
                log.debug("Action starts with _, private action not "
                          "allowed. Returning a 404 response")
            return HTTPNotFound()(environ, start_response)
    except KeyError:
        # The check later will notice that there's no action
        pass

    start_response_called = []

    def repl_start_response(status, headers, exc_info=None):
        # Wrapper around start_response that merges headers from the
        # global pylons.response (cookies / X- headers) into the call.
        response = self._py_object.response
        start_response_called.append(None)

        # Copy the headers from the global response
        if log_debug:
            log.debug("Merging pylons.response headers into "
                      "start_response call, status: %s", status)
        for header in response.headerlist:
            if header[0] == 'Set-Cookie' or header[0].startswith('X-'):
                headers.append(header)
        return start_response(status, headers, exc_info)
    self.start_response = repl_start_response

    if hasattr(self, '__before__'):
        response = self._inspect_call(self.__before__)
        if hasattr(response, '_exception'):
            return response(environ, self.start_response)

    response = self._dispatch_call()
    if not start_response_called:
        self.start_response = start_response
        py_response = self._py_object.response

        # If its not a WSGI response, and we have content, it needs to
        # be wrapped in the response object
        if isinstance(response, str):
            if log_debug:
                log.debug("Controller returned a string "
                          ", writing it to pylons.response")
            py_response.body = py_response.body + response
        elif isinstance(response, unicode):
            if log_debug:
                log.debug("Controller returned a unicode string "
                          ", writing it to pylons.response")
            py_response.unicode_body = py_response.unicode_body + \
                response
        elif hasattr(response, 'wsgi_response'):
            # It's either a legacy WSGIResponse object, or an exception
            # that got tossed.
            if log_debug:
                log.debug("Controller returned a Response object, merging "
                          "it with pylons.response")
            if response is pylons.response:
                # Only etag_cache() returns pylons.response
                # (deprecated). Unwrap it to avoid a recursive loop
                # (see ticket #508)
                response = response._current_obj()
                warnings.warn(pylons.legacy.response_warning,
                              DeprecationWarning, 1)

            for name, value in py_response.headers.items():
                if name.lower() == 'set-cookie':
                    response.headers.add(name, value)
                else:
                    response.headers.setdefault(name, value)
            try:
                registry = environ['paste.registry']
                registry.replace(pylons.response, response)
            except KeyError:
                # Ignore the case when someone removes the registry
                pass
            py_response = response
        elif response is None:
            if log_debug:
                log.debug("Controller returned None")
        else:
            if log_debug:
                log.debug("Assuming controller returned an iterable, "
                          "setting it as pylons.response.app_iter")
            py_response.app_iter = response
        response = py_response

    if hasattr(self, '__after__'):
        after = self._inspect_call(self.__after__)
        if hasattr(after, '_exception'):
            return after(environ, self.start_response)

    if hasattr(response, 'wsgi_response'):
        # Copy the response object into the testing vars if we're testing
        if 'paste.testing_variables' in environ:
            environ['paste.testing_variables']['response'] = response
        if log_debug:
            log.debug("Calling Response object to return WSGI data")
        return response(environ, self.start_response)

    if log_debug:
        log.debug("Response assumed to be WSGI content, returning "
                  "un-touched")
    return response
class ContainerController(object):
    """WSGI Controller for the container server."""

    # Ensure these are all lowercase
    save_headers = ['x-container-read', 'x-container-write',
                    'x-container-sync-key', 'x-container-sync-to']

    def __init__(self, conf):
        # Build controller state from the server config dict; all
        # values have sensible defaults so an empty conf works.
        self.logger = get_logger(conf, log_route='container-server')
        self.root = conf.get('devices', '/srv/node/')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()]
        self.replicator_rpc = ReplicatorRpc(
            self.root, DATADIR, ContainerBroker, self.mount_check,
            logger=self.logger)
        self.auto_create_account_prefix = \
            conf.get('auto_create_account_prefix') or '.'

    def _get_container_broker(self, drive, part, account, container):
        """
        Get a DB broker for the container.

        :param drive: drive that holds the container
        :param part: partition the container is in
        :param account: account name
        :param container: container name
        :returns: ContainerBroker object
        """
        hsh = hash_path(account, container)
        db_dir = storage_directory(DATADIR, part, hsh)
        db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
        return ContainerBroker(db_path, account=account,
                               container=container, logger=self.logger)

    def account_update(self, req, account, container, broker):
        """
        Update the account server with latest container info.

        :param req: webob.Request object
        :param account: account name
        :param container: container name
        :param broker: container DB broker object
        :returns: if the account request returns a 404 error code,
                  HTTPNotFound response object, otherwise None.
        """
        account_host = req.headers.get('X-Account-Host')
        account_partition = req.headers.get('X-Account-Partition')
        account_device = req.headers.get('X-Account-Device')
        # Only attempt the update when the proxy told us where the
        # account lives; otherwise silently do nothing.
        if all([account_host, account_partition, account_device]):
            account_ip, account_port = account_host.rsplit(':', 1)
            new_path = '/' + '/'.join([account, container])
            info = broker.get_info()
            account_headers = {
                'x-put-timestamp': info['put_timestamp'],
                'x-delete-timestamp': info['delete_timestamp'],
                'x-object-count': info['object_count'],
                'x-bytes-used': info['bytes_used'],
                'x-trans-id': req.headers.get('x-trans-id', '-')}
            if req.headers.get('x-account-override-deleted',
                               'no').lower() == 'yes':
                account_headers['x-account-override-deleted'] = 'yes'
            try:
                with ConnectionTimeout(self.conn_timeout):
                    conn = http_connect(
                        account_ip, account_port, account_device,
                        account_partition, 'PUT', new_path,
                        account_headers)
                with Timeout(self.node_timeout):
                    account_response = conn.getresponse()
                    account_response.read()
                    if account_response.status == 404:
                        return HTTPNotFound(request=req)
                    elif account_response.status < 200 or \
                            account_response.status > 299:
                        self.logger.error(
                            _('ERROR Account update failed '
                              'with %(ip)s:%(port)s/%(device)s (will retry '
                              'later): Response %(status)s %(reason)s'),
                            {'ip': account_ip, 'port': account_port,
                             'device': account_device,
                             'status': account_response.status,
                             'reason': account_response.reason})
            except (Exception, Timeout):
                # Best-effort update: failure is logged, not raised.
                self.logger.exception(
                    _('ERROR account update failed with '
                      '%(ip)s:%(port)s/%(device)s (will retry later)'),
                    {'ip': account_ip, 'port': account_port,
                     'device': account_device})
        return None

    def DELETE(self, req):
        """Handle HTTP DELETE request."""
        try:
            drive, part, account, container, obj = split_path(
                unquote(req.path), 4, 5, True)
        except ValueError, err:
            return HTTPBadRequest(body=str(err),
                                  content_type='text/plain', request=req)
        if 'x-timestamp' not in req.headers or \
                not check_float(req.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=req,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_container_broker(drive, part, account,
                                            container)
        # Auto-create the container DB for system accounts when an
        # object delete arrives before the container exists.
        if account.startswith(self.auto_create_account_prefix) and obj and \
                not os.path.exists(broker.db_file):
            broker.initialize(
                normalize_timestamp(
                    req.headers.get('x-timestamp') or time.time()))
        if not os.path.exists(broker.db_file):
            return HTTPNotFound()
        if obj:
            # delete object
            broker.delete_object(obj, req.headers.get('x-timestamp'))
            return HTTPNoContent(request=req)
        else:
            # delete container
            if not broker.empty():
                return HTTPConflict(request=req)
            existed = float(broker.get_info()['put_timestamp']) and \
                not broker.is_deleted()
            broker.delete_db(req.headers['X-Timestamp'])
            if not broker.is_deleted():
                return HTTPConflict(request=req)
            resp = self.account_update(req, account, container, broker)
            if resp:
                return resp
            if existed:
                return HTTPNoContent(request=req)
            return HTTPNotFound()
def view(self, request):
    """Return a 404 response after registering an after-hook that
    stamps a ``Foo: FOO`` header onto the outgoing response."""
    @request.after
    def add_foo(response):
        response.headers.add("Foo", "FOO")
    return HTTPNotFound()
def _index(self, revision, method):
    """Shared implementation for the changeset views.

    :param revision: single revision id or a ``start...end`` range.
    :param method: one of 'show', 'download', 'patch', 'raw' — selects
        both how diffs are rendered and which template/payload is
        returned.
    :raises HTTPNotFound: when the revision (range) does not exist.
    """
    c.pull_request = None
    c.anchor_url = anchor_url
    c.ignorews_url = _ignorews_url
    c.context_url = _context_url
    c.fulldiff = fulldiff = request.GET.get('fulldiff')
    # get ranges of revisions if preset
    rev_range = revision.split('...')[:2]
    enable_comments = True
    c.cs_repo = c.db_repo
    try:
        if len(rev_range) == 2:
            # Range view: comments are disabled for multi-changeset diffs.
            enable_comments = False
            rev_start = rev_range[0]
            rev_end = rev_range[1]
            rev_ranges = c.db_repo_scm_instance.get_changesets(
                start=rev_start, end=rev_end)
        else:
            rev_ranges = [c.db_repo_scm_instance.get_changeset(revision)]

        c.cs_ranges = list(rev_ranges)
        if not c.cs_ranges:
            raise RepositoryError('Changeset range returned empty result')
    except (ChangesetDoesNotExistError, EmptyRepositoryError):
        log.debug(traceback.format_exc())
        msg = _('Such revision does not exist for this repository')
        h.flash(msg, category='error')
        raise HTTPNotFound()

    c.changes = OrderedDict()
    c.lines_added = 0  # count of lines added
    c.lines_deleted = 0  # count of lines removes
    c.changeset_statuses = ChangesetStatus.STATUSES
    comments = dict()
    c.statuses = []
    c.inline_comments = []
    c.inline_cnt = 0

    # Iterate over ranges (default changeset view is always one changeset)
    for changeset in c.cs_ranges:
        if method == 'show':
            c.statuses.extend([ChangesetStatusModel().get_status(
                c.db_repo.repo_id, changeset.raw_id)])

            # Changeset comments
            comments.update((com.comment_id, com)
                            for com in ChangesetCommentsModel()
                            .get_comments(c.db_repo.repo_id,
                                          revision=changeset.raw_id))

            # Status change comments - mostly from pull requests
            comments.update((st.comment_id, st.comment)
                            for st in ChangesetStatusModel()
                            .get_statuses(c.db_repo.repo_id,
                                          changeset.raw_id,
                                          with_revisions=True)
                            if st.comment_id is not None)

            inlines = ChangesetCommentsModel() \
                .get_inline_comments(c.db_repo.repo_id,
                                     revision=changeset.raw_id)
            c.inline_comments.extend(inlines)

        cs2 = changeset.raw_id
        cs1 = changeset.parents[0].raw_id if changeset.parents \
            else EmptyChangeset().raw_id
        context_lcl = get_line_ctx('', request.GET)
        ign_whitespace_lcl = get_ignore_ws('', request.GET)

        _diff = c.db_repo_scm_instance.get_diff(
            cs1, cs2, ignore_whitespace=ign_whitespace_lcl,
            context=context_lcl)
        diff_limit = self.cut_off_limit if not fulldiff else None
        diff_processor = diffs.DiffProcessor(
            _diff, vcs=c.db_repo_scm_instance.alias, format='gitdiff',
            diff_limit=diff_limit)
        file_diff_data = []
        if method == 'show':
            _parsed = diff_processor.prepare()
            c.limited_diff = False
            if isinstance(_parsed, LimitedDiffContainer):
                c.limited_diff = True
            for f in _parsed:
                st = f['stats']
                c.lines_added += st['added']
                c.lines_deleted += st['deleted']
                filename = f['filename']
                fid = h.FID(changeset.raw_id, filename)
                url_fid = h.FID('', filename)
                diff = diff_processor.as_html(
                    enable_comments=enable_comments, parsed_lines=[f])
                file_diff_data.append((fid, url_fid, f['operation'],
                                       f['old_filename'], filename,
                                       diff, st))
        else:
            # downloads/raw we only need RAW diff nothing else
            diff = diff_processor.as_raw()
            # NOTE(review): this tuple has 6 elements while the 'show'
            # branch appends 7 — confirm template/consumers expect the
            # shorter shape here.
            file_diff_data.append(('', None, None, None, diff, None))
        c.changes[changeset.raw_id] = (cs1, cs2, file_diff_data)

    # sort comments in creation order
    c.comments = [com for com_id, com in sorted(comments.items())]

    # count inline comments
    for __, lines in c.inline_comments:
        for comments in lines.values():
            c.inline_cnt += len(comments)

    if len(c.cs_ranges) == 1:
        c.changeset = c.cs_ranges[0]
        c.parent_tmpl = ''.join(['# Parent %s\n' % x.raw_id
                                 for x in c.changeset.parents])

    if method == 'download':
        response.content_type = 'text/plain'
        response.content_disposition = 'attachment; filename=%s.diff' \
            % revision[:12]
        return diff
    elif method == 'patch':
        response.content_type = 'text/plain'
        c.diff = safe_unicode(diff)
        return render('changeset/patch_changeset.html')
    elif method == 'raw':
        response.content_type = 'text/plain'
        return diff
    elif method == 'show':
        self.__load_data()
        if len(c.cs_ranges) == 1:
            return render('changeset/changeset.html')
        else:
            c.cs_ranges_org = None
            c.cs_comments = {}
            revs = [ctx.revision for ctx in reversed(c.cs_ranges)]
            c.jsdata = graph_data(c.db_repo_scm_instance, revs)
            return render('changeset/changeset_range.html')
def default(self, trans, target1=None, target2=None, **kwd):
    """
    Called on any url that does not match a controller method.

    Always raises a 404; the message text is surfaced to the client.
    """
    raise HTTPNotFound('This link may not be followed from within Galaxy.')
class ObjectController(object):
    """Implements the WSGI application for the Swift Object Server."""

    def __init__(self, conf):
        """
        Creates a new WSGI application for the Swift Object Server. An
        example configuration is given at
        <source-dir>/etc/object-server.conf-sample or
        /etc/swift/object-server.conf-sample.
        """
        self.logger = get_logger(conf, log_route='object-server')
        self.devices = conf.get('devices', '/srv/node/')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            TRUE_VALUES
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.keep_cache_size = int(conf.get('keep_cache_size', 5242880))
        self.keep_cache_private = \
            conf.get('keep_cache_private', 'false').lower() in TRUE_VALUES
        self.log_requests = \
            conf.get('log_requests', 'true').lower() in TRUE_VALUES
        self.max_upload_time = int(conf.get('max_upload_time', 86400))
        self.slow = int(conf.get('slow', 0))
        self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
        # Comma-separated list; entries are stripped and lowercased
        # below, so internal whitespace in the default is harmless.
        default_allowed_headers = '''
            content-disposition,
            content-encoding,
            x-delete-at,
            x-object-manifest,
        '''
        self.allowed_headers = set(
            i.strip().lower() for i in
            conf.get('allowed_headers', default_allowed_headers).split(',')
            if i.strip() and i.strip().lower() not in DISALLOWED_HEADERS)
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            'expiring_objects'
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)

    def async_update(self, op, account, container, obj, host, partition,
                     contdevice, headers_out, objdevice):
        """
        Sends or saves an async update.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param host: host that the container is on
        :param partition: partition that the container is on
        :param contdevice: device name that the container is on
        :param headers_out: dictionary of headers to send in the container
                            request
        :param objdevice: device name that the object is in
        """
        full_path = '/%s/%s/%s' % (account, container, obj)
        if all([host, partition, contdevice]):
            try:
                with ConnectionTimeout(self.conn_timeout):
                    ip, port = host.rsplit(':', 1)
                    conn = http_connect(ip, port, contdevice, partition,
                                        op, full_path, headers_out)
                with Timeout(self.node_timeout):
                    response = conn.getresponse()
                    response.read()
                    if is_success(response.status):
                        # Synchronous update succeeded; nothing to save.
                        return
                    else:
                        self.logger.error(
                            _('ERROR Container update failed '
                              '(saving for async update later): %(status)d '
                              'response from %(ip)s:%(port)s/%(dev)s'),
                            {'status': response.status, 'ip': ip,
                             'port': port, 'dev': contdevice})
            except (Exception, Timeout):
                self.logger.exception(
                    _('ERROR container update failed with '
                      '%(ip)s:%(port)s/%(dev)s (saving for async update later)'
                      ), {'ip': ip, 'port': port, 'dev': contdevice})
        # Fall through: persist the update as a pending pickle so the
        # object-updater daemon can replay it later.
        async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
        ohash = hash_path(account, container, obj)
        self.logger.increment('async_pendings')
        write_pickle(
            {'op': op, 'account': account, 'container': container,
             'obj': obj, 'headers': headers_out},
            os.path.join(
                async_dir, ohash[-3:], ohash + '-' +
                normalize_timestamp(headers_out['x-timestamp'])),
            os.path.join(self.devices, objdevice, 'tmp'))

    def container_update(self, op, account, container, obj, headers_in,
                         headers_out, objdevice):
        """
        Update the container when objects are updated.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param headers_in: dictionary of headers from the original request
        :param headers_out: dictionary of headers to send in the container
                            request
        :param objdevice: device name that the object is in
        """
        host = headers_in.get('X-Container-Host', None)
        partition = headers_in.get('X-Container-Partition', None)
        contdevice = headers_in.get('X-Container-Device', None)
        # Without full routing info from the proxy there is nothing to
        # update.
        if not all([host, partition, contdevice]):
            return
        self.async_update(op, account, container, obj, host, partition,
                          contdevice, headers_out, objdevice)

    def delete_at_update(self, op, delete_at, account, container, obj,
                         headers_in, objdevice):
        """
        Update the expiring objects container when objects are updated.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param delete_at: scheduled delete-at timestamp for the object
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param headers_in: dictionary of headers from the original request
        :param objdevice: device name that the object is in
        """
        # Quick cap that will work from now until Sat Nov 20 17:46:39 2286
        # At that time, Swift will be so popular and pervasive I will have
        # created income for thousands of future programmers.
        delete_at = max(min(delete_at, 9999999999), 0)
        host = partition = contdevice = None
        headers_out = {'x-timestamp': headers_in['x-timestamp'],
                       'x-trans-id': headers_in.get('x-trans-id', '-')}
        if op != 'DELETE':
            host = headers_in.get('X-Delete-At-Host', None)
            partition = headers_in.get('X-Delete-At-Partition', None)
            contdevice = headers_in.get('X-Delete-At-Device', None)
            headers_out['x-size'] = '0'
            headers_out['x-content-type'] = 'text/plain'
            headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
        # Expiring entries are bucketed into per-divisor containers in
        # the dedicated expiring-objects account.
        self.async_update(
            op, self.expiring_objects_account,
            str(delete_at / self.expiring_objects_container_divisor *
                self.expiring_objects_container_divisor),
            '%s-%s/%s/%s' % (delete_at, account, container, obj),
            host, partition, contdevice, headers_out, objdevice)

    @public
    def POST(self, request):
        """Handle HTTP POST requests for the Swift Object Server."""
        start_time = time.time()
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
            validate_device_partition(device, partition)
        except ValueError, err:
            self.logger.increment('POST.errors')
            return HTTPBadRequest(body=str(err), request=request,
                                  content_type='text/plain')
        if 'x-timestamp' not in request.headers or \
                not check_float(request.headers['x-timestamp']):
            self.logger.increment('POST.errors')
            return HTTPBadRequest(body='Missing timestamp',
                                  request=request,
                                  content_type='text/plain')
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            self.logger.increment('POST.errors')
            return HTTPBadRequest(body='X-Delete-At in past',
                                  request=request,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            self.logger.increment('POST.errors')
            return HTTPInsufficientStorage(drive=device, request=request)
        file = DiskFile(self.devices, device, partition, account,
                        container, obj, self.logger,
                        disk_chunk_size=self.disk_chunk_size)

        # An already-expired object is indistinguishable from a missing
        # one to the client.
        if 'X-Delete-At' in file.metadata and \
                int(file.metadata['X-Delete-At']) <= time.time():
            self.logger.timing_since('POST.timing', start_time)
            return HTTPNotFound(request=request)
        if file.is_deleted():
            response_class = HTTPNotFound
        else:
            response_class = HTTPAccepted
        try:
            file_size = file.get_data_file_size()
        except (DiskFileError, DiskFileNotExist):
            file.quarantine()
            return HTTPNotFound(request=request)
        metadata = {'X-Timestamp': request.headers['x-timestamp']}
        metadata.update(val for val in request.headers.iteritems()
                        if val[0].lower().startswith('x-object-meta-'))
        for header_key in self.allowed_headers:
            if header_key in request.headers:
                header_caps = header_key.title()
                metadata[header_caps] = request.headers[header_key]
        old_delete_at = int(file.metadata.get('X-Delete-At') or 0)
        if old_delete_at != new_delete_at:
            # Keep the expiring-objects account in sync with the new
            # X-Delete-At value.
            if new_delete_at:
                self.delete_at_update('PUT', new_delete_at, account,
                                      container, obj, request.headers,
                                      device)
            if old_delete_at:
                self.delete_at_update('DELETE', old_delete_at, account,
                                      container, obj, request.headers,
                                      device)
        with file.mkstemp() as (fd, tmppath):
            file.put(fd, tmppath, metadata, extension='.meta')
        self.logger.timing_since('POST.timing', start_time)
        return response_class(request=request)
def __call__(self, environ, start_response): from webob.exc import HTTPNotFound r = HTTPNotFound() return r(environ, start_response)
content_type='text/plain', request=req) if self.mount_check and not check_mount(self.root, drive): return Response(status='507 %s is not mounted' % drive) broker = self._get_account_broker(drive, part, account) if container: # put account container if 'x-trans-id' in req.headers: broker.pending_timeout = 3 if account.startswith(self.auto_create_account_prefix) and \ not os.path.exists(broker.db_file): broker.initialize( normalize_timestamp( req.headers.get('x-timestamp') or time.time())) if req.headers.get('x-account-override-deleted', 'no').lower() != \ 'yes' and broker.is_deleted(): return HTTPNotFound(request=req) broker.put_container(container, req.headers['x-put-timestamp'], req.headers['x-delete-timestamp'], req.headers['x-object-count'], req.headers['x-bytes-used']) if req.headers['x-delete-timestamp'] > \ req.headers['x-put-timestamp']: return HTTPNoContent(request=req) else: return HTTPCreated(request=req) else: # put account timestamp = normalize_timestamp(req.headers['x-timestamp']) if not os.path.exists(broker.db_file): broker.initialize(timestamp) created = True elif broker.is_status_deleted():
def item(env, data): event = env.resource.query().filter_by(id=data.item_id).first() if not event: raise HTTPNotFound() return env.resource.render_to_response('item', dict(event=event))
class VMController(base.Controller):
    """Controller class for Vm resource extension."""

    def __init__(self):
        """Initialize controller with resource specific param values."""
        base.Controller.__init__(self, constants.VM_COLLECTION_NAME,
                                 'vm', 'Vm')

    def index(self, req):
        """List all virtual machines as a simple list.

        :param req: webob request
        :returns: simple list of virtual machines with appropriate
                  resource links.
        """
        server_list = self.get_all_by_filters(req, api.vm_get_all_by_filters)
        if not server_list:
            server_list = []
        # Apply marker/limit pagination before rendering.
        limited_list, collection_links = self.limited_by_marker(
            server_list, req)
        return self._index(req, limited_list, collection_links)

    def detail(self, req):
        """List all virtual machines as a detailed list with appropriate
        resource links.

        :param req: webob request
        :returns: webob response for detail list operation.
        """
        server_list = self.get_all_by_filters(req, api.vm_get_all_by_filters)
        if not server_list:
            server_list = []
        limited_list, collection_links = self.limited_by_marker(
            server_list, req)
        return self._detail(req, limited_list, collection_links)

    def _get_resource_xml_with_links(self, req, vm):
        """Get resource as xml updated with reference links to other
        resources.

        :param req: request object
        :param vm: vm object as per resource model
        :returns: (vm_xml, out_dict) tuple where vm_xml is the updated
                  xml and out_dict is a dictionary with keys as the
                  xpath of replaced entities and value is the
                  corresponding entity dict.
        """
        (ctx, proj_id) = util.get_project_context(req)
        vm_xml = util.dump_resource_xml(vm, self._model_name)
        out_dict = {}
        # Rewrite raw id elements into hyperlinked references; out_dict
        # collects what was replaced.
        vm_xml_update = util.replace_with_links(
            vm_xml,
            self._get_resource_tag_dict_list(req.application_url, proj_id),
            out_dict)
        field_list = util.get_query_fields(req)
        if field_list is not None:
            # Caller asked for specific fields; add perf data only when
            # 'utilization' is among them, then project the xml down to
            # the selected elements.
            if 'utilization' in field_list:
                vm_xml_update = self._add_perf_data(vm.get_id(),
                                                    vm_xml_update, ctx)
            vm_xml_update = \
                util.get_select_elements_xml(vm_xml_update, field_list, 'id')
        elif len(req.GET.getall('utilization')) > 0:
            # No field projection, but utilization was requested directly
            # as a query parameter.
            vm_xml_update = self._add_perf_data(vm.get_id(),
                                                vm_xml_update, ctx)
        return (vm_xml_update, out_dict)

    def _get_resource_tag_dict_list(self, application_url, proj_id):
        """Get the list of tag dictionaries applicable to virtual machine.

        :param application_url: application url from request
        :param proj_id: project id
        :returns: list of tag dictionaries for virtual machine
        """
        # Each entry maps an id-bearing tag in the vm xml to the
        # collection URL used when linkifying it.
        return [{
            'tag': 'storageVolumeId',
            'tag_replacement': 'storagevolume',
            'tag_key': 'id',
            'tag_collection_url': os.path.join(
                application_url, proj_id,
                constants.STORAGEVOLUME_COLLECTION_NAME),
            'tag_attrib': None,
        }, {
            'tag': 'vmHostId',
            'tag_replacement': 'vmhost',
            'tag_key': 'id',
            'tag_collection_url': os.path.join(
                application_url, proj_id,
                constants.VMHOSTS_COLLECTION_NAME),
            'tag_attrib': None,
        }]

    def show(self, req, id):
        """Display details for particular virtual machine identified by
        resource id.

        :param req: webob request
        :param id: unique id to identify virtual machine.
        :returns: complete resource details for the specified id and
                  request.
        """
        # NOTE(review): when vm_list is empty (and no exception occurs)
        # this method falls through and returns None rather than a 404 --
        # confirm whether callers rely on that.
        try:
            LOG.debug(_('Show vm id : %s' % str(id)))
            (ctx, proj_id) = util.get_project_context(req)
            vm_list = api.vm_get_by_ids(ctx, [id])
            LOG.debug(
                _('Project id: %s Received vmhosts from the database'
                  % proj_id))
            if vm_list:
                return self._show(req, vm_list[0])
        except Exception, err:
            # Any failure while fetching is reported as a 404 to the client.
            LOG.error(
                _('Exception while fetching data from healthnmon api %s'
                  % str(err)), exc_info=1)
            return HTTPNotFound()
def changelog_details(self, commit_id): if request.environ.get('HTTP_X_PARTIAL_XHR'): c.commit = c.rhodecode_repo.get_commit(commit_id=commit_id) return render('changelog/changelog_details.html') raise HTTPNotFound()
except exception.Invalid, e: msg = (_("Failed to update image metadata. Got error: %(e)s") % locals()) for line in msg.split('\n'): LOG.error(line) self.notifier.error('image.update', msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") except exception.NotFound, e: msg = ("Failed to find image to update: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) self.notifier.info('image.update', msg) raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden, e: msg = ("Forbidden to update image: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) self.notifier.info('image.update', msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") else: self.notifier.info('image.update', image_meta) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None)
def changelog_summary(self, repo_name): if request.environ.get('HTTP_X_PJAX'): _load_changelog_summary() return render('changelog/changelog_summary_data.html') raise HTTPNotFound()
req.context, id, image_meta, True) if image_data is not None: image_meta = self._upload_and_activate(req, image_meta) except exception.Invalid, e: msg = (_("Failed to update image metadata. Got error: %(e)s") % locals()) for line in msg.split('\n'): logger.error(line) self.notifier.error('image.update', msg) raise HTTPBadRequest(msg, request=req, content_type="text/plain") except exception.NotFound, e: msg = ("Failed to find image to update: %(e)s" % locals()) for line in msg.split('\n'): logger.info(line) self.notifier.info('image.update', msg) raise HTTPNotFound(msg, request=req, content_type="text/plain") else: self.notifier.info('image.update', image_meta) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None) return {'image_meta': image_meta} def delete(self, req, id): """ Deletes the image and all its chunks from the Glance :param req: The WSGI/Webob Request object :param id: The opaque image identifier
def repo_creating(self, repo_name): c.repo = repo_name c.task_id = request.GET.get('task_id') if not c.repo: raise HTTPNotFound() return render('admin/repos/repo_creating.html')