def _put_page(environ, start_response):
    '''
    Replace the target wiki page's content with the HTTP request body.

    Reads the PUT body into a temp file, stuffs it into the Moin page-edit
    form and POSTs that form to the back-end wiki.

    environ        -- WSGI environ dict
    start_response -- WSGI start_response callable (passed to auth/body helpers,
                      which may use it to reply early)

    Raises UnexpectedResponseError if the back-end request fails.
    '''
    req_headers = copy_headers_to_dict(environ, exclude=['HTTP_ACCEPT_ENCODING'])
    wiki_id, base, opener, original_page, wrapped_wiki_base = target(environ)
    page = environ['PATH_INFO'].lstrip('/')
    check_auth(environ, start_response, base, opener, req_headers)
    temp_fpath = read_http_body_to_temp(environ, start_response)
    form_vars = fill_page_edit_form(page, wiki_id, base, opener, req_headers)
    # Close the temp file promptly instead of leaking the handle
    # (original used a bare open(...).read())
    savetext_file = open(temp_fpath, "r")
    try:
        form_vars["savetext"] = savetext_file.read()
    finally:
        savetext_file.close()
    url = absolutize(page, base)
    data = urllib.urlencode(form_vars)
    request = urllib2.Request(url, data, req_headers)
    try:
        logger.debug('Prior to urllib2.opener')
        with closing(opener.open(request)) as resp:
            logger.debug('Return from urllib2.opener')
            doc = htmlparse(resp)
            raise_embedded_error(doc)
            logger.debug('HTML parse complete post urllib2.opener')
    except urllib2.URLError as e:
        # Plain URLError (e.g. connection refused) has no .code attribute;
        # only HTTPError does.  Guard with getattr so we report the real
        # failure instead of raising AttributeError here.
        raise UnexpectedResponseError(url=url, code=getattr(e, 'code', None), error=str(e))
def post_resource(environ, start_response):
    '''
    Create a new record with a resource type (HTTP POST).

    Runs the request body through the resource type's rulesheet handler,
    then forwards the transformed content to the slave to create the
    resource at the path the handler chose.

    environ        -- WSGI environ dict
    start_response -- WSGI start_response callable

    Returns an iterable WSGI response body.
    '''
    slaveinfo, space_tag = setup_request(environ)

    temp_fpath = read_http_body_to_temp(environ, start_response)
    # Close the temp file promptly instead of leaking the handle
    # (original used a bare open(...).read())
    body_file = open(temp_fpath, "r")
    try:
        body = body_file.read()
    finally:
        body_file.close()

    resource_type = slaveinfo.resource_factory()

    imt = environ['CONTENT_TYPE'].split(';')[0]
    lang = environ.get('CONTENT_LANGUAGE')
    handler = resource_type.run_rulesheet(environ, environ['REQUEST_METHOD'], imt, lang)

    new_path, content = handler(resource_type, body)
    logger.debug('rulesheet transform output & new uri path (post_resource): %r',
                 (content[:100], new_path))

    # Comes back as Unicode, but we need to feed it to the slave as an
    # encoded byte string, rewriting the WSGI input stream in place
    content = content.encode('utf-8')
    environ['wsgi.input'] = cStringIO.StringIO(content)
    # NOTE(review): PEP 3333 specifies CONTENT_LENGTH as a string; an int is
    # kept here for compatibility with the existing slave code — confirm.
    environ['CONTENT_LENGTH'] = len(content)

    response = slaveinfo.create_resource(new_path)
    if not slaveinfo.resp_status.startswith('2'):
        start_response(status_response(slaveinfo.resp_status), slaveinfo.resp_headers)
        return ["Unable to create resource\n"]

    start_response(slaveinfo.resp_status, slaveinfo.resp_headers)
    return response
def _put_page(environ, start_response):
    '''
    Replace the target wiki page's content with the HTTP request body.

    NOTE(review): this is a duplicate definition of _put_page also present
    earlier in the file; the later definition wins at import time — confirm
    one of the two should be removed.

    Raises UnexpectedResponseError if the back-end request fails.
    '''
    req_headers = copy_headers_to_dict(environ, exclude=['HTTP_ACCEPT_ENCODING'])
    wiki_id, base, opener, original_page, wrapped_wiki_base = target(environ)
    page = environ['PATH_INFO'].lstrip('/')
    check_auth(environ, start_response, base, opener, req_headers)
    temp_fpath = read_http_body_to_temp(environ, start_response)
    form_vars = fill_page_edit_form(page, wiki_id, base, opener, req_headers)
    # Close the temp file promptly instead of leaking the handle
    # (original used a bare open(...).read())
    savetext_file = open(temp_fpath, "r")
    try:
        form_vars["savetext"] = savetext_file.read()
    finally:
        savetext_file.close()
    url = absolutize(page, base)
    data = urllib.urlencode(form_vars)
    request = urllib2.Request(url, data, req_headers)
    try:
        logger.debug('Prior to urllib2.opener')
        with closing(opener.open(request)) as resp:
            logger.debug('Return from urllib2.opener')
            doc = htmlparse(resp)
            raise_embedded_error(doc)
            logger.debug('HTML parse complete post urllib2.opener')
    except urllib2.URLError as e:
        # Plain URLError (e.g. connection refused) has no .code attribute;
        # only HTTPError does.  Guard with getattr so we report the real
        # failure instead of raising AttributeError here.
        raise UnexpectedResponseError(url=url, code=getattr(e, 'code', None), error=str(e))
def put_resource(environ, start_response):
    '''
    Update an existing resource with the HTTP request body (HTTP PUT).

    Determines the resource type (forced via the request, or looked up from
    the slave), runs the body through the type's rulesheet handler, and
    forwards the transformed content to the slave.

    environ        -- WSGI environ dict
    start_response -- WSGI start_response callable

    Returns an iterable WSGI response body.
    '''
    slaveinfo, space_tag = setup_request(environ)
    resource_type = check_forced_type(environ, start_response, slaveinfo)

    imt = environ['CONTENT_TYPE'].split(';')[0]
    lang = environ.get('CONTENT_LANGUAGE')  # FIXME support multiple

    temp_fpath = read_http_body_to_temp(environ, start_response)
    # Close the temp file promptly instead of leaking the handle
    # (original used a bare open(...).read())
    body_file = open(temp_fpath, "r")
    try:
        body = body_file.read()
    finally:
        body_file.close()

    # No forced type on the request: ask the slave what this resource is
    if not resource_type:
        resource = slaveinfo.resource_factory()
        resource_type = resource.type

    handler = resource_type.run_rulesheet(environ, environ['REQUEST_METHOD'], imt, lang)
    content = handler(resource_type, body)
    logger.debug('rulesheet transform output (put_resource): %r', content[:100])

    # Comes back as Unicode, but we need to feed it to the slave as an
    # encoded byte string, rewriting the WSGI input stream in place
    content = content.encode('utf-8')
    environ['wsgi.input'] = cStringIO.StringIO(content)
    # NOTE(review): PEP 3333 specifies CONTENT_LENGTH as a string; an int is
    # kept here for compatibility with the existing slave code — confirm.
    environ['CONTENT_LENGTH'] = len(content)

    response = slaveinfo.update_resource()
    if not slaveinfo.resp_status.startswith('2'):
        start_response(status_response(slaveinfo.resp_status), slaveinfo.resp_headers)
        return ["Unable to update resource\n"]

    start_response(slaveinfo.resp_status, slaveinfo.resp_headers)
    return response
def post_page(environ, start_response):
    '''
    Upload an attachment to a wiki page (HTTP POST).

    Attachments use URI path params (for a bit of discussion see
    http://groups.google.com/group/comp.lang.python/browse_thread/thread/4662d41aca276d99)

    environ        -- WSGI environ dict; PATH_INFO carries the page name plus
                      a ';attachment=NAME' path parameter
    start_response -- WSGI start_response callable

    Raises MoinNotFoundError on a back-end 404, UnexpectedResponseError on
    any other back-end failure.
    '''
    req_headers = copy_headers_to_dict(environ, exclude=['HTTP_ACCEPT_ENCODING'])
    wiki_id, base, opener, original_page, wrapped_wiki_base = target(environ)
    logger.debug("wiki_id,base,opener,original_age,wrapped_wiki_base=%r",
                 (wiki_id, base, opener, original_page, wrapped_wiki_base))
    check_auth(environ, start_response, base, opener, req_headers)

    # Split ';attachment=NAME' path param off the page name
    page = environ['PATH_INFO'].lstrip('/')
    page, chaff, attachment = page.partition(';attachment=')

    # Unfortunately because urllib2's data dicts don't give an option for
    # limiting read length, must spool the body to a temp file
    temp_fpath = read_http_body_to_temp(environ, start_response)

    form_vars = fill_attachment_form(page, attachment, wiki_id, base, opener, req_headers)
    # This handle is deliberately left open: the opener reads the file while
    # streaming the multipart POST
    form_vars["file"] = open(temp_fpath, "rb")

    url = absolutize(page, base)
    request = urllib2.Request(url, form_vars, req_headers)
    try:
        with closing(opener.open(request)) as resp:
            doc = htmlparse(resp)
            raise_embedded_error(doc)
    except urllib2.URLError as e:
        # Plain URLError (e.g. connection refused) has no .code attribute;
        # only HTTPError does.  Guard with getattr so network-level failures
        # don't raise AttributeError here.
        if getattr(e, 'code', None) == 404:
            raise MoinNotFoundError(fronturl=request_uri(environ), backurl=url)
        else:
            raise UnexpectedResponseError(url=url, code=getattr(e, 'code', None), error=str(e))
def post_page(environ, start_response):
    '''
    Upload an attachment to a wiki page (HTTP POST).

    Attachments use URI path params (for a bit of discussion see
    http://groups.google.com/group/comp.lang.python/browse_thread/thread/4662d41aca276d99)

    NOTE(review): this is a duplicate definition of post_page also present
    earlier in the file; the later definition wins at import time — confirm
    one of the two should be removed.

    Raises MoinNotFoundError on a back-end 404, UnexpectedResponseError on
    any other back-end failure.
    '''
    req_headers = copy_headers_to_dict(environ, exclude=['HTTP_ACCEPT_ENCODING'])
    wiki_id, base, opener, original_page, wrapped_wiki_base = target(environ)
    logger.debug("wiki_id,base,opener,original_age,wrapped_wiki_base=%r",
                 (wiki_id, base, opener, original_page, wrapped_wiki_base))
    check_auth(environ, start_response, base, opener, req_headers)

    # Split ';attachment=NAME' path param off the page name
    page = environ['PATH_INFO'].lstrip('/')
    page, chaff, attachment = page.partition(';attachment=')

    # Unfortunately because urllib2's data dicts don't give an option for
    # limiting read length, must spool the body to a temp file
    temp_fpath = read_http_body_to_temp(environ, start_response)

    form_vars = fill_attachment_form(page, attachment, wiki_id, base, opener, req_headers)
    # This handle is deliberately left open: the opener reads the file while
    # streaming the multipart POST
    form_vars["file"] = open(temp_fpath, "rb")

    url = absolutize(page, base)
    request = urllib2.Request(url, form_vars, req_headers)
    try:
        with closing(opener.open(request)) as resp:
            doc = htmlparse(resp)
            raise_embedded_error(doc)
    except urllib2.URLError as e:
        # Plain URLError (e.g. connection refused) has no .code attribute;
        # only HTTPError does.  Guard with getattr so network-level failures
        # don't raise AttributeError here.
        if getattr(e, 'code', None) == 404:
            raise MoinNotFoundError(fronturl=request_uri(environ), backurl=url)
        else:
            raise UnexpectedResponseError(url=url, code=getattr(e, 'code', None), error=str(e))