Example #1
File: files.py Project: snjypl/tao1
def img(request):
	""" return file and information about it, for static """
	proc_id = request.match_info.get('proc_id', "des:obj")
	doc_id =  request.match_info.get('doc_id', "")
	img =     request.match_info.get('img', "")
	action =  request.match_info.get('action', "img")
	att = None
	headers = {}
	try:
		fn, att, prefix = img_m(request, proc_id, doc_id, img, action)
		if not fn: return web.HTTPNotFound()
		headers['Content-Length'] = str(fn['length'])
		lm = locale_date("%a, %d %b %Y %H:%M:%S GMT", fn['uploadDate'].timetuple(), 'en_US.UTF-8')
		headers['Last-Modified'] = lm

		ims = request.if_modified_since

		if ims and ims.strftime('%Y-%m-%d %H:%M:%S') >= fn['uploadDate'].strftime('%Y-%m-%d %H:%M:%S'):
			headers['Date'] = locale_date("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(), 'en_US.UTF-8')
			return web.HTTPNotModified( headers=MultiDict( headers ) )

		headers['Content-Type'] = fn['mime']
		headers['Cache-Control'] = 'max-age=604800'
		content = att.read()
	finally:
		if att: att.close()
	return web.Response(body=content, headers=MultiDict( headers ))
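A minimal wiring for this handler might look like the following sketch; the URL pattern is an assumption, since the project's actual routing table is not shown here:

from aiohttp import web  # same import the module above relies on

app = web.Application()
# Hypothetical route pattern matching the match_info keys used by img().
app.router.add_route('GET', '/img/{proc_id}/{doc_id}/{img}/{action}', img)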
Example #2
def _get_pretty(request):
    pretty = False
    get_params = MultiDict(urllib.parse.parse_qsl(request.query_string.lower()))
    if get_params.get('pretty'):
        if str(get_params.get('pretty', None)) in ['1', 'true']:
            pretty = True
    # Assume pretty if html is requested and pretty is not disabled
    elif 'text/html' in request.headers.get('ACCEPT', ''):
        pretty = True
    return pretty
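The same query-string handling can be checked standalone; the query string below is made up, and MultiDict is imported from the standalone multidict package (older aiohttp re-exported it):

import urllib.parse
from multidict import MultiDict

params = MultiDict(urllib.parse.parse_qsl('pretty=TRUE&page=2'.lower()))
# Lowercasing the whole query string makes both keys and values compare
# case-insensitively, which is why 'pretty=TRUE' matches here.
assert str(params.get('pretty')) in ['1', 'true']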
Example #3
    def test_get_lower(self):
        i_headers = MultiDict([('test', '123')])
        o_headers = MultiDict([('TEST', '123')])

        atoms = helpers.SafeAtoms({}, i_headers, o_headers)
        self.assertEqual(atoms['{test}i'], '123')
        self.assertEqual(atoms['{test}o'], '-')
        self.assertEqual(atoms['{TEST}o'], '123')
        self.assertEqual(atoms['{UNKNOWN}o'], '-')
        self.assertEqual(atoms['{UNKNOWN}'], '-')
Example #4
async def handle_request(self, message, payload):
    path = urlparse(message.path).path.lower()
    if path.startswith(u'/api/v1/limit/'):
        get_params = MultiDict(parse_qsl(urlparse(message.path).query))
        domain = get_params.get('d', None)
        gen_time = get_params.get('g', None)
        if domain is None or gen_time is None:
            await super().handle_error(
                message, payload, reason='domain or gen_time not provided')
        else:
            await self.handle_limit(message, domain, gen_time)
    else:
        await super().handle_request(message, payload)
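The d/g extraction above can be exercised in isolation; the request path below is invented:

from urllib.parse import parse_qsl, urlparse
from multidict import MultiDict

message_path = '/api/v1/limit/?d=example.com&g=1500000000'  # made-up path
params = MultiDict(parse_qsl(urlparse(message_path).query))
assert params.get('d', None) == 'example.com'
assert params.get('g', None) == '1500000000'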
Example #5
File: handlers.py Project: REGOVAR/Pirus
def process_generic_get(query_string, allowed_fields):
    # 1- retrieve query parameters
    get_params = MultiDict(parse_qsl(query_string))
    r_range = get_params.get('range', "0-" + str(RANGE_DEFAULT))
    r_fields = get_params.get('fields', None)
    r_order = get_params.get('order_by', None)
    r_sort = get_params.get('order_sort', None)
    r_filter = get_params.get('filter', None)

    # 2- fields to extract
    fields = allowed_fields
    if r_fields is not None:
        fields = []
        for f in r_fields.split(','):
            f = f.strip().lower()
            if f in allowed_fields:
                fields.append(f)
    if len(fields) == 0:
        return rest_error("No valid fields provided : " +
                          get_params.get('fields'))

    # 3- Build json query for mongoengine
    query = {}
    if r_filter is not None:
        query = {"$or": []}
        for k in fields:
            query["$or"].append({k: {'$regex': r_filter}})

    # 4- Order
    order = "name"
    # if r_sort is not None and r_order is not None:
    #     r_sort = r_sort.split(',')
    #     r_order = r_order.split(',')
    #     if len(r_sort) == len(r_order):
    #         order = []
    #         for i in range(0, len(r_sort)):
    #             f = r_sort[i].strip().lower()
    #             if f in allowed_fields:
    #                 if r_order[i] == "desc":
    #                     f = "-" + f
    #                 order.append(f)
    # order = tuple(order)

    # 5- limit
    r_range = r_range.split("-")
    offset = 0
    limit = RANGE_DEFAULT
    try:
        offset = int(r_range[0])
        limit = int(r_range[1])
    except (IndexError, ValueError):
        return rest_error("No valid range provided: " +
                          get_params.get('range'))

    # 6- Return processed data
    return fields, query, order, offset, limit
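A standalone sketch of the range slicing from step 5; RANGE_DEFAULT is an assumed value here, as the real constant is defined elsewhere in the project:

from urllib.parse import parse_qsl
from multidict import MultiDict

RANGE_DEFAULT = 1000  # assumption; Pirus defines this elsewhere

params = MultiDict(parse_qsl('range=20-40&fields=name,comment'))
r_range = params.get('range', '0-' + str(RANGE_DEFAULT)).split('-')
offset, limit = int(r_range[0]), int(r_range[1])
assert (offset, limit) == (20, 40)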
Example #6
    async def handle_request(self, request, payload):
        print('Request path: {0}'.format(request.path))
        data = self.create_data(request, 200)
        if request.method == 'POST':
            post_data = await payload.read()
            post_data = MultiDict(parse_qsl(post_data.decode('utf-8')))
            print('POST data:')
            for key, val in post_data.items():
                print('\t- {0}: {1}'.format(key, val))
            data['post_data'] = dict(post_data)

        # Submit the event to the TANNER service
        event_result = await self.submit_data(data)

        # Log the event to slurp service if enabled
        if self.run_args.slurp_enabled:
            await self.submit_slurp(request.path)

        content, content_type, headers, status_code = await self.parse_tanner_response(
            request.path, event_result['response']['message']['detection'])
        response = aiohttp.Response(self.writer,
                                    status=status_code,
                                    http_version=request.version)
        for name, val in headers.items():
            response.add_header(name, val)

        response.add_header('Server', self.run_args.server_header)

        if 'cookies' in data and 'sess_uuid' in data['cookies']:
            previous_sess_uuid = data['cookies']['sess_uuid']
        else:
            previous_sess_uuid = None

        if event_result is not None and (
                'sess_uuid' in event_result['response']['message']):
            cur_sess_id = event_result['response']['message']['sess_uuid']
            if previous_sess_uuid is None or not previous_sess_uuid.strip(
            ) or previous_sess_uuid != cur_sess_id:
                response.add_header('Set-Cookie', 'sess_uuid=' + cur_sess_id)

        if not content_type:
            response.add_header('Content-Type', 'text/plain')
        else:
            response.add_header('Content-Type', content_type)
        if content:
            response.add_header('Content-Length', str(len(content)))
        response.send_headers()
        if content:
            response.write(content)
        await response.write_eof()
Example #7
    def _request(self, method, url, headers, data):
        # Note: When using aiobotocore with dynamodb, requests fail on crc32
        # checksum validation as soon as the response data reaches ~5KB.
        # When the aws response is gzip compressed:
        # 1. aiohttp automatically decompresses the data
        # (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content)
        # 2. botocore computes crc32 on the decompressed data bytes and fails,
        # because the crc32 was computed on the compressed data.
        # The following line forces aws not to use gzip compression;
        # if there is a way to configure aiohttp not to perform decompression,
        # we can remove the following line and take advantage of
        # aws gzip compression.
        headers['Accept-Encoding'] = 'identity'
        headers_ = MultiDict(
            (z[0], text_(z[1], encoding='utf-8')) for z in headers.items())

        # For now the effective request timeout is the read_timeout, so we
        # want to ensure that the conn_timeout won't get truncated by the
        # read_timeout. This should be removed after
        # (https://github.com/KeepSafe/aiohttp/issues/1524) is resolved
        if self._read_timeout < self._conn_timeout:
            warnings.warn("connection timeout may be reduced due to current "
                          "read timeout")

        # botocore does this during the request so we do this here as well
        proxy = self.proxies.get(urlparse(url.lower()).scheme)

        url = yarl.URL(url, encoded=True)
        resp = yield from self._aio_session.request(method,
                                                    url=url,
                                                    headers=headers_,
                                                    data=data,
                                                    proxy=proxy,
                                                    timeout=self._read_timeout)
        return resp
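The checksum mismatch described in the comment is easy to reproduce outside botocore: a crc32 computed over the compressed stream will not match one computed over the decompressed body (the payload below is invented):

import gzip
import zlib

raw = b'{"TableNames": []}'  # made-up response body
compressed = gzip.compress(raw, mtime=0)  # fixed mtime for deterministic output
# The service's crc32 covers the compressed bytes; once aiohttp transparently
# decompresses the body, the client would be validating the wrong bytes.
assert zlib.crc32(compressed) != zlib.crc32(raw)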
Example #8
def get_multi_dict_from_python_dict(resp_headers_dict: dict) -> MultiDictProxy:
    """Construct an :class:`aiohttp.MultiDictProxy` instance from a Python dictionary.

    Note: For now, this method is used for test only.

    .. note::

        Neither Python dictionary nor JSON supports multi-value key.  The response headers returned
        by `aiohttp` is of immutable type :class:`aiohttp.MultiDictProxy` while the one returned by
        `aiohttpretty` is of :class:`aiohttp.MultiDict`.

        WB tests use the :class:`aiohttp.MultiDict` type for both files and folders during modification
        and returns the :class:`aiohttp.MultiDictProxy` type to imitate the behavior of `aiohttp`.

    :param dict resp_headers_dict: the raw response headers dictionary
    :rtype: :class:`aiohttp.MultiDictProxy`
    """

    resp_headers = MultiDict(resp_headers_dict)
    google_hash = resp_headers.get('x-goog-hash', None)
    if google_hash:
        assert verify_raw_google_hash_header(google_hash)
        resp_headers.pop('x-goog-hash')
        google_hash_list = google_hash.split(',')
        for google_hash in google_hash_list:
            resp_headers.add('x-goog-hash', google_hash)

    return MultiDictProxy(resp_headers)
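The header splitting above can be illustrated directly; the hash values are fake:

from multidict import MultiDict, MultiDictProxy

headers = MultiDict({'x-goog-hash': 'crc32c=AAAAAA==,md5=BBBBBB=='})
combined = headers.pop('x-goog-hash')
for part in combined.split(','):
    headers.add('x-goog-hash', part)
proxy = MultiDictProxy(headers)
assert proxy.getall('x-goog-hash') == ['crc32c=AAAAAA==', 'md5=BBBBBB==']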
Example #9
def _create_or_update(self, data, username, network=None):
    if network:
        network_data = network.to_dict()
        network_data.update(data)
        form = NetworkForm(formdata=MultiDict(network_data))
    else:
        form = NetworkForm(formdata=MultiDict(data))
    form.validate()
    if form.errors:
        return web.Response(body=json.dumps(form.errors).encode(),
                            status=400,
                            content_type='application/json')
    cleaned_data = form.data
    cleaned_data['user'] = username
    if network:
        network = yield from NetworkStore.update(
            dict(filter=('id', network.id), update=cleaned_data))
    else:
        network = yield from NetworkStore.create(**cleaned_data)
    return web.Response(body=self.serialize(network).encode(),
                        content_type='application/json')
Example #10
File: handlers.py Project: REGOVAR/Pirus
async def dl_file(self, request):
    # 1- Retrieve request parameters
    file_id = request.match_info.get('file_id', -1)
    pfile = File.from_id(file_id)
    if not pfile:
        return rest_error("File with id {} doesn't exist.".format(file_id))
    file = None
    if os.path.isfile(pfile.path):
        with open(pfile.path, 'br') as content_file:
            file = content_file.read()
    return web.Response(
        headers=MultiDict(
            {'Content-Disposition': 'Attachment; filename=' + pfile.name}),
        body=file)
Example #11
File: handlers.py Project: REGOVAR/Pirus
def get(self, request):
    fields, query, order, offset, limit = process_generic_get(
        request.query_string, Job.public_fields)
    depth = int(
        MultiDict(parse_qsl(request.query_string)).get('sublvl', 0))
    # Get range meta data
    range_data = {
        "range_offset": offset,
        "range_limit": limit,
        "range_total": Job.count(),
        "range_max": RANGE_MAX,
    }
    jobs = core.jobs.get(fields, query, order, offset, limit, depth)
    return rest_success([j.to_json() for j in jobs], range_data)
Example #12
    async def get_comments(self, request):
        """Returns information about user comments by user_id"""

        user_id = request.match_info.get('user_id')
        headers = {
            'Content-Disposition':
            'attachment; filename="comments_{}"'.format(user_id),
        }
        resp = web.StreamResponse(headers=MultiDict(headers))
        resp.content_type = 'text/plain'
        await resp.prepare(request)
        comments = UserCommentsGenerator(db=request.app['db'], user_id=user_id)
        async for i in comments:
            resp.write(i)
        await resp.write_eof()
        return resp
Example #13
async def union_stat(request, *args):
    component = request.match_info.get('component', "Anonymous")
    fname = request.match_info.get('fname', "Anonymous")
    path = os.path.join(settings.tao_path, 'libs', component, 'static', fname)
    # search in project directory
    if component == 'static':
        path = os.path.join(settings.root_path, 'static')
    # search in project components
    elif not os.path.exists(path):
        path = os.path.join(settings.root_path, 'apps', component, 'static')
    # search in core components
    else:
        path = os.path.join(settings.tao_path, 'libs', component, 'static')
    # app.router.add_static()
    content, headers = get_static_file(fname, path)
    return web.Response(body=content, headers=MultiDict(headers))
Example #14
File: handlers.py Project: REGOVAR/Pirus
def get(self, request):
    # Generic processing of the get query
    fields, query, order, offset, limit = process_generic_get(
        request.query_string, File.public_fields)
    depth = int(
        MultiDict(parse_qsl(request.query_string)).get('sublvl', 0))
    # Get range meta data
    range_data = {
        "range_offset": offset,
        "range_limit": limit,
        "range_total": File.count(),
        "range_max": RANGE_MAX,
    }
    # Return result of the query for PirusFile
    files = core.files.get(fields, query, order, offset, limit, depth)
    return rest_success([format_file_json(f.to_json()) for f in files],
                        range_data)
Example #15
File: handlers.py Project: REGOVAR/Pirus
async def dl_pipe_file(self, request):
    # 1- Retrieve request parameters
    pipe_id = request.match_info.get('pipe_id', -1)
    filename = request.match_info.get('filename', None)
    pipeline = Pipeline.from_id(pipe_id, 1)
    if pipeline is None:
        return rest_error("No pipeline with id {}".format(pipe_id))
    if filename is None:
        return rest_error("No filename provided")
    path = os.path.join(pipeline.path, filename)
    file = None
    if os.path.isfile(path):
        with open(path, 'br') as content_file:
            file = content_file.read()
    return web.Response(
        headers=MultiDict(
            {'Content-Disposition': 'Attachment; filename=' + filename}),
        body=file)
Example #16
File: handlers.py Project: REGOVAR/Pirus
    def download_file(self, job_id, filename, location=JOBS_DIR):
        job = Job.from_id(job_id, 1)
        if job is None:
            return rest_error("Unable to find the job (id={})".format(job_id))
        path = os.path.join(job.path, filename)

        if not os.path.exists(path):
            return rest_error(
                "File not found. {} doesn't exist for the job (id={})".format(
                    filename, job_id))
        content = b""
        if os.path.isfile(path):
            with open(path, 'br') as content_file:
                content = content_file.read()
        return web.Response(headers=MultiDict(
            {'Content-Disposition': 'Attachment; filename=' + filename}),
                            body=content)
Example #17
def handle_request(self, message, payload):
    response = aiohttp.Response(
        self.writer, 200, http_version=message.version)
    get_params = MultiDict(parse_qsl(urlparse(message.path).query))
    if message.method == 'POST':
        post_params = yield from payload.read()
    else:
        post_params = None
    content = "<h1>It Works!</h1>"
    if get_params:
        content += "<h2>Get params</h2><p>" + str(get_params) + "</p>"
    if post_params:
        content += "<h2>Post params</h2><p>" + str(post_params) + "</p>"
    bcontent = content.encode('utf-8')
    response.add_header('Content-Type', 'text/html; charset=UTF-8')
    response.add_header('Content-Length', str(len(bcontent)))
    response.send_headers()
    response.write(bcontent)
    yield from response.write_eof()
Example #18
    async def handle_request(self, message, payload):
        """Traite une requête POST pour executer une action"""

        if message.method == 'POST':
            data = await payload.read()
            decoded = data.decode('utf-8')
            get_params = MultiDict(parse_qsl(urlparse(message.path).query))
            post_params = json.loads(decoded)
            if get_params['token'] == self.token:
                code = 200
                post_params['client'] = self.client
                method = getattr(self.Action, post_params['action'])
                await method(**post_params)
            else:
                code = 401
        else:
            code = 403
        response = aiohttp.Response(
            self.writer, code, http_version=message.version)
        response.send_headers()
        await response.write_eof()
Example #19
def test_get_non_existing(self):
    atoms = helpers.SafeAtoms({}, MultiDict(), MultiDict())
    self.assertEqual(atoms['unknown'], '-')
Example #20
    def handle_request(self, request, payload):
        print('Request path: {0}'.format(request.path))
        data = self.create_data(request, 200)
        if request.method == 'POST':
            post_data = yield from payload.read()
            post_data = MultiDict(parse_qsl(post_data.decode('utf-8')))
            print('POST data:')
            for key, val in post_data.items():
                print('\t- {0}: {1}'.format(key, val))
            data['post_data'] = dict(post_data)

        # Submit the event to the TANNER service
        event_result = yield from self.submit_data(data)

        # Log the event to slurp service if enabled
        if self.run_args.slurp_enabled:
            yield from self.submit_slurp(request.path)
        response = aiohttp.Response(self.writer,
                                    status=200,
                                    http_version=request.version)
        mimetypes.add_type('text/html', '.php')
        if 'payload' in event_result['response']['message']['detection']:
            payload_content = event_result['response']['message']['detection'][
                'payload']
            if isinstance(payload_content, dict):
                content_type = mimetypes.guess_type(payload_content['page'])[0]
                content = '<html><body></body></html>'
                base_path = '/'.join(
                    ['/opt/snare/pages', self.run_args.page_dir])
                if os.path.exists(base_path + payload_content['page']):
                    with open(base_path + payload_content['page']) as p:
                        content = p.read()
                soup = BeautifulSoup(content, 'html.parser')
                script_tag = soup.new_tag('div')
                script_tag.append(
                    BeautifulSoup(payload_content['value'], 'html.parser'))
                soup.body.append(script_tag)
                content = str(soup).encode()

            else:
                content_type = mimetypes.guess_type(payload_content)[0]
                content = payload_content.encode('utf-8')
        else:
            base_path = '/'.join(['/opt/snare/pages', self.run_args.page_dir])
            query = None
            if request.path == '/':
                parsed_url = self.run_args.index_page
            else:
                parsed_url = urlparse(unquote(request.path))
                if parsed_url.query:
                    query = '?' + parsed_url.query
                parsed_url = parsed_url.path
                if parsed_url.startswith('/'):
                    parsed_url = parsed_url[1:]
            path = '/'.join([base_path, parsed_url])
            content_type = mimetypes.guess_type(path)[0]
            if content_type is None and '.php' in path:
                content_type = 'text/html'
            if query is not None:
                path = os.path.normpath(path + query)
            else:
                path = os.path.normpath(path)
            if os.path.isfile(path) and path.startswith(base_path):
                with open(path, 'rb') as fh:
                    content = fh.read()
                if content_type:
                    if 'text/html' in content_type:
                        content = yield from self.handle_html_content(content)
            else:
                content_type = None
                content = None
                response = aiohttp.Response(self.writer,
                                            status=404,
                                            http_version=request.version)
        if not content_type:
            response.add_header('Content-Type', 'text/plain')
        else:
            response.add_header('Content-Type', content_type)
        if content:
            response.add_header('Content-Length', str(len(content)))
        response.send_headers()
        if content:
            response.write(content)
        yield from response.write_eof()
Example #21
    def handle_request(self, request, payload):
        print('Request path: {0}'.format(request.path))
        data = self.create_data(request, 200)
        if request.method == 'POST':
            post_data = yield from payload.read()
            post_data = MultiDict(parse_qsl(post_data.decode('utf-8')))
            print('POST data:')
            for key, val in post_data.items():
                print('\t- {0}: {1}'.format(key, val))
            data['post_data'] = dict(post_data)

        # Submit the event to the TANNER service
        event_result = yield from self.submit_data(data)

        # Log the event to slurp service if enabled
        if self.run_args.slurp_enabled:
            yield from self.submit_slurp(request.path)
        response = aiohttp.Response(self.writer,
                                    status=200,
                                    http_version=request.version)

        content_type = None
        mimetypes.add_type('text/html', '.php')
        mimetypes.add_type('text/html', '.aspx')
        base_path = os.path.join('/opt/snare/pages', self.run_args.page_dir)
        if event_result is not None and (
                'payload' in event_result['response']['message']['detection']
                and event_result['response']['message']['detection']['payload']
                is not None):
            payload_content = event_result['response']['message']['detection'][
                'payload']
            if isinstance(payload_content, dict):
                if payload_content['page'].startswith('/'):
                    payload_content['page'] = payload_content['page'][1:]
                page_path = os.path.join(base_path, payload_content['page'])
                content = '<html><body></body></html>'
                if os.path.exists(page_path):
                    content_type = mimetypes.guess_type(page_path)[0]
                    with open(page_path, encoding='utf-8') as p:
                        content = p.read()
                soup = BeautifulSoup(content, 'html.parser')
                script_tag = soup.new_tag('div')
                script_tag.append(
                    BeautifulSoup(payload_content['value'], 'html.parser'))
                soup.body.append(script_tag)
                content = str(soup).encode()

            else:
                content_type = mimetypes.guess_type(payload_content)[0]
                content = payload_content.encode('utf-8')
        else:
            query = None
            if request.path == '/':
                parsed_url = self.run_args.index_page
            else:
                parsed_url = urlparse(unquote(request.path))
                if parsed_url.query:
                    query = '?' + parsed_url.query
                parsed_url = parsed_url.path
                if parsed_url.startswith('/'):
                    parsed_url = parsed_url[1:]
            path = os.path.normpath(os.path.join(base_path, parsed_url))
            if os.path.isfile(path) and path.startswith(base_path):
                content_type = mimetypes.guess_type(path)[0]
                with open(path, 'rb') as fh:
                    content = fh.read()
                if content_type:
                    if 'text/html' in content_type:
                        content = yield from self.handle_html_content(content)
            else:
                content_type = None
                content = None
                response = aiohttp.Response(self.writer,
                                            status=404,
                                            http_version=request.version)
        response.add_header('Server', self.run_args.server_header)

        if 'cookies' in data and 'sess_uuid' in data['cookies']:
            previous_sess_uuid = data['cookies']['sess_uuid']
        else:
            previous_sess_uuid = None

        if event_result is not None and (
                'sess_uuid' in event_result['response']['message']):
            cur_sess_id = event_result['response']['message']['sess_uuid']
            if previous_sess_uuid is None or not previous_sess_uuid.strip(
            ) or previous_sess_uuid != cur_sess_id:
                response.add_header('Set-Cookie', 'sess_uuid=' + cur_sess_id)

        if not content_type:
            response.add_header('Content-Type', 'text/plain')
        else:
            response.add_header('Content-Type', content_type)
        if content:
            response.add_header('Content-Length', str(len(content)))

        # logging of ip, stat, req, user, len, time
        if args.logger:
            req = request.method + ' /' + self.run_args.page_dir + request.path
            major = response.version.major
            minor = response.version.minor
            ver = " HTTP/" + str(major) + '.' + str(minor)
            req = req + ver
            user = getpass.getuser()
            tim = strftime("%d/%b/%Y:%H:%M:%S %z")
            d = {
                'hostIP': request.headers['Host'],
                'stat': response.status,
                'req': req,
                'user': user,
                'content_length': response.headers['Content-Length'],
                'time': tim
            }
            level_dict = {
                'CRITICAL': 50,
                'ERROR': 40,
                'WARNING': 30,
                'INFO': 20,
                'DEBUG': 10,
                'NOTSET': 0
            }
            self.logger.log(level_dict[args.logger], ' ', extra=d)

        response.send_headers()
        if content:
            response.write(content)
        yield from response.write_eof()
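The mimetypes registrations near the top of the handler are what make .php and .aspx paths resolve to text/html when guessing content types; in isolation:

import mimetypes

mimetypes.add_type('text/html', '.php')
mimetypes.add_type('text/html', '.aspx')
assert mimetypes.guess_type('index.php')[0] == 'text/html'
assert mimetypes.guess_type('default.aspx')[0] == 'text/html'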
Example #22
    def test_metadata_from_resp_headers_missing_resp_headers(
            self, file_obj_name):

        with pytest.raises(exceptions.MetadataError):
            GoogleCloudFileMetadata.new_from_resp_headers(
                file_obj_name, MultiDict({}))
Example #23
async def get_post_params(request):
    data = await request.payload.read()
    data = data.decode('utf-8')
    post_params = MultiDict(parse_qsl(data))
    return post_params
Example #24
def setUp(self):
    self.loop = mock.Mock()
    self._REQUEST = aiohttp.RawRequestMessage('GET', '/some/path', '1.1',
                                              MultiDict(), True, None)
Example #25
File: snare.py Project: robski888/snare
    def handle_request(self, request, payload):
        print('Request path: {0}'.format(request.path))
        data = self.create_data(request, 200)
        if request.method == 'POST':
            post_data = yield from payload.read()
            post_data = MultiDict(parse_qsl(post_data.decode('utf-8')))
            print('POST data:')
            for key, val in post_data.items():
                print('\t- {0}: {1}'.format(key, val))
            data['post_data'] = dict(post_data)

        # Submit the event to the TANNER service
        event_result = yield from self.submit_data(data)

        # Log the event to slurp service if enabled
        if self.run_args.slurp_enabled:
            yield from self.submit_slurp(request.path)
        response = aiohttp.Response(
            self.writer, status=200, http_version=request.version
        )
        if 'payload' in event_result['response']['message']['detection']:
            payload_content = event_result['response']['message']['detection']['payload']
            if isinstance(payload_content, dict):
                content_type = mimetypes.guess_type(payload_content['page'])[0]
                content = '<html><body></body></html>'
                base_path = '/'.join(['/opt/snare/pages', self.run_args.page_dir])
                if os.path.exists(base_path + payload_content['page']):
                    with open(base_path + payload_content['page']) as p:
                        content = p.read()
                soup = BeautifulSoup(content, 'html.parser')
                script_tag = soup.new_tag('div')
                script_tag.append(BeautifulSoup(payload_content['value'], 'html.parser'))
                soup.body.append(script_tag)
                content = str(soup).encode()

            else:
                content_type = mimetypes.guess_type(payload_content)[0]
                content = payload_content.encode('utf-8')
        else:
            base_path = '/'.join(['/opt/snare/pages', self.run_args.page_dir])
            if request.path == '/':
                parsed_url = self.run_args.index_page
            else:
                parsed_url = urlparse(unquote(request.path)).path
                if parsed_url.startswith('/'):
                    parsed_url = parsed_url[1:]
            path = '/'.join(
                [base_path, parsed_url]
            )
            path = os.path.normpath(path)
            if os.path.isfile(path) and path.startswith(base_path):
                with open(path, 'rb') as fh:
                    content = fh.read()
                content_type = mimetypes.guess_type(path)[0]
                if content_type:
                    if 'text/html' in content_type:
                        content = yield from self.handle_html_content(content)
            else:
                content_type = None
                content = None
                response = aiohttp.Response(
                    self.writer, status=404, http_version=request.version
                )
        if not content_type:
            response.add_header('Content-Type', 'text/plain')
        else:
            response.add_header('Content-Type', content_type)
        if content:
            response.add_header('Content-Length', str(len(content)))
        response.send_headers()
        if content:
            response.write(content)
        yield from response.write_eof()
Example #26
async def handler_exists_layer2(request):
    session_redis = await get_session(request)
    posted_data = await request.post()
    user_id = get_user_id(session_redis, request.app['app_users'])
    layer_name = posted_data.get('layer')
    layer_name_redis = posted_data.get('layer_name')
    file_format = posted_data.get('format')
    projection = json.loads(posted_data.get('projection'))
    res = await request.app['redis_conn'].get('_'.join(
        [user_id, layer_name_redis, "NQ"]))
    if not res:
        request.app['logger'].info(
            '{} - Unable to fetch the requested layer ({}/{})'.format(
                user_id, layer_name, layer_name_redis))
        return web.Response(
            text='{"Error": "Unable to fetch the layer on the server"}')
    elif file_format == "TopoJSON":
        return web.Response(text=res.decode())
    else:
        try:
            res_geojson = topojson_to_geojson(json.loads(res.decode()))
            if "GeoJSON" in file_format:
                return web.Response(text=res_geojson)
            elif "KML" in file_format:
                tmp_path = prepare_folder()
                output_path = ''.join([tmp_path, "/", layer_name, ".geojson"])
                savefile(output_path, res_geojson.encode())
                result = reproj_convert_layer_kml(output_path)
                os.removedirs(tmp_path)
                return web.Response(text=result.decode())
            else:
                out_proj = check_projection(
                    projection["name"] if "name" in
                    projection else projection["proj4string"])
                if not out_proj:
                    return web.Response(text=json.dumps(
                        {'Error': 'app_page.common.error_proj4_string'}))

                available_formats = {
                    "ESRI Shapefile": ".shp",
                    "KML": ".kml",
                    "GML": ".gml"
                }
                ext = available_formats[file_format]
                tmp_path = prepare_folder()
                output_path = ''.join([tmp_path, "/", layer_name, ".geojson"])
                savefile(output_path, res_geojson.encode())
                reproj_convert_layer(output_path,
                                     output_path.replace(".geojson", ext),
                                     file_format, out_proj)
                os.remove(output_path)
                raw_data, filename = fetch_zip_clean(tmp_path, layer_name)
                if ".zip" in filename:
                    b64_zip = b64encode(raw_data)
                    return web.Response(body=b64_zip,
                                        headers=MultiDict({
                                            "Content-Type":
                                            "application/octet-stream",
                                            "Content-Disposition":
                                            ''.join([
                                                "attachment; filename=",
                                                layer_name, ".zip"
                                            ]),
                                            "Content-length":
                                            len(b64_zip)
                                        }))
                else:
                    return web.Response(text=raw_data.decode())
        except Exception as err:
            request.app['logger'].info(
                '{} - Error {} while converting layer {} to {} format'.format(
                    user_id, err, layer_name, file_format))
            return web.Response(text='{"Error": "Unexpected error"}')
    return web.Response(text='{"Error": "Invalid file format"}')