Example #1
def abort(code):
    """Map an HTTP error status code to the matching aiohttp exception instance."""
    exception_classes = {
        400: web.HTTPBadRequest,
        401: web.HTTPUnauthorized,
        402: web.HTTPPaymentRequired,
        403: web.HTTPForbidden,
        404: web.HTTPNotFound,
        405: web.HTTPMethodNotAllowed,  # NOTE: requires method/allowed_methods args
        406: web.HTTPNotAcceptable,
        407: web.HTTPProxyAuthenticationRequired,
        408: web.HTTPRequestTimeout,
        409: web.HTTPConflict,
        410: web.HTTPGone,
        411: web.HTTPLengthRequired,
        412: web.HTTPPreconditionFailed,
        413: web.HTTPRequestEntityTooLarge,  # NOTE: requires max_size/actual_size in aiohttp 3.x
        414: web.HTTPRequestURITooLong,
        415: web.HTTPUnsupportedMediaType,
        416: web.HTTPRequestRangeNotSatisfiable,
        417: web.HTTPExpectationFailed,
        421: web.HTTPMisdirectedRequest,
        422: web.HTTPUnprocessableEntity,
        424: web.HTTPFailedDependency,
        426: web.HTTPUpgradeRequired,
        428: web.HTTPPreconditionRequired,
        429: web.HTTPTooManyRequests,
        431: web.HTTPRequestHeaderFieldsTooLarge,
        451: web.HTTPUnavailableForLegalReasons,  # NOTE: may require a link arg, depending on aiohttp version
    }
    # Unknown codes fall back to 400, matching the original else branch.
    return exception_classes.get(code, web.HTTPBadRequest)()
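
A minimal usage sketch (the handler name, route, and "items" store are hypothetical, not from the source): abort() returns the exception instance rather than raising it, so a caller raises the result itself.

async def get_item(request):
    item = request.app["items"].get(request.match_info["id"])  # hypothetical store
    if item is None:
        raise abort(404)  # abort() returns web.HTTPNotFound; raise it here
    return web.json_response(item)
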
Example #2
def test_pickle(self) -> None:
    resp = web.HTTPRequestEntityTooLarge(
        100, actual_size=123, headers={"X-Custom": "value"}, reason="Too large"
    )
    resp.foo = "bar"
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        pickled = pickle.dumps(resp, proto)
        resp2 = pickle.loads(pickled)
        assert resp2.text == resp.text
        assert resp2.headers == resp.headers
        assert resp2.reason == "Too large"
        assert resp2.status == 413
        assert resp2.foo == "bar"
Example #3
def test_ctor(self) -> None:
    resp = web.HTTPRequestEntityTooLarge(max_size=100,
                                         actual_size=123,
                                         headers={'X-Custom': 'value'},
                                         reason='Too large')
    assert resp.text == ('Maximum request body size 100 exceeded, '
                         'actual body size 123')
    assert resp.headers == {
        'X-Custom': 'value',
        'Content-Type': 'text/plain'
    }
    assert resp.reason == 'Too large'
    assert resp.status == 413
Example #4
def test_ctor(self) -> None:
    resp = web.HTTPRequestEntityTooLarge(
        max_size=100,
        actual_size=123,
        headers={"X-Custom": "value"},
        reason="Too large",
    )
    assert resp.text == "Maximum request body size 100 exceeded, actual body size 123"
    assert resp.headers == {"X-Custom": "value", "Content-Type": "text/plain"}
    assert resp.reason == "Too large"
    assert resp.status == 413
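
The two ctor tests above pin down the aiohttp 3.x signature: max_size and actual_size are required and are interpolated into the default body text. A minimal sketch of raising the exception from a handler under that assumption (the limit value is illustrative):

async def handler(request):
    limit = 100  # illustrative limit, matching the tests above
    if request.content_length and request.content_length > limit:
        raise web.HTTPRequestEntityTooLarge(
            max_size=limit, actual_size=request.content_length)
    return web.Response(text="ok")
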
Example #5
async def gmail_pubsub_push(request: web.Request):
    """
        Handle webhooks from gmail (google pub/sub subscribition)
    """
    MAX_SIZE = 26214400
    if request.content_length > MAX_SIZE:
        raise web.HTTPRequestEntityTooLarge(MAX_SIZE, request.content_length)
    request_data = await request.json()
    logging.info(str(request_data))
    if request_data.get('message'):
        notification_data = request_data['message']['data']
        update = json.loads(
            urlsafe_b64decode(notification_data).decode('utf-8'))
        email: str = update["emailAddress"]
        new_history_id: int = int(update["historyId"])
        creds = tuple(await psqldb.get_gmail_creds(email=email))
        user_creds = gmail_API.make_user_creds(*creds)
        history_id = await psqldb.update_watch_history(
            email=email, history_id=new_history_id)
        if history_id:
            # TODO: Catch 404 and make a full sync in that case
            # https://developers.google.com/gmail/api/guides/sync#full_synchronization
            hist = await gmail_API.read_history(user_creds=user_creds,
                                                email=email,
                                                history_id=str(history_id))
            if hist.get("history"):
                creds = tuple(await psqldb.get_gmail_creds(email=email))
                user_creds = gmail_API.make_user_creds(*creds)
                watched_chats_records = tuple(
                    await psqldb.get_watched_chats(email=email))
                for history_record in hist["history"]:
                    for message_record in history_record["messages"]:
                        message_id = message_record["id"]
                        msg = await gmail_API.get_message_full(
                            user_creds=user_creds,
                            message_id=message_id,
                        )
                        # On new messages, send them to all watched chats linked with this email
                        for chat_id_record in watched_chats_records:
                            for text in msg['text_list']:
                                await dp.bot.send_message(
                                    chat_id_record["chat_id"], text)
                            for file in msg['attachments']:
                                await dp.bot.send_document(
                                    chat_id_record["chat_id"],
                                    types.input_file.InputFile(
                                        BytesIO(file['file']),
                                        filename=file['filename']))
        else:
            logging.info(f"Problems with updating history_id in {email}")
    return web.Response(text='OK')
Example #6
async def uploadFile(request):
    # Accepts a file and stores it in a folder named after the current date.
    # The file is given a new name in the form of a hash.
    # The original file name and upload time are kept in the database.
    # If the file exceeds fileSizeLimitInBytes, a 413 status is returned.
    reader = await request.multipart()
    part = await reader.next()
    size = 0  # running total of bytes written so far

    # Check that the file extension is allowed
    ext = part.filename.rsplit(".", 1)[-1]
    if ext not in allowedExtensions:
        return web.HTTPBadRequest(text="File type not allowed")

    # Create the date folder (named %Y%m, i.e. one per month) if it does not exist
    # and build the full path for saving the file
    partFileName = uuid4().hex
    folderDate = datetime.now().strftime("%Y%m")
    folder = os.path.join(request.app["config"]["server"]["upload_dir"],
                          folderDate)
    if not os.path.exists(folder):
        os.mkdir(folder)

    try:
        filename = os.path.join(folder, partFileName)
        with open(filename, 'wb') as file:
            while True:
                chunk = await part.read_chunk()  # 8192 bytes by default.
                if not chunk:
                    break
                size += len(chunk)
                if size > fileSizeLimitInBytes:
                    raise web.HTTPRequestEntityTooLarge(  # 413 http code
                        max_size=fileSizeLimitInBytes, actual_size=size)
                file.write(chunk)

        row = {
            "filePath": os.path.join(folderDate, partFileName),
            "time": time(),
            "originalFileName": part.filename
        }
        result = await request.mongo.file.insert_one(row)
        return web.json_response({"id": str(result.inserted_id)})
    except web.HTTPException:
        # Do not mask the 413 raised above as a 400
        os.remove(filename)
        raise
    except Exception:
        os.remove(filename)
        return web.HTTPBadRequest(text="Something went wrong!")  # 400 http code
Example #7
async def upload(request):
    """POST request to upload a file into the server."""
    reader = await request.multipart()
    files_uploaded = []
    total_size = 0
    max_upload_size = request.app['max_upload_size']
    async for part in reader:
        _, extension = os.path.splitext(part.filename or '')
        resource_name = random_filename(request.app) + extension
        with open(os.path.join(request.app['storage_path'], resource_name),
                  'wb') as f:
            while True:
                if total_size >= max_upload_size:
                    raise web.HTTPRequestEntityTooLarge(
                        max_upload_size, total_size)
                chunk = await part.read_chunk()
                if not chunk:
                    break
                total_size += len(chunk)
                f.write(chunk)
        files_uploaded.append(f'{uri(request)}/{resource_name}')
    return web.Response(text='\n'.join(files_uploaded))
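
aiohttp can also enforce an overall limit by itself via the client_max_size argument of web.Application, which answers oversized payloads with 413 before the handler buffers them; manual accounting as above is still needed for streamed multipart reads, which bypass that check (see also the comment in Example #10). A one-line sketch with an illustrative limit:

app = web.Application(client_max_size=50 * 1024 ** 2)  # 50 MiB cap (illustrative)
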
Example #8
async def api_upload(request):
    data = await request.post()
    image = data['image']
    content_type = image.content_type
    start_time = time.time()
    im_bytes = image.file.read()
    end_time = time.time()
    print(f'spent {end_time - start_time} seconds reading file from api')
    try:
        im_hash, im_password = await upload_image(im_bytes, content_type)
    except TooLargeError:
        return web.HTTPRequestEntityTooLarge(max_size=max_im_len,
                                             actual_size=len(im_bytes))
    return web.json_response({
        'hash': im_hash,
        'url': f'{base_url}/image/{im_hash}',
        'view': f'{base_url}/view/{im_hash}',
        'password': im_password,
        'delete': f'{base_url}/api/delete/{im_password}',
    })
Example #9
async def api_upload_short(request):
    data = await request.post()
    image = data['image']
    content_type = image.content_type
    im_bytes = image.file.read()
    short_url = await generate_short_url()
    try:
        im_hash, im_password = await upload_image(im_bytes,
                                                  content_type,
                                                  short_url=short_url)
    except TooLargeError:
        return web.HTTPRequestEntityTooLarge(max_size=max_im_len,
                                             actual_size=len(im_bytes))
    return web.json_response({
        'url': f'{base_url}/{short_url}',
        'raw': f'{base_url}/image/{im_hash}',
        'shortkey': short_url,
        'view': f'{base_url}/view/{im_hash}',
        'password': im_password,
        'delete': f'{base_url}/api/delete/{im_password}',
    })
Example #10
    async def handle_webhook(self, request):
        # For some reason, aiohttp's client_max_size isn't effective here.
        request_body = await request.content.read(self.MAX_REQUEST_BODY_SIZE)
        if not request.content.at_eof():
            raise web.HTTPRequestEntityTooLarge(self.MAX_REQUEST_BODY_SIZE,
                                                request.content_length or 0)

        provided_token = request.headers.get('X-Gitlab-Token', '')
        if not hmac.compare_digest(provided_token, self.webhook_auth_token):
            raise web.HTTPUnauthorized()

        try:
            webhook_request = GitLabWebhookRequest(**json.loads(request_body))
        except Exception as e:
            log.info('Invalid request: {0}'.format(e))
            raise web.HTTPBadRequest()

        log.info('Received webhook for project with id {0}'.format(
            webhook_request.project_id))

        self.runner.run(webhook_request.project_id)
        return web.Response(text='Done.\n')
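
A reusable sketch of the bounded-read guard above (the helper name read_body_limited is hypothetical): reading limit + 1 bytes makes the check unambiguous even when the body is exactly at the limit.

async def read_body_limited(request, limit):
    body = await request.content.read(limit + 1)
    if len(body) > limit:
        raise web.HTTPRequestEntityTooLarge(limit, request.content_length or 0)
    return body
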
Example #11
async def handle_kkdcp(request):

    length = request.content_length
    if length is None:
        raise web.HTTPLengthRequired(text="Length is required.")
    if length > MAX_LENGTH:
        raise web.HTTPRequestEntityTooLarge(
            MAX_LENGTH, length, text="Request is too large.")

    try:
        data = await request.read()
        proxy_request = codec.decode(data)
    except codec.ParserError as e:
        raise web.HTTPBadRequest(text=str(e))

    loop = asyncio.get_event_loop()

    # TODO: Change this to look up the KDC to talk to
    try:
        # asyncio.wait_for() dropped its loop argument in Python 3.10
        krb5_response = await asyncio.wait_for(
            forward_kerberos(proxy_request.message, loop=loop), timeout=15)
    except asyncio.TimeoutError:
        raise web.HTTPServiceUnavailable(text="Timeout waiting for Kerberos server")

    return web.Response(body=codec.encode(krb5_response), content_type="application/kerberos")
Example #12
async def submit(request):
    global filerouter
    global servermode
    sysconf = au.get_system_conf()
    size_cutoff = sysconf['gui_input_size_limit']
    if request.content_length is None:
        return web.HTTPLengthRequired(
            text=json.dumps({
                'status': 'fail',
                'msg': 'Content-Length header required'
            }))
    if request.content_length > size_cutoff * 1024 * 1024:
        return web.HTTPRequestEntityTooLarge(
            max_size=size_cutoff * 1024 * 1024,
            actual_size=request.content_length,
            text=json.dumps({
                'status': 'fail',
                'msg': f'Input is too big. Limit is {size_cutoff}MB.'
            }))
    if servermode and server_ready:
        r = await cravat_multiuser.is_loggedin(request)
        if r is False:
            return web.json_response({'status': 'notloggedin'})
    jobs_dirs = await filerouter.get_jobs_dirs(request)
    jobs_dir = jobs_dirs[0]
    job_id = get_next_job_id()
    job_dir = os.path.join(jobs_dir, job_id)
    os.makedirs(job_dir, exist_ok=True)
    reader = await request.multipart()
    job_options = {}
    input_files = []
    note = ''  # default; status_json below reads it even when no note was sent
    while True:
        part = await reader.next()
        if not part:
            break
        if part.name.startswith('file_'):
            input_files.append(part)
            # Have to write to disk here
            wfname = part.filename
            wpath = os.path.join(job_dir, wfname)
            with open(wpath, 'wb') as wf:
                wf.write(await part.read())
        elif part.name == 'options':
            job_options = await part.json()
    input_fnames = [fp.filename for fp in input_files]
    run_name = input_fnames[0]
    if len(input_fnames) > 1:
        run_name += '_and_' + str(len(input_fnames) - 1) + '_files'
    info_fname = '{}.status.json'.format(run_name)
    job_info_fpath = os.path.join(job_dir, info_fname)
    job = WebJob(job_dir, job_info_fpath)
    job.save_job_options(job_options)
    job.set_info_values(orig_input_fname=input_fnames,
                        run_name=run_name,
                        submission_time=datetime.datetime.now().isoformat(),
                        viewable=False)
    # Subprocess arguments
    input_fpaths = [os.path.join(job_dir, fn) for fn in input_fnames]
    run_args = ['oc', 'run']
    for fn in input_fnames:
        run_args.append(os.path.join(job_dir, fn))
    # Annotators
    if job_options.get('annotators') and job_options['annotators'][0] != '':
        annotators = job_options['annotators']
        annotators.sort()
        run_args.append('-a')
        run_args.extend(annotators)
    else:
        annotators = ''
        run_args.append('-e')
        run_args.append('*')
    # Liftover assembly
    run_args.append('-l')
    if 'assembly' in job_options:
        assembly = job_options['assembly']
    else:
        assembly = constants.default_assembly
    run_args.append(assembly)
    if servermode and server_ready:
        await cravat_multiuser.update_user_settings(request,
                                                    {'lastAssembly': assembly})
    else:
        au.set_cravat_conf_prop('last_assembly', assembly)
    # Reports
    if 'reports' in job_options and len(job_options['reports']) > 0:
        run_args.append('-t')
        run_args.extend(job_options['reports'])
    else:
        run_args.extend(['--skip', 'reporter'])
    # Note
    if 'note' in job_options:
        note = job_options['note']
        if note != '':
            run_args.append('--note')
            run_args.append(note)
    # Forced input format
    if 'forcedinputformat' in job_options and job_options['forcedinputformat']:
        run_args.append('--input-format')
        run_args.append(job_options['forcedinputformat'])
    if servermode:
        run_args.append('--writeadmindb')
        run_args.extend(['--jobid', job_id])
    run_args.append('--temp-files')
    global job_queue
    global run_jobs_info
    job_ids = run_jobs_info['job_ids']
    job_ids.append(job_id)
    run_jobs_info['job_ids'] = job_ids
    qitem = {'cmd': 'submit', 'job_id': job_id, 'run_args': run_args}
    job_queue.put(qitem)
    status = {'status': 'Submitted'}
    job.set_info_values(status=status)
    if servermode and server_ready:
        await cravat_multiuser.add_job_info(request, job)
    # makes temporary status.json
    status_json = {}
    status_json['job_dir'] = job_dir
    status_json['id'] = job_id
    status_json['run_name'] = run_name
    status_json['assembly'] = assembly
    status_json['db_path'] = ''
    status_json['orig_input_fname'] = input_fnames
    status_json['orig_input_path'] = input_fpaths
    status_json['submission_time'] = datetime.datetime.now().isoformat()
    status_json['viewable'] = False
    status_json['note'] = note
    status_json['status'] = 'Submitted'
    status_json['reports'] = []
    pkg_ver = au.get_current_package_version()
    status_json['open_cravat_version'] = pkg_ver
    status_json['annotators'] = annotators
    with open(os.path.join(job_dir, run_name + '.status.json'), 'w') as wf:
        json.dump(status_json, wf, indent=2, sort_keys=True)
    return web.json_response(job.get_info_dict())
Example #13
async def service_submission(request: web.Request):
    reader = MultipartReader.from_response(request)
    data = None
    filedata = None

    # Read multipart email
    while True:
        part = await reader.next()  # pylint: disable=not-callable
        if part is None:
            break
        if part.headers[hdrs.CONTENT_TYPE] == "application/json":
            data = await part.json()
            continue
        if part.headers[hdrs.CONTENT_TYPE] == "application/zip":
            filedata = await part.read(decode=True)
            # Validate max file size
            maxsize = 10 * 1024 * 1024  # 10MB
            actualsize = len(filedata)
            if actualsize > maxsize:
                raise web.HTTPRequestEntityTooLarge(maxsize, actualsize)
            filename = part.filename
            continue
        raise web.HTTPUnsupportedMediaType(
            reason=f"One part had an unexpected type: {part.headers[hdrs.CONTENT_TYPE]}"
        )

    # data (dict) and file (bytearray) have the necessary information to compose the email
    support_email_address = request.app[APP_CONFIG_KEY]["smtp"]["sender"]
    is_real_usage = any(
        env in os.environ.get("SWARM_STACK_NAME", "")
        for env in ("production", "staging")
    )
    db = get_storage(request.app)
    user = await db.get_user({"id": request[RQT_USERID_KEY]})
    user_email = user.get("email")
    if not is_real_usage:
        support_email_address = user_email

    try:
        # NOTE: temporarily internal import to avoid render_and_send_mail to be interpreted as handler
        # TODO: Move outside when get_handlers_from_namespace is fixed
        from .login.utils import render_and_send_mail

        attachments = [("metadata.json", json.dumps(data, indent=4))]
        if filedata:
            attachments.append((filename, filedata))
        # send email
        await render_and_send_mail(
            request,
            to=support_email_address,
            template=common_themed(EMAIL_TEMPLATE_NAME),
            context={
                "user": user_email,
                "data": json2html.convert(
                    json=json.dumps(data), table_attributes='class="pure-table"'
                ),
                "subject": "TEST: " * (not is_real_usage) + "New service submission",
            },
            attachments=attachments,
        )
    except Exception as exc:
        log.exception("Error while sending the 'new service submission' mail.")
        raise web.HTTPServiceUnavailable() from exc

    raise web.HTTPNoContent(content_type="application/json")
Example #14
def route(request):

    session = yield from get_session(request)
    if 'uid' in session:
        uid = session['uid']
    else:
        return web.HTTPForbidden()

    query_parameters = request.rel_url.query
    try:
        fid = int(query_parameters["fid"])
        fid = str(fid).zfill(8)
    except (KeyError, ValueError):
        return web.HTTPBadRequest()

    if request.content_type != "multipart/form-data":
        return web.HTTPBadRequest()

    try:
        reader = yield from request.multipart()
    except Exception:
        return web.HTTPBadRequest()

    part = yield from reader.next()  # renamed from 'next' to avoid shadowing the builtin

    photo_dir = os.path.join(request.app["photo_dir"], fid)

    if not os.path.exists(photo_dir):
        os.mkdir(photo_dir)

    size = 0
    suffix = ''
    hash_calc = hashlib.md5()

    while True:
        try:
            chunk = yield from part.read_chunk()  # 8192 bytes by default
        except Exception:
            return web.HTTPBadRequest()

        if not chunk:
            break

        if size == 0:

            if len(chunk) < 4:
                return web.HTTPBadRequest()

            # Hex of the first four bytes; enough to recognize the JPEG/PNG/GIF magic numbers
            top_bytes = ''.join('{:02x}'.format(x)
                                for x in chunk[0:4]).upper()

            if top_bytes[0:6] == 'FFD8FF':
                suffix = "jpg"
            elif top_bytes[0:8] == '89504E47':
                suffix = "png"
            elif top_bytes[0:8] == '47494638':
                suffix = "gif"
            else:
                return web.HTTPBadRequest()

            while True:
                temp_name = str(int(time.time())) + str(random.randint(
                    0, 9999)).zfill(4)
                temp_file = os.path.join(photo_dir, temp_name)
                if not os.path.exists(temp_file):
                    break

            file = open(temp_file, 'wb')

        size = size + len(chunk)
        file.write(chunk)
        hash_calc.update(chunk)

        if size / 1048576 > 2:  # size limit 2MB
            file.close()
            os.remove(temp_file)
            return web.HTTPRequestEntityTooLarge(2 * 1048576, size)  # args required in aiohttp 3.x

    file.close()
    hash_value = hash_calc.hexdigest()
    formal_name = hash_value + "." + suffix
    formal_file = os.path.join(photo_dir, formal_name)

    if os.path.exists(formal_file):
        os.remove(temp_file)
    else:
        os.rename(temp_file, formal_file)

    # with (yield from request.app['pool']) as connect:

    #    cursor = yield from connect.cursor()

    #    try:
    #        yield from cursor.execute('''
    #            INSERT INTO photo VALUES(%s,%s,%s,%s)
    #        ''',(hash_value,fid,suffix,0))
    #        yield from connect.commit()
    #    except Exception as e:
    #        print(e)

    #    yield from cursor.close()
    #    connect.close()

    return web.Response(text=toolbox.jsonify({"filename": formal_name}),
                        # headers = {'Access-Control-Allow-Origin':'*'}
                        )
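
The magic-number sniffing in this example and the next can be made table-driven; a minimal sketch using the standard JPEG/PNG/GIF signatures:

MAGIC_NUMBERS = {b"\xff\xd8\xff": "jpg", b"\x89PNG": "png", b"GIF8": "gif"}

def sniff_suffix(head):
    # Return the file suffix for a recognized signature, else None.
    for magic, suffix in MAGIC_NUMBERS.items():
        if head.startswith(magic):
            return suffix
    return None
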
Example #15
def route(request):

    if request.content_type != "multipart/form-data":
        return web.HTTPBadRequest()

    query_parameters = request.rel_url.query
    if "token" in query_parameters:
        room = toolbox.token_verify(query_parameters["token"])
        if room is None:
            return web.HTTPBadRequest()
        elif not re.search(r'^\d{6}$', room):
            return web.HTTPBadRequest()
    else:
        return web.HTTPBadRequest()

    try:
        reader = yield from request.multipart()
    except Exception:
        return web.HTTPBadRequest()

    data = yield from reader.next()

    photo_dir = os.path.join(request.app["photo_dir"], room)

    if not os.path.exists(photo_dir):
        os.mkdir(photo_dir)

    temp_path = ''
    size = 0
    suffix = ''
    hash_calc = hashlib.md5()

    while True:
        try:
            chunk = yield from data.read_chunk()  # 8192 bytes by default
        except Exception:
            return web.HTTPBadRequest()

        if not chunk:
            break

        if size == 0:

            if len(chunk) < 4:
                return web.HTTPUnsupportedMediaType(
                    reason="unsupported file type")

            # top_bytes = chunk[0:4].hex().upper()
            top_bytes = ''.join('{:02x}'.format(x) for x in chunk[0:4]).upper()

            if top_bytes[0:6] == 'FFD8FF':
                suffix = "jpg"
            elif top_bytes[0:8] == '89504E47':
                suffix = "png"
            elif top_bytes[0:8] == '47494638':
                suffix = "gif"
            else:
                return web.HTTPUnsupportedMediaType(
                    reason="unsupported file type")

            while True:
                temp_name = str(int(time.time())) + str(random.randint(
                    0, 9999)).zfill(4)
                temp_path = os.path.join(photo_dir, temp_name)
                if not os.path.exists(temp_path):
                    file = open(temp_path, 'wb')
                    break

        size = size + len(chunk)
        file.write(chunk)
        hash_calc.update(chunk)

        if size / 1048576 > 3:  # size limit 3MB
            file.close()
            os.remove(temp_path)
            return web.HTTPRequestEntityTooLarge(
                3 * 1048576, size, reason="file size overflow")

    file.close()
    hash_value = hash_calc.hexdigest()
    formal_name = hash_value + "." + suffix
    formal_path = os.path.join(photo_dir, formal_name)

    if os.path.exists(formal_path):
        os.remove(temp_path)
    else:
        os.rename(temp_path, formal_path)

    return web.Response(text=formal_name,
                        # headers={'Access-Control-Allow-Origin':'*'}
                        )