Example #1
from flask import flash, redirect, request
from werkzeug.exceptions import HTTPException, InternalServerError

def errorhandler(e):
    """Handle error"""
    if not isinstance(e, HTTPException):
        e = InternalServerError()
    flash("Unexpected Error")
    return redirect(request.referrer)
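A handler like this only takes effect once it is registered; a minimal sketch of wiring it up for every standard HTTP error code, assuming a Flask `app` object:

from werkzeug.exceptions import default_exceptions

# Register the handler above for every standard HTTP error code.
for code in default_exceptions:
    app.errorhandler(code)(errorhandler)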
Example #2
    def response_500(self, error=InternalServerError()):
        if isinstance(error, HTTPException):
            return self.error(error=error)
        return self.error(error=InternalServerError())
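Note that the `error=InternalServerError()` default above is evaluated once, at function definition time; the usual pattern is a `None` default instantiated per call. A sketch of that variant (not the project's actual code):

    def response_500(self, error=None):
        # Instantiate the default per call instead of at definition time.
        if error is None or not isinstance(error, HTTPException):
            error = InternalServerError()
        return self.error(error=error)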
Example #3
    def run_wsgi(self):
        if self.headers.get('Expect', '').lower().strip() == '100-continue':
            self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')

        self.environ = environ = self.make_environ()
        headers_set = []
        headers_sent = []

        def write(data):
            assert headers_set, 'write() before start_response'
            if not headers_sent:
                status, response_headers = headers_sent[:] = headers_set
                try:
                    code, msg = status.split(None, 1)
                except ValueError:
                    code, msg = status, ""
                code = int(code)
                self.send_response(code, msg)
                header_keys = set()
                for key, value in response_headers:
                    self.send_header(key, value)
                    key = key.lower()
                    header_keys.add(key)
                if not ('content-length' in header_keys
                        or environ['REQUEST_METHOD'] == 'HEAD'
                        or code < 200 or code in (204, 304)):
                    self.close_connection = True
                    self.send_header('Connection', 'close')
                if 'server' not in header_keys:
                    self.send_header('Server', self.version_string())
                if 'date' not in header_keys:
                    self.send_header('Date', self.date_time_string())
                self.end_headers()

            assert isinstance(data, bytes), 'applications must write bytes'
            self.wfile.write(data)
            self.wfile.flush()

        def start_response(status, response_headers, exc_info=None):
            if exc_info:
                try:
                    if headers_sent:
                        reraise(*exc_info)
                finally:
                    exc_info = None
            elif headers_set:
                raise AssertionError('Headers already set')
            headers_set[:] = [status, response_headers]
            return write

        def execute(app):
            application_iter = app(environ, start_response)
            try:
                for data in application_iter:
                    write(data)
                if not headers_sent:
                    write(b'')
            finally:
                if hasattr(application_iter, 'close'):
                    application_iter.close()
                application_iter = None

        try:
            execute(self.server.app)
        except (socket.error, socket.timeout) as e:
            self.connection_dropped(e, environ)
        except Exception:
            if self.server.passthrough_errors:
                raise
            from werkzeug.debug.tbtools import get_current_traceback
            traceback = get_current_traceback(ignore_system_exceptions=True)
            try:
                # if we haven't yet sent the headers but they are set
                # we roll back to be able to set them again.
                if not headers_sent:
                    del headers_set[:]
                execute(InternalServerError())
            except Exception:
                pass
            self.server.log('error', 'Error on request:\n%s',
                            traceback.plaintext)
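`write` and `start_response` together implement the server side of the PEP 3333 protocol: the application calls `start_response` to stage status and headers, and the first `write` flushes them. A minimal WSGI application that exercises this loop under werkzeug's development server (a sketch):

from werkzeug.serving import run_simple

def demo_app(environ, start_response):
    # start_response stages status/headers; run_wsgi's write() sends
    # them along with the first body chunk yielded here.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from run_wsgi\n']

if __name__ == '__main__':
    run_simple('localhost', 5000, demo_app)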
Example #4
        try:
            execute(app)
        except (socket.error, socket.timeout) as e:
            self.connection_dropped(e, environ)
        except Exception:
            if self.server.passthrough_errors:
                raise
            from werkzeug.debug.tbtools import get_current_traceback
            traceback = get_current_traceback(ignore_system_exceptions=True)
            try:
                # if we haven't yet sent the headers but they are set
                # we roll back to be able to set them again.
                if not headers_sent:
                    del headers_set[:]
                execute(InternalServerError())
            except Exception:
                pass
            self.server.log('error', 'Error on request:\n%s',
                            traceback.plaintext)

    def handle(self):
        """Handles a request ignoring dropped connections."""
        try:
            return BaseHTTPRequestHandler.handle(self)
        except (socket.error, socket.timeout) as e:
            self.connection_dropped(e)
        except Exception:
            if self.server.ssl_context is None or not is_ssl_error():
                raise
Example #5
def why_me():
    raise InternalServerError("Why me?")
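Because `InternalServerError` is an `HTTPException`, raising it from a view is rendered as a regular 500 response rather than a crash; a minimal sketch with Flask's test client (the route name is illustrative):

from flask import Flask
from werkzeug.exceptions import InternalServerError

app = Flask(__name__)

@app.route('/why-me')
def why_me():
    raise InternalServerError("Why me?")

with app.test_client() as client:
    # The raised exception is rendered as a standard 500 response.
    assert client.get('/why-me').status_code == 500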
Example #6
import cgi

from flask import request
from werkzeug.exceptions import InternalServerError

def expect_content_type(value: str):
    content_type = request.headers["Content-Type"]
    content_type, _ = cgi.parse_header(content_type)
    if content_type != value:
        raise InternalServerError(f"Expected {value} payload")
Example #7
def _get_playable_recommendations_list(mbids_and_ratings_list):
    """ Get artist, track etc info from recording mbid using labs.listenbrainz.api
        so that they can be played using BrainzPlayer. Refer to webserver/static/js/src/BrainzPlayer.tsx

        Args:
            mbids_and_ratings_list: Contains recording mbid and corresponding score.

        Returns:
            recommendations: list of recommendations of the format
                {
                    'listened_at' : 0,
                    'track_metadata' : {
                        'artist_name' : 'John Mayer',
                        'track_name' : 'Edge of desire',
                        'release_name' : "",
                        'additional_info' : {
                            'recording_mbid' : "181c4177-f33a-441d-b15d-910acaf18b07",
                            'artist_mbids' : "181c4177-f33a-441d-b15d-910acaf18b07"
                        }
                    }
                }
    """
    data = []
    for r in mbids_and_ratings_list:
        data.append({'[recording_mbid]': r['recording_mbid']})

    r = requests.post(SERVER_URL, json=data)
    if r.status_code != 200:
        if r.status_code == 400:
            current_app.logger.error(
                'Invalid data was sent to the labs API.\nData: {}'.format(
                    data))
            raise BadRequest
        else:
            current_app.logger.error(
                "API didn't send a valid response due to Internal Server Error.\nData: {}"
                .format(data))
            raise InternalServerError

    try:
        rows = ujson.loads(r.text)
    except Exception as err:
        raise InternalServerError(str(err))

    recommendations = []

    for row in rows:
        recommendations.append({
            'listened_at': 0,
            'track_metadata': {
                'artist_name': row['artist_credit_name'],
                'track_name': row['recording_name'],
                'release_name': row.get('release_name', ""),
                'additional_info': {
                    'recording_mbid': row['recording_mbid'],
                    'artist_mbids': row['[artist_credit_mbids]']
                }
            }
        })

    return recommendations
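The status-code branches above map HTTP responses onto werkzeug exception classes by hand; werkzeug's `abort` helper performs the same code-to-exception lookup, so an equivalent hedged sketch is:

from werkzeug.exceptions import abort

def raise_for_labs_status(r, data):
    # abort(code, description) raises the exception class registered
    # for that code: BadRequest for 400, InternalServerError for 500.
    if r.status_code == 400:
        abort(400, 'Invalid data was sent to the labs API: {}'.format(data))
    if r.status_code != 200:
        abort(500)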
Example #8
    def patch_version(self):
        if self.sversion is not None:
            return int(self.sversion % 100)
        raise InternalServerError(self._INFORMATION_MSG)
Example #9
def errorhandler(e):
    # Handle error
    if not isinstance(e, HTTPException):
        e = InternalServerError()
    return error(e.name, e.code)
Example #10
def generate_quiz_leaf(node, weight_depth, blacklisted_leaf_id=None):
    node_data = get_depth_popularity_info(node)

    weight_high_depth_query = "power(((l.popularity - :min_popularity + 1) / (:max_popularity - :min_popularity)) + 4 * ((q.depth - :min_depth) / (:max_depth - :min_depth)), 0.5)"
    weight_low_depth_query = "power(((l.popularity - :min_popularity + 1) / (:max_popularity - :min_popularity)) + 4 * (1 -((q.depth - :min_depth) / (:max_depth - :min_depth))), 0.5)"

    score_query = weight_high_depth_query if weight_depth else weight_low_depth_query

    leaf_response = db.session.execute(
        text(
            "select distinct l.id, l.ott, l.name, vernacular_by_ott.vernacular, iucn.status_code, images_by_ott.src, images_by_ott.src_id, "
            + score_query +
            " as score, q.depth, l.popularity, l.wikidata, l.eol"
            """
            from ordered_leaves l
            join quiz_leaves_by_ott q on q.leaf_id = l.id
            join images_by_ott ON (l.ott = images_by_ott.ott AND images_by_ott.best_any = 1)
            left join vernacular_by_ott on (l.ott = vernacular_by_ott.ott and vernacular_by_ott.lang_primary = 'en' and vernacular_by_ott.preferred = 1)
            left join iucn on l.ott = iucn.ott
            where l.id between :leaf_left and :leaf_right and best_any = 1 and l.id != :blacklisted_leaf_id
            group by l.ott
            order by rand() * score desc
            limit 1
            """),
        {
            "leaf_left": node["leaf_left"],
            "leaf_right": node["leaf_right"],
            "min_depth": node_data["min_depth"],
            "max_depth": node_data["max_depth"],
            "min_popularity": node_data["min_popularity"],
            "max_popularity": node_data["max_popularity"],
            "blacklisted_leaf_id": blacklisted_leaf_id or 0,
        },
    ).fetchall()

    if not leaf_response:
        print("Leaf generation failed.")
        print(node, node_data, blacklisted_leaf_id)
        raise InternalServerError("Leaf generation failed.")

    leaf = dict(
        zip(
            [
                "id",
                "ott",
                "name",
                "vernacular",
                "iucn",
                "img_src",
                "img_src_id",
                "score",
                "depth",
                "popularity",
                "wikidata",
                "eol",
            ],
            leaf_response[0],
        ))

    leaf["thumbnail"] = make_thumbnail_url(leaf["img_src"], leaf["img_src_id"])
    return leaf
Example #11
    def major_version(self):
        if self.sversion is not None:
            return int(self.sversion / 10000)
        raise InternalServerError(self._INFORMATION_MSG)
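Both properties assume a PostgreSQL-style integer `sversion`, where the patch number occupies the last two decimal digits and the minor number the two before it; a worked sketch of the decomposition:

# sversion 90624 encodes PostgreSQL 9.6.24
sversion = 90624
major = sversion // 10000        # 9
minor = (sversion // 100) % 100  # 6
patch = sversion % 100           # 24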
Example #12
def generate_quiz_question(quiz):
    node = (db.session.query(
        Node.id, Node.node_rgt).where(Node.ott == quiz["ott"]).first())

    # Find a parent node which has at least two direct valid quiz nodes
    parent_node = (db.session.query(Node.id, Node.ott, Node.name).join(
        QuizNode, QuizNode.node_id == Node.id).where(
            between(
                Node.id, node.id,
                node.node_rgt)).where(QuizNode.can_branch == True).order_by(
                    desc(func.pow(Node.popularity, 0.1) *
                         func.rand())).first())

    if not parent_node:
        raise InternalServerError(
            f"No suitable parent nodes for the ott: {quiz['ott']}")

    # Randomly pick two valid child nodes which will become node_left and node_right
    result = db.session.execute(
        text("""
            SELECT n.id, n.leaf_lft, n.leaf_rgt
            FROM ordered_nodes n
            JOIN quiz_nodes q on q.node_id = n.id
            WHERE n.real_parent = :parent_node_id AND q.num_quiz_leaves > 1
            ORDER BY rand()
            LIMIT 2
            """),
        {
            "parent_node_id": parent_node.id
        },
    ).fetchall()

    if len(result) < 2:
        print("Selected parent node has fewer than two valid node children.")
        print(parent_node._asdict())
        raise InternalServerError()

    node_left_result, node_right_result = result
    node_left = dict(zip(["id", "leaf_left", "leaf_right"], node_left_result))
    node_right = dict(zip(["id", "leaf_left", "leaf_right"],
                          node_right_result))

    leaf_left_1 = generate_quiz_leaf(node_left, False)
    leaf_left_2 = generate_quiz_leaf(node_left, True, leaf_left_1["id"])
    leaf_right = generate_quiz_leaf(node_right, True)

    swap = random() > 0.5

    question = {
        "compare": leaf_left_1,
        "option1": leaf_left_2 if swap else leaf_right,
        "option2": leaf_right if swap else leaf_left_2,
    }

    save_question_to_db(
        quiz_id=quiz["id"],
        leaf_compare=question["compare"],
        leaf_1=question["option1"],
        leaf_2=question["option2"],
    )

    return question
Example #13
def raiseInternalServerError():
    raise InternalServerError('Hello world')
Example #14
    def post(self):
        if request.headers.get('Tus-Resumable') is None:
            raise BadRequest(
                'Received file upload for unsupported file transfer protocol')

        file_size = request.headers.get('Upload-Length')
        max_file_size = current_app.config["MAX_CONTENT_LENGTH"]
        if not file_size:
            raise BadRequest('Received file upload of unspecified size')
        file_size = int(file_size)
        if file_size > max_file_size:
            raise RequestEntityTooLarge(
                f'The maximum file upload size is {max_file_size/1024/1024}MB.'
            )

        data = self.parser.parse_args()
        filename = data.get('filename')
        if not filename:
            raise BadRequest('File name cannot be empty')
        if filename.endswith(FORBIDDEN_FILETYPES):
            raise BadRequest('File type is forbidden')

        document_guid = str(uuid.uuid4())
        base_folder = current_app.config['UPLOADED_DOCUMENT_DEST']
        folder = data.get('folder')
        folder = os.path.join(base_folder, folder)
        file_path = os.path.join(folder, document_guid)
        pretty_folder = data.get('pretty_folder')
        pretty_path = os.path.join(base_folder, pretty_folder, filename)

        try:
            if not os.path.exists(folder):
                os.makedirs(folder)
            with open(file_path, "wb") as f:
                f.seek(file_size - 1)
                f.write(b"\0")
        except IOError as e:
            raise InternalServerError('Unable to create file') from e

        cache.set(FILE_UPLOAD_SIZE(document_guid), file_size, TIMEOUT_24_HOURS)
        cache.set(FILE_UPLOAD_OFFSET(document_guid), 0, TIMEOUT_24_HOURS)
        cache.set(FILE_UPLOAD_PATH(document_guid), file_path, TIMEOUT_24_HOURS)

        document_info = DocumentManager(
            document_guid=document_guid,
            full_storage_path=file_path,
            upload_started_date=datetime.utcnow(),
            file_display_name=filename,
            path_display_name=pretty_path,
        )
        document_info.save()

        response = make_response(jsonify(document_manager_guid=document_guid),
                                 201)
        response.headers['Tus-Resumable'] = TUS_API_VERSION
        response.headers['Tus-Version'] = TUS_API_SUPPORTED_VERSIONS
        response.headers[
            'Location'] = f'{current_app.config["DOCUMENT_MANAGER_URL"]}/document-manager/{document_guid}'
        response.headers['Upload-Offset'] = 0
        response.headers[
            'Access-Control-Expose-Headers'] = "Tus-Resumable,Tus-Version,Location,Upload-Offset"
        response.autocorrect_location_header = False
        return response
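After this 201 response a tus client continues the upload with `PATCH` requests against the returned `Location`; a hedged sketch of that follow-up using `requests` (URL handling and chunking are illustrative, not part of this codebase):

import requests

def upload_chunk(location_url, offset, chunk):
    # tus core protocol: each PATCH carries the current offset and an
    # application/offset+octet-stream body; the server answers with
    # the new Upload-Offset.
    resp = requests.patch(
        location_url,
        data=chunk,
        headers={
            'Tus-Resumable': '1.0.0',
            'Upload-Offset': str(offset),
            'Content-Type': 'application/offset+octet-stream',
        },
    )
    resp.raise_for_status()
    return int(resp.headers['Upload-Offset'])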
Example #15
def errorhandler(e):
    """Handle error"""
    if not isinstance(e, HTTPException):
        e = InternalServerError()
Example #16
            def post(self):
                """
                Create a new embedding
                """
                entity = request.get_json()

                # looks weird but is the only reliable way to find out if a string value is a true boolean ;-)
                # see https://stackoverflow.com/questions/715417/converting-from-a-string-to-boolean-in-python
                handle_async = request.args.get('async', "",
                                                type=str).lower() in yes_list
                try:
                    entity['created'] = datetime.datetime.now(
                        datetime.timezone.utc).isoformat()

                    b64_graph_str = entity.get('graph')
                    try:
                        graph_str = base64.b64decode(b64_graph_str)
                        node_ids, edges = get_nodes_and_edges_from_graph(
                            graph_str)
                        # node_ids ==> List(str)
                        # edges ==> List(Edge)

                    except Exception as e:
                        app.logger.exception(e)
                        raise BadRequest(
                            "The graph string has to be a base64 encoded graphml string! "
                            "The exact error was: " + str(e))

                    len_nodes = len(node_ids)  # Number of nodes
                    len_edges = len(edges)  # Number of edges

                    if len_edges > 1900 or len_nodes > 600:
                        raise BadRequest(
                            "For fairness reasons this API will only handle graphs with at most 600 vertices and 1900 "
                            "edges. Your graph has {} vertices and {} edges, which exceeds the limit."
                            "".format(len_nodes, len_edges))

                    # Check if self loops are present! We do not support self loops
                    for e in edges:
                        if e.source == e.target:
                            raise BadRequest(
                                "The Implementation only supports graphs where "
                                "every edge has two distinct start and end nodes"
                            )

                    # ignore double edges
                    # # validate for no double edges
                    # all_edge_endpoints = [{e.source, e.target} for e in edges]
                    # duplicate_edges = get_duplicates(all_edge_endpoints)
                    # if len(duplicate_edges) > 0:
                    #     abort(400,
                    #           "Multiedges are not allowed. "
                    #           "The following edges were recognized as duplicate {}".format(duplicate_edges))

                    # validate for unique edge ids
                    duplicate_edge_ids = get_duplicates([e.id for e in edges])
                    if len(duplicate_edge_ids) > 0:
                        abort(
                            400, "Edge ids have to be unique. "
                            "The following ids were recognized as duplicate {}"
                            .format(duplicate_edge_ids))

                    # validate page id uniqueness
                    page_ids = [p['id'] for p in entity.get('pages')]
                    duplicate_page_ids = get_duplicates(page_ids)
                    if len(duplicate_page_ids) > 0:
                        abort(
                            400, "Duplicated page ids are not allowed. "
                            "The following id were recognized as duplicate {}".
                            format(duplicate_page_ids))

                    entity['status'] = 'IN_PROGRESS'
                    entity = data_store.insert_new_element(
                        entity)  # entity id is returned here

                    # validate graph not empty
                    if len(page_ids) == 0 or len_edges == 0 or len_nodes == 0:
                        abort(
                            400,
                            "Please submit a graph with at least one node, edge and page"
                        )

                    if handle_async:
                        # abort(501, "Async handling is not enabled.")
                        future_result: ProcessFuture = pool.schedule(
                            SolverInterface.solve,
                            (node_ids, edges, entity.get('pages'),
                             entity.get('constraints'), entity['id']))
                        future_result.add_done_callback(
                            processing_finished_callback)

                        future_result.done()
                        # remove old futures
                        remove_old_jobs()
                        jobs.append(QueueItem(entity.get('id'), future_result))

                    else:
                        try:
                            entity = handle_solver_result(
                                SolverInterface.solve(
                                    node_ids, edges, entity.get('pages'),
                                    entity.get('constraints'), entity['id']))
                        except Exception as e1:
                            error_callback(e1)
                            entity = data_store.get_by_id(entity['id'])

                    return jsonify(entity)
                except HTTPException as e:
                    raise e
                except Exception as e:
                    raise InternalServerError(
                        "The error {} \noccured from this body \n{}".format(
                            str(e), request.get_data(as_text=True))) from e
Example #17
def handle_http_exception(error):
    """Return error description and details in response body

    This function is registered at init to handle HTTPException.
    When abort is called in the code, this triggers an HTTPException, and Flask
    calls this handler to generate a better response.

    Also, when an Exception is not caught in a view, Flask automatically
    calls the 500 error handler.

    flask_rest_api republishes webargs' abort override. This abort allows the
    caller to pass kwargs and stores those kwargs in exception.data.

    This handler uses this extra information to populate the response.

    Extra information considered by this handler:
    - message: a comment (string)
    - errors: a dict of errors, typically validation issues on a form
    - headers: a dict of additional headers

    When the error was triggered with 'abort', it is logged with INFO level.
    """

    # TODO: manage case where messages/errors is a list?
    # TODO: use an error Schema
    # TODO: make log optional?

    log_info = True

    # Flask redirects unhandled exceptions to error 500 handler
    # If error is not a HTTPException, then it is an unhandled exception
    # Return a 500 (InternalServerError)
    if not isinstance(error, HTTPException):
        error = InternalServerError()
        # Flask logs uncaught exceptions as ERROR already, no need to log here
        log_info = False

    headers = {}

    payload = {
        'status': str(error),
    }

    # Get additional info passed as kwargs when calling abort
    # data may not exist if HTTPException was raised without using webargs abort
    # or if no kwargs were passed (https://github.com/sloria/webargs/pull/184)
    data = getattr(error, 'data', None)
    if data:
        # If we passed a custom message
        if 'message' in data:
            payload['message'] = data['message']
        # If we passed "errors"
        if 'errors' in data:
            payload['errors'] = data['errors']
        # If webargs added validation errors as "messages"
        # (you should use 'errors' as it is more explicit)
        elif 'messages' in data:
            payload['errors'] = data['messages']
        # If we passed additional headers
        if 'headers' in data:
            headers = data['headers']

    # Log error as INFO, including payload content
    if log_info:
        log_string_content = [
            str(error.code),
        ]
        for key in ('message', 'errors'):
            if key in payload:
                log_string_content.append(str(payload[key]))
        current_app.logger.info(' '.join(log_string_content))

    return jsonify(payload), error.code, headers
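For the "Flask automatically calls the 500 error handler" path described in the docstring to work, the handler has to be registered at init; a minimal sketch:

def register_error_handlers(app):
    # Handles both explicit aborts and uncaught exceptions
    # (which Flask converts to a 500).
    app.register_error_handler(HTTPException, handle_http_exception)
    app.register_error_handler(500, handle_http_exception)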
Example #18
def search(request_params: MultiDict) -> Response:
    """
    Perform a search from the advanced search interface.

    This is intended to support ONLY form-based search, to replace the classic
    advanced search view.

    Parameters
    ----------
    request_params : dict

    Returns
    -------
    dict
        Response content.
    int
        HTTP status code.
    dict
        Extra headers to add to the response.

    Raises
    ------
    InternalServerError
        Raised when there is an unrecoverable error while interacting with the
        search index.

    """
    # We may need to intervene on the request parameters, so we'll
    # reinstantiate as a mutable MultiDict.
    if isinstance(request_params, ImmutableMultiDict):
        request_params = MultiDict(request_params.items(multi=True))

    logger.debug("search request from advanced form")
    response_data: Dict[str, Any] = {}
    response_data["show_form"] = "advanced" not in request_params
    logger.debug("show_form: %s", str(response_data["show_form"]))

    # Here we intervene on the user's query to look for holdouts from
    # the classic search system's author indexing syntax (surname_f). We
    # rewrite with a comma, and show a warning to the user about the
    # change.
    has_classic = False
    for key, value in request_params.items():
        if value is None:
            continue
        match = TERM_FIELD_PTN.search(key)
        if match is None:
            continue
        value = str(value)
        i = match.group(1)
        field = request_params.get(f"terms-{i}-field")
        # We are only looking for this syntax in the author search, or
        # in an all-fields search.
        if field not in ["all", "author"]:
            continue

        value, _has_classic = catch_underscore_syntax(value)
        has_classic = _has_classic if not has_classic else has_classic
        request_params.setlist(key, [value])

    response_data["has_classic_format"] = has_classic
    form = forms.AdvancedSearchForm(request_params)
    q: Optional[Query]
    # We want to avoid attempting to validate if no query has been entered.
    #  If a query was actually submitted via the form, 'advanced' will be
    #  present in the request parameters.
    if "advanced" in request_params:

        if form.validate():
            logger.debug("form is valid")
            q = _query_from_form(form)

            # Pagination is handled outside of the form.
            q = paginate(q, request_params)

            try:
                # Execute the search. We'll use the results directly in
                #  template rendering, so they get added directly to the
                #  response content.
                response_data.update(SearchSession.search(q))  # type: ignore
            except index.IndexConnectionError as ex:
                # There was a (hopefully transient) connection problem. Either
                #  this will clear up relatively quickly (next request), or
                #  there is a more serious outage.
                logger.error("IndexConnectionError: %s", ex)
                raise InternalServerError(
                    "There was a problem connecting to the search index. This "
                    "is quite likely a transient issue, so please try your "
                    "search again. If this problem persists, please report it "
                    "to [email protected].") from ex
            except index.QueryError as ex:
                # Base exception routers should pick this up and show bug page.
                logger.error("QueryError: %s", ex)
                raise InternalServerError(
                    "There was a problem executing your query. Please try "
                    "your search again.  If this problem persists, please "
                    "report it to [email protected].") from ex
            except index.OutsideAllowedRange as ex:
                raise BadRequest(
                    "Hello clever friend. You can't get results in that range"
                    " right now.") from ex
            response_data["query"] = q
        else:
            logger.debug("form is invalid: %s", str(form.errors))
            if "order" in form.errors or "size" in form.errors:
                # It's likely that the user tried to set these parameters
                # manually, or that the search originated from somewhere else
                # (and was configured incorrectly).
                advanced_url = url_for("ui.advanced_search")
                raise BadRequest(
                    f"It looks like there's something odd about your search"
                    f" request. Please try <a href='{advanced_url}'>starting"
                    f" over</a>.")

            # Force the form to be displayed, so that we can render errors.
            #  This has most likely occurred due to someone manually crafting
            #  a GET response, but it could be something else.
            response_data["show_form"] = True

    # We want the form handy even when it is not shown to the user. For
    #  example, we can generate new form-friendly requests to update sort
    #  order and page size by embedding the form (hidden).
    response_data["form"] = form
    return response_data, HTTPStatus.OK, {}
Example #19
    def xml(self, **kwargs):
        database = kwargs.get('database', None)
        if not database:
            database = db_monodb()
        req = openerp.http.request
        language = kwargs.get('language', None)
        if req.httprequest.method == 'GET':
            # Login
            database = kwargs.get('database', None)
            req.session.db = database
            try:
                self.authenticate(req, database, language)
            except Exception as e:
                logger.warning("Failed login attempt: %s" % e)
                return Response('Login with Odoo user name and password',
                                401,
                                headers=[('WWW-Authenticate',
                                          'Basic realm="odoo"')])

            # As an optional extra security check we can validate a web token
            # attached to the request. It allows us to verify that the request
            # is generated from frePPLe and not from somebody else.

            # Generate data
            try:
                xp = exporter(req,
                              database=database,
                              company=kwargs.get('company', None),
                              mode=int(kwargs.get('mode', 1)))
                # TODO Returning an iterator to stream the response back to the client and
                # to save memory on the server side
                return req.make_response(
                    ''.join([i for i in xp.run()]),
                    headers=[('Content-Type', 'application/xml;charset=utf8'),
                             ('Cache-Control',
                              'no-cache, no-store, must-revalidate'),
                             ('Pragma', 'no-cache'), ('Expires', '0')])
            except Exception as e:
                logger.exception('Error generating frePPLe XML data')
                raise InternalServerError(
                    description=
                    'Error generating frePPLe XML data: check the Odoo log file for more details'
                )
        elif req.httprequest.method == 'POST':
            # Authenticate the user
            database = req.httprequest.form.get('database', None)
            req.session.db = database
            try:
                self.authenticate(req, database, language)
            except Exception as e:
                logger.warning("Failed login attempt %s" % e)
                return Response('Login with Odoo user name and password',
                                401,
                                headers=[('WWW-Authenticate',
                                          'Basic realm="odoo"')])

            # Validate the company argument
            company_name = req.httprequest.form.get('company', None)
            company = None
            m = req.session.model('res.company')
            m_search = m.search([('name', '=', company_name)])
            for i in m.browse(m_search):
                company = i
            if not company:
                return Response('Invalid company name argument', 401)

            # Verify that the data was posted from frePPLe and nobody else
            try:
                webtoken = req.httprequest.form.get('webtoken', None)
                decoded = jwt.decode(webtoken,
                                     company.webtoken_key,
                                     algorithms=['HS256'])
                if self.user != decoded.get('user', None):
                    return Response('Incorrect or missing webtoken', 401)
            except Exception:
                return Response('Incorrect or missing webtoken', 401)

            # Import the data
            try:
                ip = importer(req,
                              database=database,
                              company=company,
                              mode=req.httprequest.form.get('mode', 1))
                return req.make_response(
                    ip.run(),
                    [('Content-Type', 'text/plain'),
                     ('Cache-Control', 'no-cache, no-store, must-revalidate'),
                     ('Pragma', 'no-cache'), ('Expires', '0')])
            except Exception as e:
                logger.exception('Error processing data posted by frePPLe')
                raise InternalServerError(
                    description=
                    'Error processing data posted by frePPLe: check the Odoo log file for more details'
                )
        else:
            raise MethodNotAllowed('Only GET and POST requests are accepted')
Example #20
def cross_list(method: str, params: MultiDict, session: Session,
               submission_id: int, **kwargs) -> Response:

    submitter, client = user_and_client_from_session(session)
    submission, submission_events = load_submission(submission_id)

    form = ClassificationForm(params)
    form.operation._value = lambda: form.operation.data
    form.filter_choices(submission, session)

    # Create a formset to render removal option.
    #
    # We need forms for existing secondaries, to generate removal requests.
    # When the forms in the formset are submitted, they are handled as the
    # primary form in the POST request to this controller.
    formset = ClassificationForm.formset(submission)
    _primary_category = submission.primary_classification.category
    _primary = taxonomy.CATEGORIES[_primary_category]

    response_data = {
        'submission_id': submission_id,
        'submission': submission,
        'submitter': submitter,
        'client': client,
        'form': form,
        'formset': formset,
        'primary': {
            'id': submission.primary_classification.category,
            'name': _primary['name']
        },
    }

    if method == 'POST':
        # Make sure that the user is not attempting to move to a different
        # step.
        #
        # Since the interface provides an "add" button to add cross-list
        # categories, we only want to handle the form data if the user is not
        # attempting to move to a different step.
        action = params.get('action')
        if not action:
            if not form.validate():
                raise BadRequest(response_data)

            if form.operation.data == form.REMOVE:
                command_type = RemoveSecondaryClassification
            else:
                command_type = AddSecondaryClassification
            command = command_type(category=form.category.data,
                                   creator=submitter,
                                   client=client)
            if not validate_command(form, command, submission, 'category'):
                raise BadRequest(response_data)

            try:
                submission, _ = save(command, submission_id=submission_id)
                response_data['submission'] = submission
            except SaveError as e:
                raise InternalServerError(response_data) from e

            # Re-build the formset to reflect changes that we just made, and
            # generate a fresh form for adding another secondary. The POSTed
            # data should now be reflected in the formset.
            response_data['formset'] = ClassificationForm.formset(submission)
            form = ClassificationForm()
            form.operation._value = lambda: form.operation.data
            form.filter_choices(submission, session)
            response_data['form'] = form

            # Warn the user if they have too many secondaries.
            if len(submission.secondary_categories) > 3:
                alerts.flash_warning(
                    Markup(
                        'Adding more than three cross-list classifications will'
                        ' result in a delay in the acceptance of your submission.'
                    ))

        if action in ['previous', 'save_exit', 'next']:
            return response_data, status.SEE_OTHER, {}
    return response_data, status.OK, {}
Example #21
    def run_wsgi(self):
        app = self.server.app
        environ = self.make_environ()
        headers_set = []
        headers_sent = []

        def write(data):
            assert headers_set, 'write() before start_response'
            if not headers_sent:
                status, response_headers = headers_sent[:] = headers_set
                try:
                    code, msg = status.split(None, 1)
                except ValueError:
                    code, msg = status, ""
                self.send_response(int(code), msg)
                header_keys = set()
                for key, value in response_headers:
                    self.send_header(key, value)
                    key = key.lower()
                    header_keys.add(key)
                if 'content-length' not in header_keys:
                    self.close_connection = True
                    self.send_header('Connection', 'close')
                if 'server' not in header_keys:
                    self.send_header('Server', self.version_string())
                if 'date' not in header_keys:
                    self.send_header('Date', self.date_time_string())
                self.end_headers()

            assert type(data) is bytes, 'applications must write bytes'
            self.wfile.write(data)
            self.wfile.flush()

        def start_response(status, response_headers, exc_info=None):
            if exc_info:
                try:
                    if headers_sent:
                        reraise(*exc_info)
                finally:
                    exc_info = None
            elif headers_set:
                raise AssertionError('Headers already set')
            headers_set[:] = [status, response_headers]
            return write

        def execute(app):
            application_iter = app(environ, start_response)
            try:
                # XXX: Is this actually correct?
                if not PY2 and isinstance(application_iter, bytes):
                    # iterating over bytes' items would give us ints
                    application_iter = (application_iter, )
                for data in application_iter:
                    write(data)
                # make sure the headers are sent
                if not headers_sent:
                    write(b'')
            finally:
                if hasattr(application_iter, 'close'):
                    application_iter.close()
                application_iter = None

        try:
            execute(app)
        except (socket.error, socket.timeout) as e:
            self.connection_dropped(e, environ)
        except Exception:
            if self.server.passthrough_errors:
                raise
            from werkzeug.debug.tbtools import get_current_traceback
            traceback = get_current_traceback(ignore_system_exceptions=True)
            try:
                # if we haven't yet sent the headers but they are set
                # we roll back to be able to set them again.
                if not headers_sent:
                    del headers_set[:]
                execute(InternalServerError())
            except Exception:
                pass
            self.server.log('error', 'Error on request:\n%s',
                            traceback.plaintext)
Example #22
    def create_dataset_from_csv(self, df, name='__testtbl__'):
        current_app.logger.info('Dropping N/A records')
        current_app.logger.info('Before cleaning shape={0}'.format(df.shape))
        df = df.dropna(axis=0, how='all')
        df = df.ffill()
        current_app.logger.info('After cleaning shape={0}'.format(df.shape))

        columns = list(df.columns)[1:]
        current_app.logger.info('columns: {0}'.format(columns))

        current_app.logger.info('create table: {0}'.format(name))
        query = sql.SQL("""CREATE TABLE {schema}.{tbl}
	(
		date TIMESTAMP WITHOUT TIME ZONE NOT NULL,
		{columns},
		"mask" BOOLEAN DEFAULT FALSE,
		PRIMARY KEY (date)
	)
	WITH (
		OIDS = FALSE
	);
	ALTER TABLE {schema}.{tbl}
		OWNER to {user};
	""").format(
            schema=sql.Identifier(self.config['schema']),
            tbl=sql.Identifier(name),
            columns=sql.SQL(',').join(
                sql.SQL("{0} double precision").format(sql.Identifier(c))
                for c in columns),
            user=sql.Identifier(self.config['user']),
        )
        try:
            self.execute(query)
        except Exception:
            raise InternalServerError(
                description="Failed to create new table.")

        current_app.logger.info('uploading csv')
        cursor = self.conn.cursor()
        query = sql.SQL("""COPY {schema}.{tbl} ({columns})
	FROM STDIN WITH CSV HEADER DELIMITER AS ','""").format(
            schema=sql.Identifier(self.config['schema']),
            tbl=sql.Identifier(name),
            columns=sql.SQL(',').join(
                sql.Identifier(c) for c in ['date'] + columns),
        )
        try:
            csv = io.StringIO()
            df.to_csv(csv, index=False)
            # this is the tricky part
            # you have to seek back to the start before being read
            # otherwise it will continue to read from the end
            # which reads nothing
            csv.seek(0)
            cursor.copy_expert(sql=query, file=csv)
            self.conn.commit()
        except Exception:
            self.conn.rollback()
            raise InternalServerError(
                description="Failed to write data into table")
        finally:
            cursor.close()

        current_app.logger.info('success')
Example #23
def handle_unknown_error(error):
    return handle_http_error(InternalServerError())
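Wiring this up for bare `Exception` turns any uncaught error into a clean 500 response; a sketch, assuming a Flask `app` object and the `handle_http_error` helper used above:

# Any uncaught exception is converted to a generic 500 response.
app.register_error_handler(Exception, handle_unknown_error)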
Example #24
def view500():
    raise InternalServerError('error')
Example #25
    def on_imagen(self, request, nombre):
        try:
            normpath = posixpath.normpath(nombre)
            asset_data = self.img_mngr.get_item(normpath)
        except Exception as e:
            msg = u"Error interno al buscar imagen: %s" % e
            raise InternalServerError(msg)
        if asset_data is None:
            if self.verbose:
                print("WARNING: no pudimos encontrar", repr(nombre))
            try:
                width, _, height = request.args["s"].partition('-')
                width = int(width)
                height = int(height)
            except Exception:
                raise InternalServerError("Error al generar imagen")
            img = bmp.BogusBitMap(width, height)
            return Response(img.data, mimetype="img/bmp")
        type_ = guess_type(nombre)[0]
        return Response(asset_data, mimetype=type_)

    def on_institucional(self, request, path):
        path = os.path.join("institucional", path)
        asset_file = os.path.join(config.DIR_ASSETS, path)
        if os.path.isdir(asset_file):
            print("WARNING: ", repr(asset_file), "es un directorio")
            raise NotFound()
        if not os.path.exists(asset_file):
            print("WARNING: no pudimos encontrar", repr(asset_file))
            raise NotFound()
Example #26
def view500():
    # flask-login user is loaded during @login_required, so check that the db has been queried
    mock_user_get.assert_called_with(user['id'])
    raise InternalServerError('error')
Example #27
def errorhandler(e):
    """Handle error"""
    if not isinstance(e, HTTPException):
        e = InternalServerError()
    return apology(e.name, e.code)
Example #28
def send_async_email(app, msg):
    with app.app_context():
        try:
            mail.send(msg)
        except ConnectionRefusedError:
            raise InternalServerError("[MAIL SERVER] not working")
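`send_async_email` is usually started on a worker thread with the real application object, since the thread has no request context and `current_app` is only a proxy; a common sketch of the calling side:

from threading import Thread
from flask import current_app

def send_email(msg):
    # Pass the concrete app object; the worker thread pushes its own
    # app context inside send_async_email.
    app = current_app._get_current_object()
    Thread(target=send_async_email, args=(app, msg)).start()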
Example #29
def create_package(job_id: int,
                   job_name: str,
                   description: str,
                   router_name: str,
                   provider: str,
                   bbox: List[float],
                   result_path: str,
                   in_pbf_path: str,
                   compression: str,
                   user_email: str,
                   config_string='production',
                   cleanup=True):
    """
    Creates a routing package and puts it in a defined folder.
    """

    # We need to create a new app for this, RQ runs async
    # and has no Flask context otherwise
    app = create_app(config_string)
    app_ctx = app.app_context()
    app_ctx.push()

    session: SessionBase = db.session

    # Set up the logger where we have access to the user email
    # and only if there hasn't been one before
    if not LOGGER.handlers:
        handler = AppSmtpHandler(**get_smtp_details(app.config, [user_email]))
        handler.setLevel(logging.INFO)
        LOGGER.addHandler(handler)

    bbox_geom: Polygon = bbox_to_geom(bbox)

    job = Job.query.get(job_id)
    succeeded = False

    # Huge try/except to make sure we only have to write a failure once.
    # Processing failures only raise HTTPErrors. If a job got deleted while it
    # was in the queue or even already running, any other exception will be thrown.
    # That way we can distinguish between the two scenarios and send emails accordingly.
    try:
        if not job:
            raise Exception(
                f"Job {job_id} doesn't exist anymore in the database.")
        job_status = job.status

        in_pbf_dir = app.config[provider.upper() + '_DIR']

        router = get_router(router_name, provider, in_pbf_path)

        # Set Redis job ID and container ID
        rq_job: Job = get_current_job()
        # testing has no access to a real queue
        if not app.testing:  # pragma: no cover
            job.set_rq_id(rq_job.id)
        job.set_container_id(router.container_id)

        # There are only 2 ways the PBF file can be unavailable:
        # 1. The package just got registered, so it still needs to be extracted
        # 2. Smth went wrong in the pbf update or job deletion procedure
        if not os.path.isfile(in_pbf_path):
            # First take care of 2.
            if job_status == Statuses.COMPLETED.value:
                raise InternalServerError(
                    f"Job {job_id} couldn't find its corresponding PBF file {in_pbf_path}"
                )
            # Then take care of 1.
            try:
                best_pbf_path = get_pbfs_by_area(in_pbf_dir, bbox_geom)[0][0]
            # Raise a HTTP Error to not mess with error handling here
            except (AssertionError, FileNotFoundError) as e:
                raise InternalServerError(str(e))

            # Cut the PBF to the bbox extent (osmium availability is checked in __init__.py)
            osmium_proc = extract_proc(bbox_geom, best_pbf_path, in_pbf_path)

            job.set_status(Statuses.EXTRACTING.value)
            session.commit()

            # Let osmium cut the local PBF
            _, osmium_stderr = osmium_proc.communicate()
            if osmium_stderr:  # pragma: no cover
                raise InternalServerError(
                    f"'osmium': {json.loads(osmium_stderr)}")

            # Check validity
            osmium_reader = Reader(in_pbf_path)
            if osmium_reader.header().box().bottom_left.lat == 0:
                raise InternalServerError(
                    f"'osmium': Apparently bbox {bbox} is not within the extent of {in_pbf_path}"
                )

        job.set_status(Statuses.TILING.value)
        session.commit()

        try:
            exit_code, output = router.build_graph()
            if exit_code:  # pragma: no cover
                raise InternalServerError(
                    f"'{router.name()}': {output.decode()}")
        except ImageNotFound:
            raise InternalServerError(
                f"Docker image {router.image} not found.")

        job.set_status(Statuses.COMPLETED.value)
        session.commit()

        succeeded = True
    # catch all exceptions we're actually aware of
    except HTTPException as e:
        job.set_status(Statuses.FAILED.value)
        session.commit()
        LOGGER.error(e.description, extra=dict(user=user_email, job_id=job_id))

        raise
    # any other exception is assumed to be a deleted job and will only be logged/email sent
    except Exception as e:  # pragma: no cover
        msg = f"Job {job_id} by {user_email} was deleted."
        LOGGER.warning(msg, extra=dict(user=user_email, job_id=job_id))
        raise
    finally:
        # always write the "last_ran" column
        job.set_last_finished(datetime.utcnow())
        session.commit()
        if not succeeded:
            shutil.rmtree(os.path.dirname(result_path))

        # Pop the context as we're done with app & db
        app_ctx.pop()

    # Write dataset to disk
    router.make_package(result_path, compression)

    # Create the meta JSON
    fname = os.path.basename(result_path)
    j = {
        "job_id": job_id,
        "filepath": fname,
        "name": job_name,
        "description": description,
        "extent": ",".join([str(f) for f in bbox]),
        "last_modified": str(datetime.utcnow())
    }
    dirname = os.path.dirname(result_path)
    # Split the filename at the first dot to strip the full extension
    fname_sanitized = fname.split(os.extsep, 1)[0]
    with open(os.path.join(dirname, fname_sanitized + '.json'),
              'w',
              encoding='utf8') as f:
        json.dump(j, f, indent=2, ensure_ascii=False)

    # only clean up if successful, otherwise retain the container for debugging
    if cleanup:
        router.cleanup()
    LOGGER.info(
        f"Job {job_id} by {user_email} finished successfully. Find the new dataset in {result_path}",
        extra={
            "job_id": job_id,
            "user": user_email
        })
Example #30
def bp_test():
    raise InternalServerError()
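A view like this is typically exercised in tests by asserting on the 500 response; a sketch, assuming `bp_test` is registered on a blueprint route such as '/bp-test' and a standard Flask test client fixture:

def test_bp_test_returns_500(client):
    # HTTPExceptions are rendered as responses even under the test client.
    assert client.get('/bp-test').status_code == 500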