Code example #1
    def __call__(self, value, system):
        """
        Implements a subclass of pyramid_oereb.core.renderer.extract.json_.Renderer to create a print result
        out of a json. The json extract is reformatted to fit the structure of mapfish print.

        Args:
            value (tuple): A tuple containing the generated extract record and the params
                dictionary.
            system (dict): The available system properties.

        Returns:
            buffer: The pdf content as received from configured mapfish print instance url.
        """
        print_config = Config.get('print', {})
        if not print_config:
            raise ConfigurationError(
                'No print config section in config file was found.')
        print_service_url = print_config.get('base_url', '')
        if not print_service_url:
            raise ConfigurationError(
                'No print service url ("base_url") was found in the config.')
        print_service_token = print_config.get('token', '')
        if not print_service_token:
            raise ConfigurationError(
                'No print service token ("token") was found in the config.')
        verify_certificate = print_config.get('verify_certificate', True)

        self.headers = {'token': print_service_token}
        self.parameters = {
            'validate': print_config.get('validate', 'false'),
            'usewms': print_config.get('use_wms', 'false'),
        }

        log.debug("Parameter webservice is {}".format(value[1]))

        if value[1].images:
            raise HTTPBadRequest('With image is not allowed in the print')

        self._request = self.get_request(system)
        # If language present in request, use that. Otherwise, keep language from base class
        if 'lang' in self._request.GET:
            self._language = self._request.GET.get('lang')

        self.parameters['language'] = self._language
        self.parameters['flavour'] = self._request.matchdict['flavour']

        # Based on extract record and webservice parameter, render the extract data as XML
        extract_record = value[0]
        extract_as_xml = self._render(extract_record, value[1])

        response = self.get_response(system)

        if self._request.GET.get('getspec', 'no') != 'no':
            response.headers['Content-Type'] = 'application/xml; charset=UTF-8'
            return extract_as_xml

        prepared_extract_as_xml = self.prepare_xml(extract_as_xml)
        print_result = self.request_pdf(print_service_url,
                                        prepared_extract_as_xml, self.headers,
                                        self.parameters, verify_certificate)

        response.status_code = print_result.status_code
        response.headers = print_result.headers
        if 'Transfer-Encoding' in response.headers:
            del response.headers['Transfer-Encoding']
        if 'Connection' in response.headers:
            del response.headers['Connection']
        return print_result
Code example #2
File: extract.py Project: n0izn0iz/assembl
def post_extract(request):
    """
    Create a new extract.
    """
    extract_data = json.loads(request.body)
    discussion_id = int(request.matchdict['discussion_id'])
    user_id = authenticated_userid(request)
    if not user_id:
        # Straight from annotator
        token = request.headers.get('X-Annotator-Auth-Token')
        if token:
            token = decode_token(
                token, request.registry.settings['session.secret'])
            if token:
                user_id = token['userId']
    user_id = user_id or Everyone
    if not user_has_permission(discussion_id, user_id, P_ADD_EXTRACT):
        #TODO: maparent:  restore this code once it works:
        #return HTTPForbidden(result=ACLDenied(permission=P_ADD_EXTRACT))
        return HTTPForbidden()
    if not user_id or user_id == Everyone:
        # TODO: Create an anonymous user.
        raise HTTPServerError("Anonymous extracts are not implemeted yet.")
    content = None
    uri = extract_data.get('uri')
    important = extract_data.get('important', False)
    annotation_text = None
    if uri:
        # Straight from annotator
        annotation_text = extract_data.get('text')
    else:
        target = extract_data.get('target')
        if not (target or uri):
            raise HTTPBadRequest("No target")

        target_class = sqla.get_named_class(target.get('@type'))
        if issubclass(target_class, Post):
            post_id = target.get('@id')
            post = Post.get_instance(post_id)
            if not post:
                raise HTTPNotFound(
                    "Post with id '%s' not found." % post_id)
            content = post
        elif issubclass(target_class, Webpage):
            uri = target.get('url')
    if uri and not content:
        content = Webpage.get_instance(uri)
        if not content:
            # TODO: maparent:  This is actually a singleton pattern, should be
            # handled by the AnnotatorSource now that it exists...
            source = AnnotatorSource.default_db.query(AnnotatorSource).filter_by(
                discussion_id=discussion_id).filter(
                cast(AnnotatorSource.name, Unicode) == 'Annotator').first()
            if not source:
                source = AnnotatorSource(
                    name='Annotator', discussion_id=discussion_id,
                    type='source')
            content = Webpage(url=uri, discussion_id=discussion_id)
    extract_body = extract_data.get('quote', '')

    idea_id = extract_data.get('idIdea', None)
    if idea_id:
        idea = Idea.get_instance(idea_id)
        if idea.discussion.id != discussion_id:
            raise HTTPBadRequest(
                "Extract from discussion %s cannot be associated with an idea from a different discussion." % discussion_id)
    else:
        idea = None


    new_extract = Extract(
        creator_id=user_id,
        owner_id=user_id,
        discussion_id=discussion_id,
        body=extract_body,
        idea=idea,
        important=important,
        annotation_text=annotation_text,
        content=content
    )
    Extract.default_db.add(new_extract)

    for range_data in extract_data.get('ranges', []):
        range = TextFragmentIdentifier(
            extract=new_extract,
            xpath_start=range_data['start'],
            offset_start=range_data['startOffset'],
            xpath_end=range_data['end'],
            offset_end=range_data['endOffset'])
        TextFragmentIdentifier.default_db.add(range)
    Extract.default_db.flush()

    return {'ok': True, '@id': new_extract.uri()}
Code example #3
File: shortener.py Project: camptocamp/c2cgeoportal
    def create(self) -> Dict[str, str]:

        if "url" not in self.request.params:
            raise HTTPBadRequest("The parameter url is required")

        url = self.request.params["url"]

        # see: https://httpd.apache.org/docs/2.2/mod/core.html#limitrequestline
        if len(url) > 8190:
            raise HTTPBadRequest(
                f"The parameter url is too long ({len(url)} > {8190})")

        # Check that it is an internal URL...
        uri_parts = urlparse(url)
        if "allowed_hosts" in self.settings:
            if uri_parts.netloc not in self.settings["allowed_hosts"]:
                raise HTTPBadRequest(
                    f"The requested host '{uri_parts.netloc}' is not part of allowed hosts: "
                    f"{', '.join(self.settings['allowed_hosts'])}")
        else:
            hostname = uri_parts.hostname
            if hostname != self.request.server_name:
                raise HTTPBadRequest(
                    f"The requested host '{hostname!s}' should be '{self.request.server_name!s}'"
                )

        shortened = False

        for base in self.short_bases:
            base_parts = urlparse(base)
            if uri_parts.path.startswith(base_parts.path):
                shortened = True
                ref = uri_parts.path.split("/")[-1]

        tries = 0
        while not shortened:
            ref = "".join(
                random.choice(string.ascii_letters + string.digits)  # nosec
                for i in range(self.settings.get("length", 4)))
            test_url = DBSession.query(Shorturl).filter(
                Shorturl.ref == ref).all()
            if not test_url:
                break
            tries += 1
            if tries > 20:
                message = "No free ref found, considered to increase the length"
                logger.error(message)
                raise HTTPInternalServerError(message)

        user_email = self.request.user.email if self.request.user is not None else None
        email = self.request.params.get("email")
        if not shortened:
            short_url = Shorturl()
            short_url.url = url
            short_url.ref = ref
            short_url.creator_email = user_email
            short_url.creation = datetime.now()
            short_url.nb_hits = 0

            DBSession.add(short_url)

        if "base_url" in self.settings:
            s_url = self.settings["base_url"] + ref
        else:
            s_url = self.request.route_url("shortener_get", ref=ref)

        if email is not None:
            send_email_config(
                self.request.registry.settings,
                "shortener",
                email,
                full_url=url,
                short_url=s_url,
                message=self.request.params.get("message", ""),
                application_url=self.request.route_url("base"),
                current_url=self.request.current_route_url(),
            )

        set_common_headers(self.request, "shortener", Cache.PRIVATE_NO)
        return {"short_url": s_url}
Code example #4
def report_no_computer_users(context, request, file_ext):
    '''
    Generate a report with all the no-computer users that belong to an OU.
    If the administrator is a superadmin, the generated report will contain
    all the users in the database.
    
    Args:
        ou_id (string) : ID of the OU.

    Returns:
        headers (list) : The headers of the table to export
        rows (list)    : Rows with the report data
        widths (list)  : The widths of the columns of the table to export
        page           : Translation of the word "page" to the current language
        of             : Translation of the word "of" to the current language
        report_type    : Type of report (html, csv or pdf)
    '''

    # Check current user permissions
    ou_id = check_visibility_of_ou(request)
    if ou_id is None:
        raise HTTPBadRequest()

    # Get user data
    query = request.db.nodes.find({
        'type': 'user',
        'path': get_filter_nodes_belonging_ou(ou_id),
        'computers': []
    })

    rows = []

    if file_ext == 'pdf':
        rows = [(item['name'], item['first_name'] + " " + item['last_name'],
                 treatment_string_to_pdf(item, 'email', 35),
                 treatment_string_to_pdf(item, 'phone', 15),
                 treatment_string_to_pdf(item, 'address', 35), item['_id'])
                for item in query]
    else:
        rows = [(treatment_string_to_csv(item, 'name') if file_ext == 'csv' \
                    else get_html_node_link(item),
                treatment_string_to_csv(item, 'first_name'),
                treatment_string_to_csv(item, 'last_name'),
                treatment_string_to_csv(item, 'email'),
                treatment_string_to_csv(item, 'phone'),
                treatment_string_to_csv(item, 'address'),
                item['_id']) for item in query]

    header = (_(u'Username'), _(u'Name'), _(u'Email'), _(u'Phone'),
              _(u'Address'), _(u'Id'))

    # Column widths in percentage
    if file_ext == 'pdf':
        widths = (25, 25, 15, 10, 20, 15)
    else:
        widths = (15, 15, 10, 15, 10, 20, 15)
        header = header[ : 2] + (_(u'Last name'),) + \
            header[ 2 : ]

    title = _(u'No-computer users report')
    now = datetime.datetime.now().strftime("%d/%m/%Y %H:%M")

    # Sort rows
    # TODO: Use MongoDB Collations to do a "ignore_case" sorting
    # (MongoDB 2.6 does not support "ignore case" sorting)
    rows = sorted(rows, key=lambda i: (i[0].lower()))

    return {
        'headers': header,
        'rows': rows,
        'default_order': [[0, 'asc']],
        'widths': widths,
        'report_title': title,
        'page': _(u'Page'),
        'of': _(u'of'),
        'report_type': file_ext,
        'now': now
    }
Code example #5
File: views.py Project: weblate/warehouse
def search(request):
    metrics = request.find_service(IMetricsService, context=None)

    q = request.params.get("q", "")
    q = q.replace("'", '"')

    if q:
        bool_query = gather_es_queries(q)

        query = request.es.query(bool_query)

        query = query.suggest("name_suggestion", q, term={"field": "name"})
    else:
        query = request.es.query()

    if request.params.get("o"):
        sort_key = request.params["o"]
        if sort_key.startswith("-"):
            sort = {sort_key[1:]: {"order": "desc", "unmapped_type": "long"}}
        else:
            sort = {sort_key: {"unmapped_type": "long"}}

        query = query.sort(sort)

    # Require match to all specified classifiers
    for classifier in request.params.getall("c"):
        query = query.query("prefix", classifiers=classifier)

    try:
        page_num = int(request.params.get("page", 1))
    except ValueError:
        raise HTTPBadRequest("'page' must be an integer.")

    try:
        page = ElasticsearchPage(query,
                                 page=page_num,
                                 url_maker=paginate_url_factory(request))
    except elasticsearch.TransportError:
        metrics.increment("warehouse.views.search.error")
        raise HTTPServiceUnavailable

    if page.page_count and page_num > page.page_count:
        raise HTTPNotFound

    available_filters = collections.defaultdict(list)

    classifiers_q = (request.db.query(Classifier).with_entities(
        Classifier.classifier).filter(Classifier.deprecated.is_(False)).filter(
            exists([release_classifiers.c.trove_id]).where(
                release_classifiers.c.trove_id == Classifier.id)).order_by(
                    Classifier.classifier))

    for cls in classifiers_q:
        first, *_ = cls.classifier.split(" :: ")
        available_filters[first].append(cls.classifier)

    def filter_key(item):
        try:
            return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]
        except ValueError:
            return 1, 0, item[0]

    def form_filters_tree(split_list):
        """
        Takes a list of lists, each of them containing a filter and
        one of its children.
        Returns a dictionary, each key being a filter and each value being
        the filter's children.
        """
        d = {}
        for l in split_list:
            current_level = d
            for part in l:
                if part not in current_level:
                    current_level[part] = {}
                current_level = current_level[part]
        return d

    def process_available_filters():
        """
        Processes available filters and returns a list of dictionaries.
        The value of a key in the dictionary represents its children
        """
        sorted_filters = sorted(available_filters.items(), key=filter_key)
        output = []
        for f in sorted_filters:
            classifier_list = f[1]
            split_list = [i.split(" :: ") for i in classifier_list]
            tree = form_filters_tree(split_list)
            output.append(tree)
        return output

    metrics = request.find_service(IMetricsService, context=None)
    metrics.histogram("warehouse.views.search.results", page.item_count)

    return {
        "page": page,
        "term": q,
        "order": request.params.get("o", ""),
        "available_filters": process_available_filters(),
        "applied_filters": request.params.getall("c"),
    }
Code example #6
File: __init__.py Project: schoenpat/pyramlson
        def view(context, request):
            required_params = [context]
            optional_params = dict()
            # URI parameters have the highest prio
            if resource.uri_params:
                for param in resource.uri_params:
                    param_value = request.matchdict[param.name]
                    converted = validate_and_convert(param, param_value)
                    # pyramid router makes sure the URI params are all
                    # set, otherwise the view isn't called at all, because
                    # a NotFound error is triggered before the request
                    # can be routed to this view
                    required_params.append(
                        converted if convert else param_value)
            # If there's a body defined - include it before traits or query params
            if resource.body:
                if resource.body[0].mime_type == "application/json":
                    required_params.append(
                        prepare_json_body(request, resource.body))
                else:
                    required_params.append(request.body)
            if resource.query_params:
                for param in resource.query_params:
                    # query params are always named (i.e. not positional)
                    # so they effectively become keyword arguments in a
                    # method call, we just make sure they are present
                    # in the request if marked as 'required'
                    if param.required and param.name not in request.params:
                        raise HTTPBadRequest("{} ({}) is required".format(
                            param.name, param.type))
                    param_value = request.params.get(param.name, MARKER)
                    absent = param_value is MARKER
                    # If there's no default value defined in RAML let the decorated
                    # method decide which defaults to use. Unfortunately there is
                    # no way to tell whether a default value was declared as 'null'
                    # in RAML or if it was omitted - it's None in both cases
                    if absent and param.default is None:
                        continue
                    if not absent:
                        if convert:
                            param_value = validate_and_convert(
                                param, param_value)
                    else:
                        if convert:
                            param_value = validate_and_convert(
                                param, param.default)
                        else:
                            param_value = param.default
                    optional_params[transform(param.name)] = param_value
            result = meth(*required_params, **optional_params)

            # check if a response type is specified
            for response in resource.responses:
                if response.code == cfg.returns and len(response.body) == 1:
                    body = response.body[0]
                    if body.mime_type == 'application/json':
                        break
                    response_mime_type = body.mime_type
                    return render_mime_view(result,
                                            cfg.returns,
                                            mime_type=response_mime_type)

            return render_view(request, result, cfg.returns)
Code example #7
File: features.py Project: pfanguin/mf-chsdi3
 def mapExtent(self, value):
     if value is not None:
         try:
             self._mapExtent = esrijson.to_shape([float_raise_nan(c) for c in value.split(',')])
         except ValueError:
             raise HTTPBadRequest('Please provide numerical values for the parameter mapExtent')
Code example #8
def process_upload(request, field_name):
    # For some reason, the multipart form does not contain
    # a session cookie, and Nathan so far has not been able to explicitly
    # set it.  So a workaround is to put the session ID in the form as
    # hidden POST content.
    # Then we can re-establish our session with the request after
    # checking that our session id is valid.
    redis_session_id = request.POST['session']

    if redis_session_id in request.session.redis.keys():

        def get_specific_session_id(redis,
                                    timeout,
                                    serialize,
                                    generator,
                                    session_id=redis_session_id):
            return session_id

        factory = request.registry.queryUtility(ISessionFactory)
        request.session = factory(request,
                                  new_session_id=get_specific_session_id)

        if request.session.session_id != redis_session_id:
            raise cors_response(
                request,
                HTTPBadRequest('multipart form request '
                               'could not re-establish session'))

    upload_dir = get_session_dir(request)
    max_upload_size = eval(request.registry.settings['max_upload_size'])

    persist_upload = asbool(request.POST.get('persist_upload', False))

    if 'can_persist_uploads' in request.registry.settings.keys():
        can_persist = asbool(request.registry.settings['can_persist_uploads'])
    else:
        can_persist = False

    log.info('save_file_dir: {}'.format(upload_dir))
    log.info('max_upload_size: {}'.format(max_upload_size))

    log.info('persist_upload?: {}'.format(persist_upload))
    log.info('can_persist?: {}'.format(can_persist))

    input_file = request.POST[field_name].file
    file_name, unique_name = gen_unique_filename(
        request.POST[field_name].filename)
    file_path = os.path.join(upload_dir, unique_name)

    size = get_size_of_open_file(input_file)
    log.info('Incoming file size: {}'.format(size))

    if size > max_upload_size:
        raise cors_response(
            request,
            HTTPBadRequest(
                'file is too big!  Max size = {}'.format(max_upload_size)))

    if size >= get_free_space(upload_dir):
        raise cors_response(
            request,
            HTTPInsufficientStorage('Not enough space '
                                    'to save the file'))

    write_to_file(input_file, file_path)

    log.info('Successfully uploaded file "{0}"'.format(file_path))

    if persist_upload and can_persist:
        log.info('Persisting file "{0}"'.format(file_path))

        upload_dir = get_persistent_dir(request)
        if size >= get_free_space(upload_dir):
            raise cors_response(
                request,
                HTTPInsufficientStorage('Not enough space '
                                        'to persist the file'))

        persistent_path = os.path.join(upload_dir, file_name)

        write_to_file(input_file, persistent_path)

    return file_path, file_name
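The raise cors_response(request, HTTPBadRequest(...)) pattern above works because Pyramid's HTTP exceptions are simultaneously Response objects and exceptions, so a helper can attach CORS headers to one and hand it back to be raised. A rough sketch of what such a helper might look like (an assumption for illustration, not the project's actual cors_response):

from pyramid.httpexceptions import HTTPBadRequest


def cors_response(request, exc):
    # Hypothetical helper: decorate the exception/response with CORS headers
    # so browser clients can read the error body, then return it for raising.
    origin = request.headers.get('Origin')
    if origin:
        exc.headers['Access-Control-Allow-Origin'] = origin
        exc.headers['Access-Control-Allow-Credentials'] = 'true'
    return exc


# Usage mirrors the snippet above:
#     raise cors_response(request, HTTPBadRequest('file is too big!'))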
Code example #9
class Teams(ApiView):
    def get(self):
        def get_worked_hours(startDate, endDate, projects_ids):
            worked_hours = self.session.query(TimeEntry.project_id,
                                              func.sum(TimeEntry.time))
            return worked_hours\
                .filter(TimeEntry.project_id.in_(projects_ids))\
                .filter(TimeEntry.date >= startDate)\
                .filter(TimeEntry.date <= endDate)\
                .group_by(TimeEntry.project_id)

        def get_project_worked_hours(project_id, worked_hours):
            worked_hours = filter(lambda x: x[0] == project_id, worked_hours)
            return worked_hours[0][1] if worked_hours else 0.0

        tickets_end_date = datetime.now()
        tickets_start_date = tickets_end_date.replace(day=1)
        tickets_end_date = tickets_end_date.replace(day=1)
        tickets_end_date += relativedelta(months=1)
        tickets_end_date -= relativedelta(days=1)

        tickets_last_month_end_date = tickets_start_date - timedelta(days=1)
        tickets_last_month_start_date = tickets_last_month_end_date.replace(
            day=1)

        teams = self.session.query(Team_m, TeamMember.user_id)\
                            .outerjoin(TeamMember)

        team_to_project = self.session.query(Team_m.id, Project, Client)\
                                      .filter(Sprint.team_id==Team_m.id)\
                                      .filter(Sprint.project_id==Project.id)\
                                      .filter(Project.client_id==Client.id)\
                                      .order_by(Sprint.end.desc())

        teams = h.groupby(teams, lambda x: x[0], lambda x: x[1])
        team_to_project = h.groupby(team_to_project, lambda x: x[0],
                                    lambda x: x[1:])

        projects_ids = []
        for value in team_to_project.values():
            for projectAndClient in value:
                projects_ids.append(projectAndClient[0].id)

        this_month_worked_hours = get_worked_hours(tickets_start_date,
                                                   tickets_end_date,
                                                   projects_ids)

        last_month_worked_hours = get_worked_hours(
            tickets_last_month_start_date, tickets_start_date, projects_ids)

        result = []
        for team, members in teams.iteritems():
            team = team.to_dict()
            team['users'] = members
            projects = team_to_project.get(team['id'], [])
            team['projects'] = [
                dict(id=project.id,
                     name=project.name,
                     client=dict(
                         id=client.id,
                         name=client.name,
                     ),
                     this_month_worked_hours=get_project_worked_hours(
                         project.id, this_month_worked_hours),
                     last_month_worked_hours=get_project_worked_hours(
                         project.id, last_month_worked_hours))
                for project, client in projects
            ]
            result.append(team)

        return dict(teams=result)

    @has_perm('admin')
    def post(self):
        try:
            json_team = self.request.json_body
        except ValueError:
            raise HTTPBadRequest('Expect json')

        team_schema = TeamAddSchema()
        try:
            team_des = team_schema.deserialize(json_team)
        except colander.Invalid, e:
            errors = e.asdict()
            raise HTTPBadRequest(errors)

        team = Team_m(name=team_des['name'])
        self.session.add(team)
        try:
            self.session.flush()
        except IntegrityError:
            raise HTTPBadRequest('Team exists')

        if team_des.get('swap_with_preview'):
            preview = Preview(self.request)
            if not preview.swap_avatar(type='teams', id=team.id):
                raise HTTPBadRequest('No preview to swap')

        return team.to_dict()
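The post() method above uses Python 2 syntax (except colander.Invalid, e: and iteritems()). A minimal Python 3 sketch of the same validation pattern, with a stand-in schema rather than the project's real TeamAddSchema:

import colander
from pyramid.httpexceptions import HTTPBadRequest


class TeamAddSchema(colander.MappingSchema):
    # minimal stand-in: the real schema defines more fields
    name = colander.SchemaNode(colander.String())


def deserialize_team(request):
    # Invalid JSON bodies and schema violations both end up as 400 responses.
    try:
        json_team = request.json_body
    except ValueError:
        raise HTTPBadRequest('Expect json')
    try:
        return TeamAddSchema().deserialize(json_team)
    except colander.Invalid as e:
        # e.asdict() maps node names to validation error messages
        raise HTTPBadRequest(e.asdict())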
Code example #10
 def featureIds(self, value):
     if value is not None:
         self._featureIds = value.split(',')
     else:
         raise HTTPBadRequest(
             'Please provide featureIds')  # pragma: no cover
Code example #11
def instance_post(request):
    raise HTTPBadRequest()
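This one-liner shows the pattern at its barest: raising HTTPBadRequest from a Pyramid view aborts processing and the client receives a 400 response. A self-contained sketch of the usual shape (the route and parameter names are made up for illustration):

from pyramid.httpexceptions import HTTPBadRequest
from pyramid.view import view_config


@view_config(route_name='double', renderer='json')
def double_view(request):
    raw = request.params.get('n')
    if raw is None:
        raise HTTPBadRequest('The parameter n is required')
    try:
        n = int(raw)
    except ValueError:
        raise HTTPBadRequest('The parameter n must be an integer')
    # A raised HTTPBadRequest becomes the response; a normal return value
    # is passed to the configured renderer instead.
    return {'result': n * 2}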
Code example #12
    def __call__(self, value, system):
        """
        Implements a subclass of pyramid_oereb.lib.renderer.extract.json_.Renderer to create a print result
        out of a json. The json extract is reformatted to fit the structure of mapfish print.

        Args:
            value (tuple): A tuple containing the generated extract record and the params
                dictionary.
            system (dict): The available system properties.

        Returns:
            buffer: The pdf content as received from configured mapfish print instance url.
        """
        log.debug("Parameter webservice is {}".format(value[1]))

        if value[1].images:
            raise HTTPBadRequest('With image is not allowed in the print')

        self._request = self.get_request(system)

        # Create a lower-case copy of the GET dict so parameter names and values are accepted in any case
        self._lowercase_GET_dict = dict(
            (k.lower(), v.lower()) for k, v in self._request.GET.iteritems())

        # If a language is specified in the request, use it. Otherwise, use the language from base class
        self._fallback_language = Config.get('default_language')
        if 'lang' in self._lowercase_GET_dict:
            self._language = self._lowercase_GET_dict.get('lang')

        # Based on extract record and webservice parameter, render the extract data as JSON
        extract_record = value[0]
        extract_as_dict = self._render(extract_record, value[1])
        feature_geometry = mapping(extract_record.real_estate.limit)
        pdf_to_join = set()

        if Config.get('print', {}).get('compute_toc_pages', False):
            extract_as_dict['nbTocPages'] = TocPages(
                extract_as_dict).getNbPages()
        else:
            extract_as_dict['nbTocPages'] = 1

        self.convert_to_printable_extract(extract_as_dict, feature_geometry,
                                          pdf_to_join)

        print_config = Config.get('print', {})

        extract_as_dict[
            'Display_RealEstate_SubunitOfLandRegister'] = print_config.get(
                'display_real_estate_subunit_of_land_register', True)

        extract_as_dict['Display_Certification'] = print_config.get(
            'display_certification', True)

        spec = {
            'layout': Config.get('print', {})['template_name'],
            'outputFormat': 'pdf',
            'lang': self._language,
            'attributes': extract_as_dict,
        }

        response = self.get_response(system)

        if self._request.GET.get('getspec', 'no') != 'no':
            response.headers[
                'Content-Type'] = 'application/json; charset=UTF-8'
            return json.dumps(spec, sort_keys=True, indent=4)
        pdf_url = urlparse.urljoin(
            Config.get('print', {})['base_url'] + '/', 'buildreport.pdf')
        pdf_headers = Config.get('print', {})['headers']
        print_result = requests.post(pdf_url,
                                     headers=pdf_headers,
                                     data=json.dumps(spec))
        try:
            if Config.get('print', {}).get('compute_toc_pages', False):
                with io.BytesIO() as pdf:
                    pdf.write(print_result.content)
                    pdf_reader = PdfFileReader(pdf)
                    x = []
                    for i in range(len(pdf_reader.getOutlines())):
                        x.append(pdf_reader.getOutlines()[i]['/Page']
                                 ['/StructParents'])
                    try:
                        true_nb_of_toc = min(x) - 1
                    except ValueError:
                        true_nb_of_toc = 1

                    if true_nb_of_toc != extract_as_dict['nbTocPages']:
                        log.warning(
                            'nbTocPages in result pdf: {} are not equal to the one predicted : {}, request new pdf'
                            .format(true_nb_of_toc,
                                    extract_as_dict['nbTocPages']))  # noqa
                        extract_as_dict['nbTocPages'] = true_nb_of_toc
                        print_result = requests.post(pdf_url,
                                                     headers=pdf_headers,
                                                     data=json.dumps(spec))
        except PdfReadError as e:
            err_msg = 'a problem occurred while generating the pdf file'
            log.error(err_msg + ': ' + str(e))
            raise HTTPInternalServerError(err_msg)

        if not extract_as_dict['isReduced'] and print_result.status_code == 200:
            main = tempfile.NamedTemporaryFile(suffix='.pdf')
            main.write(print_result.content)
            main.flush()
            cmd = ['pdftk', main.name]
            temp_files = [main]
            for url in pdf_to_join:
                result = requests.get(url)
                content_type = result.headers.get('content-type')
                log.debug("document url: " + url + " => content_type: " +
                          content_type)
                if content_type != 'application/pdf':
                    msg = "Skipped document inclusion (url: '{}') because content_type: '{}'"
                    log.warning(msg.format(url, content_type))
                    continue
                tmp_file = tempfile.NamedTemporaryFile(suffix='.pdf')
                tmp_file.write(result.content)
                tmp_file.flush()
                temp_files.append(tmp_file)
                cmd.append(tmp_file.name)
            out = tempfile.NamedTemporaryFile(suffix='.pdf')
            cmd += ['cat', 'output', out.name]
            sys.stdout.flush()
            time.sleep(0.1)
            subprocess.check_call(cmd)
            content = out.file.read()
        else:
            content = print_result.content

        # Save printed file to the specified path.
        pdf_archive_path = print_config.get('pdf_archive_path', None)
        if pdf_archive_path is not None:
            self.archive_pdf_file(pdf_archive_path, content, extract_as_dict)

        response.status_code = print_result.status_code
        response.headers = print_result.headers
        if 'Transfer-Encoding' in response.headers:
            del response.headers['Transfer-Encoding']
        if 'Connection' in response.headers:
            del response.headers['Connection']
        return content
Code example #13
    def get_report(self):
        ids = self.request.matchdict["ids"].split(",")
        self.layername = self.request.matchdict["layername"]
        layer_config = self.config["layers"].get(self.layername)

        if layer_config is None:
            raise HTTPBadRequest("Layer not found")

        features_ids = [self.layername + "." + id_ for id_ in ids]

        if layer_config["check_credentials"]:
            # check user credentials
            role_id = None if self.request.user is None else \
                self.request.user.role.id

            # FIXME: support of mapserver groups
            ogc_server_ids = [self.default_ogc_server.id]

            private_layers_object = get_private_layers(ogc_server_ids)
            private_layers_names = [
                private_layers_object[oid].name
                for oid in private_layers_object
            ]

            protected_layers_object = get_protected_layers(
                role_id, ogc_server_ids)
            protected_layers_names = [
                protected_layers_object[oid].name
                for oid in protected_layers_object
            ]

            if self.layername in private_layers_names and \
                    self.layername not in protected_layers_names:
                raise HTTPForbidden

        srs = layer_config["srs"]

        mapserv_url = self.request.route_url("mapserverproxy")
        vector_request_url = "{0!s}?{1!s}".format(
            mapserv_url, "&".join([
                "{0!s}={1!s}".format(*i)
                for i in list({
                    "service": "WFS",
                    "version": "1.1.0",
                    "outputformat": "gml3",
                    "request": "GetFeature",
                    "typeName": self.layername,
                    "featureid": ",".join(features_ids),
                    "srsName": "epsg:" + str(srs)
                }.items())
            ]))

        spec = layer_config["spec"]
        if spec is None:
            spec = {
                "layout": self.layername,
                "outputFormat": "pdf",
                "attributes": {
                    "ids": ids
                }
            }
            map_config = layer_config.get("map")
            if map_config is not None:
                spec["attributes"]["map"] = self._build_map(
                    mapserv_url, vector_request_url, srs, map_config)

            maps_config = layer_config.get("maps")
            if maps_config is not None:
                spec["attributes"]["maps"] = []
                for map_config in maps_config:
                    spec["attributes"]["maps"].append(
                        self._build_map(mapserv_url, vector_request_url, srs,
                                        map_config))
        else:
            spec = loads(
                dumps(spec) % {
                    "layername": self.layername,
                    "ids": dumps(ids),
                    "srs": srs,
                    "mapserv_url": mapserv_url,
                    "vector_request_url": vector_request_url,
                })

        return self._do_print(spec)
Code example #14
def image(request):
    p_extent = tuple(map(float, request.GET['extent'].split(',')))
    p_size = tuple(map(int, request.GET['size'].split(',')))
    p_resource = map(int, filter(None, request.GET['resource'].split(',')))
    p_cache = request.GET.get('cache', 'true').lower() in ('true', 'yes', '1') \
        and request.env.render.tile_cache_enabled

    # Print tile debug info on resulting image
    tdi = request.GET.get('tdi', '').lower() in ('yes', 'true')

    resolution = (
        (p_extent[2] - p_extent[0]) / p_size[0],
        (p_extent[3] - p_extent[1]) / p_size[1],
    )

    aimg = None
    zexact = None
    for resid in p_resource:
        obj = Resource.filter_by(id=resid).one_or_none()

        if obj is None:
            raise ResourceNotFound(resid)

        if not IRenderableStyle.providedBy(obj):
            raise ValidationError("Resource (ID=%d) cannot be rendered." %
                                  (resid, ))

        request.resource_permission(PD_READ, obj)

        rimg = None

        if p_cache and zexact is None:
            if abs(resolution[0] - resolution[1]) < 1e-9:
                ztile = log(
                    (obj.srs.maxx - obj.srs.minx) / (256 * resolution[0]), 2)
                zexact = abs(round(ztile) - ztile) < 1e-9
                if zexact:
                    ztile = int(round(ztile))
            else:
                zexact = False

        # May the requested image be composed from cached tiles?
        cached = (
            p_cache and zexact and obj.tile_cache is not None
            and obj.tile_cache.enabled
            and obj.tile_cache.image_compose  # NOQA: W503
            and (obj.tile_cache.max_z is None or ztile <= obj.tile_cache.max_z)
        )  # NOQA: W503

        ext_extent = p_extent
        ext_size = p_size
        ext_offset = (0, 0)

        if cached:
            # Affine transform from layer to tile
            at_l2t = af_transform(
                (obj.srs.minx, obj.srs.miny, obj.srs.maxx, obj.srs.maxy),
                (0, 0, 2**ztile, 2**ztile))
            at_t2l = ~at_l2t

            # Affine transform from layer to image
            at_l2i = af_transform(p_extent, (0, 0) + tuple(p_size))

            # Affine transform from tile to image
            at_t2i = at_l2i * ~at_l2t

            # Tile coordinates of render extent
            t_lb = tuple(at_l2t * p_extent[0:2])
            t_rt = tuple(at_l2t * p_extent[2:4])

            tb = (
                int(
                    floor(t_lb[0]) if t_lb[0] ==
                    min(t_lb[0], t_rt[0]) else ceil(t_lb[0])),
                int(
                    floor(t_lb[1]) if t_lb[1] ==
                    min(t_lb[1], t_rt[1]) else ceil(t_lb[1])),
                int(
                    floor(t_rt[0]) if t_rt[0] ==
                    min(t_lb[0], t_rt[0]) else ceil(t_rt[0])),
                int(
                    floor(t_rt[1]) if t_rt[1] ==
                    min(t_lb[1], t_rt[1]) else ceil(t_rt[1])),
            )

            ext_extent = at_t2l * tb[0:2] + at_t2l * tb[2:4]
            ext_im = rtoint(at_t2i * tb[0:2] + at_t2i * tb[2:4])
            ext_size = (ext_im[2] - ext_im[0], ext_im[1] - ext_im[3])
            ext_offset = (-ext_im[0], -ext_im[3])

            tx_range = tuple(range(min(tb[0], tb[2]), max(tb[0], tb[2])))
            ty_range = tuple(range(min(tb[1], tb[3]), max(tb[1], tb[3])))

            for tx, ty in product(tx_range, ty_range):
                timg = obj.tile_cache.get_tile((ztile, tx, ty))
                if timg is None:
                    rimg = None
                    break
                else:
                    if rimg is None:
                        rimg = Image.new('RGBA', p_size)

                    if tdi:
                        timg = tile_debug_info(timg.convert('RGBA'),
                                               color='blue',
                                               zxy=(ztile, tx, ty),
                                               extent=at_t2l * (tx, ty) +
                                               at_t2l * (tx + 1, ty + 1),
                                               msg='CACHED')

                    toffset = rtoint(at_t2i * (tx, ty))
                    rimg.paste(timg, toffset)

        if rimg is None:
            req = obj.render_request(obj.srs)
            rimg = req.render_extent(ext_extent, ext_size)

            if cached:
                for tx, ty in product(tx_range, ty_range):
                    t_offset = at_t2i * (tx, ty)
                    t_offset = rtoint((t_offset[0] + ext_offset[0],
                                       t_offset[1] + ext_offset[1]))
                    timg = rimg.crop(t_offset +
                                     (t_offset[0] + 256, t_offset[1] + 256))
                    obj.tile_cache.put_tile((ztile, tx, ty), timg)

                    if tdi:
                        rimg = tile_debug_info(rimg,
                                               offset=t_offset,
                                               color='red',
                                               zxy=(ztile, tx, ty),
                                               extent=at_t2l * (tx, ty) +
                                               at_t2l * (tx + 1, ty + 1),
                                               msg='NEW')

            rimg = rimg.crop(
                (ext_offset[0], ext_offset[1], ext_offset[0] + p_size[0],
                 ext_offset[1] + p_size[1]))

        if aimg is None:
            aimg = rimg
        else:
            try:
                aimg = Image.alpha_composite(aimg, rimg)
            except ValueError:
                raise HTTPBadRequest(
                    "Image (ID=%d) must have mode %s, but it is %s mode." %
                    (obj.id, aimg.mode, rimg.mode))

    # If there were no resources for rendering, return empty image
    if aimg is None:
        aimg = Image.new('RGBA', p_size)

    buf = BytesIO()
    aimg.save(buf, 'png')
    buf.seek(0)

    return Response(body_file=buf, content_type='image/png')
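The try/except around Image.alpha_composite near the end of the view maps a PIL mode mismatch to a 400. A minimal reproduction of that handling (the helper name is made up for illustration):

from PIL import Image
from pyramid.httpexceptions import HTTPBadRequest


def compose_or_400(aimg, rimg, resource_id):
    # Image.alpha_composite() only accepts two RGBA images of the same size;
    # anything else raises ValueError, which is reported as a bad request.
    try:
        return Image.alpha_composite(aimg, rimg)
    except ValueError:
        raise HTTPBadRequest(
            "Image (ID=%d) must have mode %s, but it is %s mode." %
            (resource_id, aimg.mode, rimg.mode))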
Code example #15
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex: is_unread=true returns only unread messages, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    keyword: use full-text search
    locale: restrict to locale
    """
    localizer = request.localizer
    discussion = request.context

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = request.permissions

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    keywords = request.GET.getall('keyword')

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score',
                     'popularity')
    if order == 'score' and not keywords:
        raise HTTPBadRequest("Cannot ask for a score without keywords")

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = Post.get_database_id(root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = Post.get_database_id(family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = Idea.get_database_id(root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [Post.get_database_id(id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = AgentProfile.get_database_id(post_author_id)
        assert AgentProfile.get(
            post_author_id
        ), "Unable to find agent profile with id " + post_author_id

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = AgentProfile.get_database_id(post_replies_to)
        assert AgentProfile.get(
            post_replies_to
        ), "Unable to find agent profile with id " + post_replies_to

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    posts = discussion.db.query(PostClass)

    posts = posts.filter(PostClass.discussion == discussion, )
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    # True means deleted only, False (default) means non-deleted only. None means both.

    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #    deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(
                localizer.translate(
                    _("Getting orphan posts of a specific idea isn't supported."
                      )))
        orphans = Idea._get_orphan_posts_statement(
            discussion.id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(discussion.id,
                                                 root_idea_id,
                                                 True,
                                                 include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter((Post.ancestry.like(root_post.ancestry +
                                                 cast(root_post.id, String) +
                                                 ',%'))
                             | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter((Post.ancestry.like(root_post.ancestry +
                                                 cast(root_post.id, String) +
                                                 ',%'))
                             | (PostClass.id == root_post.id)
                             | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)

    if keywords:
        locales = request.GET.getall('locale')
        posts, rank = add_text_search(posts, (PostClass.body_id, ), keywords,
                                      locales, order == 'score')

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id
            for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(), ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion.id))
        }
        liked_posts = {
            l.post_id: l.id
            for l in discussion.db.query(LikedPost).filter(
                LikedPost.tombstone_condition(), LikedPost.actor_id == user_id,
                *LikedPost.get_discussion_conditions(discussion.id))
        }
        if is_unread != None:
            posts = posts.outerjoin(
                ViewPost,
                and_(ViewPost.actor_id == user_id,
                     ViewPost.post_id == PostClass.id,
                     ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service.canTranslate is not None:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        #If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(
                localizer.translate(
                    _("You must be logged in to view which posts are read")))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def in ('partial_post', 'id_only'):
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(rank.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.like_count.desc(),
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])

        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def in ('partial_post', 'id_only'):
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    if view_def == 'id_only':
        posts = posts.with_entities(PostClass.id)

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        no_of_posts += 1
        if view_def == 'id_only':
            post_data.append(Content.uri_generic(post))
            continue
        if deleted is True:
            add_ancestors(post)

        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
            if view_def not in ("partial_post", "id_only"):
                translate_content(post,
                                  translation_table=translations,
                                  service=service)
        serializable_post = post.generic_json(view_def, user_id,
                                              permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (LikedPost.uri_generic(likedpost)
                                      if likedpost else False)
        if view_def not in ("partial_post", "id_only"):
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))

        post_data.append(serializable_post)

    # Benoitg:  For now, this completely garbles threading without intelligent
    #handling of pagination.  Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date.  If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion.id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(data["total"] / page_size))
    # TODO: Check if we want a 1-based index in the API
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
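
The page bookkeeping at the end of this view is plain arithmetic; a minimal standalone sketch of the same calculation (function name and sample numbers are illustrative, not from the project):

from math import ceil

def paginate(total, page, page_size):
    # Mirrors the bookkeeping above: 1-based startIndex, endIndex clamped on the last page.
    data = {"total": total, "page": page}
    data["maxPage"] = max(1, ceil(total / page_size))
    data["startIndex"] = (page_size * page) - (page_size - 1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    return data

# paginate(total=53, page=2, page_size=25)
# -> {'total': 53, 'page': 2, 'maxPage': 3, 'startIndex': 26, 'endIndex': 50}
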
Code Example #16
0
File: post.py Project: swergas/assembl
def add_post_form(request):
    if has_moderation(request.params):
        raise HTTPBadRequest("Cannot moderate at post creation")
    return collection_add_with_params(request)
Code Example #17
0
def check_password_token(request):
    token = request.matchdict.get('token', None)
    user, validity = verify_password_change_token(token)
    if validity != Validity.VALID:
        raise HTTPBadRequest(validity.name)
    return {"user": user.uri()}
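
For context, a view like this is only reachable once a route exposing the {token} placeholder is registered; a hypothetical Pyramid wiring for the function above (route name, URL pattern and renderer are assumptions, not taken from the project) might look like:

def includeme(config):
    # Illustrative wiring only; the project's actual route and renderer may differ.
    config.add_route('check_password_token', '/password_change/{token}')
    config.add_view(check_password_token,
                    route_name='check_password_token',
                    renderer='json')
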
Code Example #18
0
File: post.py Project: swergas/assembl
def add_post_json(request):
    if has_moderation(request.json):
        raise HTTPBadRequest("Cannot moderate at post creation")
    return collection_add_json(request)
Code Example #19
0
File: features.py Project: pfanguin/mf-chsdi3
    def layerId(self, value):
        if value is not None:
            self._layerId = value
        else:
            raise HTTPBadRequest('Please provide a layerId')  # pragma: no cover
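
A validator written this way is normally the setter half of a property on a request-parameters object; a minimal sketch of that pattern (class name and attribute are assumed for illustration):

from pyramid.httpexceptions import HTTPBadRequest

class FeatureParams(object):
    def __init__(self):
        self._layerId = None

    @property
    def layerId(self):
        return self._layerId

    @layerId.setter
    def layerId(self, value):
        # Reject missing values at assignment time, as in the snippet above.
        if value is not None:
            self._layerId = value
        else:
            raise HTTPBadRequest('Please provide a layerId')
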
Code Example #20
0
def search(context, request, search_type=None, return_generator=False):
    """
    Search view connects to ElasticSearch and returns the results
    """
    # sets up ES and checks permissions/principals

    # gets schemas for all types
    types = request.registry[TYPES]
    search_base = normalize_query(request)
    result = {
        '@context': request.route_path('jsonld_context'),
        '@id': '/search/' + search_base,
        '@type': ['Search'],
        'title': 'Search',
        'filters': [],
    }
    principals = effective_principals(request)
    es = request.registry[ELASTIC_SEARCH]
    es_index = '_all'
    search_audit = request.has_permission('search_audit')

    # extract from/size from query parameters
    from_, size = get_pagination(request)

    # looks at searchTerm query parameter, sets to '*' if none, and creates antlr/lucene query for fancy stuff
    search_term = prepare_search_term(request)

    # converts type= query parameters to a list of doc_types to search; "*" becomes the superclass Item
    if search_type is None:
        doc_types = request.params.getall('type')
        if '*' in doc_types:
            doc_types = ['Item']

    else:
        doc_types = [search_type]

    # Normalize to item_type
    try:
        doc_types = sorted({types[name].name for name in doc_types})
    except KeyError:
        # Check for invalid types
        bad_types = [t for t in doc_types if t not in types]
        msg = "Invalid type: {}".format(', '.join(bad_types))
        raise HTTPBadRequest(explanation=msg)

    # Clear Filters path -- make a path that clears all non-datatype filters.
    # this saves the searchTerm when you click clear filters
    # http://stackoverflow.com/questions/16491988/how-to-convert-a-list-of-strings-to-a-query-string#answer-16492046
    searchterm_specs = request.params.getall('searchTerm')
    searchterm_only = urlencode([("searchTerm", searchterm)
                                 for searchterm in searchterm_specs])
    if searchterm_only:
        # Search term in query string; clearing keeps that
        clear_qs = searchterm_only
    else:
        # Possibly type(s) in query string
        clear_qs = urlencode([("type", typ) for typ in doc_types])
    result['clear_filters'] = request.route_path('search', slash='/') + (
        ('?' + clear_qs) if clear_qs else '')

    # Building query for filters
    if not doc_types:
        # For form editing embedded searches
        if request.params.get('mode') == 'picker':
            doc_types = ['Item']
        # For /search/ with no type= use defaults
        else:
            doc_types = DEFAULT_DOC_TYPES
    else:
        # TYPE filters that were set by UI for labeling, only seen with >1 types
        # Probably this is why filtering Items with subclasses doesn't work right
        # i.e., search/?type=Dataset   Type is not a regular filter/facet.
        for item_type in doc_types:
            ti = types[item_type]
            qs = urlencode([
                (k.encode('utf-8'), v.encode('utf-8'))
                for k, v in request.params.items()
                if not (k == 'type' and types['Item' if v == '*' else v] is ti)
            ])
            result['filters'].append({
                'field': 'type',
                'term': ti.name,
                'remove': '{}?{}'.format(request.path, qs)
            })

        # Add special views like Report and Matrix if search is a single type
        if len(doc_types) == 1:
            result['views'] = views = []
            views.append({
                'href': request.route_path('report', slash='/') + search_base,
                'title': 'View tabular report',
                'icon': 'table',
            })

    search_fields, highlights = get_search_fields(request, doc_types)

    # Builds filtered query which supports multiple facet selection
    query = get_filtered_query(search_term, search_fields,
                               sorted(list_result_fields(request, doc_types)),
                               principals, doc_types)

    #  Columns is used in report view
    schemas = [types[doc_type].schema for doc_type in doc_types]
    columns = list_visible_columns_for_schemas(request, schemas)
    # and here it is attached to the result for the UI
    if columns:
        result['columns'] = columns

    # If no text search, use match_all query instead of query_string
    if search_term == '*':
        # query['query']['match_all'] = {}
        del query['query']['query_string']
    # If searching for more than one type, don't specify which fields to search
    else:
        # del query['query']['bool']['must']['multi_match']['fields']
        query['query']['query_string']['fields'].extend(
            ['_all', '*.uuid', '*.md5sum', '*.submitted_file_name'])

    # Set sort order
    set_sort_order(request, search_term, types, doc_types, query, result)

    # Setting filters
    used_filters = set_filters(request, query, result)

    # Adding facets to the query
    facets = [
        ('type', {
            'title': 'Data Type'
        }),
    ]
    if len(doc_types) == 1 and 'facets' in types[doc_types[0]].schema:
        facets.extend(types[doc_types[0]].schema['facets'].items())

    # Display all audits if logged in, or all but INTERNAL_ACTION if logged out
    for audit_facet in audit_facets:
        if ((search_audit and 'group.submitter' in principals)
                or 'INTERNAL_ACTION' not in audit_facet[0]):
            facets.append(audit_facet)

    query['aggs'] = set_facets(facets, used_filters, principals, doc_types)

    # Decide whether to use scan for results.
    do_scan = size is None or size > 1000

    # Send search request to proper indices
    if not request.params.get('type') or 'Item' in doc_types:
        es_index = '_all'
    else:
        es_index = [
            types[type_name].item_type for type_name in doc_types
            if hasattr(types[type_name], 'item_type')
        ]

    # Execute the query
    if do_scan:
        es_results = es.search(body=query,
                               index=es_index,
                               search_type='query_then_fetch')
    else:
        es_results = es.search(body=query,
                               index=es_index,
                               from_=from_,
                               size=size)

    result['total'] = total = es_results['hits']['total']

    schemas = (types[item_type].schema for item_type in doc_types)
    result['facets'] = format_facets(es_results, facets, used_filters, schemas,
                                     total, principals)

    # Add batch actions
    result.update(search_result_actions(request, doc_types, es_results))

    # Add all link for collections
    if size is not None and size < result['total']:
        params = [(k, v) for k, v in request.params.items() if k != 'limit']
        params.append(('limit', 'all'))
        result['all'] = '%s?%s' % (request.resource_path(context),
                                   urlencode(params))

    if not result['total']:
        # http://googlewebmastercentral.blogspot.com/2014/02/faceted-navigation-best-and-5-of-worst.html
        request.response.status_code = 404
        result['notification'] = 'No results found'
        result['@graph'] = []
        return result if not return_generator else []

    result['notification'] = 'Success'
    # Format results for JSON-LD
    if not do_scan:
        graph = format_results(request, es_results['hits']['hits'], result)
        if return_generator:
            return graph
        else:
            result['@graph'] = list(graph)
            return result

    # Scan large result sets.
    del query['aggs']
    if size is None:
        # preserve_order=True has unexpected results in clustered environment
        # https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/helpers/__init__.py#L257
        hits = scan(es, query=query, index=es_index, preserve_order=False)
    else:
        hits = scan(es,
                    query=query,
                    index=es_index,
                    from_=from_,
                    size=size,
                    preserve_order=False)
    graph = format_results(request, hits, result)

    # Support for request.embed() and `return_generator`
    if request.__parent__ is not None or return_generator:
        if return_generator:
            return graph
        else:
            result['@graph'] = list(graph)
            return result

    # Stream response using chunked encoding.
    # XXX BeforeRender event listeners not called.
    app_iter = iter_long_json('@graph', graph, result)
    request.response.content_type = 'application/json'
    if str is bytes:  # Python 2 vs 3 wsgi differences
        request.response.app_iter = app_iter  # Python 2
    else:
        request.response.app_iter = (s.encode('utf-8') for s in app_iter)
    return request.response
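
The scan-versus-search branch above reduces to a single decision; a simplified sketch using the elasticsearch-py helpers (pre-8.x body-style API assumed, helper name illustrative):

from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan

def run_query(es, query, es_index, from_=0, size=25):
    # Unbounded or very large result sets go through the scan helper;
    # everything else is a normal paginated search.
    if size is None or size > 1000:
        query.pop('aggs', None)  # aggregations are dropped before scanning, as above
        return scan(es, query=query, index=es_index, preserve_order=False)
    return es.search(body=query, index=es_index, from_=from_, size=size)

# es = Elasticsearch(['http://localhost:9200'])  # assumed endpoint
# hits = run_query(es, {"query": {"match_all": {}}}, '_all', size=None)
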
Code Example #21
0
File: features.py Project: pfanguin/mf-chsdi3
    def time(self, value):
        if value is not None:
            try:
                self._time = int(value) if re.search(r'[12]{1}[0-9]{3}', value) else None
            except ValueError:
                raise HTTPBadRequest('Please provide a valid year for the parameter <time>')
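
The year check above keeps any value containing a four-digit number starting with 1 or 2; the same pattern in isolation:

import re

def parse_year(value):
    # Same idea as the setter above: keep the value only if it looks like a year.
    return int(value) if re.search(r'[12][0-9]{3}', value) else None

print(parse_year('2021'))  # 2021
print(parse_year('0999'))  # None
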
Code Example #22
0
File: shortener.py Project: juliensam/c2cgeoportal
    def create(self):

        if "url" not in self.request.params:
            raise HTTPBadRequest("The parameter url is required")

        url = self.request.params["url"]

        # Check that it is an internal URL...
        uri_parts = urlparse(url)
        hostname = uri_parts.hostname
        if "allowed_hosts" in self.settings:
            if hostname not in self.settings[
                    "allowed_hosts"]:  # pragma: no cover
                raise HTTPBadRequest("The requested host is not allowed.")
        else:
            if hostname != self.request.server_name:
                raise HTTPBadRequest("The requested host '%s' should be '%s'" %
                                     (hostname, self.request.server_name))

        shortened = False

        for base in self.short_bases:
            base_parts = urlparse(base)
            if uri_parts.path.startswith(base_parts.path):
                shortened = True
                ref = uri_parts.path.split("/")[-1]

        tries = 0
        while not shortened:
            ref = "".join(
                random.choice(string.ascii_letters + string.digits)
                for i in range(self.settings.get("length", 4)))
            test_url = DBSession.query(Shorturl).filter(
                Shorturl.ref == ref).all()
            if len(test_url) == 0:
                break
            tries += 1  # pragma: no cover
            if tries > 20:  # pragma: no cover
                message = "No free ref found, consider increasing the length"
                logging.error(message)
                raise HTTPInternalServerError(message)

        user_email = self.request.user.email \
            if self.request.user is not None else None
        email = self.request.params.get("email")
        if not shortened:
            short_url = Shorturl()
            short_url.url = url
            short_url.ref = ref
            short_url.creator_email = user_email
            short_url.creation = datetime.now()
            short_url.nb_hits = 0

            DBSession.add(short_url)

        if "base_url" in self.settings:
            s_url = self.settings["base_url"] + ref
        else:
            s_url = self.request.route_url("shortener_get", ref=ref)

        if \
                email is not None and \
                "email_from" in self.settings and \
                "email_subject" in self.settings and \
                "email_body" in self.settings and \
                "smtp_server" in self.settings:  # pragma: no cover
            text = self.settings["email_body"] % {
                "full_url": url,
                "short_url": s_url,
                "message": self.request.params.get("message", ""),
            }
            send_email(
                self.settings["email_from"],
                [email],
                text.encode("utf-8"),
                self.settings["email_subject"],
                self.settings["smtp_server"],
            )

        set_common_headers(self.request, "shortner", NO_CACHE)
        return {"short_url": s_url}
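
The core of the view above is the collision-checked ref generation; a standalone sketch of that idea (the is_taken callback stands in for the DBSession lookup, and RuntimeError stands in for the HTTP error):

import random
import string

def make_ref(is_taken, length=4, max_tries=20):
    # Draw random alphanumeric refs until an unused one is found, as in the loop above.
    for _ in range(max_tries):
        ref = "".join(random.choice(string.ascii_letters + string.digits)
                      for _ in range(length))
        if not is_taken(ref):
            return ref
    raise RuntimeError("No free ref found, consider increasing the length")

# make_ref(lambda ref: False)  # e.g. 'aZ3k' (random)
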
Code Example #23
0
def _get_user(request, userid):
    user = request.registry[COLLECTIONS]['user'].get(userid)
    if not user:
        raise HTTPBadRequest()
    return user
Code Example #24
0
File: views.py Project: startnayit/warehouse
def search(request):

    q = request.params.get("q", '')

    if q:
        should = []
        for field in SEARCH_FIELDS:
            kw = {"query": q}
            if field in SEARCH_BOOSTS:
                kw["boost"] = SEARCH_BOOSTS[field]
            should.append(Q("match", **{field: kw}))

        # Add a prefix query if ``q`` is longer than one character.
        if len(q) > 1:
            should.append(Q('prefix', normalized_name=q))

        query = request.es.query("dis_max", queries=should)
        query = query.suggest("name_suggestion", q, term={"field": "name"})
    else:
        query = request.es.query()

    if request.params.get("o"):
        sort_key = request.params["o"]
        if sort_key.startswith("-"):
            sort = {
                sort_key[1:]: {
                    "order": "desc",
                    "unmapped_type": "long",
                },
            }
        else:
            sort = {
                sort_key: {
                    "unmapped_type": "long",
                }
            }

        query = query.sort(sort)

    if request.params.getall("c"):
        query = query.filter("terms", classifiers=request.params.getall("c"))

    try:
        page_num = int(request.params.get("page", 1))
    except ValueError:
        raise HTTPBadRequest("'page' must be an integer.")

    page = ElasticsearchPage(
        query,
        page=page_num,
        url_maker=paginate_url_factory(request),
    )

    if page.page_count and page_num > page.page_count:
        return HTTPNotFound()

    available_filters = collections.defaultdict(list)

    classifiers_q = (
        request.db.query(Classifier)
        .with_entities(Classifier.classifier)
        .filter(
            exists([release_classifiers.c.trove_id])
            .where(release_classifiers.c.trove_id == Classifier.id)
        )
        .order_by(Classifier.classifier)
    )

    for cls in classifiers_q:
        first, *_ = cls.classifier.split(' :: ')
        available_filters[first].append(cls.classifier)

    def filter_key(item):
        try:
            return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]
        except ValueError:
            return 1, 0, item[0]

    return {
        "page": page,
        "term": q,
        "order": request.params.get("o", ''),
        "available_filters": sorted(available_filters.items(), key=filter_key),
        "applied_filters": request.params.getall("c"),
    }
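
The query construction above builds a boosted dis_max query with elasticsearch-dsl; a reduced, self-contained sketch of that part (field names, boosts and index are illustrative, not the project's real configuration):

from elasticsearch_dsl import Q, Search

SEARCH_FIELDS = ["name", "summary", "description"]  # illustrative
SEARCH_BOOSTS = {"name": 10, "summary": 5}           # illustrative

def build_query(search, q):
    # One boosted match clause per field, plus a prefix clause for longer terms.
    should = []
    for field in SEARCH_FIELDS:
        kw = {"query": q}
        if field in SEARCH_BOOSTS:
            kw["boost"] = SEARCH_BOOSTS[field]
        should.append(Q("match", **{field: kw}))
    if len(q) > 1:
        should.append(Q("prefix", normalized_name=q))
    return search.query("dis_max", queries=should)

s = build_query(Search(index="packages"), "flask")
# s.to_dict() -> {'query': {'dis_max': {'queries': [...]}}}
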
Code Example #25
0
File: processes.py Project: crim-ca/weaver
def get_processes(request):
    """
    List registered processes (GetCapabilities).

    Optionally list both local and provider processes.
    """
    try:
        params = sd.GetProcessesQuery().deserialize(request.params)
    except colander.Invalid as ex:
        raise HTTPBadRequest(
            json={
                "code": "ProcessInvalidParameter",
                "description": "Process query parameters failed validation.",
                "error": colander.Invalid.__name__,
                "cause": str(ex),
                "value": repr_json(ex.value or dict(request.params),
                                   force_string=False),
            })

    detail = asbool(params.get("detail", True))
    ignore = asbool(params.get("ignore", True))
    try:
        # get local processes and filter according to schema validity
        # (previously deployed process schemas can become invalid because of modified schema definitions)
        results = get_processes_filtered_by_valid_schemas(request)
        processes, invalid_processes, paging, with_providers, total_processes = results
        if invalid_processes:
            raise HTTPServiceUnavailable(
                "Previously deployed processes are causing invalid schema integrity errors. "
                "Manual cleanup of following processes is required: {}".format(
                    invalid_processes))

        body = {
            "processes": processes if detail else [get_any_id(p) for p in processes]
        }  # type: JSON
        if not with_providers:
            paging = {
                "page": paging.get("page"),
                "limit": paging.get("limit")
            }  # remove other params
            body.update(paging)
        else:
            paging = {}  # disable to remove paging-related links

        try:
            body["links"] = get_process_list_links(request, paging,
                                                   total_processes)
        except IndexError as exc:
            raise HTTPBadRequest(
                json={
                    "description": str(exc),
                    "cause": "Invalid paging parameters.",
                    "error": type(exc).__name__,
                    "value": repr_json(paging, force_string=False)
                })

        # if 'EMS/HYBRID' and '?providers=True', also fetch each provider's processes
        if with_providers:
            # param 'check' enforced because must fetch for listing of available processes (GetCapabilities)
            # when 'ignore' is not enabled, any failing definition should raise any derived 'ServiceException'
            services = get_provider_services(request,
                                             ignore=ignore,
                                             check=True)
            body.update({
                "providers": [
                    svc.summary(request, ignore=ignore) if detail else {
                        "id": svc.name
                    } for svc in services
                ]
            })
            invalid_services = [False] * len(services)
            for i, provider in enumerate(services):
                # ignore failing parsing of the service description
                if body["providers"][i] is None:
                    invalid_services[i] = True
                    continue
                # attempt parsing available processes and ignore again failing items
                processes = provider.processes(request, ignore=ignore)
                if processes is None:
                    invalid_services[i] = True
                    continue
                total_processes += len(processes)
                body["providers"][i].update({
                    "processes": (processes if detail
                                  else [get_any_id(proc) for proc in processes])
                })
            if any(invalid_services):
                LOGGER.debug(
                    "Invalid providers dropped due to failing parsing and ignore query: %s",
                    [
                        svc.name
                        for svc, status in zip(services, invalid_services)
                        if status
                    ])
                body["providers"] = [
                    svc
                    for svc, ignore in zip(body["providers"], invalid_services)
                    if not ignore
                ]

        body["total"] = total_processes
        body["description"] = sd.OkGetProcessesListResponse.description
        LOGGER.debug("Process listing generated, validating schema...")
        body = sd.MultiProcessesListing().deserialize(body)
        return HTTPOk(json=body)

    except ServiceException as exc:
        LOGGER.debug(
            "Error when listing provider processes using query parameter raised: [%s]",
            exc,
            exc_info=exc)
        raise HTTPServiceUnavailable(
            json={
                "description": ("At least one provider could not list its processes. "
                                "Failing provider errors were requested to not be ignored."),
                "exception": fully_qualified_name(exc),
                "error": str(exc),
            })
    except HTTPException:
        raise
    # FIXME: handle colander invalid directly in tween (https://github.com/crim-ca/weaver/issues/112)
    except colander.Invalid as ex:
        raise HTTPBadRequest("Invalid schema: [{!s}]".format(ex))
Code Example #26
0
    def get_report(self):
        self.layername = self.request.matchdict["layername"]
        layer_config = self.config["layers"].get(self.layername)

        if layer_config is None:
            raise HTTPBadRequest("Layer not found")

        multiple = layer_config.get("multiple", False)
        ids = self.request.matchdict["ids"]
        if multiple:
            ids = ids.split(",")

        features_ids = [self.layername + "." + id_ for id_ in ids] if multiple \
            else [self.layername + "." + ids]

        if layer_config["check_credentials"]:
            # FIXME: support of mapserver groups
            ogc_server = models.DBSession.query(main.OGCServer).filter(
                main.OGCServer.name == layer_config["ogc_server"]).one()
            ogc_server_ids = [ogc_server]

            private_layers_object = get_private_layers(ogc_server_ids)
            private_layers_names = [
                private_layers_object[oid].name
                for oid in private_layers_object
            ]

            protected_layers_object = get_protected_layers(
                self.request.user, ogc_server_ids)
            protected_layers_names = [
                protected_layers_object[oid].name
                for oid in protected_layers_object
            ]

            if self.layername in private_layers_names and self.layername not in protected_layers_names:
                raise HTTPForbidden

        srs = layer_config["srs"]

        mapserv_url = self.request.route_url(
            "mapserverproxy", _query={'ogcserver': layer_config["ogc_server"]})
        vector_request_url = add_url_params(
            mapserv_url, {
                "service": "WFS",
                "version": "1.1.0",
                "outputformat": "gml3",
                "request": "GetFeature",
                "typeName": self.layername,
                "featureid": ",".join(features_ids),
                "srsName": srs
            })

        spec = layer_config["spec"]
        if spec is None:
            spec = {
                "layout": self.layername,
                "outputFormat": "pdf",
                "attributes": {
                    "ids": [{
                        "id": id_
                    } for id_ in ids]
                } if multiple else {
                    "id": ids
                }
            }
            map_config = layer_config.get("map")
            if map_config is not None:
                spec["attributes"]["map"] = self._build_map(
                    mapserv_url, vector_request_url, srs, map_config)

            maps_config = layer_config.get("maps")
            if maps_config is not None:
                spec["attributes"]["maps"] = []
                for map_config in maps_config:
                    spec["attributes"]["maps"].append(
                        self._build_map(mapserv_url, vector_request_url, srs,
                                        map_config))
        else:
            datasource = layer_config.get("datasource", True)
            if multiple and datasource:
                data = dumps(layer_config["data"])
                datas = [
                    loads(
                        data % {
                            "layername": self.layername,
                            "id": id_,
                            "srs": srs,
                            "mapserv_url": mapserv_url,
                            "vector_request_url": vector_request_url,
                        }) for id_ in ids
                ]
                self.walker(spec, "%(datasource)s", datas)
                spec = loads(
                    dumps(spec) % {
                        "layername": self.layername,
                        "srs": srs,
                        "mapserv_url": mapserv_url,
                        "vector_request_url": vector_request_url,
                    })
            elif multiple:
                spec = loads(
                    dumps(spec) % {
                        "layername": self.layername,
                        "ids": ",".join(ids),
                        "srs": srs,
                        "mapserv_url": mapserv_url,
                        "vector_request_url": vector_request_url,
                    })
            else:
                spec = loads(
                    dumps(spec) % {
                        "layername": self.layername,
                        "id": ids,
                        "srs": srs,
                        "mapserv_url": mapserv_url,
                        "vector_request_url": vector_request_url,
                    })

        return self._do_print(spec)
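
The spec handling above relies on %-substituting placeholders into a JSON dump of the configured template and parsing it back; the trick in isolation (template contents are made up):

from json import dumps, loads

spec_template = {
    "layout": "%(layername)s",
    "attributes": {"id": "%(id)s", "srs": "%(srs)s"},
}

spec = loads(dumps(spec_template) % {
    "layername": "roads",
    "id": "roads.42",
    "srs": "EPSG:2056",
})
# -> {'layout': 'roads', 'attributes': {'id': 'roads.42', 'srs': 'EPSG:2056'}}
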
Code Example #27
0
def report_status(context, request, file_ext):
    '''
    Generate a report with all the computers that belong to an OU.
    If the administrator is a superadmin, the generated report will contain
    all the computers in the database.

    Args:
        ou_id (string) : ID of the OU.

    Returns:
        headers (list) : The headers of the table to export
        rows (list)    : Rows with the report data
        widths (list)  : The widths of the columns of the table to export
        page           : Translation of the word "page" to the current language
        of             : Translation of the word "of" to the current language
        report_type    : Type of report (html, csv or pdf)
    '''

    # Check current user permissions
    ou_id = check_visibility_of_ou(request)
    if ou_id is None:
        raise HTTPBadRequest()

    # Get computer data
    query = request.db.nodes.find({
        'type': 'computer',
        'path': get_filter_nodes_belonging_ou(ou_id)
    }).sort([('error_last_chef_client', pymongo.DESCENDING),
             ('last_agent_run_time', pymongo.DESCENDING),
             ('name', pymongo.ASCENDING)])

    rows = []
    orders = []

    current_time = int(time.time())
    logger.debug("report_status: current_time = {}".format(current_time))

    # update_error_interval: Hours. Converts it to seconds
    update_error_interval = timedelta(hours=int(get_current_registry(
    ).settings.get('update_error_interval', 24))).seconds
    logger.debug("report_status: update_error_interval = {}".format(
        update_error_interval))

    # gecos-agent runs every 60 minutes (cron resource: minutes 30)
    # See https://github.com/gecos-team/gecos-workstation-management-cookbook/blob/master/recipes/default.rb (line: 57)
    # 10-min max delay margin of chef-client concurrent executions
    # See https://github.com/gecos-team/gecosws-agent/blob/trusty/scripts/gecos-chef-client-wrapper (line: 30)
    # 15-min delay margin of network or chef-client execution
    # 60 + 10 + 15 = 85
    delay_margin = timedelta(minutes=85).seconds

    for item in query:
        row = []
        order = []
        status = '0'

        last_agent_run_time = int(item.get('last_agent_run_time', 0))
        logger.debug("report_status: last_agent_run_time = {}".format(
            last_agent_run_time))

        if last_agent_run_time + delay_margin >= current_time:
            item['status'] = '<div class="centered" style="width: 100%">'\
                '<img alt="OK" src="/static/images/checkmark.jpg"/></div>' \
                    if file_ext != 'csv' else 'OK'

            status = '0'
        # Chef-run error or update_error_interval hours has elapsed from last agent run time
        elif (item['error_last_chef_client']
              or last_agent_run_time + update_error_interval >= current_time):
            item['status'] = '<div class="centered" style="width: 100%">'\
                '<img alt="ERROR" src="/static/images/xmark.jpg"/></div>' \
                    if file_ext != 'csv' else 'ERROR'
            status = '2'

        # delay_margin < last_agent_run_time < update_error_interval
        else:
            item['status'] = '<div class="centered" style="width: 100%">'\
                '<img alt="WARN" src="/static/images/alertmark.jpg"/></div>' \
                    if file_ext != 'csv' else 'WARN'
            status = '1'

        if file_ext == 'pdf':
            row.append(treatment_string_to_pdf(item, 'name', 20))
            order.append('')
            row.append(item['_id'])
            order.append('')

            if last_agent_run_time != 0:
                row.append(
                    datetime.utcfromtimestamp(last_agent_run_time).strftime(
                        '%d/%m/%Y %H:%M:%S'))
            else:
                row.append(' -- ')
            order.append(last_agent_run_time)

            row.append(item['status'])
            order.append(status)
        else:
            if file_ext == 'csv':
                row.append(treatment_string_to_csv(item, 'name'))
            else:
                row.append(get_html_node_link(item))
            order.append('')
            row.append(item['_id'])
            order.append('')
            if last_agent_run_time != 0:
                row.append(
                    datetime.utcfromtimestamp(last_agent_run_time).strftime(
                        '%d/%m/%Y %H:%M:%S'))
            else:
                row.append('--')
            order.append(last_agent_run_time)
            row.append(treatment_string_to_csv(item, 'status'))
            order.append(status)

        rows.append(row)
        orders.append(order)

    header = (_(u'Name'), _(u'Id'), _(u'Agent last runtime'), _(u'Status'))

    # Column widths in percentage
    if file_ext == 'pdf':
        widths = (45, 20, 20, 15)
    else:
        widths = (15, 35, 15, 20)

    title = _(u'Computer with anomalies')

    now = datetime.now().strftime("%d/%m/%Y %H:%M")

    # Sort rows
    rows = sorted(rows, key=lambda i: (get_status(i[3]), i[0].lower()))

    return {
        'headers': header,
        'rows': rows,
        'orders': orders,
        'default_order': [[3, 'desc'], [0, 'asc']],
        'widths': widths,
        'report_title': title,
        'page': _(u'Page'),
        'of': _(u'of'),
        'report_type': file_ext,
        'now': now
    }
Code Example #28
0
def create_post(request):
    """
    Create a new post in this discussion.

    We use post, not put, because we don't know the id of the post
    """
    localizer = request.localizer
    request_body = json.loads(request.body)
    user_id = authenticated_userid(request)
    if not user_id:
        raise HTTPUnauthorized()

    body = request_body.get('body', None)
    html = request_body.get('html',
                            None)  # BG: Is this used now? I cannot see it.
    reply_id = request_body.get('reply_id', None)
    idea_id = request_body.get('idea_id', None)
    subject = request_body.get('subject', None)
    publishes_synthesis_id = request_body.get('publishes_synthesis_id', None)

    if not body and not publishes_synthesis_id:
        # Should we allow empty messages otherwise?
        raise HTTPBadRequest(localizer.translate(_("Your message is empty")))

    if reply_id:
        in_reply_to_post = Post.get_instance(reply_id)
    else:
        in_reply_to_post = None

    if idea_id:
        in_reply_to_idea = Idea.get_instance(idea_id)
    else:
        in_reply_to_idea = None

    discussion = request.context

    ctx = discussion.get_instance_context(request=request)
    if html:
        log.warning("Still using html")
        # how to guess locale in this case?
        body = LangString.create(sanitize_html(html))
        # TODO: LocalPosts are pure text right now.
        # Allowing HTML requires changes to the model.
    elif body:
        # TODO: Accept HTML body.
        for e in body['entries']:
            e['value'] = sanitize_text(e['value'])
        body_ctx = LangString.create_from_json(body, context=ctx)
        body = body_ctx._instance
    else:
        body = LangString.EMPTY(discussion.db)

    if subject:
        for e in subject['entries']:
            e['value'] = sanitize_text(e['value'])
        subject_ctx = LangString.create_from_json(subject, context=ctx)
        subject = subject_ctx._instance
    else:
        from assembl.models import LocaleLabel
        locale = LocaleLabel.UNDEFINED
        # print(in_reply_to_post.subject, discussion.topic)
        if in_reply_to_post and in_reply_to_post.get_title():
            original_subject = in_reply_to_post.get_title().first_original()
            if original_subject:
                locale = original_subject.locale_code
                subject = (original_subject.value or ''
                           if in_reply_to_post.get_title() else '')
        elif in_reply_to_idea:
            # TODO: This should use a cascade like the frontend
            # also, some ideas have extra langstring titles
            subject = (in_reply_to_idea.short_title
                       if in_reply_to_idea.short_title else '')
            locale = discussion.main_locale
        else:
            subject = discussion.topic if discussion.topic else ''
            locale = discussion.main_locale
        # print subject
        if subject is not None and len(subject):
            new_subject = "Re: " + SUBJECT_RE.sub('', subject).strip()
            if (in_reply_to_post and new_subject == subject
                    and in_reply_to_post.get_title()):
                # reuse subject and translations
                subject = in_reply_to_post.get_title().clone(discussion.db)
            else:
                # how to guess locale in this case?
                subject = LangString.create(new_subject, locale)
        else:
            capture_message(
                "A message is about to be written to the database with an "
                "empty subject.  This is not supposed to happen.")
            subject = LangString.EMPTY(discussion.db)

    post_constructor_args = {
        'discussion': discussion,
        'creator_id': user_id,
        'subject': subject,
        'body': body
    }

    if publishes_synthesis_id:
        published_synthesis = Synthesis.get_instance(publishes_synthesis_id)
        post_constructor_args['publishes_synthesis'] = published_synthesis
        new_post = SynthesisPost(**post_constructor_args)
        new_post.finalize_publish()
    else:
        new_post = LocalPost(**post_constructor_args)
    new_post.guess_languages()

    discussion.db.add(new_post)
    discussion.db.flush()

    if in_reply_to_post:
        new_post.set_parent(in_reply_to_post)
    if in_reply_to_idea:
        idea_post_link = IdeaRelatedPostLink(creator_id=user_id,
                                             content=new_post,
                                             idea=in_reply_to_idea)
        discussion.db.add(idea_post_link)
        idea = in_reply_to_idea
        while idea:
            idea.send_to_changes()
            parents = idea.get_parents()
            idea = next(iter(parents)) if parents else None
    else:
        discussion.root_idea.send_to_changes()
    for source in discussion.sources:
        if 'send_post' in dir(source):
            source.send_post(new_post)
    permissions = request.permissions

    return new_post.generic_json('default', user_id, permissions)
Code Example #29
0
def downloads_view(request):

    # Use the new config variable name but support the old one for backwards compatibility.
    default_interval = _setting(request, 'shavar', 'default_interval', None)
    backoff_delay = _setting(request, 'shavar', 'client_backoff_delay', None)

    # Throw a fit if both are specified
    if default_interval is not None and backoff_delay is not None:
        raise ConfigurationError("Specify either default_interval or "
                                 "client_backoff_delay in the [shavar] "
                                 "section of your config but not both.\n"
                                 "client_backoff_delay is preferred.")

    delay = backoff_delay or default_interval or 30 * 60

    resp_payload = {'interval': delay, 'lists': {}}

    try:
        parsed = parse_downloads(request)
    except ParseError as e:
        logger.error(e)
        raise HTTPBadRequest(e)

    for list_info in parsed:
        # Do we even serve that list?
        if list_info.name not in _setting(
            request, 'shavar', 'list_names_served', tuple()
        ):
            logger.warn('Unknown list "%s" reported; ignoring'
                        % list_info.name)
            annotate_request(request, "shavar.downloads.unknown.list", 1)
            continue
        provider, type_, format_ = list_info.name.split('-', 2)
        if not provider or not type_ or not format_:
            s = 'Unknown list format for "%s"; ignoring' % list_info.name
            logger.error(s)
            annotate_request(request, "shavar.downloads.unknown.format", 1)
            raise HTTPBadRequest(s)

        app_ver = str(request.GET['appver'])
        sblist, list_ver = get_list(request, list_info.name, app_ver)

        # Calculate delta
        to_add, to_sub = sblist.delta(list_info.adds, list_info.subs)

        # No delta?  No response, I think.  Spec doesn't actually say.
        if not to_add and not to_sub:
            continue

        # Fetch the appropriate chunks
        resp_payload['lists'][list_info.name] = {
            'sblist': sblist,
            'ldata': sblist.fetch(to_add, to_sub),
            'list_ver': list_ver
        }

        # Not publishing deltas for this list?  Delete all previous chunks to
        # make way for the new corpus
        # if _setting(request, list_info.name, 'not_publishing_deltas'):
        if sblist.settings.get('not_publishing_deltas'):
            # Raise hell if we have suspicious data with this flag set
            if (len(to_add) != 1 or len(to_sub) != 0):
                logger.error("Configuration error! {0}'s configuration has "
                             "'not_publishing_deltas' enabled but its data "
                             "file has more than one chunk to serve."
                             .format(list_info.name))
                raise HTTPInternalServerError()
            resp_payload['lists'][list_info.name]['adddels'] = list_info.adds

    return HTTPOk(content_type="application/octet-stream",
                  body=format_downloads(request, resp_payload))
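
The provider/type/format split above assumes Safe Browsing-style list names of the form provider-type-format; for instance (list name illustrative):

list_name = "mozpub-track-digest256"  # illustrative shavar-style list name
provider, type_, format_ = list_name.split('-', 2)
# provider == 'mozpub', type_ == 'track', format_ == 'digest256'
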
Code Example #30
0
 def _do_update_from_json(self,
                          json,
                          parse_def,
                          ctx,
                          duplicate_handling=None,
                          object_importer=None):
     from ..auth.util import user_has_permission
     user_id = ctx.get_user_id()
     target_user_id = user_id
     user = ctx.get_instance_of_class(User)
     if user:
         target_user_id = user.id
     if self.user_id:
         if target_user_id != self.user_id:
             if not user_has_permission(self.discussion_id, user_id,
                                        P_ADMIN_DISC):
                 raise HTTPUnauthorized()
         # For now, do not allow changing user, it's way too complicated.
         if 'user' in json and User.get_database_id(
                 json['user']) != self.user_id:
             raise HTTPBadRequest()
     else:
         json_user_id = json.get('user', None)
         if json_user_id is None:
             json_user_id = target_user_id
         else:
             json_user_id = User.get_database_id(json_user_id)
             if json_user_id != user_id and not user_has_permission(
                     self.discussion_id, user_id, P_ADMIN_DISC):
                 raise HTTPUnauthorized()
         self.user_id = json_user_id
     if self.discussion_id:
         if 'discussion_id' in json and Discussion.get_database_id(
                 json['discussion_id']) != self.discussion_id:
             raise HTTPBadRequest()
     else:
         discussion_id = json.get('discussion',
                                  None) or ctx.get_discussion_id()
         if discussion_id is None:
             raise HTTPBadRequest()
         self.discussion_id = Discussion.get_database_id(discussion_id)
     new_type = json.get('@type', self.type)
     if self.external_typename() != new_type:
         polymap = inspect(self.__class__).polymorphic_identity
         if new_type not in polymap:
             raise HTTPBadRequest()
         new_type = polymap[new_type].class_
         new_instance = self.change_class(new_type)
         return new_instance._do_update_from_json(
             json, parse_def, ctx, DuplicateHandling.USE_ORIGINAL,
             object_importer)
     creation_origin = json.get('creation_origin', "USER_REQUESTED")
     if creation_origin is not None:
         self.creation_origin = NotificationCreationOrigin.from_string(
             creation_origin)
     if json.get('parent_subscription', None) is not None:
         self.parent_subscription_id = self.get_database_id(
             json['parent_subscription'])
     status = json.get('status', None)
     if status:
         status = NotificationSubscriptionStatus.from_string(status)
         if status != self.status:
             self.status = status
             self.last_status_change_date = datetime.utcnow()
     return self