Example 1
def handle_export_request():
  """Export request handler"""
  # pylint: disable=too-many-locals
  try:
    with benchmark("handle export request data"):
      data = parse_export_request()
      objects = data.get("objects")
      export_to = data.get("export_to")
      current_time = data.get("current_time")
    with benchmark("Generate CSV string"):
      csv_string, object_names = make_export(objects)
    with benchmark("Make response."):
      filename = "{}_{}.csv".format(object_names, current_time)
      return export_file(export_to, filename, csv_string)
  except BadQueryException as exception:
    raise BadRequest(exception.message)
  except Unauthorized as ex:
    raise Unauthorized("{} Try to reload /export page".format(ex.message))
  except HttpError as e:
    message = json.loads(e.content).get("error").get("message")
    if e.resp.code == 401:
      raise Unauthorized("{} Try to reload /export page".format(message))
    raise InternalServerError(message)
  except Exception as e:  # pylint: disable=broad-except
    logger.exception("Export failed: %s", e.message)
    raise InternalServerError("Export failed due to internal server error.")
Example 2
def create_task(name, url, queued_callback=None, parameters=None,
                method="POST", operation_type=None, payload=None,
                queue=DEFAULT_QUEUE, retry_options=None):
  """Create and enqueue a background task."""
  with benchmark("Create background task"):
    parameters = parameters or dict()
    retry_options = retry_options or RETRY_OPTIONS
    bg_operation = None
    if operation_type:
      with benchmark("Create background task. Create BackgroundOperation"):
        bg_operation = _check_and_create_bg_operation(operation_type,
                                                      parameters)
    with benchmark("Create background task. Create BackgroundTask"):
      bg_task_name = "{}_{}".format(uuid.uuid4(), name)
      bg_task = _create_bg_task(
          name=bg_task_name,
          parameters=parameters,
          payload=payload,
          bg_operation=bg_operation)
    with benchmark("Create background task. Enqueue task"):
      # Task requests have to contain data to pass content_type validation.
      payload = payload or "{}"
      # Tasks are limited to 100 KB by taskqueue, so data should be
      # extracted from the BackgroundTask object.
      _enqueue_task(
          name=bg_task_name,
          url=url,
          method=method,
          payload=payload,
          bg_task=bg_task,
          queued_callback=queued_callback,
          queue=queue,
          retry_options=retry_options)
    return bg_task
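A hypothetical call site for create_task; the endpoint URL, operation type, and payload below are illustrative, not taken from the source:

import json

task = create_task(
    name="export_csv",
    url="/_background_tasks/export_csv",  # hypothetical handler endpoint
    payload=json.dumps({"objects": []}),  # illustrative payload
    operation_type="export",              # illustrative operation type
)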
Example 3
def do_reindex(with_reindex_snapshots=False):
  """Update the full text search index."""

  indexer = get_indexer()
  indexed_models = {
      m.__name__: m for m in all_models.all_models
      if issubclass(m, mixin.Indexed) and m.REQUIRED_GLOBAL_REINDEX
  }
  people_query = db.session.query(all_models.Person.id,
                                  all_models.Person.name,
                                  all_models.Person.email)
  indexer.cache["people_map"] = {p.id: (p.name, p.email) for p in people_query}
  indexer.cache["ac_role_map"] = dict(db.session.query(
      all_models.AccessControlRole.id,
      all_models.AccessControlRole.name,
  ))
  for model_name in sorted(indexed_models.keys()):
    logger.info("Updating index for: %s", model_name)
    with benchmark("Create records for %s" % model_name):
      model = indexed_models[model_name]
      ids = [obj.id for obj in model.query]
      ids_count = len(ids)
      handled_ids = 0
      for ids_chunk in utils.list_chunks(ids, chunk_size=REINDEX_CHUNK_SIZE):
        handled_ids += len(ids_chunk)
        logger.info("%s: %s / %s", model.__name__, handled_ids, ids_count)
        model.bulk_record_update_for(ids_chunk)
        db.session.commit()

  if with_reindex_snapshots:
    logger.info("Updating index for: %s", "Snapshot")
    with benchmark("Create records for %s" % "Snapshot"):
      snapshot_indexer.reindex()

  indexer.invalidate_cache()
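The reindex loop above, like several other examples on this page, splits id lists with utils.list_chunks so each chunk can be committed separately. The helper is not shown; a minimal sketch, assuming it simply yields successive fixed-size slices (the real default chunk size is not visible in these snippets):

def list_chunks(items, chunk_size=100):
  """Yield successive chunk_size-sized slices of items."""
  for start in range(0, len(items), chunk_size):
    yield items[start:start + chunk_size]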
Example 4
  def _get_assessments_json(self, obj, assessments):
    """Get json representation for all assessments in result set."""
    if not assessments:
      return []
    with benchmark("get documents of related assessments"):
      evidence_json_map = self._get_evidences(assessments)
    with benchmark("get snapshots of related assessments"):
      snapshot_json_map = self._get_snapshots(obj, assessments)

    with benchmark("generate related_assessment json"):
      assessments_json = []
      for assessment in assessments:
        single_json = assessment.log_json_base()
        single_json["audit"] = assessment.audit.log_json_base()
        single_json["verified"] = assessment.verified
        single_json["custom_attribute_values"] = [
            cav.log_json_base()
            for cav in assessment.custom_attribute_values
        ]
        single_json["custom_attribute_definitions"] = [
            cad.log_json_base()
            for cad in assessment.custom_attribute_definitions
        ]
        single_json["snapshots"] = snapshot_json_map[assessment.id]
        single_json["evidence"] = evidence_json_map[assessment.id]
        single_json["audit"]["viewLink"] = utils.view_url_for(
            assessment.audit)
        single_json["viewLink"] = utils.view_url_for(assessment)
        assessments_json.append(single_json)
      return assessments_json
Example 5
def propagate_all():
  """Re-evaluate propagation for all objects."""
  with utils.benchmark("Run propagate_all"):
    with utils.benchmark("Add missing acl entries"):
      _add_missing_acl_entries()
    with utils.benchmark("Get non propagated acl ids"):
      query = db.session.query(
          all_models.AccessControlList.id,
      ).filter(
          all_models.AccessControlList.parent_id.is_(None),
      )
      all_acl_ids = [acl.id for acl in query]

    with utils.benchmark("Propagate normal acl entries"):
      count = len(all_acl_ids)
      propagated_count = 0
      for acl_ids in utils.list_chunks(all_acl_ids, chunk_size=50):
        propagated_count += len(acl_ids)
        logger.info("Propagating ACL entries: %s/%s", propagated_count, count)
        _delete_propagated_acls(acl_ids)

        flask.g.new_acl_ids = acl_ids
        flask.g.new_relationship_ids = set()
        flask.g.deleted_objects = set()
        propagate()
Example 6
def handle_export_request():
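  """Export request handler"""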
  try:
    with benchmark("handle export request"):
      data = parse_export_request()
      query_helper = QueryHelper(data)
      ids_by_type = query_helper.get_ids()
    with benchmark("Generate CSV array"):
      converter = Converter(ids_by_type=ids_by_type)
      csv_data = converter.to_array()
    with benchmark("Generate CSV string"):
      csv_string = generate_csv_string(csv_data)
    with benchmark("Make response."):
      object_names = "_".join(converter.get_object_names())
      filename = "{}.csv".format(object_names)
      headers = [
          ("Content-Type", "text/csv"),
          ("Content-Disposition",
           "attachment; filename='{}'".format(filename)),
      ]
      return current_app.make_response((csv_string, 200, headers))
  except BadQueryException as exception:
    raise BadRequest(exception.message)
  except:  # pylint: disable=bare-except
    logger.exception("Export failed")
  raise BadRequest("Export failed due to server error.")
Example 7
def propagate():
  """Propagate all ACLs caused by objects in new_objects list.

  Args:
    new_acl_ids: list of newly created ACL ids,
    new_relationship_ids: list of newly created relationship ids,
  """
  if not (hasattr(flask.g, "new_acl_ids") and
          hasattr(flask.g, "new_relationship_ids") and
          hasattr(flask.g, "deleted_objects")):
    return

  if flask.g.deleted_objects:
    with utils.benchmark("Delete internal ACL entries for deleted objects"):
      _delete_orphan_acl_entries(flask.g.deleted_objects)

  # The order of propagation of relationships and other ACLs is important
  # because relationship code excludes other ACLs from propagating.
  if flask.g.new_relationship_ids:
    with utils.benchmark("Propagate ACLs for new relationships"):
      _propagate_relationships(
          flask.g.new_relationship_ids,
          flask.g.new_acl_ids,
      )
  if flask.g.new_acl_ids:
    with utils.benchmark("Propagate new ACL entries"):
      _propagate(flask.g.new_acl_ids)

  del flask.g.new_acl_ids
  del flask.g.new_relationship_ids
  del flask.g.deleted_objects
Example 8
 def to_array(self):
   with benchmark("Create block converters"):
     self.block_converters_from_ids()
   with benchmark("Handle row data"):
     self.handle_row_data()
   with benchmark("Make block array"):
     return self.to_block_array()
Example 9
  def _apply_limit(self, query, limit):
    """Apply limits for pagination.

    Args:
      query: filter query;
      limit: a tuple of indexes in the format (from, to); objects are sliced
            as objects[from:to].

    Returns:
      matched objects ids and total count.
    """
    page_size, first = self._get_limit(limit)

    with benchmark("Apply limit: _apply_limit > query_limit"):
      # Note: limit request syntax is limit:[0,10]. We count the offset
      # from 0 because the offset of the initial row in SQL is 0 (not 1).
      ids = [obj.id for obj in query.limit(page_size).offset(first)]
    with benchmark("Apply limit: _apply_limit > query_count"):
      if len(ids) < page_size:
        total = len(ids) + first
      else:
        # Note: using func.count() because query.count() generates an
        # additional subquery
        count_q = query.statement.with_only_columns([sa.func.count()])
        total = db.session.execute(count_q).scalar()

    return ids, total
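The count shortcut in _apply_limit is worth spelling out: with limit [20, 30], page_size is 10 and first is 20; if the page query returns only 4 rows, the result set must end inside this page, so the total is 20 + 4 = 24 and no COUNT query is needed. A standalone restatement of that arithmetic (hypothetical helper, not part of the source):

def total_from_page(first, page_size, page_rows, count_fn):
  """Derive the total row count, calling count_fn only for full pages."""
  if len(page_rows) < page_size:
    # A short page means the result set ends here.
    return first + len(page_rows)
  # A full page is inconclusive; fall back to an actual COUNT query.
  return count_fn()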
Example 10
def recompute_attrs_for_revisions(ids_chunk):
  """Reindex chunk of CAs."""
  with benchmark("Get revisions."):
    revisions = get_revisions(ids_chunk)

  with benchmark("Get all computed attributes"):
    attributes = get_computed_attributes()

  with benchmark("Group revisions by computed attributes"):
    attribute_groups = group_revisions(attributes, revisions)
  with benchmark("get all objects affected by computed attributes"):
    affected_objects = get_affected_objects(attribute_groups)
  with benchmark("Get all relationships for these computed objects"):
    relationships = get_relationships(affected_objects)
  with benchmark("Get snapshot data"):
    snapshot_map, snapshot_tag_map = get_snapshot_data(affected_objects)

  with benchmark("Compute values"):
    computed_values = compute_values(affected_objects, relationships,
                                     snapshot_map)

  with benchmark("Get computed attributes data"):
    attributes_data = get_attributes_data(computed_values)
  with benchmark("Get computed attribute full-text index data"):
    index_data = get_index_data(computed_values, snapshot_tag_map)
  with benchmark("Store attribute data and full-text index data"):
    store_data(attributes_data, index_data)
Example 11
  def get(self, id):
    with benchmark("Query for object"):
      obj = self.get_object(id)
    if obj is None:
      return self.not_found_response()
    if 'Accept' in self.request.headers and \
       'text/html' not in self.request.headers['Accept']:
      return current_app.make_response((
          'text/html', 406, [('Content-Type', 'text/plain')]))
    if not permissions.is_allowed_read(self.model.__name__, obj.id,
                                       obj.context_id):
      raise Forbidden()
    if not permissions.is_allowed_view_object_page_for(obj):
      raise Forbidden()

    with benchmark("Render"):
      rendered_template = self.render_template_for_object(obj)

    # FIXME: Etag based on rendered output, or object itself?
    # if 'If-None-Match' in self.request.headers and \
    #    self.request.headers['If-None-Match'] == self.etag(object_for_json):
    #  return current_app.make_response((
    #    '', 304, [('Etag', self.etag(object_for_json))]))

    return rendered_template
Example 12
def do_reindex():
  """Update the full text search index."""

  indexer = get_indexer()
  indexed_models = {
      m.__name__: m for m in all_models.all_models
      if issubclass(m, mixin.Indexed) and m.REQUIRED_GLOBAL_REINDEX
  }
  people_query = db.session.query(all_models.Person.id,
                                  all_models.Person.name,
                                  all_models.Person.email)
  indexer.cache["people_map"] = {p.id: (p.name, p.email) for p in people_query}
  indexer.cache["ac_role_map"] = dict(db.session.query(
      all_models.AccessControlRole.id,
      all_models.AccessControlRole.name,
  ))
  for model_name in sorted(indexed_models.keys()):
    logger.info("Updating index for: %s", model_name)
    with benchmark("Create records for %s" % model_name):
      model = indexed_models[model_name]
      for query_chunk in generate_query_chunks(db.session.query(model.id)):
        model.bulk_record_update_for([i.id for i in query_chunk])
        db.session.commit()

  logger.info("Updating index for: %s", "Snapshot")
  with benchmark("Create records for %s" % "Snapshot"):
    reindex_snapshots()
  indexer.invalidate_cache()
Example 13
  def _get_snapshottable_objects(self, obj):
    """Get snapshottable objects from parent object's neighborhood."""
    with benchmark("Snapshot._get_snapshotable_objects"):
      related_mappings = set()
      object_rules = self.rules.rules[obj.type]

      with benchmark("Snapshot._get_snapshotable_objects.related_mappings"):
        relatable_rules = {
            rule for rule in object_rules["fst"]
            if isinstance(rule, basestring)
        }

        if relatable_rules:
          related_mappings = obj.related_objects(relatable_rules)

      with benchmark("Snapshot._get_snapshotable_objects.direct mappings"):
        direct_mappings = {getattr(obj, rule.name)
                           for rule in object_rules["fst"]
                           if isinstance(rule, Attr)}

      related_objects = {Stub.from_object(obj)
                         for obj in related_mappings | direct_mappings}

      with benchmark("Snapshot._get_snapshotable_objects.fetch neighborhood"):
        return self._fetch_neighborhood(obj, related_objects)
Example 14
 def build_cycle_tasks(self):
   """Builds CalendarEvents based on CycleTaskGroupObjectTasks."""
   with benchmark("Pre-loading data."):
     self._preload_data()
   with benchmark("Generating of events for cycle tasks."):
     self._generate_events()
   with benchmark("Generating event descriptions"):
     self._generate_event_descriptions()
   db.session.commit()
Example 15
 def generate_row_data(self):
   """Get row data from all row converters while exporting."""
   if self.ignore:
     return
   for row_converter in self.row_converters_from_ids():
     obj = row_converter.obj
     with benchmark("Create handlers for object fields"):
       row_converter.handle_obj_row_data()
     with benchmark("Load data for {} {}".format(obj.type, obj.id)):
       yield row_converter.to_array(self.fields)
Example 16
  def _get_ids(self, object_query):
    """Get a set of ids of objects described in the filters."""

    object_name = object_query["object_name"]
    expression = object_query.get("filters", {}).get("expression")

    if expression is None:
      return set()
    object_class = inflector.get_model(object_name)
    query = db.session.query(object_class.id)

    tgt_class = object_class
    if object_name == "Snapshot":
      child_type = self._get_snapshot_child_type(object_query)
      tgt_class = getattr(models.all_models, child_type, object_class)

    requested_permissions = object_query.get("permissions", "read")
    with benchmark("Get permissions: _get_ids > _get_type_query"):
      type_query = self._get_type_query(object_class, requested_permissions)
      if type_query is not None:
        query = query.filter(type_query)
    with benchmark("Parse filter query: _get_ids > _build_expression"):
      filter_expression = custom_operators.build_expression(
          expression,
          object_class,
          tgt_class,
          self.query
      )
      if filter_expression is not None:
        query = query.filter(filter_expression)
    if object_query.get("order_by"):
      with benchmark("Sorting: _get_ids > order_by"):
        query = self._apply_order_by(
            object_class,
            query,
            object_query["order_by"],
            tgt_class,
        )
    with benchmark("Apply limit"):
      limit = object_query.get("limit")
      if limit:
        ids, total = self._apply_limit(query, limit)
      else:
        ids = [obj.id for obj in query]
        total = len(ids)
      object_query["total"] = total

    if hasattr(flask.g, "similar_objects_query"):
      # delete similar_objects_query to handle the case when several queries
      # are POSTed in one request: the first one filters by similarity and
      # the second one doesn't but tries to sort by __similarity__
      delattr(flask.g, "similar_objects_query")
    return ids
Example 17
def get_revisions(pairs, revisions, filters=None):
  """Retrieve revision ids for pairs

  If revisions dictionary is provided it will validate that the selected
  revision exists in the objects revision history.

  Args:
    pairs: set([(parent_1, child_1), (parent_2, child_2), ...])
    revisions: dict({(parent, child): revision_id, ...})
    filters: predicate
  """
  with benchmark("snapshotter.helpers.get_revisions"):
    revision_id_cache = dict()

    if pairs:
      with benchmark("get_revisions.create caches"):
        child_stubs = {pair.child for pair in pairs}

        with benchmark("get_revisions.create child -> parents cache"):
          parents_cache = collections.defaultdict(set)
          for parent, child in pairs:
            parents_cache[child].add(parent)

      with benchmark("get_revisions.retrieve revisions"):
        query = db.session.query(
            models.Revision.id,
            models.Revision.resource_type,
            models.Revision.resource_id).filter(
            tuple_(
                models.Revision.resource_type,
                models.Revision.resource_id).in_(child_stubs)
        ).order_by(models.Revision.id.desc())
        if filters:
          for _filter in filters:
            query = query.filter(_filter)

      with benchmark("get_revisions.create revision_id cache"):
        for revid, restype, resid in query:
          child = Stub(restype, resid)
          for parent in parents_cache[child]:
            key = Pair(parent, child)
            if key in revisions:
              if revid == revisions[key]:
                revision_id_cache[key] = revid
              else:
                logger.warning(
                    "Specified revision for object %s but couldn't find the"
                    "revision '%s' in object history", key, revisions[key])
            else:
              if key not in revision_id_cache:
                revision_id_cache[key] = revid
    return revision_id_cache
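get_revisions keys its caches on Stub and Pair values. Their definitions are not included on this page; judging from usage such as Stub(restype, resid) and unpacking like `for parent, child in pairs`, they behave like named tuples, so a sketch under that assumption (the real classes also provide constructors such as Stub.from_object):

import collections

# Assumed shapes, inferred from how the snippets index and unpack them.
Stub = collections.namedtuple("Stub", ["type", "id"])
Pair = collections.namedtuple("Pair", ["parent", "child"])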
Example 18
  def _get_ids(self, object_query):
    """Get a set of ids of objects described in the filters."""

    object_name = object_query["object_name"]
    expression = object_query.get("filters", {}).get("expression")

    if expression is None:
      return set()
    object_class = inflector.get_model(object_name)
    if object_class is None:
      return set()
    query = db.session.query(object_class.id)

    tgt_class = object_class
    if object_name == "Snapshot":
      child_type = self._get_snapshot_child_type(object_query)
      tgt_class = getattr(models.all_models, child_type, object_class)

    requested_permissions = object_query.get("permissions", "read")
    with benchmark("Get permissions: _get_ids > _get_type_query"):
      type_query = self._get_type_query(object_class, requested_permissions)
      if type_query is not None:
        query = query.filter(type_query)
    with benchmark("Parse filter query: _get_ids > _build_expression"):
      filter_expression = custom_operators.build_expression(
          expression,
          object_class,
          tgt_class,
          self.query
      )
      if filter_expression is not None:
        query = query.filter(filter_expression)
    if object_query.get("order_by"):
      with benchmark("Sorting: _get_ids > order_by"):
        query = pagination.apply_order_by(
            object_class,
            query,
            object_query["order_by"],
            tgt_class,
        )
    with benchmark("Apply limit"):
      limit = object_query.get("limit")
      if limit:
        limit_query = pagination.apply_limit(query, limit)
        total = pagination.get_total_count(query)
        ids = [obj.id for obj in limit_query]
      else:
        ids = [obj.id for obj in query]
        total = len(ids)
      object_query["total"] = total

    return ids
Example 19
 def add_parent(self, obj):
   """Add parent object and automatically scan neighborhood for snapshottable
   objects."""
   with benchmark("Snapshot.add_parent_object"):
     key = Stub.from_object(obj)
     if key not in self.parents:
       with benchmark("Snapshot.add_parent_object.add object"):
         objs = self._get_snapshottable_objects(obj)
         self.parents.add(key)
         self.context_cache[key] = obj.context_id
         self.children = self.children | objs
         self.snapshots[key] = objs
     return self.parents
Example 20
def handle_export_request():
  """Export request handler"""
  # pylint: disable=too-many-locals
  with benchmark("handle export request data"):
    data = parse_export_request()
    objects = data.get("objects")
    export_to = data.get("export_to")
    current_time = data.get("current_time")
  with benchmark("Generate CSV string"):
    csv_string, object_names = make_export(objects)
  with benchmark("Make response."):
    filename = "{}_{}.csv".format(object_names, current_time)
    return export_file(export_to, filename, csv_string)
Example 21
 def related_objects(self, id):
   """Get data for assessment related_objects page."""
   # id name is used as a kw argument and can't be changed here
   # pylint: disable=invalid-name,redefined-builtin
   with benchmark("check assessment permissions"):
     assessment = models.Assessment.query.options(
         orm.undefer_group("Assessment_complete")
     ).get(id)
     if not permissions.is_allowed_read_for(assessment):
       raise Forbidden()
   with benchmark("Get assessment related_objects data"):
     data = self._get_related_data(assessment)
   with benchmark("Make response"):
     return self.json_success_response(data, )
Example 22
  def set_snapshot_result(self, assessment):
    """Set snapshot result"""
    query = self.query[0]
    with benchmark("Get assessment snapshot relationships"):
      snapshots = db.session.query(
          models.Snapshot
      ).join(
          models.Relationship,
          and_(
              models.Snapshot.id == models.Relationship.source_id,
              models.Relationship.source_type == "Snapshot",
              models.Relationship.destination_id == assessment.id,
              models.Relationship.destination_type == "Assessment"
          )
      ).union(
          db.session.query(
              models.Snapshot
          ).join(
              models.Relationship,
              and_(
                  models.Snapshot.id == models.Relationship.destination_id,
                  models.Relationship.destination_type == "Snapshot",
                  models.Relationship.source_id == assessment.id,
                  models.Relationship.source_type == "Assessment"
              )
          )
      ).all()
    with benchmark("Set assessment snapshot relationships"):
      data = []
      for snapshot in snapshots:
        data.append({
            "archived": snapshot.archived,
            "revision": snapshot.revision.log_json(),
            "related_sources": [],
            "parent": {
                "context_id": assessment.context_id,
                "href": "/api/audits/{}".format(assessment.audit_id),
                "type": "Audit",
                "id": assessment.audit_id,
            },
            "child_type": snapshot.child_type,
            "child_id": snapshot.child_id,
            "related_destinations": [],
            "id": snapshot.id,
            "revisions": [],
            "revision_id": snapshot.revision_id,
            "type": snapshot.type,
        })

      _set_data(query, data)
Example 23
def handle_export_request():
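  """Export request handler"""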
  try:
    with benchmark("handle export request"):
      data = parse_export_request()
      objects = data.get("objects")
      export_to = data.get("export_to")
      query_helper = QueryHelper(objects)
      ids_by_type = query_helper.get_ids()
    with benchmark("Generate CSV array"):
      converter = Converter(ids_by_type=ids_by_type)
      csv_data = converter.to_array()
    with benchmark("Generate CSV string"):
      csv_string = generate_csv_string(csv_data)
    with benchmark("Make response."):
      object_names = "_".join(converter.get_object_names())
      filename = "{}.csv".format(object_names)

      if export_to == "gdrive":
        credentials = get_credentials()

        http_auth = credentials.authorize(httplib2.Http())
        drive_service = discovery.build('drive', 'v3', http=http_auth)

        # make export to sheets
        file_metadata = {
            'name': filename,
            'mimeType': 'application/vnd.google-apps.spreadsheet'
        }
        media = http.MediaInMemoryUpload(csv_string,
                                         mimetype='text/csv',
                                         resumable=True)
        gfile = drive_service.files().create(
            body=file_metadata,
            media_body=media,
            fields='id, name, parents').execute()
        headers = [('Content-Type', 'application/json'), ]
        return current_app.make_response((json.dumps(gfile), 200, headers))
      if export_to == "csv":
        headers = [
            ("Content-Type", "text/csv"),
            ("Content-Disposition",
             "attachment; filename='{}'".format(filename)),
        ]
        return current_app.make_response((csv_string, 200, headers))
  except BadQueryException as exception:
    raise BadRequest(exception.message)
  except:  # pylint: disable=bare-except
    logger.exception("Export failed")
  raise BadRequest("Export failed due to server error.")
Example 24
def propagate_all():
  """Re-evaluate propagation for all objects."""
  with utils.benchmark("Run propagate_all"):
    from ggrc_workflows.models.hooks import workflow

    with utils.benchmark("Get non propagated acl ids"):
      query = db.session.query(
          all_models.AccessControlList.object_type,
          all_models.AccessControlList.id,
      ).filter(
          all_models.AccessControlList.parent_id.is_(None),
      )
      non_wf_acl_ids = []
      wf_acl_ids = []
      for object_type, acl_id in query:
        if object_type == "Workflow":
          wf_acl_ids.append(acl_id)
        else:
          non_wf_acl_ids.append(acl_id)

    with utils.benchmark("Propagate normal acl entries"):
      count = len(non_wf_acl_ids)
      propagated_count = 0
      for acl_ids in utils.list_chunks(non_wf_acl_ids):
        propagated_count += len(acl_ids)
        logger.info("Propagating ACL entries: %s/%s", propagated_count, count)
        _delete_propagated_acls(acl_ids)

        flask.g.new_acl_ids = acl_ids
        flask.g.new_relationship_ids = set()
        flask.g.deleted_objects = set()
        propagate()

    with utils.benchmark("Propagate WF related acl entries"):
      count = len(wf_acl_ids)
      propagated_count = 0
      for acl_ids in utils.list_chunks(wf_acl_ids):
        propagated_count += len(acl_ids)
        logger.info(
            "Propagating WF ACL entries: %s/%s",
            propagated_count,
            count
        )
        _delete_propagated_acls(acl_ids)

        flask.g.new_wf_acls = set(acl_ids)
        flask.g.new_wf_comment_ct_ids = set()
        flask.g.deleted_wf_objects = set()
        workflow.handle_acl_changes()
Example 25
def get_attributes_json():
  """Get a list of all custom attribute definitions"""
  with benchmark("Get attributes JSON"):
    with benchmark("Get attributes JSON: query"):
      attrs = models.CustomAttributeDefinition.eager_query().filter(
          models.CustomAttributeDefinition.definition_id.is_(None)
      ).all()
    with benchmark("Get attributes JSON: publish"):
      published = []
      for attr in attrs:
        published.append(publish(attr))
      published = publish_representation(published)
    with benchmark("Get attributes JSON: json"):
      publish_json = as_json(published)
      return publish_json
Example 26
 def set_audit_result(self, assessment):
   """Set audit result"""
   object_query = self.query[5]
   data = db.session.query(
       models.Audit.id,
       models.Audit.title,
       models.Audit.context_id,
   ).filter(
       models.Audit.id == assessment.audit_id
   ).first()
   with benchmark("Get audit data"):
     object_query["count"] = 1
     object_query["total"] = 1
     object_query["last_modified"] = None
     object_query["values"] = [{
         "id": data.id,
         "title": data.title,
         "type": models.Audit.__name__,
         "context": {
             "context_id": None,
             "href": "/api/contexts/{}".format(data.context_id),
             "id": data.context_id,
             "type": "Context",
         },
     }]
Example 27
  def generate_automappings(self, relationship):
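    """Generate automappings triggered by the given relationship."""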
    self.auto_mappings = set()
    with benchmark("Automapping generate_automappings"):
      # initial relationship is special since it is already created and
      # processing it would abort the loop so we manually enqueue the
      # neighborhood
      src = Stub.from_source(relationship)
      dst = Stub.from_destination(relationship)
      self._step(src, dst)
      self._step(dst, src)
      while self.queue:
        if len(self.auto_mappings) > self.COUNT_LIMIT:
          break
        src, dst = entry = self.queue.pop()

        if not (self._can_map_to(src, relationship) and
                self._can_map_to(dst, relationship)):
          continue

        created = self._ensure_relationship(src, dst)
        self.processed.add(entry)
        if not created:
          # If the edge already exists it means that auto mappings for it have
          # already been processed and it is safe to cut here.
          continue
        self._step(src, dst)
        self._step(dst, src)

      if len(self.auto_mappings) <= self.COUNT_LIMIT:
        self._flush(relationship)
      else:
        relationship._json_extras = {
            'automapping_limit_exceeded': True
        }
Example 28
def delete_all_computed_values():
  """Remove all attribute values for computed attributes."""
  with benchmark("Delete all computed attribute values"):
    attributes = get_computed_attributes()
    models.Attributes.query.filter(
        models.Attributes.attribute_template.in_(attributes)
    ).delete(synchronize_session="fetch")
Example 29
  def _get_reserved_names(cls, definition_type):
    """Get a list of all attribute names in all objects.

    On first call this function computes all possible names that can be used by
    any model and stores them in a static frozen set. All later calls just get
    this set.

    Returns:
      frozen set containing all reserved attribute names for the current
      object.
    """
    # pylint: disable=protected-access
    # The _inflector is a false positive in our app.
    with benchmark("Generate a list of all reserved attribute names"):
      if not cls._reserved_names.get(definition_type):
        definition_map = {model._inflector.table_singular: model
                          for model in ggrc.models.all_models.all_models}
        definition_map.update({model._inflector.model_singular: model
                              for model in ggrc.models.all_models.all_models})
        reserved_names = []
        definition_model = definition_map.get(definition_type)
        reserved_names.extend(cls._get_model_names(definition_model))

        if hasattr(definition_model, 'RELATED_TYPE'):
          related_model = definition_map.get(definition_model.RELATED_TYPE)
          reserved_names.extend(cls._get_model_names(related_model))
        cls._reserved_names[definition_type] = frozenset(reserved_names)

      return cls._reserved_names[definition_type]
Example 30
def get_snapshots(objects=None, ids=None):
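  """Query snapshot columns by (parent, child) pairs or by snapshot ids."""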
  with benchmark("snapshotter.helpers.get_snapshots"):
    if objects and ids:
      raise Exception(
          "Pass either an iterable of (parent, child) tuples or a set of "
          "ids, not both")
    columns = db.session.query(
        models.Snapshot.id,
        models.Snapshot.context_id,
        models.Snapshot.created_at,
        models.Snapshot.updated_at,
        models.Snapshot.parent_type,
        models.Snapshot.parent_id,
        models.Snapshot.child_type,
        models.Snapshot.child_id,
        models.Snapshot.revision_id,
        models.Snapshot.modified_by_id,
    )
    if objects:
      return columns.filter(
          tuple_(
              models.Snapshot.parent_type,
              models.Snapshot.parent_id,
              models.Snapshot.child_type,
              models.Snapshot.child_id
          ).in_({(parent.type, parent.id, child.type, child.id)
                 for parent, child in objects}))
    if ids:
      return columns.filter(
          models.Snapshot.id.in_(ids))
    return set()
Example 31
    def _apply_limit(query, limit):
        """Apply limits for pagination.

    Args:
      query: filter query;
      limit: a tuple of indexes in the format (from, to); objects are sliced
            as objects[from:to].

    Returns:
      matched objects ids and total count.
    """
        try:
            first, last = limit
            first, last = int(first), int(last)
        except (ValueError, TypeError):
            raise BadQueryException(
                "Invalid limit operator. Integers expected.")

        if first < 0 or last < 0:
            raise BadQueryException("Limit cannot contain negative numbers.")
        elif first >= last:
            raise BadQueryException("Limit start should be smaller than end.")
        else:
            page_size = last - first
            with benchmark("Apply limit: _apply_limit > query_limit"):
                # Note: limit request syntax is limit:[0,10]. We count the
                # offset from 0 because the offset of the initial row in SQL
                # is 0 (not 1).
                ids = [obj.id for obj in query.limit(page_size).offset(first)]
            with benchmark("Apply limit: _apply_limit > query_count"):
                if len(ids) < page_size:
                    total = len(ids) + first
                else:
                    # Note: using func.count() because query.count()
                    # generates an additional subquery
                    count_q = query.statement.with_only_columns(
                        [sa.func.count()])
                    total = db.session.execute(count_q).scalar()

        return ids, total
Example 32
def handle_post_flush(session, flush_context, instances):
    """Handle snapshot objects on api post requests."""
    # pylint: disable=unused-argument
    # Arguments here are set in the event listener and are mandatory.

    with benchmark("Snapshot pre flush handler"):

        snapshots = [o for o in session if isinstance(o, Snapshot)]
        if not snapshots:
            return

        with benchmark("Snapshot revert attrs"):
            _revert_attrs(snapshots)

        new_snapshots = [
            o for o in snapshots if getattr(o, "_update_revision", "") == "new"
        ]
        if new_snapshots:
            with benchmark("Snapshot post api set revisions"):
                _set_latest_revisions(new_snapshots)
            with benchmark("Snapshot post api ensure relationships"):
                _ensure_program_relationships(new_snapshots)
Example 33
 def post_commit_hooks():
     """All post commit hooks handler."""
     with benchmark("post commit hooks"):
         if not database.session.commit_hooks_enable_flag:
             return
         # delete flask caches in order to avoid
         # using cached instances after commit
         if hasattr(flask.g, "user_cache"):
             del flask.g.user_cache
         if hasattr(flask.g, "user_creator_roles_cache"):
             del flask.g.user_creator_roles_cache
         from ggrc.models.hooks import acl
         acl.after_commit()
Example 34
    def _create_mapping_cache(self):
        """Create mapping cache for object in the current block."""

        with benchmark("cache for: {}".format(self.object_class.__name__)):
            relationships = self._get_relationships()
            id_map = self._get_identifier_mappings(relationships)
            with benchmark("building cache"):
                cache = defaultdict(lambda: defaultdict(list))
                for rel in relationships:
                    if rel.source_type == self.object_class.__name__:
                        identifier = id_map.get(rel.destination_type,
                                                {}).get(rel.destination_id)
                        if identifier:
                            cache[rel.source_id][rel.destination_type].append(
                                identifier)
                    else:
                        identifier = id_map.get(rel.source_type,
                                                {}).get(rel.source_id)
                        if identifier:
                            cache[rel.destination_id][rel.source_type].append(
                                identifier)
            return cache
Example 35
    def automap(session, _):
        """Automap after_flush handler."""
        relationships = [
            obj for obj in session.new if isinstance(obj, Relationship)
        ]
        if not relationships:
            return

        with benchmark("automap"):
            automapper = AutomapperGenerator()
            referenced_objects = getattr(flask.g, "referenced_object_stubs",
                                         None)
            if referenced_objects:
                del flask.g.referenced_object_stubs
            if hasattr(flask.g, "_request_permissions"):
                del flask.g._request_permissions
            with benchmark("Automapping generate_automappings"):
                for obj in relationships:
                    automapper.generate_automappings(obj)
            automapper.propagate_acl()
            if referenced_objects:
                flask.g.referenced_object_stubs = referenced_objects
Example 36
def get_handler_results(query):
    """Get results from the best matching query handler.

  Args:
    query: dict containing query parameters.
  Returns:
    dict containing json serializable query results.
  """

    query_handler = DefaultHandler(query)
    name = query_handler.__class__.__name__
    with benchmark("Get query Handler results from: {}".format(name)):
        return query_handler.get_results()
Example 37
def create_task(name,
                url,
                queued_callback=None,
                parameters=None,
                method="POST",
                operation_type=None,
                payload=None,
                queue=DEFAULT_QUEUE,
                retry_options=None):
    """Create and enqueue a background task."""
    with benchmark("Create background task"):
        parameters = parameters or dict()
        retry_options = retry_options or RETRY_OPTIONS
        bg_operation = None
        if operation_type:
            with benchmark(
                    "Create background task. Create BackgroundOperation"):
                bg_operation = _check_and_create_bg_operation(
                    operation_type, parameters)
        with benchmark("Create background task. Create BackgroundTask"):
            bg_task_name = "{}_{}".format(uuid.uuid4(), name)
            bg_task = _create_bg_task(name=bg_task_name,
                                      parameters=parameters,
                                      payload=payload,
                                      bg_operation=bg_operation)
        with benchmark("Create background task. Enqueue task"):
            # Task requests have to contain data to pass content_type
            # validation.
            payload = payload or "{}"
            # Tasks are limited to 100 KB by taskqueue, so data should be
            # extracted from the BackgroundTask object.
            _enqueue_task(name=bg_task_name,
                          url=url,
                          method=method,
                          payload=payload,
                          bg_task=bg_task,
                          queued_callback=queued_callback,
                          queue=queue,
                          retry_options=retry_options)
        return bg_task
Example 38
def get_attributes_json():
  """Get a list of all custom attribute definitions"""
  with benchmark("Get attributes JSON"):
    with benchmark("Get attributes JSON: query"):
      # get only GCA and exclude external CADs
      # external GCA should be deleted from internal GCA table
      attrs = models.CustomAttributeDefinition.eager_query().filter(
          models.CustomAttributeDefinition.definition_id.is_(None),
          ~models.CustomAttributeDefinition.definition_type.in_(
              constants.GGRCQ_OBJ_TYPES_FOR_SYNC)
      ).all()
      ext_attrs = models.ExternalCustomAttributeDefinition.eager_query().all()
    with benchmark("Get attributes JSON: publish"):
      published = []
      for attr in attrs:
        published.append(builder_json.publish(attr))
      for attr in ext_attrs:
        published.append(builder_json.publish(attr))
      published = builder_json.publish_representation(published)
    with benchmark("Get attributes JSON: json"):
      publish_json = services_common.as_json(published)
      return publish_json
Example 39
def get_current_user_json():
  """Get current user"""
  with benchmark("Get current user JSON"):
    person = login.get_current_user()
    return services_common.as_json({
        "id": person.id,
        "company": person.company,
        "email": person.email,
        "language": person.language,
        "name": person.name,
        "system_wide_role": person.system_wide_role,
        "profile": person.profile,
    })
Example 40
def get_internal_roles_json():
  """Get a list of all access control roles"""
  with benchmark("Get access roles JSON"):
    attrs = models.all_models.AccessControlRole.query.options(
        sqlalchemy.orm.undefer_group("AccessControlRole_complete")
    ).filter(
        models.all_models.AccessControlRole.internal == sqlalchemy.true()
    ).all()
    published = []
    for attr in attrs:
      published.append(builder_json.publish(attr))
    published = builder_json.publish_representation(published)
    return services_common.as_json(published)
Example 41
    def dispatch_request(self, *args, **kwargs):
        """Dispatch request for related_assessments."""
        with benchmark("dispatch related_assessments request"):
            try:

                if request.method != 'GET':
                    raise BadRequest()

                object_type = request.args.get("object_type")
                object_id = int(request.args.get("object_id"))

                model = models.inflector.get_model(object_type)
                obj = model.query.get(object_id)

                with benchmark("get related assessments"):
                    assessments, total = self._get_assessments(
                        model,
                        object_type,
                        object_id,
                    )

                    assessments_json = self._get_assessments_json(
                        obj, assessments)

                    response_object = {
                        "total": total,
                        "data": assessments_json,
                    }

                    return self.json_success_response(response_object, )

            except (ValueError, TypeError, AttributeError, BadQueryException):
                # TypeError and ValueError cover invalid integer values;
                # AttributeError covers invalid models, which resolve to
                # None and therefore have no query attribute.
                # BadQueryException covers invalid limit parameters such as
                # negative numbers.
                raise BadRequest()
Example 42
    def get_results(self):
        """Filter the objects and get their information.

    Updates self.query items with their results. The type of results required
    is read from "type" parameter of every object_query in self.query.

    Returns:
      list of dicts: same query as the input with requested results that match
                     the filter.
    """
        for object_query in self.query:
            query_type = object_query.get("type", "values")
            if query_type not in {"values", "ids", "count"}:
                raise NotImplementedError(
                    "Only 'values', 'ids' and 'count' queries "
                    "are supported now")
            model = inflector.get_model(object_query["object_name"])
            if query_type == "values":
                with benchmark("Get result set: get_results > _get_objects"):
                    objects = self._get_objects(object_query)
                object_query["count"] = len(objects)
                with benchmark("get_results > _get_last_modified"):
                    object_query["last_modified"] = self._get_last_modified(
                        model, objects)
                with benchmark(
                        "serialization: get_results > _transform_to_json"):
                    object_query["values"] = self._transform_to_json(
                        objects,
                        object_query.get("fields"),
                    )
            else:
                with benchmark("Get result set: get_results -> _get_ids"):
                    ids = self._get_ids(object_query)
                object_query["count"] = len(ids)
                object_query["last_modified"] = None  # synonymous to now()
                if query_type == "ids":
                    object_query["ids"] = ids
        return self.query
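get_results iterates over self.query, a list of object_query dicts. A hypothetical entry using only the keys that this handler and _get_ids actually read; the filter expression grammar is not shown on this page, so it is left as None (which makes _get_ids return an empty set):

sample_query = [{
    "object_name": "Assessment",      # resolved to a model via inflector
    "type": "ids",                    # one of "values", "ids", "count"
    "filters": {"expression": None},  # real requests carry an expression tree
    "limit": [0, 10],                 # slice rows objects[0:10]
    "permissions": "read",            # permission level for the type filter
}]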
Example 43
def load_permissions_for(user):
    """Permissions is dictionary that can be exported to json to share with
  clients. Structure is:
  ..

    permissions[action][resource_type][contexts]
                                      [conditions][context][context_conditions]

  'action' is one of 'create', 'read', 'update', 'delete'.
  'resource_type' is the name of a valid GGRC resource type.
  'contexts' is a list of context_id where the action is allowed.
  'conditions' is a dictionary of 'context_conditions' indexed by 'context'
    where 'context' is a context_id.
  'context_conditions' is a list of dictionaries with 'condition' and 'terms'
    keys.
  'condition' is the string name of a conditional operator, such as 'contains'.
  'terms' are the arguments to the 'condition'.
  """
    permissions = {}
    key = 'permissions:{}'.format(user.id)

    with benchmark("load_permissions > query memcache"):
        cache, result = query_memcache(key)
        if result:
            return result

    with benchmark("load_permissions > load default permissions"):
        load_default_permissions(permissions)

    with benchmark("load_permissions > load bootstrap admins"):
        load_bootstrap_admin(user, permissions)

    with benchmark("load_permissions > load user roles"):
        load_user_roles(user, permissions)

    with benchmark("load_permissions > load personal context"):
        load_personal_context(user, permissions)

    with benchmark("load_permissions > load access control list"):
        load_access_control_list(user, permissions)

    with benchmark("load_permissions > load backlog workflows"):
        load_backlog_workflows(permissions)

    with benchmark("load_permissions > store results into memcache"):
        store_results_into_memcache(permissions, cache, key)

    return permissions
Example 44
 def set_evidence_result(self, assessment):
   """Set evidence result"""
   data_map = collections.defaultdict(list)
   query_map = {
       models.Evidence.FILE: self.query[2],
       models.Evidence.URL: self.query[3],
       models.Evidence.REFERENCE_URL: self.query[4],
   }
   self.query[1]["last_modified"] = None
   with benchmark("Get assessment snapshot relationships"):
     evidences = db.session.query(
         models.Evidence
     ).join(
         models.Relationship,
         and_(
             models.Evidence.id == models.Relationship.source_id,
             models.Relationship.source_type == "Evidence",
             models.Relationship.destination_id == assessment.id,
             models.Relationship.destination_type == "Assessment"
         )
     ).union(
         db.session.query(
             models.Evidence
         ).join(
             models.Relationship,
             and_(
                 models.Evidence.id == models.Relationship.destination_id,
                 models.Relationship.destination_type == "Evidence",
                 models.Relationship.source_id == assessment.id,
                 models.Relationship.source_type == "Assessment"
             )
         )
     ).all()
   with benchmark("Set assessment snapshot relationships"):
     for evidence in evidences:
       data_map[evidence.kind].append(evidence.log_json())
     for kind, query in query_map.items():
       _set_data(query, data_map[kind])
Example 45
def do_reindex(with_reindex_snapshots=False):
  """Update the full text search index."""

  indexer = fulltext.get_indexer()
  indexed_models = {
      m.__name__: m for m in models.all_models.all_models
      if issubclass(m, mixin.Indexed) and m.REQUIRED_GLOBAL_REINDEX
  }
  people_query = db.session.query(
      models.all_models.Person.id,
      models.all_models.Person.name,
      models.all_models.Person.email
  )
  indexer.cache["people_map"] = {p.id: (p.name, p.email) for p in people_query}
  indexer.cache["ac_role_map"] = dict(db.session.query(
      models.all_models.AccessControlRole.id,
      models.all_models.AccessControlRole.name,
  ))
  for model_name in sorted(indexed_models.keys()):
    logger.info("Updating index for: %s", model_name)
    with benchmark("Create records for %s" % model_name):
      model = indexed_models[model_name]
      ids = [id_[0] for id_ in db.session.query(model.id)]
      ids_count = len(ids)
      handled_ids = 0
      ids_chunks = ggrc_utils.list_chunks(ids, chunk_size=REINDEX_CHUNK_SIZE)
      for ids_chunk in ids_chunks:
        handled_ids += len(ids_chunk)
        logger.info("%s: %s / %s", model.__name__, handled_ids, ids_count)
        model.bulk_record_update_for(ids_chunk)
        db.session.plain_commit()

  if with_reindex_snapshots:
    logger.info("Updating index for: %s", "Snapshot")
    with benchmark("Create records for %s" % "Snapshot"):
      snapshot_indexer.reindex()

  indexer.invalidate_cache()
Example 46
def _remove_dead_reindex_objects(indexed_models):
  """Remove fulltext record entries for deleted objects.

  This function cleans up orphan records for objects that have been deleted
  but whose records were not removed for some reason.
  """
  record = fulltext.mysql.MysqlRecordProperty
  with benchmark("Removing dead index records"):
    for model_name, model in sorted(indexed_models.items()):
      logger.info("Removing dead records for: %s", model_name)
      record.query.filter(
          record.type == model_name,
          ~record.key.in_(db.session.query(model.id))
      ).delete(synchronize_session='fetch')
Example 47
def upsert_snapshots(objs, event, revisions=None, _filter=None, dry_run=False):
  """Update (and create if needed) snapshots of parent objects."""
  # pylint: disable=unused-argument
  if not revisions:
    revisions = set()

  with benchmark("Snapshot.update_snapshots"):
    generator = SnapshotGenerator(dry_run)
    if not isinstance(objs, set):
      objs = {objs}
    for obj in objs:
      db.session.add(obj)
      generator.add_parent(obj)
    return generator.upsert(event=event, revisions=revisions, _filter=_filter)
Example 48
def compute_attributes(revision_ids):
    """Compute new values based an changed objects.

  Args:
    revision_ids: list of revision ids of modified objects, or "all_latest".
  """

    with benchmark("Compute attributes"):

        if revision_ids == "all_latest":
            with benchmark("Get all latest revisions ids"):
                revision_ids = get_all_latest_revisions_ids()

        if not revision_ids:
            return

        ids_count = len(revision_ids)
        handled_ids = 0
        for ids_chunk in utils.list_chunks(revision_ids,
                                           chunk_size=CA_CHUNK_SIZE):
            handled_ids += len(ids_chunk)
            logger.info("Revision: %s/%s", handled_ids, ids_count)
            recompute_attrs_for_revisions(ids_chunk)
Example 49
 def push_ft_records(self):
   """Function that clear and push new full text records in DB."""
   with benchmark("push ft records into DB"):
     self.warmup()
     for obj in db.session:
       if not isinstance(obj, mixin.Indexed):
         continue
       if obj.id in self.model_ids_to_reindex.get(obj.type, set()):
         db.session.expire(obj)
     for model_name in self.model_ids_to_reindex.keys():
       ids = self.model_ids_to_reindex.pop(model_name)
       chunk_list = utils.list_chunks(list(ids), chunk_size=self.CHUNK_SIZE)
       for ids_chunk in chunk_list:
         get_model(model_name).bulk_record_update_for(ids_chunk)
Example 50
def propagate_all():
    """Re-evaluate propagation for all objects."""
    with utils.benchmark("Run propagate_all"):
        with utils.benchmark("Add missing acl entries"):
            _add_missing_acl_entries()
        with utils.benchmark("Get non propagated acl ids"):
            query = db.session.query(all_models.AccessControlList.id, ).filter(
                all_models.AccessControlList.parent_id.is_(None), )
            all_acl_ids = [acl.id for acl in query]

        with utils.benchmark("Propagate normal acl entries"):
            count = len(all_acl_ids)
            propagated_count = 0
            for acl_ids in utils.list_chunks(all_acl_ids, chunk_size=50):
                propagated_count += len(acl_ids)
                logger.info("Propagating ACL entries: %s/%s", propagated_count,
                            count)
                _delete_propagated_acls(acl_ids)

                flask.g.new_acl_ids = acl_ids
                flask.g.new_relationship_ids = set()
                flask.g.deleted_objects = set()
                propagate()
Example 51
def send_daily_digest_notifications():
  """Send emails for today's or overdue notifications.

  Returns:
    str: String containing a simple list of who received the notification.
  """
  # pylint: disable=invalid-name
  with benchmark("contributed cron job send_daily_digest_notifications"):
    notif_list, notif_data = get_daily_notifications()
    sent_emails = []
    subject = "GGRC daily digest for {}".format(date.today().strftime("%b %d"))

    with benchmark("sending daily emails"):
      for user_email, data in notif_data.iteritems():
        data = modify_data(data)
        email_body = settings.EMAIL_DIGEST.render(digest=data)
        send_email(user_email, subject, email_body)
        sent_emails.append(user_email)

    with benchmark("processing sent notifications"):
      process_sent_notifications(notif_list)

    return "emails sent to: <br> {}".format("<br>".join(sent_emails))
Example 52
  def adjust_status_before_flush(cls, alchemy_session,
                                 flush_context, instances):
    """Reset status of AutoStatusChangeable objects with _need_status_reset.

    Is registered to listen for 'before_flush' events on a later stage.
    """

    # pylint: disable=unused-argument,protected-access
    with benchmark("adjust status before flush"):
      for obj in alchemy_session.identity_map.values():
        if isinstance(obj, AutoStatusChangeable) and obj._need_status_reset:
          obj.change_status()
          obj._need_status_reset = False
          obj._reset_to_status = None
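The registration call itself is not part of this example. A sketch of how such a hook is typically wired up with SQLAlchemy's event API (the actual GGRC registration code may differ):

import sqlalchemy as sa
from sqlalchemy.orm import Session

# The listener signature matches (session, flush_context, instances) above.
sa.event.listen(
    Session, "before_flush",
    AutoStatusChangeable.adjust_status_before_flush)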
Example n. 53
0
  def get(self, id):
    with benchmark("Query for object"):
      obj = self.get_object(id)
    if obj is None:
      return self.not_found_response()
    if 'Accept' in self.request.headers and \
       'text/html' not in self.request.headers['Accept']:
      return current_app.make_response(
          ('text/html', 406, [('Content-Type', 'text/plain')]))
    if not permissions.is_allowed_read(self.model.__name__, obj.id,
                                       obj.context_id):
      raise Forbidden()

    with benchmark("Render"):
      rendered_template = self.render_template_for_object(obj)

    # FIXME: Etag based on rendered output, or object itself?
    # if 'If-None-Match' in self.request.headers and \
    #    self.request.headers['If-None-Match'] == self.etag(object_for_json):
    #   return current_app.make_response((
    #       '', 304, [('Etag', self.etag(object_for_json))]))

    return rendered_template
Example n. 54
0
def get_user_task_count():
  """Optimized function for fetching current user task count."""
  with benchmark("Get user task count RAW"):
    current_user = get_current_user()

    user_tasks = CycleTaskGroupObjectTask.query.with_entities(
        # prefetch tasks' finishing dates to avoid firing subsequent queries
        CycleTaskGroupObjectTask.end_date,
    ).join(
        Cycle,
    ).join(
        all_models.AccessControlList,
        sa.and_(
            all_models.AccessControlList.object_type ==
            CycleTaskGroupObjectTask.__name__,
            all_models.AccessControlList.object_id ==
            CycleTaskGroupObjectTask.id,
        ),
    ).join(
        all_models.AccessControlRole,
        sa.and_(
            all_models.AccessControlRole.id ==
            all_models.AccessControlList.ac_role_id,
            all_models.AccessControlRole.object_type ==
            CycleTaskGroupObjectTask.__name__,
            all_models.AccessControlRole.name.in_(
                ("Task Assignees", "Task Secondary Assignees")),
        ),
    ).join(
        all_models.AccessControlPerson,
        sa.and_(
            all_models.AccessControlList.id ==
            all_models.AccessControlPerson.ac_list_id,
            all_models.AccessControlPerson.person_id == current_user.id,
        ),
    ).filter(
        CycleTaskGroupObjectTask.status.in_([
            CycleTaskGroupObjectTask.ASSIGNED,
            CycleTaskGroupObjectTask.IN_PROGRESS,
            CycleTaskGroupObjectTask.FINISHED,
            CycleTaskGroupObjectTask.DECLINED,
        ]),
        Cycle.is_current == True  # noqa # pylint: disable=singleton-comparison
    ).all()

    task_count = len(user_tasks)

    today = date.today()
    overdue_count = sum(1 for task in user_tasks
                        if task.end_date and today > task.end_date)

    # NOTE: the return value must be a list so that the result can be
    # directly JSON-serialized to an Array in a HAML template
    return [task_count, overdue_count]
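Usage is deliberately simple; the two counters come back as a plain two-element list, ready for direct JSON serialization:

task_count, overdue_count = get_user_task_count()  # e.g. [12, 3]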
Example n. 55
0
  def snapshots(self):
    """List of all snapshots in the current block.

    The content of the given snapshots also contains the mapped audit field.
    """
    with benchmark("Gather selected snapshots"):
      if not self.ids:
        return []
      snapshots = models.Snapshot.eager_query().filter(
          models.Snapshot.id.in_(self.ids)).all()

      for snapshot in snapshots:  # add special snapshot attribute
        snapshot.content = self._extend_revision_content(snapshot)
      return snapshots
Example n. 56
0
def propagate():
  """Propagate all ACLs caused by newly created objects.

  Takes no arguments; reads its inputs from flask.g instead:
    flask.g.new_acl_ids: list of newly created ACL ids,
    flask.g.new_relationship_ids: list of newly created relationship ids,
    flask.g.deleted_objects: set of objects deleted in this request.
  """
  if not (hasattr(flask.g, "new_acl_ids")
          and hasattr(flask.g, "new_relationship_ids")
          and hasattr(flask.g, "deleted_objects")):
    return

  if flask.g.deleted_objects:
    with utils.benchmark("Delete internal ACL entries for deleted objects"):
      _delete_orphan_acl_entries(flask.g.deleted_objects)

  _set_empty_base_ids()

  current_user_id = login.get_current_user_id()

  # The order of propagation of relationships and other ACLs is important
  # because relationship code excludes other ACLs from propagating.
  if flask.g.new_relationship_ids:
    with utils.benchmark("Propagate ACLs for new relationships"):
      _propagate_relationships(
          flask.g.new_relationship_ids,
          flask.g.new_acl_ids,
          current_user_id,
      )
  if flask.g.new_acl_ids:
    with utils.benchmark("Propagate new ACL entries"):
      _propagate(flask.g.new_acl_ids, current_user_id)

  del flask.g.new_acl_ids
  del flask.g.new_relationship_ids
  del flask.g.deleted_objects
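Because propagate() takes no arguments, callers communicate with it through flask.g, exactly as Example n. 50 does for each chunk. A minimal sketch of that contract (the ids are illustrative only):

flask.g.new_acl_ids = [42, 43]        # ACL rows whose subtrees need rebuilding
flask.g.new_relationship_ids = set()  # no new relationships in this pass
flask.g.deleted_objects = set()       # nothing to clean up
propagate()                           # consumes, then deletes, the attributes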
Example n. 57
0
  def _get_snapshot_data(self, assessment, relationships):
    """Get snapshot data for the current assessment.

    Args:
      assessment: The assessment whose snapshots should be collected.
      relationships: List of all relationships related to the current
        assessment.
    """
    relationship_ids = self._filter_rels(relationships, "Snapshot")
    if not relationship_ids:
      return []
    with benchmark("Get assessment snapshot relationships"):
      snapshots = models.Snapshot.query.options(
          orm.undefer_group("Snapshot_complete"),
          orm.joinedload('revision'),
      ).filter(models.Snapshot.id.in_(relationship_ids)).all()
    with benchmark("Set assessment snapshot relationships"):
      data = []
      for snapshot in snapshots:
        data.append({
            "archived": assessment.audit.archived,
            "revision": snapshot.revision.log_json(),
            "related_sources": [],
            "parent": {
                "context_id": assessment.context_id,
                "href": "/api/audits/{}".format(assessment.audit_id),
                "type": "Audit",
                "id": assessment.audit_id,
            },
            "child_type": snapshot.child_type,
            "child_id": snapshot.child_id,
            "related_destinations": [],
            "id": snapshot.id,
            "revisions": [],
            "revision_id": snapshot.revision_id,
            "type": snapshot.type,
        })
    return data
Example n. 58
0
  def set_comment_result(self, assessment):
    """Set comment result"""
    query = self.query[1]
    query["last_modified"] = None
    with benchmark("Get assessment comment relationships"):
      comments = db.session.query(
          models.Comment
      ).join(
          models.Relationship,
          and_(
              models.Comment.id == models.Relationship.source_id,
              models.Relationship.source_type == "Comment",
              models.Relationship.destination_id == assessment.id,
              models.Relationship.destination_type == "Assessment"
          )
      ).union(
          db.session.query(
              models.Comment
          ).join(
              models.Relationship,
              and_(
                  models.Comment.id == models.Relationship.destination_id,
                  models.Relationship.destination_type == "Comment",
                  models.Relationship.source_id == assessment.id,
                  models.Relationship.source_type == "Assessment"
              )
          )
      ).all()
    with benchmark("Set assessment comment relationships"):
      data = [comment.log_json() for comment in comments]
      # Sort once, after collecting all comments: newest first.
      sorted_data = sorted(data,
                           key=lambda x: (x["created_at"], x["id"]),
                           reverse=True)
      _set_data(query, sorted_data)
Example n. 59
0
  def _create_mapping_cache(self):
    """Create mapping cache for object in the current block."""
    def identifier(obj):
      return getattr(obj, "slug", getattr(obj, "email", None))

    relationship = models.Relationship

    with benchmark("cache for: {}".format(self.object_class.__name__)):
      with benchmark("cache query"):
        relationships = relationship.eager_query().filter(or_(
            and_(
                relationship.source_type == self.object_class.__name__,
                relationship.source_id.in_(self.object_ids),
            ),
            and_(
                relationship.destination_type == self.object_class.__name__,
                relationship.destination_id.in_(self.object_ids),
            )
        )).all()
      with benchmark("building cache"):
        cache = defaultdict(lambda: defaultdict(list))
        for rel in relationships:
          try:
            if rel.source_type == self.object_class.__name__:
              if rel.destination:
                cache[rel.source_id][rel.destination_type].append(
                    identifier(rel.destination))
            elif rel.source:
              cache[rel.destination_id][rel.source_type].append(
                  identifier(rel.source))
          except AttributeError:
            # Some relationships have an invalid state in the database and make
            # rel.source or rel.destination fail. These relationships are
            # ignored everywhere and should eventually be purged from the db
            current_app.logger.error("Failed adding object to relationship "
                                     "cache. Rel id: %s", rel.id)
      return cache
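The nested defaultdict gives the cache an "append without key checks" shape; a standalone demonstration of the access pattern:

from collections import defaultdict

cache = defaultdict(lambda: defaultdict(list))
cache[5]["Control"].append("control-slug")
cache[5]["Person"].append("user@example.com")
assert cache[5]["Control"] == ["control-slug"]
assert cache[7]["Market"] == []  # missing keys materialize as empty lists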
Example n. 60
0
  def _flush(self, parent_relationship):
    """Manually INSERT generated automappings."""
    if not self.auto_mappings:
      return
    with benchmark("Automapping flush"):
      current_user_id = login.get_current_user_id()
      automapping_result = db.session.execute(
          Automapping.__table__.insert().values(
              relationship_id=parent_relationship.id,
              source_id=parent_relationship.source_id,
              source_type=parent_relationship.source_type,
              destination_id=parent_relationship.destination_id,
              destination_type=parent_relationship.destination_type,
          ))
      # inserted_primary_key is a sequence; take the scalar id.
      automapping_id = automapping_result.inserted_primary_key[0]
      now = datetime.now()
      # We are doing an INSERT IGNORE INTO here to mitigate a race condition
      # that happens when multiple simultaneous requests create the same
      # automapping. If a relationship object fails our unique constraint
      # it means that the mapping was already created by another request
      # and we can safely ignore it.
      inserter = Relationship.__table__.insert().prefix_with("IGNORE")
      original = self.order(Stub.from_source(parent_relationship),
                            Stub.from_destination(parent_relationship))
      db.session.execute(
          inserter.values([{
              "id": None,
              "modified_by_id": current_user_id,
              "created_at": now,
              "updated_at": now,
              "source_id": src.id,
              "source_type": src.type,
              "destination_id": dst.id,
              "destination_type": dst.type,
              "context_id": None,
              "status": None,
              "parent_id": parent_relationship.id,
              "automapping_id": automapping_id,
          } for src, dst in self.auto_mappings
            if (src, dst) != original]))  # (src, dst) is sorted
      cache = get_cache(create=True)
      if cache:
        # Add inserted relationships into the new-objects collection of the
        # cache, so that they are logged within the event and appropriate
        # revisions are created.
        cache.new.update(
            (relationship, relationship.log_json())
            for relationship in Relationship.query.filter_by(
                automapping_id=automapping_id))
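The INSERT IGNORE trick relies on SQLAlchemy's Insert.prefix_with, which splices an arbitrary keyword between INSERT and INTO; a standalone illustration with a throwaway table:

import sqlalchemy as sa

metadata = sa.MetaData()
demo = sa.Table("demo", metadata,
                sa.Column("id", sa.Integer, primary_key=True))

stmt = demo.insert().prefix_with("IGNORE")
print(stmt)  # roughly: INSERT IGNORE INTO demo (id) VALUES (:id)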