Example No. 1
  def _CreateNewLocalRules(self, uuid, user_key):
    """Creates copies of all local rules for the new host."""
    # Pick any host owned by the user to copy rules from. Exclude hosts that
    # haven't completed a full sync because they won't have a complete rule set.
    # NOTE: Because we expect all hosts owned by a user to have the same local
    # rules, we should get the same rule set regardless of which host we pick.
    username = user_map.EmailToUsername(user_key.id())
    host_query = santa_db.SantaHost.query(
        santa_db.SantaHost.primary_user == username,
        santa_db.SantaHost.last_postflight_dt != None)  # pylint: disable=g-equals-none
    a_host = host_query.get()
    if a_host is None:
      return utils.GetNoOpFuture()

    # Get all local rules from that host.
    rules_query = santa_db.SantaRule.query(
        santa_db.SantaRule.host_id == a_host.key.id(),
        santa_db.SantaRule.in_effect == True)  # pylint: disable=g-explicit-bool-comparison

    # Copy the local rules to the new host.
    new_rules = []
    for batch in query_utils.Paginate(rules_query):
      for rule in batch:
        new_rule = utils.CopyEntity(
            rule, new_parent=rule.key.parent(), host_id=uuid, user_key=user_key)
        new_rules.append(new_rule)

    futures = ndb.put_multi_async(new_rules)
    return utils.GetMultiFuture(futures)
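
The two utils helpers used above are not shown in this example. Here is a minimal sketch of what they might look like, assuming plain NDB future semantics (the names GetNoOpFuture and GetMultiFuture come from the snippet; the bodies below are an assumption, not the real implementation):

from google.appengine.ext import ndb


def GetNoOpFuture(result=None):
  # Return an already-resolved future, for code paths that must hand back a
  # future but have no work to do.
  future = ndb.Future()
  future.set_result(result)
  return future


def GetMultiFuture(futures):
  # Wrap a list of futures in a single MultiFuture that resolves once every
  # component future has resolved.
  multi_future = ndb.MultiFuture()
  for future in futures:
    multi_future.add_dependent(future)
  multi_future.complete()
  return multi_future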
Example No. 2
  def _HasFlaggedBinary(self):
    """Returns whether any of the bundle's blockable contents are flagged."""
    query = SantaBundleBinary.query(ancestor=self.key)
    futures = [
        self._PageHasFlaggedBinary(page)
        for page in query_utils.Paginate(query, page_size=1000)
    ]
    return any(future.get_result() for future in futures)
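
The per-page check is delegated to _PageHasFlaggedBinary, which is not shown. A plausible sketch, assuming each SantaBundleBinary's parent key points at the underlying blockable entity and that entity carries a boolean flagged property (both assumptions):

  @ndb.tasklet
  def _PageHasFlaggedBinary(self, page):
    # Resolve the parent blockables for the whole page in one async batch,
    # then report whether any of them is flagged.
    binaries = yield ndb.get_multi_async(
        [bundle_binary.key.parent() for bundle_binary in page])
    raise ndb.Return(any(binary.flagged for binary in binaries))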
Example No. 3
@ndb.tasklet
def _CopyLocalRules(user_key, dest_host_id):
  """Copies a user's local rules over to a newly-associated host.

  NOTE: Because of the implementation of local whitelisting on Bit9, many of
  these newly-copied local rules will likely be initially unfulfilled, that
  is, held in Upvote and not yet saved to Bit9.

  Args:
    user_key: ndb.Key, The user for whom the rules will be copied.
    dest_host_id: str, The ID of the host for which the new rules will be
        created.
  """
  logging.info('Copying rules for %s to host %s', user_key.id(), dest_host_id)

  username = user_map.EmailToUsername(user_key.id())
  host_query = bit9.Bit9Host.query(bit9.Bit9Host.users == username)
  src_host = yield host_query.get_async()
  if src_host is None:
    raise ndb.Return()
  assert src_host.key.id() != dest_host_id, (
      'User already associated with target host')

  # Get all local rules from that host.
  rules_query = bit9.Bit9Rule.query(
      bit9.Bit9Rule.host_id == src_host.key.id(),
      bit9.Bit9Rule.in_effect == True)  # pylint: disable=g-explicit-bool-comparison

  # Get a rough idea of how many rules we're in for. Since this is a
  # non-critical query, we limit the max number to a fairly low bound.
  rule_count = yield rules_query.count_async(limit=250)
  logging.info('Retrieved %s%s rules to copy',
               '>' if rule_count == 250 else '', rule_count)

  # Copy the local rules to the new host.
  new_rules = []
  for batch in query_utils.Paginate(rules_query):
    for rule in batch:
      new_rule = model_utils.CopyEntity(
          rule, new_parent=rule.key.parent(), host_id=dest_host_id,
          user_key=user_key)
      new_rules.append(new_rule)
  logging.info('Copying %s rules to new host', len(new_rules))
  yield ndb.put_multi_async(new_rules)

  # Create the change sets necessary to submit the new rules to Bit9.
  changes = []
  for new_rule in new_rules:
    change = bit9.RuleChangeSet(
        rule_keys=[new_rule.key], change_type=new_rule.policy,
        parent=new_rule.key.parent())
    changes.append(change)
  logging.info('Creating %s RuleChangeSet(s)', len(changes))
  yield ndb.put_multi_async(changes)
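
Examples 1 and 3 both lean on a CopyEntity helper that is not shown. A rough sketch of such a helper, assuming the copy only needs to carry over the entity's regular properties (this is an assumption, not the real model_utils implementation):

def CopyEntity(entity, new_parent=None, **updated_properties):
  # Clone the entity's properties into a fresh instance of the same model,
  # applying any keyword overrides (e.g. host_id, user_key).
  properties = entity.to_dict()
  properties.update(updated_properties)
  return type(entity)(parent=new_parent, **properties)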
Example No. 4
  def _HasFlaggedCert(self):
    """Returns whether any of the bundle's signing certs are flagged."""
    query = SantaBundleBinary.query(
        projection=[SantaBundleBinary.cert_key],
        distinct=True,
        ancestor=self.key)
    futures = [
        self._PageHasFlaggedCert(page)
        for page in query_utils.Paginate(query, page_size=1000)
    ]
    return any(future.get_result() for future in futures)
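
As in Example 2, the per-page helper is elided. A sketch along the same lines, assuming cert_key references a certificate entity with a boolean flagged property (assumed details):

  @ndb.tasklet
  def _PageHasFlaggedCert(self, page):
    # Dedupe the projected cert keys, resolve them in one async batch, and
    # report whether any certificate is flagged.
    cert_keys = {binary.cert_key for binary in page if binary.cert_key}
    certs = yield ndb.get_multi_async(cert_keys)
    raise ndb.Return(any(cert.flagged for cert in certs))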
Example No. 5
def Dispatch():
  """Dispatches per-host tasks onto the event processing queue."""
  total_dispatch_count = 0
  logging.info('Starting a new dispatch task')

  # Query for all distinct host_id values among the _UnsyncedEvents, in
  # batches, and defer a processing task for each host found.
  query = _UnsyncedEvent.query(
      projection=[_UnsyncedEvent.host_id], distinct=True)
  for event_page in query_utils.Paginate(query, page_size=25):
    host_ids = [event.host_id for event in event_page]
    for host_id in host_ids:
      deferred.defer(
          Process, host_id, _queue=constants.TASK_QUEUE.BIT9_PROCESS)
      total_dispatch_count += 1

  logging.info('Dispatched %d task(s)', total_dispatch_count)
Example No. 6
  def testPaginate(self):

    page_size = 10
    for entity_count in xrange(50):

      # Create some number of entities.
      CreateEntities(entity_count)

      # Verify that we get the expected number of pages.
      pages = list(query_utils.Paginate(TestModel.query(), page_size=page_size))
      expected_page_count = int(math.ceil(float(entity_count) / page_size))
      self.assertEqual(expected_page_count, len(pages))

      # Verify that we get the expected number of entities.
      entities = list(itertools.chain(*pages))
      self.assertEqual(entity_count, len(entities))

      # Delete everything.
      for entity in entities:
        entity.key.delete()
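
Every example in this set relies on query_utils.Paginate, whose implementation is not shown. A minimal cursor-based sketch using NDB's fetch_page (the real implementation may differ, e.g. in its defaults or keyword options):

def Paginate(query, page_size=100):
  # Yield successive pages of results until the query is exhausted.
  results, cursor, more = query.fetch_page(page_size)
  while results:
    yield results
    if not more:
      break
    results, cursor, more = query.fetch_page(page_size, start_cursor=cursor)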
Example No. 7
def Process(host_id):
  """Processes _UnsyncedEvents for a single Windows host.

  Args:
    host_id: The integer ID of this host in Bit9.
  """
  try:
    with datastore_locks.DatastoreLock(
        'bit9-process-%d' % host_id,
        default_timeout=_PROCESS_LOCK_TIMEOUT,
        default_max_acquire_attempts=_PROCESS_LOCK_MAX_ACQUIRE_ATTEMPTS):

      total_process_count = 0
      start_time = _Now()
      logging.info('Starting a new processing task for %d', host_id)

      # Query for all _UnsyncedEvents that belong to the given host, in
      # batches, and process them until we run out, or the task nears its
      # deadline.
      query = _UnsyncedEvent.query(
          _UnsyncedEvent.host_id == host_id).order(_UnsyncedEvent.bit9_id)
      event_pages = query_utils.Paginate(query, page_size=25)
      event_page = next(event_pages, None)
      while time_utils.TimeRemains(start_time, _TASK_DURATION) and event_page:
        for unsynced_event in event_page:
          event = api.Event.from_dict(unsynced_event.event)
          signing_chain = [
              api.Certificate.from_dict(cert)
              for cert in unsynced_event.signing_chain]
          file_catalog = event.get_expand(api.Event.file_catalog_id)
          computer = event.get_expand(api.Event.computer_id)

          # Persist the event data.
          persist_futures = [
              _PersistBit9Certificates(signing_chain),
              _PersistBit9Binary(event, file_catalog, signing_chain),
              _PersistBanNote(file_catalog),
              _PersistBit9Host(computer, event.timestamp),
              _PersistBit9Events(event, file_catalog, computer, signing_chain)]
          ndb.Future.wait_all(persist_futures)
          for persist_future in persist_futures:
            persist_future.check_success()

          # Now that the event sync has completed successfully, remove the
          # intermediate proto entity.
          unsynced_event.key.delete()

          monitoring.events_processed.Increment()
          total_process_count += 1

        event_page = next(event_pages, None)

    logging.info('Processed %d event(s)', total_process_count)

  except datastore_locks.AcquireLockError:
    logging.debug('Unable to acquire datastore lock')
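
Process depends on a pair of small time helpers that are not shown. A plausible sketch, assuming _TASK_DURATION is a timedelta budget chosen to stay under the task queue deadline (the names come from the snippet; the bodies and the value are assumptions):

import datetime

_TASK_DURATION = datetime.timedelta(minutes=9)  # assumed budget


def _Now():
  return datetime.datetime.utcnow()


def TimeRemains(start_time, duration):
  # True while the elapsed time since start_time is still within the budget.
  return _Now() - start_time < duration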