Example #1
  def get(self):

    start_time = datetime.datetime.utcnow()

    changes = rule_models.RuleChangeSet.query(
        projection=[rule_models.RuleChangeSet.blockable_key],
        distinct=True).fetch()

    # Count the number of distinct SHA256s that have outstanding RuleChangeSets.
    blockable_keys = [change.blockable_key for change in changes]
    blockable_key_count = len(blockable_keys)
    logging.info('Retrieved %d pending change(s)', blockable_key_count)
    monitoring.pending_changes.Set(blockable_key_count)

    # Don't just throw everything into the bit9-commit-change queue, because if
    # anything is still pending when the cron fires again, the queue could start
    # to back up. Allow 3 tasks/sec for the number of seconds remaining (minus a
    # small buffer), evenly spread out over the remaining cron period.
    now = datetime.datetime.utcnow()
    cron_seconds = int(datetime.timedelta(minutes=5).total_seconds())
    elapsed_seconds = int((now - start_time).total_seconds())
    available_seconds = cron_seconds - elapsed_seconds - 10

    # Randomly sample from the outstanding changes in order to avoid
    # head-of-the-line blocking due to unsynced hosts, for example.
    sample_size = min(len(blockable_keys), 3 * available_seconds)
    selected_keys = random.sample(blockable_keys, sample_size)
    logging.info('Deferring %d pending change(s)', len(selected_keys))

    for selected_key in selected_keys:

      # Schedule the task for a random time in the remaining cron period.
      countdown = random.randint(0, available_seconds)
      change_set.DeferCommitBlockableChangeSet(
          selected_key, countdown=countdown)
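
Every example on this page funnels into change_set.DeferCommitBlockableChangeSet. With the five-minute cron period and ten-second buffer above, available_seconds is roughly 290, so Example #1 defers at most about 870 change sets per run, each at a random countdown inside the remaining window. The helper itself is not reproduced on this page; the following is a minimal sketch of what it might look like, assuming it simply enqueues a deferred App Engine task on the bit9-commit-change queue (the worker stub and the wiring are illustrative assumptions, not the project's actual code).

# Hypothetical sketch only; not the upvote project's actual implementation.
from google.appengine.ext import deferred


def _CommitBlockableChangeSet(blockable_key):
  """Assumed worker; a fuller sketch appears after Example #5."""
  raise NotImplementedError


def DeferCommitBlockableChangeSet(blockable_key, countdown=0):
  """Enqueues a deferred task to commit pending changes for one blockable.

  Args:
    blockable_key: ndb.Key of the blockable whose RuleChangeSets should be
        committed.
    countdown: Seconds to wait before the task becomes eligible to run.
  """
  deferred.defer(
      _CommitBlockableChangeSet, blockable_key,
      # Queue name taken from the comment in Example #1.
      _queue='bit9-commit-change',
      _countdown=countdown)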
Example #2
    def testTailDefer_NoMoreChanges(self):
        with mock.patch.object(change_set, '_CommitChangeSet') as mock_commit:
            change_set.DeferCommitBlockableChangeSet(self.binary.key)

            self.assertTaskCount(constants.TASK_QUEUE.BIT9_COMMIT_CHANGE, 1)
            self.RunDeferredTasks(constants.TASK_QUEUE.BIT9_COMMIT_CHANGE)
            self.assertTaskCount(constants.TASK_QUEUE.BIT9_COMMIT_CHANGE, 0)

            mock_commit.assert_called_once_with(self.change.key)
Example #3
  def _SetInstallerPolicy(self, blockable_id, new_policy):
    blockable = base_models.Blockable.get_by_id(blockable_id)

    # pylint: disable=g-explicit-bool-comparison, singleton-comparison
    installer_rule_query = rule_models.Bit9Rule.query(
        rule_models.Bit9Rule.in_effect == True,
        rule_models.Bit9Rule.policy.IN(constants.RULE_POLICY.SET_INSTALLER),
        ancestor=blockable.key)
    # pylint: enable=g-explicit-bool-comparison, singleton-comparison
    existing_rule = installer_rule_query.get()
    if existing_rule:
      if existing_rule.policy == new_policy:
        return blockable.is_installer
      else:
        existing_rule.in_effect = False
        existing_rule.put()

    # Create the Bit9Rule associated with the installer state and a change set
    # to commit it.
    new_rule = rule_models.Bit9Rule(
        rule_type=blockable.rule_type,
        in_effect=True,
        policy=new_policy,
        parent=blockable.key)
    new_rule.put()
    change = bit9_models.RuleChangeSet(
        rule_keys=[new_rule.key],
        change_type=new_rule.policy,
        parent=blockable.key)
    change.put()

    message = 'User %s changed installer state to %s' % (
        self.user.key.id(), new_policy)
    tables.BINARY.InsertRow(
        sha256=blockable.key.id(),
        timestamp=datetime.datetime.utcnow(),
        action=constants.BLOCK_ACTION.COMMENT,
        state=blockable.state,
        score=blockable.score,
        platform=constants.PLATFORM.WINDOWS,
        client=constants.CLIENT.BIT9,
        first_seen_file_name=blockable.first_seen_name,
        cert_fingerprint=blockable.cert_id,
        is_compiler=blockable.is_compiler,
        comment=message)

    change_set.DeferCommitBlockableChangeSet(blockable.key)

    # Update the blockable's is_installer property.
    blockable.is_installer = new_policy == constants.RULE_POLICY.FORCE_INSTALLER
    blockable.put()

    return blockable.is_installer
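
Examples #3, #4, and #7 all build a RuleChangeSet from rule_keys, a change_type, and the blockable as the entity parent, while Example #1 runs a distinct projection query over blockable_key. The model's rough shape can be inferred from those call sites; the property types below are assumptions, not the project's actual definition.

from google.appengine.ext import ndb


class RuleChangeSet(ndb.Model):
  """One batch of Bit9Rule keys to commit for a single blockable (inferred)."""

  # Keys of the Bit9Rule entities that will be committed together.
  rule_keys = ndb.KeyProperty(repeated=True)
  # The policy being applied, e.g. a RULE_POLICY constant (see Example #5).
  change_type = ndb.StringProperty()
  # The constructors above always pass parent=blockable.key and never set
  # blockable_key directly, yet Example #1 projects on blockable_key, so it
  # is presumably derived from the parent key.
  blockable_key = ndb.ComputedProperty(lambda self: self.key.parent())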
Example #4
File: sync.py  Project: crudbug/upvote
def _CheckAndResolveAnomalousBlock(blockable_key, host_id):
    """Checks whether an unfulfilled rule already existed for this blockable.

  If there are unfulfilled rules, triggers an attempt to commit them back to the
  database.

  Args:
    blockable_key: The key of the blockable that was blocked.
    host_id: The host on which the block occurred.

  Returns:
    Whether the block was anomalous (i.e. whether an unfulfilled rule existed
    for the blockable-host pair).
  """
    # Check and handle anomalous block events by detecting unfulfilled rules and,
    # if present, attempting to commit them.
    # pylint: disable=g-explicit-bool-comparison
    unfulfilled_rule_query = bit9.Bit9Rule.query(
        bit9.Bit9Rule.is_committed == True,
        bit9.Bit9Rule.is_fulfilled == False,
        bit9.Bit9Rule.host_id == host_id,
        ancestor=blockable_key).order(bit9.Bit9Rule.updated_dt)
    # pylint: enable=g-explicit-bool-comparison
    unfulfilled_rules = unfulfilled_rule_query.fetch()

    # Installer rules shouldn't be local (e.g. have host_id's) so they shouldn't
    # have been returned by the query. Still, the sanity check couldn't hurt.
    assert all(rule.policy in constants.RULE_POLICY.SET_EXECUTION
               for rule in unfulfilled_rules)
    if unfulfilled_rules:
        logging.info('Processing %s unfulfilled rules for %s',
                     len(unfulfilled_rules), blockable_key.id())

        # Mark all outstanding unfulfilled rules _except_ the most recent one as
        # fulfilled as we're going to ignore them.
        for rule in unfulfilled_rules[:-1]:
            rule.is_fulfilled = True

        # Mark the most recent unfulfilled rule as uncommitted as we're going to
        # commit it.
        unfulfilled_rules[-1].is_committed = False

        # Create and trigger a change set to commit the most recent rule.
        change = bit9.RuleChangeSet(rule_keys=[unfulfilled_rules[-1].key],
                                    change_type=unfulfilled_rules[-1].policy,
                                    parent=blockable_key)

        ndb.put_multi(unfulfilled_rules + [change])

        change_set.DeferCommitBlockableChangeSet(blockable_key)

    return bool(unfulfilled_rules)
Example #5
    def testTailDefer_MoreChanges(self):
        test_utils.CreateRuleChangeSet(
            self.binary.key,
            rule_keys=[self.local_rule.key],
            change_type=constants.RULE_POLICY.BLACKLIST)
        with mock.patch.object(change_set, '_CommitChangeSet') as mock_commit:
            change_set.DeferCommitBlockableChangeSet(self.binary.key)

            self.assertTaskCount(constants.TASK_QUEUE.BIT9_COMMIT_CHANGE, 1)
            self.RunDeferredTasks(constants.TASK_QUEUE.BIT9_COMMIT_CHANGE)
            # Tail defer task for remaining change.
            self.assertTaskCount(constants.TASK_QUEUE.BIT9_COMMIT_CHANGE, 1)

            mock_commit.assert_called_once_with(self.change.key)
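
Taken together, Examples #2 and #5 pin down the "tail defer" behavior: committing a change set leaves the queue empty when nothing else is pending, but re-enqueues exactly one follow-up task when another RuleChangeSet still exists for the same blockable. Below is a rough sketch of a worker with that behavior, reusing the hypothetical names from the sketch after Example #1; the query and the _CommitChangeSet call mirror what the tests mock, but none of this is the project's actual code.

def _CommitBlockableChangeSet(blockable_key, tail_defer=True):
  """Illustrative worker: commit one pending change set, then tail-defer."""
  change_keys = RuleChangeSet.query(
      ancestor=blockable_key).fetch(2, keys_only=True)
  if not change_keys:
    return
  # The unit tests above patch this call; the real commit presumably removes
  # the committed change set so a follow-up task only sees remaining work.
  _CommitChangeSet(change_keys[0])
  if tail_defer and len(change_keys) > 1:
    # At least one more change set remains for this blockable, so enqueue the
    # extra task that testTailDefer_MoreChanges asserts on.
    DeferCommitBlockableChangeSet(blockable_key)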
Example #6
  def _TriggerCommit(self):
    change_set.DeferCommitBlockableChangeSet(self.blockable.key)
Example #7
def _CheckAndResolveAnomalousBlock(blockable_key, host_id):
  """Checks whether an unfulfilled rule already existed for this blockable.

  If there are unfulfilled rules, triggers an attempt to commit them back to the
  database.

  Args:
    blockable_key: The key of the blockable that was blocked.
    host_id: The host on which the block occurred.

  Returns:
    Whether the block was anomalous (i.e. whether an unfulfilled rule existed
    for the blockable-host pair).
  """
  # Check and handle anomalous block events by detecting unfulfilled rules and,
  # if present, attempting to commit them.
  # pylint: disable=g-explicit-bool-comparison, singleton-comparison
  unfulfilled_rule_query = rule_models.Bit9Rule.query(
      rule_models.Bit9Rule.is_committed == True,
      rule_models.Bit9Rule.is_fulfilled == False,
      rule_models.Bit9Rule.host_id == host_id,
      ancestor=blockable_key
  ).order(rule_models.Bit9Rule.updated_dt)
  # pylint: enable=g-explicit-bool-comparison, singleton-comparison
  unfulfilled_rules = unfulfilled_rule_query.fetch()

  # Installer rules shouldn't be local (e.g. have host_id's) so they shouldn't
  # have been returned by the query. Still, the sanity check couldn't hurt.
  assert all(
      rule.policy in _POLICY.SET_EXECUTION
      for rule in unfulfilled_rules)
  if unfulfilled_rules:
    logging.info(
        'Processing %s unfulfilled rules for %s', len(unfulfilled_rules),
        blockable_key.id())

    # Mark all outstanding unfulfilled rules _except_ the most recent one as
    # fulfilled as we're going to ignore them.
    for rule in unfulfilled_rules[:-1]:
      rule.is_fulfilled = True

    # Mark the most recent unfulfilled rule as uncommitted as we're going to
    # commit it.
    unfulfilled_rules[-1].is_committed = False

    # Revise the Rule creation time to now. This will ensure that this
    # unfulfilled Rule will once again get picked up by the 'fast' and 'slow'
    # retry crons below. This should help fulfill such Rules in a *slightly*
    # more timely manner, in cases where an unfulfilled Rule ages out of the
    # week-long retry period, but is later executed by the corresponding user.
    unfulfilled_rules[-1].recorded_dt = datetime.datetime.utcnow()

    # Create and trigger a change set to commit the most recent rule.
    change = rule_models.RuleChangeSet(
        rule_keys=[unfulfilled_rules[-1].key],
        change_type=unfulfilled_rules[-1].policy, parent=blockable_key)

    ndb.put_multi(unfulfilled_rules + [change])

    change_set.DeferCommitBlockableChangeSet(blockable_key)

  return bool(unfulfilled_rules)