Example no. 1
0
def check_docker_dependencies(dependencies):
    """Verifies that job dependencies are satisfied inside docker images.

  Args:
    dependencies(dict): dictionary of dependencies to check for.

  Raises:
    TurbiniaException: If dependency is not met.
  """
    #TODO(wyassine): may run into issues down the line when a docker image
    # does not have bash or which installed. (no linux fs layer).
    log.info('Performing docker dependency check.')
    registered_jobs = list(job_manager.JobsManager.GetJobNames())
    available_images = docker_manager.DockerManager().list_images(
        return_filter='short_id')

    # Walk every configured job and validate its docker image and programs.
    for job, job_config in dependencies.items():
        # Unknown or disabled jobs are skipped rather than failed.
        if job not in registered_jobs:
            log.warning(
                'The job {0:s} was not found or has been disabled. Skipping '
                'dependency check...'.format(job))
            continue

        image_id = job_config.get('docker_image')
        if image_id:
            # list_images returned short ids, which are only the first 10
            # characters of the full image id, so truncate before comparing.
            image_id = image_id[:10]

        if image_id in available_images:
            # Confirm every required program resolves inside the container.
            for program in job_config['programs']:
                cmd = 'type {0:s}'.format(program)
                stdout, stderr, ret = docker_manager.ContainerManager(
                    job_config['docker_image']).execute_container(
                        cmd, shell=True)
                if ret != 0:
                    raise TurbiniaException(
                        'Job dependency {0:s} not found for job {1:s}. Please install '
                        'the dependency for the container or disable the job.'.
                        format(program, job))
            job_manager.JobsManager.RegisterDockerImage(
                job, job_config['docker_image'])
        elif image_id:
            raise TurbiniaException(
                'Docker image {0:s} was not found for the job {1:s}. Please '
                'update the config with the correct image id'.format(
                    job_config['docker_image'], job))
Example no. 2
0
  def execute(
      self, cmd, result, save_files=None, log_files=None, new_evidence=None,
      close=False, shell=False, stderr_file=None, stdout_file=None,
      success_codes=None):
    """Executes a given binary and saves output.

    Args:
      cmd (list|string): Command arguments to run
      result (TurbiniaTaskResult): The result object to put data into.
      save_files (list): A list of files to save (files referenced by Evidence
          objects are automatically saved, so no need to include them).
      log_files (list): A list of files to save even if execution fails.
      new_evidence (list): These are new evidence objects created by the task.
          If the task is successful, they will be added to the result.
      close (bool): Whether to close out the result.
      shell (bool): Whether the cmd is in the form of a string or a list.
      stderr_file (str): Path to location to save stderr.
      stdout_file (str): Path to location to save stdout.
      success_codes (list(int)): Which return codes are considered successful.

    Returns:
      Tuple of the return code, and the TurbiniaTaskResult object

    Raises:
      TurbiniaException: If execution hits the job timeout limit.
    """
    # Avoid circular dependency.
    from turbinia.jobs import manager as job_manager

    save_files = save_files if save_files else []
    log_files = log_files if log_files else []
    new_evidence = new_evidence if new_evidence else []
    success_codes = success_codes if success_codes else [0]
    stdout = None
    stderr = None

    def _save_stream(data, file_path, stream_name):
      """Persists one captured output stream and queues it for log saving.

      Args:
        data (bytes): Raw stream content captured from the execution.
        file_path (str): Requested destination path, or None to create a
            tempfile in the task output directory.
        stream_name (str): 'stdout' or 'stderr'; used in log messages and as
            the tempfile prefix.
      """
      if file_path and not data:
        result.log(
            'Attempting to save {0:s} to {1:s}, but no {0:s} found during '
            'execution'.format(stream_name, file_path))
        return
      if not data:
        return
      if not file_path:
        _, file_path = tempfile.mkstemp(
            suffix='.txt', prefix='{0:s}-'.format(stream_name),
            dir=self.output_dir)
      result.log(
          'Writing {0:s} to {1:s}'.format(stream_name, file_path),
          level=logging.DEBUG)
      with open(file_path, 'wb') as fh:
        fh.write(data)
      log_files.append(file_path)

    # Get timeout value.
    timeout_limit = job_manager.JobsManager.GetTimeoutValue(self.job_name)

    # Execute the job via docker.
    docker_image = job_manager.JobsManager.GetDockerImage(self.job_name)
    if docker_image:
      # Evidence paths are mounted read-only; task output dirs read-write.
      ro_paths = [
          result.input_evidence.local_path, result.input_evidence.source_path,
          result.input_evidence.device_path, result.input_evidence.mount_path
      ]
      rw_paths = [self.output_dir, self.tmp_dir]
      container_manager = docker_manager.ContainerManager(docker_image)
      stdout, stderr, ret = container_manager.execute_container(
          cmd, shell, ro_paths=ro_paths, rw_paths=rw_paths,
          timeout_limit=timeout_limit)

    # Execute the job on the host system.
    else:
      proc = subprocess.Popen(
          cmd, shell=shell, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
      try:
        # communicate(timeout=...) instead of wait(): with piped streams,
        # wait() can deadlock once the child fills the OS pipe buffer.
        stdout, stderr = proc.communicate(timeout=timeout_limit)
      except subprocess.TimeoutExpired as exception:
        # Kill the child so it does not linger past the timeout, and drain
        # the pipes so the process is fully reaped.
        proc.kill()
        proc.communicate()
        # Log error and close result.
        message = (
            'Execution of [{0!s}] failed due to job timeout of '
            '{1:d} seconds has been reached.'.format(cmd, timeout_limit))
        result.log(message)
        result.close(self, success=False, status=message)
        # Increase timeout metric and raise exception
        turbinia_worker_tasks_timeout_total.inc()
        raise TurbiniaException(message) from exception

      ret = proc.returncode

    result.error['stdout'] = str(stdout)
    result.error['stderr'] = str(stderr)

    # Persist captured streams (if any) so they are saved with the logs.
    _save_stream(stderr, stderr_file, 'stderr')
    _save_stream(stdout, stdout_file, 'stdout')

    # De-duplicate, then save every log file that exists and is non-empty.
    log_files = list(set(log_files))
    for file_ in log_files:
      if not os.path.exists(file_):
        result.log(
            'Log file {0:s} does not exist to save'.format(file_),
            level=logging.DEBUG)
        continue
      if os.path.getsize(file_) == 0:
        result.log(
            'Log file {0:s} is empty. Not saving'.format(file_),
            level=logging.DEBUG)
        continue
      result.log('Output log file found at {0:s}'.format(file_))
      if not self.run_local:
        self.output_manager.save_local_file(file_, result)

    if ret not in success_codes:
      message = 'Execution of [{0!s}] failed with status {1:d}'.format(cmd, ret)
      result.log(message)
      if close:
        result.close(self, success=False, status=message)
    else:
      result.log('Execution of [{0!s}] succeeded'.format(cmd))
      for file_ in save_files:
        if os.path.getsize(file_) == 0:
          result.log(
              'Output file {0:s} is empty. Not saving'.format(file_),
              level=logging.DEBUG)
          continue
        result.log('Output save file at {0:s}'.format(file_))
        if not self.run_local:
          self.output_manager.save_local_file(file_, result)

      for evidence in new_evidence:
        # If the local path is set in the Evidence, we check to make sure that
        # the path exists and is not empty before adding it.
        if evidence.source_path and not os.path.exists(evidence.source_path):
          message = (
              'Evidence {0:s} source_path {1:s} does not exist. Not returning '
              'empty Evidence.'.format(evidence.name, evidence.source_path))
          result.log(message, level=logging.WARN)
        elif (evidence.source_path and os.path.exists(evidence.source_path) and
              os.path.getsize(evidence.source_path) == 0):
          message = (
              'Evidence {0:s} source_path {1:s} is empty. Not returning '
              'empty new Evidence.'.format(evidence.name, evidence.source_path))
          result.log(message, level=logging.WARN)
        else:
          result.add_evidence(evidence, self._evidence_config)

      if close:
        result.close(self, success=True)

    return ret, result
Example no. 3
0
    def execute(self,
                cmd,
                result,
                save_files=None,
                log_files=None,
                new_evidence=None,
                close=False,
                shell=False,
                success_codes=None):
        """Executes a given binary and saves output.

    Args:
      cmd (list|string): Command arguments to run
      result (TurbiniaTaskResult): The result object to put data into.
      save_files (list): A list of files to save (files referenced by Evidence
          objects are automatically saved, so no need to include them).
      log_files (list): A list of files to save even if execution fails.
      new_evidence (list): These are new evidence objects created by the task.
          If the task is successful, they will be added to the result.
      close (bool): Whether to close out the result.
      shell (bool): Whether the cmd is in the form of a string or a list.
      success_codes (list(int)): Which return codes are considered successful.

    Returns:
      Tuple of the return code, and the TurbiniaTaskResult object
    """
        # Avoid circular dependency.
        from turbinia.jobs import manager as job_manager

        # Normalize optional list arguments; with no success_codes supplied,
        # only a zero return code counts as success.
        save_files = save_files if save_files else []
        log_files = log_files if log_files else []
        new_evidence = new_evidence if new_evidence else []
        success_codes = success_codes if success_codes else [0]

        # Execute the job via docker.
        docker_image = job_manager.JobsManager.GetDockerImage(self.job_name)
        if docker_image:
            # Evidence paths are exposed read-only; the task's output and tmp
            # directories read-write.
            ro_paths = [
                result.input_evidence.local_path,
                result.input_evidence.source_path,
                result.input_evidence.device_path,
                result.input_evidence.mount_path
            ]
            rw_paths = [self.output_dir, self.tmp_dir]
            container_manager = docker_manager.ContainerManager(docker_image)
            stdout, stderr, ret = container_manager.execute_container(
                cmd, shell, ro_paths=ro_paths, rw_paths=rw_paths)

        # Execute the job on the host system.
        else:
            # No stdout/stderr pipes are requested here, so the child inherits
            # the parent's streams and communicate() returns (None, None).
            if shell:
                proc = subprocess.Popen(cmd, shell=True)
            else:
                proc = subprocess.Popen(cmd)
            stdout, stderr = proc.communicate()
            ret = proc.returncode

        # NOTE(review): in the host-execution branch these are both None (see
        # above); only the docker branch produces actual output here.
        result.error['stdout'] = stdout
        result.error['stderr'] = stderr

        # Save every requested log file that exists and is non-empty, even if
        # the command itself failed.
        for file_ in log_files:
            if not os.path.exists(file_):
                result.log(
                    'Log file {0:s} does not exist to save'.format(file_),
                    level=logging.DEBUG)
                continue
            if os.path.getsize(file_) == 0:
                result.log('Log file {0:s} is empty. Not saving'.format(file_),
                           level=logging.DEBUG)
                continue
            result.log('Output log file found at {0:s}'.format(file_))
            if not self.run_local:
                self.output_manager.save_local_file(file_, result)

        if ret not in success_codes:
            # Failure path: record the status; close the result only when the
            # caller asked for it.
            message = 'Execution of [{0!s}] failed with status {1:d}'.format(
                cmd, ret)
            result.log(message)
            if close:
                result.close(self, success=False, status=message)
        else:
            # Success path: save output files and attach new evidence.
            result.log('Execution of [{0!s}] succeeded'.format(cmd))
            for file_ in save_files:
                if os.path.getsize(file_) == 0:
                    result.log(
                        'Output file {0:s} is empty. Not saving'.format(file_),
                        level=logging.DEBUG)
                    continue
                result.log('Output save file at {0:s}'.format(file_))
                if not self.run_local:
                    self.output_manager.save_local_file(file_, result)

            for evidence in new_evidence:
                # If the local path is set in the Evidence, we check to make sure that
                # the path exists and is not empty before adding it.
                if evidence.source_path and not os.path.exists(
                        evidence.source_path):
                    message = (
                        'Evidence {0:s} source_path {1:s} does not exist. Not returning '
                        'empty Evidence.'.format(evidence.name,
                                                 evidence.source_path))
                    result.log(message, level=logging.WARN)
                elif (evidence.source_path
                      and os.path.exists(evidence.source_path)
                      and os.path.getsize(evidence.source_path) == 0):
                    message = (
                        'Evidence {0:s} source_path {1:s} is empty. Not returning '
                        'empty new Evidence.'.format(evidence.name,
                                                     evidence.source_path))
                    result.log(message, level=logging.WARN)
                else:
                    result.add_evidence(evidence, self._evidence_config)

            if close:
                result.close(self, success=True)

        return ret, result
Example no. 4
0
 def setUp(self, mock_get, mock_docker):
     """Builds a ContainerManager backed entirely by mocked docker bindings."""
     # Have the patched docker.from_env hand back the client class itself,
     # so no docker daemon is contacted during tests.
     mock_docker.from_env.return_value = docker.client.DockerClient
     image_id = '1234'
     mock_get.return_value = image_id
     self.test_img = image_id
     self.container_mgr = docker_manager.ContainerManager(self.test_img)