Example #1
0
 def __init__(self, job_uuid, name, config, api_key, entry_point_group_name="hoplite.jobs", port=5000):
     """
     Build a job descriptor; no process is started here.

     @param job_uuid unique identifier for this job
     @param name the name of the job, corresponds to the plugin name
     @param config dictionary object containing configuration for the
         specific job
     """
     self.uuid = job_uuid
     self.name = name
     self.config = config
     self.port = port
     self._api_key = api_key
     self._entry_point_group_name = entry_point_group_name
     # Process bookkeeping is empty until the job is actually launched.
     self._status = {}
     self._process = None
     self._started = False
     self._killed = False
     self._pipe_to_self = None
     self._pipe_to_process = None
     # TODO: Workaround kept because some tests create jobs that have no
     # corresponding loaded entry point; once those tests use real jobs
     # this fallback to the bare name can be removed.
     plugin_module = EntryPointManager().get_plugin_module_by_name(name)
     if plugin_module is None:
         log_name = name
     else:
         log_name = plugin_module.__name__
     self._logger = server_logging.get_job_logger(
         log_name, uuid=self.uuid)
from hoplite.utils import server_logging
import ftplib
import os
import socket
from ftplib import FTP
from hoplite.client.status_updater import MockStatusUpdater
from hoplite.builtin_plugins.constants import DownloadFolderFromFtpJobConstants as KEYS

# Module-level logger shared by the free functions in this plugin module.
logger = server_logging.get_job_logger(__name__)


def _get_files_in_dir(ftp_session, source, dest, status):
    """
    Recursively download the contents of the remote directory *source*
    into the local directory *dest*.

    @param ftp_session active ftplib.FTP connection; its working directory
        is changed by this function and is NOT restored afterwards
    @param source remote directory path; entry names from nlst() are
        appended to it directly, so it must end with "/"
    @param dest existing local directory that files are written into
    @param status only threaded through to the recursive calls here;
        never read or updated in this function
    """
    # Listing reflects the session's current working directory, which the
    # caller is expected to have positioned at *source*.
    filelist = ftp_session.nlst()
    for path in filelist:
        local_path = os.path.join(dest, path)
        remote_path = source + path
        try:
            # See if it's a directory (EAFP: cwd succeeds only for
            # directories; ftplib raises error_perm for plain files).
            new_remote_path = source + path + "/"
            ftp_session.cwd(source + path + "/")
            # It is! So we should create the directory on dest
            try:
                os.mkdir(local_path)
            except OSError:
                logger.debug("Dir: {0} already exists".format(local_path))
            logger.debug("Get Folder: {0}".format(new_remote_path))
            # NOTE(review): the recursion leaves the session cwd'd inside
            # the subtree; the next loop iteration then issues
            # cwd(source + path) from there, which only works if *source*
            # paths are absolute — TODO confirm with callers.
            _get_files_in_dir(ftp_session, new_remote_path, local_path, status)
        except ftplib.error_perm:
            # Not a directory: fetch it as a binary file via its
            # source-prefixed path.
            logger.debug("Get file: {0}".format(path))
            with open(local_path, "wb") as file_handle:
                ftp_session.retrbinary("RETR " + remote_path, file_handle.write)
Example #3
0
def job_wrapper(pipe_to_parent, entry_point_name, config, status_updater,
                entry_point_group_name='hoplite.jobs', uuid=''):
    """
    A picklable function that is used to start the job. It loads the specified
    module and calls run on it with the correct parameters.

    In the event of an error occurring in the job, the error is bubbled up to
    the highest parent of the job. This is done by encapsulating each exception
    into a JobFailedError, which is raised to cause the bubbling action. Since
    Exceptions aren't picklable, information from previous exceptions is put
    into a dictionary.

    This exception bubbling is useful for situations in which jobs are used to
    call other jobs. The stack trace for each "level" is saved, and the entire
    list of jobs with their respective traces can be displayed at the top level
    (where the JobFailedError is handled).

    @param pipe_to_parent connection failure details are sent over; its
        send() pickles the payload, so the payload must be picklable
    @param entry_point_name name of the plugin whose run() is invoked
    @param config configuration object forwarded to the plugin's run()
    @param status_updater object forwarded to the plugin's run()
    @param entry_point_group_name entry-point group the plugin is loaded from
    @param uuid identifier used to tag log records for this job
    """
    module = EntryPointManager(
        entry_point_group_name).get_plugin_module_by_name(entry_point_name)
    logger = server_logging.get_job_logger(module.__name__, uuid)
    try:
        module.run(config, status_updater)
    except JobFailedError as e:
        # A nested job already failed; forward its details to our parent.
        logger.error(
            "A job raised an exception and it was not caught."
            " Address: {0} UUID: {1}".format(e.addr, e.uuid))
        logger.error(
            "Exception Traceback: {0}".format(
                traceback.format_tb(e.traceback_object)))
        _, _, tb = sys.exc_info()
        exception_dictionary = {
            "address": e.addr,
            "uuid": e.uuid,
            # NOTE(review): if producers store a raw traceback object here,
            # pipe_to_parent.send cannot pickle it — confirm what
            # JobFailedError.traceback_object actually holds.
            "traceback": e.traceback_object,
            "previous_exception": e.previous_exception
        }
        pass_to_parent = {
            # BUG FIX: the raw traceback object was sent before; tracebacks
            # are unpicklable, so send() raised and the real error was lost.
            "traceback": _serialize_traceback(tb),
            "previous_exception": exception_dictionary
        }
        pipe_to_parent.send(pass_to_parent)
    except Exception as e:
        except_type, except_class, tb = sys.exc_info()
        type_string = str(except_type)
        try:
            pickled_exception = pickle.dumps(e)
        except (pickle.PicklingError, TypeError, AttributeError):
            # Arbitrary exceptions often fail to pickle with TypeError or
            # AttributeError rather than PicklingError; degrade gracefully.
            pickled_exception = None

        exception_dictionary = {
            "type": type_string,
            # BUG FIX: Exception.message does not exist on Python 3; keep
            # the Python 2 attribute when present, else fall back to str(e).
            "message": getattr(e, "message", str(e)),
            "exception_object": pickled_exception
        }
        pass_to_parent = {
            # BUG FIX: pickle.dumps on a traceback object always raises
            # TypeError; fall back to the formatted frames when it fails.
            "traceback": _serialize_traceback(tb),
            "previous_exception": exception_dictionary
        }
        logger.error("Job UUID:{0} Type:{1} Finished with except type:{2} "
                     "except class:{3} traceback:{4}".format(
                        uuid, entry_point_name,
                        except_type, except_class, traceback.format_tb(tb)))
        pipe_to_parent.send(pass_to_parent)
    logger.debug("Finished running UUID:{0}".format(uuid))


def _serialize_traceback(traceback_object):
    """
    Return a picklable representation of *traceback_object*.

    Traceback objects cannot be pickled (pickle raises TypeError), so fall
    back to the list of formatted frame strings from traceback.format_tb.
    """
    try:
        return pickle.dumps(traceback_object)
    except (pickle.PicklingError, TypeError):
        return traceback.format_tb(traceback_object)