Example #1
def start_remote_fit(problem, options, queue, notify):
    """
    Queue remote fit.
    """
    from jobqueue.client import connect
    try:
        from dill import dumps as dill_dumps
        dumps = lambda obj: dill_dumps(obj, recurse=True)
    except ImportError:
        from pickle import dumps

    data = dict(package='bumps',
                version=__version__,
                problem=dumps(problem),
                options=dumps(options))
    request = dict(
        service='fitter',
        version=__version__,  # fitter service version
        notify=notify,
        name=problem.title,
        data=data)

    server = connect(queue)
    job = server.submit(request)
    return job
Example #2
File: tq.py Project: abom/tq
def job_set_result(job, value, redis_connection):
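    # Record the result on the job, then serialize it with dill and store the
    # bytes in Redis under both the job's result key and its cache key.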
    job.result = value
    print("setting result to : ", value)
    dumped_value = dill_dumps(value)
    redis_connection.set(job.jobkey, dumped_value)
    redis_connection.set(job.jobkey_cached, dumped_value)
    return job
Example #3
File: tq.py Project: xmonader/tq
 def set_job_result(self, job, value):
     job.result = value
     print("setting result to : ", value)
     dumped_value = dill_dumps(value)
     self.r.set(job.job_result_key, dumped_value)
     self.r.set(job.job_result_key_cached, dumped_value)
     self.save_job(job)
     return job
Example #4
    def import_internal_services(self, items, base_dir, sync_internal, is_first):
        """ Imports and optionally caches locally internal services.
        """
        cache_file_path = os.path.join(base_dir, 'config', 'repo', 'internal-cache.dat')

        # sync_internal may be False but if the cache does not exist (which is the case when a server starts up
        # for the first time), we need to create it anyway and sync_internal becomes True then. However, the cache
        # should be created only by the very first worker in a group of workers - the rest can simply assume
        # that the cache is ready to read.
        if is_first and not os.path.exists(cache_file_path):
            sync_internal = True

        if sync_internal:

            # Synchronizing internal modules means re-building the internal cache from scratch
            # and re-deploying everything.

            service_info = []
            internal_cache = {
                'service_info': service_info
            }

            deployed = self.import_services_from_anywhere(items, base_dir)

            for class_ in deployed:
                impl_name = class_.get_impl_name()
                service_info.append({
                    'class_': class_,
                    'mod': inspect.getmodule(class_),
                    'impl_name': impl_name,
                    'service_id': self.impl_name_to_id[impl_name],
                    'is_active': self.services[impl_name]['is_active'],
                    'slow_threshold': self.services[impl_name]['slow_threshold'],
                    'fs_location': inspect.getfile(class_),
                })


            # All set, write out the cache file
            f = open(cache_file_path, 'wb')
            f.write(dill_dumps(internal_cache))
            f.close()

            return deployed

        else:
            deployed = []

            f = open(cache_file_path, 'rb')
            items = bunchify(dill_load(f))
            f.close()

            for item in items.service_info:
                self._visit_class(item.mod, deployed, item.class_, item.fs_location, True,
                    item.service_id, item.is_active, item.slow_threshold)

            return deployed
Example #5
File: tq.py Project: xmonader/tq
 def __init__(self, fun, retries=3):
     self._job_id = None
     self.fun = dill_dumps(fun)
     self.fun_name = fun.__name__
     self.retries = retries
     self.state = JobState.STOPPED
     self.args = []
     self.kwargs = {}
     self.result = None
     self.error = None
     self.start_time = None
     self.done_time = None
     self.last_modified_time = None  # should it be a list of prev handling times?
     self.in_process = False
     self.memoized = True
     self.timeout = 0
     self.worker_id = None
     self.safe_to_collect = False
Example #6
File: tq.py Project: abom/tq
 def __init__(self, fun, retries=3):
     self._jid = Job.jid
     self._job_id = None
     Job.jid += 1
     self.fun = dill_dumps(fun)
     self.funname = fun.__name__
     self.retries = retries
     self.state = SCHEDULED
     self.args = []
     self.kwargs = {}
     self.jobkey_cached = "jobs-results:cache-{}".format(self._hash())
     self.jobkey = "job-results:job-{}".format(self.job_id)
     self.result = None
     self.error = None
     self.start_time = None
     self.done_time = None
     self.last_modified_time = None  # should it be a list of prev handling times?
     self.in_process = False
     self.memoized = True
     self.timeout = 0
Example #7
    def import_internal_services(self, items, base_dir, sync_internal,
                                 is_first):
        """ Imports and optionally caches locally internal services.
        """
        cache_file_path = os.path.join(base_dir, 'config', 'repo',
                                       'internal-cache.dat')

        sql_services = {}
        for item in self.odb.get_sql_internal_service_list(
                self.server.cluster_id):
            sql_services[item.impl_name] = {
                'id': item.id,
                'impl_name': item.impl_name,
                'is_active': item.is_active,
                'slow_threshold': item.slow_threshold,
            }

        # sync_internal may be False but if the cache does not exist (which is the case when a server starts up
        # for the first time), we need to create it anyway and sync_internal becomes True then. However, the cache
        # should be created only by the very first worker in a group of workers - the rest can simply assume
        # that the cache is ready to read.
        if is_first and not os.path.exists(cache_file_path):
            sync_internal = True

        if sync_internal:

            # Synchronizing internal modules means re-building the internal cache from scratch
            # and re-deploying everything.

            service_info = []
            internal_cache = {'service_info': service_info}

            logger.info('Deploying and caching internal services (%s)',
                        self.server.name)
            info = self.import_services_from_anywhere(items, base_dir)

            for service in info.to_process:  # type: InRAMService

                class_ = service.service_class
                impl_name = service.impl_name

                service_info.append({
                    'service_class': class_,
                    'mod': inspect.getmodule(class_),
                    'impl_name': impl_name,
                    'service_id': self.impl_name_to_id[impl_name],
                    'is_active': self.services[impl_name]['is_active'],
                    'slow_threshold': self.services[impl_name]['slow_threshold'],
                    'fs_location': inspect.getfile(class_),
                    'deployment_info': '<todo>'
                })

            # All set, write out the cache file
            f = open(cache_file_path, 'wb')
            f.write(dill_dumps(internal_cache))
            f.close()

            logger.info('Deployed and cached %d internal services (%s) (%s)',
                        len(info.to_process), info.total_size_human,
                        self.server.name)

            return info.to_process

        else:
            logger.info('Deploying cached internal services (%s)',
                        self.server.name)
            to_process = []

            try:
                f = open(cache_file_path, 'rb')
                dill_items = dill_load(f)
            except ValueError as e:
                msg = e.args[0]
                if _unsupported_pickle_protocol_msg in msg:
                    msg = msg.replace(_unsupported_pickle_protocol_msg,
                                      '').strip()
                    protocol_found = int(msg)

                    # If the protocol found is higher than our own, it means that the cache
                    # was built under a Python version newer than ours, e.g. we are on Python 2.7
                    # and the cache was created under Python 3.4. In such a case, we need to
                    # recreate the cache anew.
                    if protocol_found > highest_pickle_protocol:
                        logger.info(
                            'Cache pickle protocol found `%d` > current highest `%d`, forcing sync_internal',
                            protocol_found, highest_pickle_protocol)
                        return self.import_internal_services(
                            items, base_dir, True, is_first)

                    # A different reason, re-raise the error then
                    else:
                        raise

                # Must be a different kind of a ValueError, propagate it then
                else:
                    raise
            finally:
                f.close()

            len_si = len(dill_items['service_info'])

            for idx, item in enumerate(dill_items['service_info'], 1):
                class_ = self._visit_class(item['mod'], item['service_class'],
                                           item['fs_location'], True)
                to_process.append(class_)

            self._store_in_ram(None, to_process)

            logger.info('Deployed %d cached internal services (%s)', len_si,
                        self.server.name)

            return to_process
Example #8
 def dumpObject(obj):
     try:
         return dill_dumps(obj, HIGHEST_PROTOCOL)
     except PicklingError:
         raise DumpError(dill.__name__)
Example #9
File: tq.py Project: abom/tq
def job_dumps(job):
    return dill_dumps(job)
Example #10
File: tq.py Project: xmonader/tq
 def job_dumps(self, job):
     return dill_dumps(job)
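
All of the examples above rely on a dill_dumps alias for dill.dumps (Example #1 shows the import). For reference, a minimal round-trip sketch, not taken from any of the projects above, showing how bytes produced by dill.dumps can be restored with dill.loads on the consuming side, e.g. by a task-queue worker:

from dill import dumps as dill_dumps, loads as dill_loads

def add(x, y):
    return x + y

# Serialize the function; recurse=True also captures the globals it refers to,
# as in Example #1.
payload = dill_dumps(add, recurse=True)

# The payload is plain bytes, so it can be stored in Redis or sent over the
# network as-is. On the consuming side, restore and call the function.
restored = dill_loads(payload)
assert restored(2, 3) == 5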