import time

from slipstream.job.base import main
from slipstream.job.distributor import Distributor
from slipstream.job.util import override


class DummyTestActionsDistributor(Distributor):
    ACTION_NAME = 'dummy_test_action'

    def __init__(self):
        super(DummyTestActionsDistributor, self).__init__()
        self.collect_interval = 15.0

    @override
    def job_generator(self):
        while True:
            job = {'action': DummyTestActionsDistributor.ACTION_NAME,
                   'targetResource': {'href': 'dummy'}}
            yield job
            time.sleep(self.collect_interval)

    @override
    def _get_jobs_type(self):
        return 'dummy_test_action'


if __name__ == '__main__':
    main(DummyTestActionsDistributor)
import time

from slipstream.job.base import main
from slipstream.job.distributor import Distributor
from slipstream.job.util import override


class CleanupJobsDistributor(Distributor):
    ACTION_NAME = 'cleanup_jobs'

    def __init__(self):
        super(CleanupJobsDistributor, self).__init__()
        self.collect_interval = 86400.0  # 1 day

    @override
    def job_generator(self):
        while True:
            job = {'action': CleanupJobsDistributor.ACTION_NAME,
                   'targetResource': {'href': 'job'}}
            yield job
            time.sleep(self.collect_interval)

    @override
    def _get_jobs_type(self):
        return 'cleanup_jobs'


if __name__ == '__main__':
    main(CleanupJobsDistributor)
        return response.resources_list

    @override
    def job_generator(self):
        while True:
            start_time = time.time()
            credentials = self._get_credentials()
            for cred in credentials:
                pending_jobs = self.ss_api.cimi_search(
                    'jobs',
                    filter='action="{}" and targetResource/href="{}" and state="QUEUED"'
                    .format(CollectQuotasDistributor.ACTION_NAME, cred.id),
                    last=0)
                if pending_jobs.json['count'] == 0:
                    job = {'action': CollectQuotasDistributor.ACTION_NAME,
                           'targetResource': {'href': cred.id}}
                    yield job
                else:
                    logging.debug('Action {} already queued, will not create a new job for {}.'
                                  .format(CollectQuotasDistributor.ACTION_NAME, cred.id))
            time.sleep(self.collect_interval - (time.time() - start_time))

    @override
    def _get_jobs_type(self):
        return 'collect_quotas'


if __name__ == '__main__':
    main(CollectQuotasDistributor)
                    # to define endpoint dynamically, from the connector resource
                    #
                    # endpoint = ....

                    if not endpoint:
                        continue

                    job = {'action': CollectStorageBucketsDistributor.ACTION_NAME,
                           'targetResource': {'href': credential.id}}
                    yield job
                else:
                    logging.debug('Action {} already queued or running, will not create a new job for {}.'
                                  .format(CollectStorageBucketsDistributor.ACTION_NAME, credential.id))
                time.sleep(yield_interval)
            time.sleep(self._time_left(start_time))

    @override
    def _get_jobs_type(self):
        return 'collect_storage_buckets'


if __name__ == '__main__':
    main(CollectStorageBucketsDistributor)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from slipstream.job.base import main
from slipstream.job.executor import Executor

if __name__ == '__main__':
    main(Executor)
from __future__ import print_function

import time

from slipstream.job.base import main
from slipstream.job.distributor import Distributor
from slipstream.job.util import override


class NuvlaBoxStateCheckDistributor(Distributor):
    ACTION_NAME = 'nuvlabox_state_check'

    def __init__(self):
        super(NuvlaBoxStateCheckDistributor, self).__init__()
        self.distribute_interval = 600.0  # 10 minutes

    @override
    def job_generator(self):
        while True:
            job = {'action': NuvlaBoxStateCheckDistributor.ACTION_NAME,
                   'targetResource': {'href': 'job'}}
            yield job
            time.sleep(self.distribute_interval)

    @override
    def _get_jobs_type(self):
        return 'nuvlabox_state_check'


if __name__ == '__main__':
    main(NuvlaBoxStateCheckDistributor)
from __future__ import print_function

import time

from slipstream.job.base import main
from slipstream.job.distributor import Distributor
from slipstream.job.util import override


class CleanupNuvlaboxStateSnapshotsDistributor(Distributor):
    ACTION_NAME = 'cleanup_nb_state_snaps'

    def __init__(self):
        super(CleanupNuvlaboxStateSnapshotsDistributor, self).__init__()
        self.collect_interval = 86400.0  # 1 day

    @override
    def job_generator(self):
        while True:
            job = {'action': CleanupNuvlaboxStateSnapshotsDistributor.ACTION_NAME,
                   'targetResource': {'href': 'nuvlabox-state-snapshot'}}
            yield job
            time.sleep(self.collect_interval)

    @override
    def _get_jobs_type(self):
        return 'cleanup_nb_state_snaps'


if __name__ == '__main__':
    main(CleanupNuvlaboxStateSnapshotsDistributor)
import time

from slipstream.job.base import main
from slipstream.job.distributor import Distributor
from slipstream.job.util import override


class CleanupVmsDistributor(Distributor):
    ACTION_NAME = 'cleanup_virtual_machines'

    def __init__(self):
        super(CleanupVmsDistributor, self).__init__()
        self.collect_interval = 3600.0  # 1 hour

    @override
    def job_generator(self):
        while True:
            job = {'action': CleanupVmsDistributor.ACTION_NAME,
                   'targetResource': {'href': 'virtual-machine'}}
            yield job
            time.sleep(self.collect_interval)

    @override
    def _get_jobs_type(self):
        return 'cleanup_virtual_machines'


if __name__ == '__main__':
    main(CleanupVmsDistributor)
                float(nb_credentials), 1) * 0.6

            for credential in credentials:
                pending_jobs = self.ss_api.cimi_search(
                    'jobs',
                    filter='action="{}" and targetResource/href="{}" and state="QUEUED"'
                    .format(CollectVmsDistributor.ACTION_NAME, credential.id),
                    last=0)
                if pending_jobs.count == 0:
                    job = {'action': CollectVmsDistributor.ACTION_NAME,
                           'targetResource': {'href': credential.id}}
                    yield job
                else:
                    logging.debug('Action {} already queued, will not create a new job for {}.'
                                  .format(CollectVmsDistributor.ACTION_NAME, credential.id))
                time.sleep(yield_interval)
            time.sleep(self._time_left(start_time))

    @override
    def _get_jobs_type(self):
        return 'collect_virtual_machines'


if __name__ == '__main__':
    main(CollectVmsDistributor)
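# Note: the two collector fragments above (collect_storage_buckets and
# collect_virtual_machines) call self._time_left(start_time), a helper whose
# definition is not included in this section. The standalone sketch below is
# illustrative only and is not the actual Distributor method: it assumes
# collect_interval is the length of one full collection cycle in seconds, and
# the clamp to zero is an added assumption so the result can be passed safely
# to time.sleep().

import time


def _time_left_sketch(collect_interval, start_time):
    """Seconds remaining in the current collection cycle, never negative (sketch)."""
    elapsed = time.time() - start_time
    return max(collect_interval - elapsed, 0.0)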