def log_to_queue(tid, level, msg):
    host = settings.RABBITMQ_HOST
    port = settings.RABBITMQ_PORT
    vhost = load_setting("CORE_VHOST")
    username = load_setting("CORE_SENDER_USERNAME")
    password = load_setting("TUMBO_CORE_SENDER_PASSWORD")
    log_queue_name = load_setting("LOGS_QUEUE")

    channel = connect_to_queue(host, log_queue_name, vhost,
                               username=username, password=password, port=port)
    payload = {
        'rid': tid,
        'level': level,
        'msg': msg,
    }
    channel.basic_publish(exchange='',
                          routing_key=log_queue_name,
                          body=json.dumps(payload),
                          properties=pika.BasicProperties(delivery_mode=1))
    channel.close()
    channel.connection.close()
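# Usage sketch (illustrative, not part of the original source): forward a single log
# record for a request/transaction id. The id "rid-42" and the numeric level 20
# (logging.INFO) are assumed values; log_to_queue() itself resolves broker host,
# credentials and the LOGS_QUEUE name from settings, publishes the JSON payload
# {'rid', 'level', 'msg'} shown above and closes the channel again.
#
#     log_to_queue("rid-42", 20, "execution finished")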
def _login_repository(self):
    try:
        login_user = load_setting("DOCKER_LOGIN_USER", False)
        login_pass = load_setting("DOCKER_LOGIN_PASS", False)
        login_email = load_setting("DOCKER_LOGIN_EMAIL", False)
        login_host = load_setting("DOCKER_LOGIN_HOST", False)
    except ImproperlyConfigured:
        logger.exception("Cannot log into the repository, DOCKER_LOGIN_* settings are incomplete")
        return
    # Assumption (not shown in this excerpt): the collected credentials are used to
    # authenticate against the configured registry via docker-py's login().
    self.api.login(username=login_user, password=login_pass,
                   email=login_email, registry=login_host)
def start(self, pid=None, **kwargs):
    self.pid = pid
    self._pre_start()

    python_path = sys.executable
    try:
        MODELSPY = os.path.join(settings.PROJECT_ROOT, "../../worker")

        default_env = self.get_default_env()
        env = {}
        env.update(default_env)
        env.update(os.environ.copy())
        env['EXECUTOR'] = "Spawn"
        env['TUMBO_CORE_SENDER_PASSWORD'] = load_setting(
            "TUMBO_CORE_SENDER_PASSWORD")
        env['TUMBO_WORKER_THREADCOUNT'] = str(
            load_setting("TUMBO_WORKER_THREADCOUNT"))
        env['TUMBO_PUBLISH_INTERVAL'] = str(
            load_setting("TUMBO_PUBLISH_INTERVAL"))
        env['RABBITMQ_HOST'] = str(load_setting("WORKER_RABBITMQ_HOST"))
        env['RABBITMQ_PORT'] = str(load_setting("WORKER_RABBITMQ_PORT"))

        python_paths = ""
        try:
            for p in os.environ['PYTHONPATH'].split(":"):
                logger.info(p)
                python_paths += ":" + os.path.abspath(p)
            python_paths += ":" + os.path.abspath(
                os.path.join(settings.PROJECT_ROOT, "../../tumbo"))
        except KeyError:
            pass
        env['PYTHONPATH'] = python_paths

        try:
            for var in settings.PROPAGATE_VARIABLES:
                if os.environ.get(var, None):
                    env[var] = os.environ[var]
        except AttributeError:
            pass

        logger.info(env['PYTHONPATH'])

        settings.SETTINGS_MODULE = "app_worker.settings"
        p = subprocess.Popen(
            "%s %s/manage.py start_worker --settings=%s --vhost=%s "
            "--base=%s --username=%s --password=%s" % (
                python_path, MODELSPY, settings.SETTINGS_MODULE,
                self.vhost, self.base_name, self.base_name, self.password),
            cwd=settings.PROJECT_ROOT,
            shell=True,
            stdin=None,
            stdout=None,
            stderr=None,
            preexec_fn=os.setsid,
            env=env)
        atexit.register(p.terminate)
        self.pid = p.pid
    except Exception, e:
        # Assumption: the original error handling is not part of this excerpt;
        # log the failure and re-raise so callers see that the worker did not start.
        logger.exception("Could not spawn worker process")
        raise
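# For reference (all values below are placeholders, not from the original source),
# the Popen call above ends up executing a command line of roughly this shape, with
# EXECUTOR, RABBITMQ_* and the TUMBO_* variables exported into its environment:
#
#     /usr/bin/python /path/to/worker/manage.py start_worker \
#         --settings=app_worker.settings --vhost=<vhost> \
#         --base=<base> --username=<base> --password=<password>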
def __init__(self, *args, **kwargs):
    """
    tls_config = docker.tls.TLSConfig(
        client_cert=('/path/to/client-cert.pem', '/path/to/client-key.pem'),
        ca_cert='/path/to/ca.pem'
    )
    client = docker.Client(base_url='<https_url>', tls=tls_config)
    """
    client_cert = load_var_to_file("DOCKER_CLIENT_CERT")
    client_key = load_var_to_file("DOCKER_CLIENT_KEY")

    ssl_version = "TLSv1"
    tls_config = TLSConfig(client_cert=(client_cert, client_key),
                           ssl_version=ssl_version,
                           verify=False,
                           assert_hostname=False)
    base_url = load_setting("DOCKER_TLS_URL")
    self.api = Client(base_url, tls=tls_config)

    self._login_repository()

    super(RemoteDockerExecutor, self).__init__(*args, **kwargs)
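# Sketch (assumption, not part of the original source): with docker-py's Client the
# TLS setup above can be smoke-tested right after construction; Client.version()
# performs a round trip to the daemon, so certificate or URL problems surface here
# instead of on the first container operation.
#
#     try:
#         logger.info("Docker daemon version: %s" % self.api.version())
#     except Exception, e:
#         logger.exception("Cannot reach Docker daemon at %s" % base_url)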
from django.db import DatabaseError, transaction

from core.executors.remote import distribute
from core.models import Base, Instance, Process, Thread, Apy, Setting
from core.communication import CommunicationThread
from core.plugins import call_plugin_func
from core import __VERSION__
from core.utils import load_setting
#from core.utils import profileit

import psutil

logger = logging.getLogger(__name__)

HEARTBEAT_VHOST = load_setting('CORE_VHOST')
HEARTBEAT_QUEUE = load_setting('HEARTBEAT_QUEUE')

CONFIGURATION_QUEUE = "configuration"
FOREIGN_CONFIGURATION_QUEUE = "fconfiguration"
SETTING_QUEUE = "setting"
PLUGIN_CONFIG_QUEUE = "pluginconfig"


def log_mem(**kwargs):
    name = kwargs['name']
    p = psutil.Process(os.getpid())
    from redis_metrics import set_metric
    import time
    try:
        while True:
            # Assumption: the body of this loop is not part of the excerpt; it
            # presumably publishes the process' memory usage as a metric on an
            # interval (metric name and sleep time below are placeholders).
            set_metric("mem-%s" % name, p.memory_info().rss)
            time.sleep(30)
    except Exception, e:
        # Assumption: the original handler is not shown; log and let the thread end.
        logger.exception(e)
def handle(self, *args, **options):
    threads = []
    threads_static = []

    base = options['base']
    vhost = options['vhost']
    username = options['username']
    password = options['password']

    logger.debug("vhost: %s" % vhost)

    host = getattr(settings, "RABBITMQ_HOST", "localhost")
    port = getattr(settings, "RABBITMQ_PORT", 5672)

    SENDER_PASSWORD = load_setting("TUMBO_CORE_SENDER_PASSWORD")

    logger.info("TUMBO_WORKER_THREADCOUNT: %s" %
                load_setting("TUMBO_WORKER_THREADCOUNT"))
    logger.info("TUMBO_PUBLISH_INTERVAL: %s" %
                load_setting("TUMBO_PUBLISH_INTERVAL"))

    for c in range(0, settings.TUMBO_WORKER_THREADCOUNT):
        # start threads
        from core.executors.remote import CONFIGURATION_QUEUE, RPC_QUEUE
        name = "ExecutorSrvThread-%s-%s" % (c, base)
        thread = ExecutorServerThread(name, host, port, vhost,
                                      queues_consume=[[RPC_QUEUE]],
                                      topic_receiver=[[CONFIGURATION_QUEUE]],
                                      username=username,
                                      password=password)
        threads.append(thread)
        thread.daemon = True
        thread.start()

    for c in range(0, settings.TUMBO_WORKER_THREADCOUNT):
        # start threads
        from core.executors.remote import STATIC_QUEUE
        name = "StaticServerThread-%s-%s" % (c, base)
        thread = StaticServerThread(name, host, port, vhost,
                                    queues_consume=[[STATIC_QUEUE]],
                                    topic_receiver=[],
                                    username=username,
                                    password=password)
        threads_static.append(thread)
        thread.daemon = True
        thread.start()
    logger.info('StaticServerThreads started')

    thread = HeartbeatThread("HeartbeatThread-%s" % c, host, port,
                             load_setting("CORE_VHOST"),
                             queues_produce=[[HEARTBEAT_QUEUE]],
                             username=load_setting("CORE_SENDER_USERNAME"),
                             password=SENDER_PASSWORD,
                             additional_payload={'vhost': vhost},
                             ttl=3000)
    thread.thread_list = threads
    logger.info('HeartbeatThreads started')
    threads.append(thread)
    thread.daemon = True
    thread.start()

    for t in threads:
        try:
            logger.info("%s Thread started" % settings.TUMBO_WORKER_THREADCOUNT)
            t.join(1000)
        except KeyboardInterrupt:
            print "Ctrl-c received."
            sys.exit(0)
def handle(self, *args, **options):
    heartbeat_threads = []
    async_threads = []
    log_threads = []

    print options
    if options['mode']:
        mode = options['mode']
        print "Starting in mode: %s" % mode
    else:
        mode = "all"
        print "Starting in default mode: %s" % mode

    core_threads = []
    if mode in ["cleanup", "all"]:
        # start cleanup thread
        inactivate_thread = threading.Thread(target=inactivate,
                                             name="InactivateThread")
        inactivate_thread.daemon = True
        inactivate_thread.start()
        core_threads.append(inactivate_thread)

        update_status_thread = threading.Thread(
            target=update_status,
            args=["InactiveThread", 1, [inactivate_thread]])
        update_status_thread.daemon = True
        update_status_thread.start()

    log_mem_thread = threading.Thread(
        target=log_mem, kwargs={'name': "Background-%s" % mode})
    log_mem_thread.daemon = True
    log_mem_thread.start()
    core_threads.append(log_mem_thread)

    host = load_setting("RABBITMQ_HOST")
    port = int(load_setting("RABBITMQ_PORT"))

    SENDER_PASSWORD = load_setting("TUMBO_CORE_SENDER_PASSWORD")
    RECEIVER_PASSWORD = load_setting("TUMBO_CORE_RECEIVER_PASSWORD")

    # create core vhost
    CORE_SENDER_USERNAME = load_setting("CORE_SENDER_USERNAME")
    CORE_RECEIVER_USERNAME = load_setting("CORE_RECEIVER_USERNAME")
    SENDER_PERMISSIONS = load_setting("SENDER_PERMISSIONS")
    RECEIVER_PERMISSIONS = load_setting("RECEIVER_PERMISSIONS")

    service = RabbitmqAdmin.factory("HTTP_API")
    core_vhost = load_setting("CORE_VHOST")
    service.add_vhost(core_vhost)
    service.add_user(CORE_SENDER_USERNAME, SENDER_PASSWORD)
    service.add_user(CORE_RECEIVER_USERNAME, RECEIVER_PASSWORD)
    service.set_perms(core_vhost, CORE_SENDER_USERNAME, SENDER_PERMISSIONS)
    service.set_perms(core_vhost, CORE_RECEIVER_USERNAME, RECEIVER_PERMISSIONS)

    if mode in ["heartbeat", "all"]:
        # heartbeat
        queues_consume = [[HEARTBEAT_QUEUE, True]]
        HEARTBEAT_THREAD_COUNT = settings.TUMBO_HEARTBEAT_LISTENER_THREADCOUNT
        for c in range(0, HEARTBEAT_THREAD_COUNT):
            name = "HeartbeatThread-%s" % c
            thread = HeartbeatThread(name, host, port, core_vhost,
                                     CORE_RECEIVER_USERNAME, RECEIVER_PASSWORD,
                                     queues_consume=queues_consume,
                                     ttl=3000)
            heartbeat_threads.append(thread)
            thread.daemon = True
            thread.start()

        update_status_thread = threading.Thread(
            target=update_status,
            args=["HeartbeatThread", HEARTBEAT_THREAD_COUNT, heartbeat_threads])
        update_status_thread.daemon = True
        update_status_thread.start()

    if mode in ["async", "all"]:
        # async response thread
        ASYNC_THREAD_COUNT = settings.TUMBO_ASYNC_LISTENER_THREADCOUNT
        async_queue_name = load_setting("ASYNC_RESPONSE_QUEUE")
        queues_consume_async = [[async_queue_name, True]]
        for c in range(0, ASYNC_THREAD_COUNT):
            name = "AsyncResponseThread-%s" % c
            thread = AsyncResponseThread(name, host, port, core_vhost,
                                         CORE_RECEIVER_USERNAME, RECEIVER_PASSWORD,
                                         queues_consume=queues_consume_async,
                                         ttl=3000)
            async_threads.append(thread)
            thread.daemon = True
            thread.start()

        async_status_thread = threading.Thread(
            target=update_status,
            args=["AsyncResponseThread", ASYNC_THREAD_COUNT, async_threads])
        async_status_thread.daemon = True
        async_status_thread.start()

    if mode in ["log", "all"]:
        # log receiver
        LOG_THREAD_COUNT = settings.TUMBO_LOG_LISTENER_THREADCOUNT
        log_queue_name = load_setting("LOGS_QUEUE")
        queues_consume_log = [[log_queue_name, True]]
        for c in range(0, LOG_THREAD_COUNT):
            name = "LogReceiverThread-%s" % c
            thread = LogReceiverThread(name, host, port, core_vhost,
                                       CORE_RECEIVER_USERNAME, RECEIVER_PASSWORD,
                                       queues_consume=queues_consume_log,
                                       ttl=10000)
            log_threads.append(thread)
            thread.daemon = True
            thread.start()

        log_status_thread = threading.Thread(
            target=update_status,
            args=["LogReceiverThread", LOG_THREAD_COUNT, log_threads])
        log_status_thread.daemon = True
        log_status_thread.start()

    if mode in ["scheduler", "all"]:
        # start scheduler
        from core import scheduler
        #scheduler.start_scheduler()
        scheduler_thread = threading.Thread(target=scheduler.start_scheduler)
        scheduler_thread.daemon = True
        scheduler_thread.start()

        update_status_thread = threading.Thread(
            target=update_status,
            args=["SchedulerThread", 1, [scheduler_thread]])
        update_status_thread.daemon = True
        update_status_thread.start()

    threads = core_threads + heartbeat_threads + async_threads + log_threads
    for t in threads:
        try:
            logger.info("Thread %s started" % t)
            t.join(1000)
        except KeyboardInterrupt:
            logger.info("KeyBoardInterrupt received")
            print "Ctrl-c received."
            sys.exit(0)
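# Invocation sketch (the management command name "heartbeat" is an assumption; only
# handle() is shown above): the listener groups can run together or be split across
# processes via --mode:
#
#     python manage.py heartbeat                  # default, mode "all"
#     python manage.py heartbeat --mode=log       # only the LogReceiverThreads
#     python manage.py heartbeat --mode=async     # only the AsyncResponseThreads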
        # (excerpt starts mid-statement; the preceding line ends with
        #  "... (self.vhost, RPC_QUEUE))")
        while self.response is None and not self.async:
            self.connection.process_data_events()
        return self.response

    def end(self):
        self.channel.close()
        self.connection.close()
        del self.channel
        del self.connection


# Caller side (the enclosing function is not part of this excerpt):
if not async:
    executor = ExecutorClient(vhost, username, password)
else:
    executor = ExecutorClient(vhost,
                              load_setting("CORE_RECEIVER_USERNAME"),
                              load_setting("TUMBO_CORE_RECEIVER_PASSWORD"),
                              async=async)
try:
    response = executor.call(apy)
except Exception, e:
    logger.warn(e)
    response = json.dumps({
        u'status': u'TIMEOUT',
        u'exception': None,
        u'returned': None,
        'id': u'cannot_import'
    })
finally:
    executor.end()
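# Sketch (assumption, not part of the original source): the fallback above shows the
# response shape, a JSON string with 'status', 'exception', 'returned' and 'id'.
# A caller could unpack it like this:
#
#     data = json.loads(response)
#     if data['status'] == u'TIMEOUT':
#         logger.warn("Apy %s did not return in time" % data['id'])
#     else:
#         result = data['returned']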