def start(self):
    """Start the executor for this base, creating the executor first if
    one does not exist yet.

    Returns:
        The result of ``executor.start()`` when a start was performed,
        or ``None`` when the executor was already running.
    """
    try:
        # Accessing the reverse relation raises DoesNotExist when no
        # executor row exists for this base yet.
        self.executor
    except Executor.DoesNotExist:
        # Lazy logging args instead of eager %-formatting: the string is
        # only built if DEBUG logging is enabled.
        logger.debug("create executor for base %s", self)
        executor = Executor(base=self)
        executor.save()
    if not self.executor.is_running():
        r = self.executor.start()
        # Give every plugin a chance to react to the base starting.
        logger.info("on_start_base starting...")
        call_plugin_func(self, "on_start_base")
        logger.info("on_start_base done...")
        return r
    return None
def test_config(self):
    """Check that every plugin's get_persistent_config returns a dict
    after a PluginUserConfig has been stored for the base.
    """
    obj = self.base1
    config = {'password': "******"}
    a = PluginUserConfig(plugin_name="DataStorePlugin", base=obj,
                         config=config)
    a.save()
    success, failed = call_plugin_func(obj, "get_persistent_config")
    # .items() instead of the Python2-only .iteritems() so the test also
    # runs under Python 3; isinstance() is the idiomatic type check.
    for k, v in success.items():
        assert isinstance(v, dict), "type is: %s (%s)" % (type(v), str(v))
def _pre_start(self):
    """Run each plugin's ``on_start_base`` hook before the executor starts.

    Failures are logged as warnings only; a failing plugin does not
    prevent the start (best-effort semantics, preserved from original).
    """
    success, failed = call_plugin_func(self.executor.base, "on_start_base")
    # A non-empty dict is truthy — no need for len(failed.keys()) > 0.
    if failed:
        # Lazy %-args: message only rendered if WARNING is enabled.
        logger.warning("Problem with on_start_base for plugin (%s)",
                       str(failed))
distribute(FOREIGN_CONFIGURATION_QUEUE, serializers.serialize("json", [instance,]), vhost, base_obj.name, base_obj.executor.password ) for instance in Setting.objects.filter(base__name=base): distribute(SETTING_QUEUE, json.dumps({ instance.key: instance.value }), vhost, instance.base.name, instance.base.executor.password ) # Plugin config success, failed = call_plugin_func(base_obj, "config_for_workers") logger.info("Plugin to sync - success: "+str(success)) logger.info("Plugin to sync - failed: "+str(failed)) for plugin, config in success.items(): logger.info("Send '%s' config '%s' to %s" % (plugin, config, base_obj.name)) distribute(PLUGIN_CONFIG_QUEUE, json.dumps({plugin: config}), vhost, base_obj.name, base_obj.executor.password ) if data.has_key('ready_for_init') and data['ready_for_init']: ## execute init exec try: init = base_obj.apys.get(name='init')
def destroy(self):
    """Tear down this base.

    Notifies all plugins via the ``on_destroy_base`` hook, then destroys
    the backing executor and returns the executor's result.
    """
    # Plugins clean up their per-base state before the executor goes away.
    call_plugin_func(self, "on_destroy_base")
    result = self.executor.destroy()
    return result
def start(self, id, *args, **kwargs):
    """Start the Docker container backing this executor, creating it
    first when it does not exist.

    :param id: Docker container id to start; when no such container
               exists a new one is created and its id is used instead.
    :param kwargs: optional ``service_ports`` — list of ports to expose
                   on the container and bind 1:1 on the host.
    :return: the id of the started container.
    """
    self._pre_start()
    # "key in kwargs" / .get(default) replaces the Python2-only has_key().
    self.service_ports = kwargs.get('service_ports', [])
    # Bind every exposed service port to the same host port.
    self.port_bindings = {port: port for port in self.service_ports}
    if not self._container_exists(id):
        logger.info("Create container for %s" % self.vhost)
        import docker
        # Base environment from the executor defaults, then overlay the
        # worker/queue settings for this container.
        env = {}
        env.update(self.get_default_env())
        env.update({
            'RABBITMQ_HOST': settings.WORKER_RABBITMQ_HOST,
            'RABBITMQ_PORT': settings.WORKER_RABBITMQ_PORT,
            'TUMBO_WORKER_THREADCOUNT': settings.TUMBO_WORKER_THREADCOUNT,
            'TUMBO_PUBLISH_INTERVAL': settings.TUMBO_PUBLISH_INTERVAL,
            'TUMBO_CORE_SENDER_PASSWORD': settings.TUMBO_CORE_SENDER_PASSWORD,
            'EXECUTOR': "docker",
            'SERVICE_PORT': self.executor.port,
            'SERVICE_IP': self.executor.ip
        })
        # Propagate whitelisted host environment variables; the setting
        # itself is optional (AttributeError when undefined).
        try:
            for var in settings.PROPAGATE_VARIABLES:
                if os.environ.get(var, None):
                    env[var] = os.environ[var]
        except AttributeError:
            pass
        if self.executor.ip6:
            env['SERVICE_IP6'] = self.executor.ip6
        if "PROFILE_DO_FUNC" in os.environ:
            env['PROFILE_DO_FUNC'] = True
        # Let plugins contribute additional environment variables.
        success, failed = call_plugin_func(self.executor, "executor_context")
        # Truthiness check instead of len(failed.keys()) > 0.
        if failed:
            logger.warning(
                "Problem with executor_context for plugin (%s)" % str(failed))
        for plugin, context in success.items():
            logger.info("Set context for plugin %s" % plugin)
            env.update(context)
        container = self.api.create_container(
            image=DOCKER_IMAGE,
            name=self.name,
            detach=True,
            ports=self.service_ports,
            mem_limit=MEM_LIMIT,
            #cpu_shares=CPU_SHARES,
            environment=env,
            host_config=docker.utils.create_host_config(
                port_bindings=self.port_bindings),
            entrypoint=self._start_command)
    else:
        container = self._get_container(id)
    # Use the id of whichever container we ended up with (newly created
    # or pre-existing).
    id = container.get('Id')
    logger.info("Start container (%s)" % id)
    self.api.start(container=id)
    return id