def _celery_client(ctx, agent):
    """Yield a Celery client configured for the given (old) agent's broker.

    Generator context manager: builds a Celery app pointed at the broker
    the old agent is connected to, yields it, and always cleans up the
    temporary certificate file on exit.

    :param ctx: workflow/operation context providing logger and
        bootstrap_context
    :param agent: agent runtime properties dict; may contain
        'broker_config' and must contain 'version'
    """
    # We retrieve broker url from old agent in order to support
    # cases when old agent is not connected to current rabbit server.
    if 'broker_config' in agent:
        broker_config = agent['broker_config']
    else:
        # Fall back to the current manager's bootstrap broker config.
        broker_config = ctx.bootstrap_context.broker_config()
    broker_url = utils.internal.get_broker_url(broker_config)
    ctx.logger.info('Connecting to {0}'.format(broker_url))
    celery_client = celery.Celery()
    # We can't pass broker_url to Celery constructor because it would
    # be overriden by the value from broker_config.py.
    config = {'BROKER_URL': broker_url,
              'CELERY_RESULT_BACKEND': broker_url}
    # NOTE(review): 3.2 agents presumably don't support a result-expiry
    # setting, hence the version-gated config key — confirm against the
    # 3.2 agent package.
    if ManagerVersion(agent['version']) != ManagerVersion('3.2'):
        config['CELERY_TASK_RESULT_EXPIRES'] = \
            defaults.CELERY_TASK_RESULT_EXPIRES
    # Create the cert file up front (even when SSL is off) so the
    # finally clause can unconditionally remove it.
    fd, cert_path = tempfile.mkstemp()
    os.close(fd)
    try:
        if broker_config.get('broker_ssl_enabled'):
            # Write the broker CA cert to disk; celery/kombu needs a path.
            with open(cert_path, 'w') as cert_file:
                cert_file.write(broker_config.get('broker_ssl_cert', ''))
            broker_ssl = {
                'ca_certs': cert_path,
                'cert_reqs': ssl.CERT_REQUIRED
            }
        else:
            broker_ssl = False
        config['BROKER_USE_SSL'] = broker_ssl
        celery_client.conf.update(**config)
        yield celery_client
    finally:
        os.remove(cert_path)
def restore(snapshot_id, recreate_deployments_envs, config, force, timeout,
            **kwargs):
    """Restore the manager from a previously created snapshot.

    :param snapshot_id: id of the snapshot; the archive is expected at
        <file_server_root>/<snapshots_folder>/<snapshot_id>/<snapshot_id>.zip
    :param recreate_deployments_envs: if True, recreate the deployment
        environments that existed before the restore
    :param config: plain dict of manager configuration, wrapped for
        attribute-style access
    :param force: passed as log_warning to the elasticsearch cleanliness
        check — presumably downgrades the failure to a warning; confirm
        against _assert_clean_elasticsearch
    :param timeout: forwarded to _restore_snapshot
    :raises NonRecoverableError: if the snapshot was created by a newer
        manager version than this one
    """
    ctx.logger.info('Restoring snapshot {0}'.format(snapshot_id))
    config = _DictToAttributes(config)
    _assert_clean_elasticsearch(log_warning=force)
    # Extract the snapshot into a temp dir that is always removed below.
    tempdir = tempfile.mkdtemp('-snapshot-data')
    try:
        file_server_root = config.file_server_root
        snapshots_dir = os.path.join(
            file_server_root,
            config.file_server_snapshots_folder
        )
        snapshot_path = os.path.join(snapshots_dir, snapshot_id, '{0}.zip'
                                     .format(snapshot_id))
        with zipfile.ZipFile(snapshot_path, 'r') as zipf:
            zipf.extractall(tempdir)
        with open(os.path.join(tempdir, _METADATA_FILE), 'r') as f:
            metadata = json.load(f)
        client = get_rest_client()
        manager_version = _get_manager_version(client)
        from_version = ManagerVersion(metadata[_M_VERSION])
        ctx.logger.info('Manager version = {0}, snapshot version = {1}'.format(
            str(manager_version), str(from_version)))
        # Restoring a snapshot from a newer manager is unsupported.
        if from_version.greater_than(manager_version):
            raise NonRecoverableError(
                'Cannot restore a newer manager\'s snapshot on this manager '
                '[{0} > {1}]'.format(str(from_version), str(manager_version)))
        # Capture pre-restore deployment ids so their environments can be
        # recreated after the data is restored.
        existing_deployments_ids = [d.id for d in client.deployments.list()]
        ctx.logger.info('Starting restoring snapshot of manager {0}'
                        .format(from_version))
        new_plugins = _restore_snapshot(config, tempdir, metadata, timeout)
        install_plugins(new_plugins)
        if recreate_deployments_envs:
            recreate_deployments_environments(existing_deployments_ids)
        ctx.logger.info('Successfully restored snapshot of manager {0}'
                        .format(from_version))
    finally:
        shutil.rmtree(tempdir)
def _get_init_script_path_and_url(new_agent, old_agent_version):
    """Resolve the local path and download URL of the agent init script.

    For agents older than 4.2 the URL is rewritten to the manager's plain
    http endpoint, since those agents cannot be given a certificate.
    """
    path, url = init_script_download_link(new_agent)
    # Prior to 4.2 (and script plugin 1.5.1) there was no way to pass
    # a certificate to the script plugin, so the initial script must be
    # passed over http
    is_pre_4_2 = ManagerVersion(old_agent_version) < ManagerVersion('4.2')
    if is_pre_4_2:
        # This is the relative path on the manager, except the host and port
        relative_part = url.split('/', 3)[3]
        url = urljoin(_http_rest_host(new_agent), relative_part)
    return path, url
def restore(snapshot_id, recreate_deployments_envs, config, force, **kwargs):
    """Restore the manager from a previously created snapshot.

    :param snapshot_id: id of the snapshot; the archive is expected at
        <file_server_root>/<snapshots_folder>/<snapshot_id>/<snapshot_id>.zip
    :param recreate_deployments_envs: if True, recreate the deployment
        environments that existed before the restore
    :param config: plain dict of manager configuration, wrapped for
        attribute-style access
    :param force: passed as log_warning to the elasticsearch cleanliness
        check — presumably downgrades the failure to a warning; confirm
        against _assert_clean_elasticsearch
    :raises NonRecoverableError: if the snapshot was created by a newer
        manager version than this one
    """
    ctx.logger.info('Restoring snapshot {0}'.format(snapshot_id))
    config = _DictToAttributes(config)
    _assert_clean_elasticsearch(log_warning=force)
    # Extract the snapshot into a temp dir that is always removed below.
    tempdir = tempfile.mkdtemp('-snapshot-data')
    try:
        file_server_root = config.file_server_root
        snapshots_dir = os.path.join(file_server_root,
                                     config.file_server_snapshots_folder)
        snapshot_path = os.path.join(snapshots_dir, snapshot_id,
                                     '{0}.zip'.format(snapshot_id))
        with zipfile.ZipFile(snapshot_path, 'r') as zipf:
            zipf.extractall(tempdir)
        with open(os.path.join(tempdir, _METADATA_FILE), 'r') as f:
            metadata = json.load(f)
        client = get_rest_client()
        manager_version = _get_manager_version(client)
        from_version = ManagerVersion(metadata[_M_VERSION])
        ctx.logger.info('Manager version = {0}, snapshot version = {1}'.format(
            str(manager_version), str(from_version)))
        # Restoring a snapshot from a newer manager is unsupported.
        if from_version.greater_than(manager_version):
            raise NonRecoverableError(
                'Cannot restore a newer manager\'s snapshot on this manager '
                '[{0} > {1}]'.format(str(from_version), str(manager_version)))
        # Capture pre-restore deployment ids so their environments can be
        # recreated after the data is restored.
        existing_deployments_ids = [d.id for d in client.deployments.list()]
        ctx.logger.info(
            'Starting restoring snapshot of manager {0}'.format(from_version))
        new_plugins = _restore_snapshot(config, tempdir, metadata)
        install_plugins(new_plugins)
        if recreate_deployments_envs:
            recreate_deployments_environments(existing_deployments_ids)
        ctx.logger.info('Successfully restored snapshot of manager {0}'.format(
            from_version))
    finally:
        shutil.rmtree(tempdir)
def restore(self):
    """Run the full snapshot-restore flow for this manager.

    Extracts the snapshot archive, validates version compatibility, then
    restores the database, files, plugins, influxdb data, credentials,
    agents, AMQP vhosts/users and deployment environments. The temp
    extraction dir is always removed, even on failure.
    """
    self._tempdir = tempfile.mkdtemp('-snapshot-data')
    snapshot_path = self._get_snapshot_path()
    ctx.logger.debug('Going to restore snapshot, '
                     'snapshot_path: {0}'.format(snapshot_path))
    try:
        metadata = self._extract_snapshot_archive(snapshot_path)
        self._snapshot_version = ManagerVersion(metadata[M_VERSION])
        # Snapshots predating the schema-revision key are assumed to be
        # at the 4.0 schema.
        schema_revision = metadata.get(
            M_SCHEMA_REVISION,
            self.SCHEMA_REVISION_4_0,
        )
        stage_revision = metadata.get(
            M_STAGE_SCHEMA_REVISION,
            None,
        )
        self._validate_snapshot()
        # Record plugins already installed so the restore can skip or
        # reconcile them.
        existing_plugins = self._get_existing_plugin_names()
        with Postgres(self._config) as postgres:
            self._restore_db(postgres, schema_revision, stage_revision)
            self._restore_files_to_manager()
            self._restore_plugins(existing_plugins)
            self._restore_influxdb()
            self._restore_credentials(postgres)
            self._restore_agents()
            self._restore_amqp_vhosts_and_users()
            self._restore_deployment_envs()
        if self._restore_certificates:
            self._restore_certificate()
    finally:
        ctx.logger.debug('Removing temp dir: {0}'.format(self._tempdir))
        shutil.rmtree(self._tempdir)
def _celery_app(agent):
    """Yield a Celery app configured for the given (old) agent's broker.

    Generator context manager: resolves the broker the old agent talks
    to, yields a Celery app connected to it, and removes the temporary
    SSL cert file (if one was written) on exit.

    :param agent: agent runtime properties dict; may contain
        'broker_config' and 'version'
    """
    # We retrieve broker url from old agent in order to support
    # cases when old agent is not connected to current rabbit server.
    broker_config = agent.get('broker_config',
                              ctx.bootstrap_context.broker_config())
    # Agents that never recorded a version are assumed to match the
    # current manager's version.
    agent_version = agent.get('version') or str(_get_manager_version())
    broker_url = utils.internal.get_broker_url(broker_config)
    ssl_cert_path = _get_ssl_cert_path(broker_config)
    celery_client = get_celery_app(
        broker_url=broker_url,
        broker_ssl_enabled=broker_config.get('broker_ssl_enabled'),
        broker_ssl_cert_path=ssl_cert_path)
    # NOTE(review): 3.2 agents presumably don't support a result-expiry
    # setting, hence the version-gated config key — confirm against the
    # 3.2 agent package.
    if ManagerVersion(agent_version) != ManagerVersion('3.2'):
        celery_client.conf['CELERY_TASK_RESULT_EXPIRES'] = \
            CELERY_TASK_RESULT_EXPIRES
    try:
        yield celery_client
    finally:
        if ssl_cert_path:
            os.remove(ssl_cert_path)
def restore(self):
    """Run the full snapshot-restore flow for this manager.

    Marks the manager as restoring, extracts and validates the snapshot,
    restores files and the database (with services paused and DB client
    cert access temporarily allowed), re-encrypts secrets, regenerates
    the REST token, then restores plugins, credentials, agents, AMQP
    vhosts/users, deployment environments and scheduled executions.
    Post-restore commands run and the temp dir is removed even on
    failure.
    """
    self._mark_manager_restoring()
    self._tempdir = tempfile.mkdtemp('-snapshot-data')
    snapshot_path = self._get_snapshot_path()
    ctx.logger.debug('Going to restore snapshot, '
                     'snapshot_path: {0}'.format(snapshot_path))
    try:
        metadata = self._extract_snapshot_archive(snapshot_path)
        self._snapshot_version = ManagerVersion(metadata[M_VERSION])
        # Snapshots predating the schema-revision key are assumed to be
        # at the 4.0 schema.
        schema_revision = metadata.get(
            M_SCHEMA_REVISION,
            self.SCHEMA_REVISION_4_0,
        )
        stage_revision = metadata.get(M_STAGE_SCHEMA_REVISION) or ''
        if stage_revision and self._premium_enabled:
            # Keep only the text after the last newline — presumably the
            # last recorded stage migration; confirm metadata format.
            stage_revision = re.sub(r".*\n", '', stage_revision)
        self._validate_snapshot()
        existing_plugins = self._get_existing_plugin_names()
        with Postgres(self._config) as postgres:
            # Temporarily allow DB client-cert access while restoring
            # files, then revoke it again.
            utils.sudo(ALLOW_DB_CLIENT_CERTS_SCRIPT)
            self._restore_files_to_manager()
            utils.sudo(DENY_DB_CLIENT_CERTS_SCRIPT)
            with self._pause_services():
                self._restore_db(postgres, schema_revision, stage_revision)
                self._restore_hash_salt()
                self._encrypt_secrets(postgres)
                self._encrypt_rabbitmq_passwords(postgres)
                self._possibly_update_encryption_key()
            self._generate_new_rest_token()
            self._restart_rest_service()
            self._restore_plugins(existing_plugins)
            self._restore_credentials(postgres)
            self._restore_agents()
            self._restore_amqp_vhosts_and_users()
            self._restore_deployment_envs(postgres)
            self._restore_scheduled_executions()
            self._restore_inter_deployment_dependencies()
            if self._premium_enabled:
                self._reconfigure_status_reporter(postgres)
        if self._restore_certificates:
            self._restore_certificate()
    finally:
        self._trigger_post_restore_commands()
        ctx.logger.debug('Removing temp dir: {0}'.format(self._tempdir))
        shutil.rmtree(self._tempdir)
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. from cloudify.utils import ManagerVersion METADATA_FILENAME = 'metadata.json' M_VERSION = 'snapshot_version' M_SCHEMA_REVISION = 'schema_revision' M_STAGE_SCHEMA_REVISION = 'stage_schema_revision' M_COMPOSER_SCHEMA_REVISION = 'composer_schema_revision' M_HAS_CLOUDIFY_EVENTS = 'has_cloudify_events' ARCHIVE_CERT_DIR = 'ssl' BROKER_DEFAULT_VHOST = '/' DEFAULT_TENANT_NAME = 'default_tenant' SECRET_STORE_AGENT_KEY_PREFIX = 'cfyagent_key__' STAGE_BASE_FOLDER = '/opt/cloudify-stage' STAGE_CONFIG_FOLDER = 'conf' STAGE_WIDGETS_FOLDER = 'dist/widgets' STAGE_TEMPLATES_FOLDER = 'dist/templates' STAGE_USER = '******' # created during bootstrap STAGE_RESTORE_SCRIPT = '/opt/cloudify/stage/restore-snapshot.py' MANAGER_PYTHON = '/opt/manager/env/bin/python' COMPOSER_BASE_FOLDER = '/opt/cloudify-composer' COMPOSER_CONFIG_FOLDER = 'backend/conf' COMPOSER_BLUEPRINTS_FOLDER = 'backend/dev' V_4_0_0 = ManagerVersion('4.0.0') V_4_1_0 = ManagerVersion('4.1.0')
# created during bootstrap STAGE_RESTORE_SCRIPT = '/opt/cloudify/stage/restore-snapshot.py' MANAGER_PYTHON = '/opt/manager/env/bin/python' ADMIN_TOKEN_SCRIPT = '/opt/cloudify/mgmtworker/create-admin-token.py' ALLOW_DB_CLIENT_CERTS_SCRIPT = ( '/opt/cloudify/mgmtworker/allow-snapshot-ssl-client-cert-access') DENY_DB_CLIENT_CERTS_SCRIPT = ( '/opt/cloudify/mgmtworker/deny-snapshot-ssl-client-cert-access') COMPOSER_BASE_FOLDER = '/opt/cloudify-composer' COMPOSER_CONFIG_FOLDER = 'backend/conf' COMPOSER_BLUEPRINTS_FOLDER = 'backend/dev' COMPOSER_USER = '******' SECURITY_FILENAME = 'rest-security.conf' SECURITY_FILE_LOCATION = join('/opt/manager/', SECURITY_FILENAME) V_4_0_0 = ManagerVersion('4.0.0') V_4_1_0 = ManagerVersion('4.1.0') V_4_2_0 = ManagerVersion('4.2.0') V_4_3_0 = ManagerVersion('4.3.0') V_4_4_0 = ManagerVersion('4.4.0') V_4_5_5 = ManagerVersion('4.5.5') V_4_6_0 = ManagerVersion('4.6.0') V_5_0_5 = ManagerVersion('5.0.5') class VisibilityState(object): PRIVATE = 'private' TENANT = 'tenant' GLOBAL = 'global' STATES = [PRIVATE, TENANT, GLOBAL]
def get_manager_version(client):
    """Return the manager's version, wrapped in a ManagerVersion object."""
    version_data = client.manager.get_version()
    return ManagerVersion(version_data['version'])
def _get_manager_version(client=None):
    """Return the manager's version as a ManagerVersion object.

    :param client: optional REST client; a fresh one is created when
        not provided.
    """
    rest_client = get_rest_client() if client is None else client
    return ManagerVersion(rest_client.manager.get_version()['version'])
# PKCS#12 bundle with the manager's internal certificate.
INTERNAL_P12_FILENAME = 'cloudify_internal.p12'

BROKER_DEFAULT_VHOST = '/'
DEFAULT_TENANT_NAME = 'default_tenant'
SECRET_STORE_AGENT_KEY_PREFIX = 'cfyagent_key__'

# Cloudify Stage (UI) install layout on the manager.
STAGE_BASE_FOLDER = '/opt/cloudify-stage'
STAGE_CONFIG_FOLDER = 'conf'
STAGE_WIDGETS_FOLDER = 'dist/widgets'
STAGE_TEMPLATES_FOLDER = 'dist/templates'
STAGE_USERDATA_FOLDER = 'dist/userData'
STAGE_USER = '******'  # created during bootstrap
STAGE_RESTORE_SCRIPT = '/opt/cloudify/stage/restore-snapshot.py'
STAGE_TOKEN_SCRIPT = '/opt/cloudify/stage/make-auth-token.py'

MANAGER_PYTHON = '/opt/manager/env/bin/python'

# Cloudify Composer install layout on the manager.
COMPOSER_BASE_FOLDER = '/opt/cloudify-composer'
COMPOSER_CONFIG_FOLDER = 'backend/conf'
COMPOSER_BLUEPRINTS_FOLDER = 'backend/dev'

# Version milestones used in snapshot compatibility checks.
V_4_0_0 = ManagerVersion('4.0.0')
V_4_1_0 = ManagerVersion('4.1.0')
V_4_2_0 = ManagerVersion('4.2.0')
V_4_3_0 = ManagerVersion('4.3.0')


class VisibilityState(object):
    """Visibility levels a resource can have on the manager."""
    PRIVATE = 'private'
    TENANT = 'tenant'
    GLOBAL = 'global'
    # All valid visibility values, narrowest to widest.
    STATES = [PRIVATE, TENANT, GLOBAL]
def _get_manager_version():
    """Return the current manager's version as a ManagerVersion object."""
    client = cloudify.manager.get_rest_client()
    return ManagerVersion(client.manager.get_version()['version'])
def _celery_task_name(version): if not version or ManagerVersion(version) > ManagerVersion('3.3.1'): return 'cloudify.dispatch.dispatch' else: return 'script_runner.tasks.run'
def _get_ssl_cert_content(old_agent_version):
    """Return the local REST cert contents for agents that can use it.

    Agents older than 4.2 cannot be handed a certificate, so None is
    returned for them.
    """
    if ManagerVersion(old_agent_version) < ManagerVersion('4.2'):
        return None
    cert_path = get_local_rest_certificate()
    with open(cert_path, 'r') as cert_file:
        return cert_file.read()
def _version_at_least(version_a, version_b):
    """Return True when version_a >= version_b.

    :param version_a: a ManagerVersion instance
    :param version_b: a version string to compare against
    """
    # Build the comparison target once instead of per-check.
    target = ManagerVersion(version_b)
    return version_a.equals(target) or version_a.greater_than(target)
def _uses_cloudify_amqp(agent): version = agent.get('version') return version and ManagerVersion(version) >= ManagerVersion('4.4')
def restore(self):
    """Run the full snapshot-restore flow for this manager.

    Marks the manager as restoring, extracts and validates the snapshot,
    restores files and the database (with services paused and DB client
    cert access temporarily allowed), re-encrypts secrets, regenerates
    the REST token, restarts services, then restores credentials, AMQP
    vhosts/users, agents, deployment environments, scheduled executions
    and assorted post-migration fixups. Post-restore commands run and
    the temp dir is removed even on failure.
    """
    self._mark_manager_restoring()
    self._tempdir = tempfile.mkdtemp('-snapshot-data')
    snapshot_path = self._get_snapshot_path()
    ctx.logger.debug('Going to restore snapshot, '
                     'snapshot_path: {0}'.format(snapshot_path))
    try:
        metadata = self._extract_snapshot_archive(snapshot_path)
        self._snapshot_version = ManagerVersion(metadata[M_VERSION])
        # Snapshots predating the schema-revision key are assumed to be
        # at the 4.0 schema.
        schema_revision = metadata.get(
            M_SCHEMA_REVISION,
            self.SCHEMA_REVISION_4_0,
        )
        stage_revision = metadata.get(M_STAGE_SCHEMA_REVISION) or ''
        if stage_revision and self._premium_enabled:
            # Keep only the text after the last newline — presumably the
            # last recorded stage migration; confirm metadata format.
            stage_revision = re.sub(r".*\n", '', stage_revision)
        composer_revision = metadata.get(M_COMPOSER_SCHEMA_REVISION) or ''
        if composer_revision == '20170601133017-4_1-init.js':
            # Old composer metadata always incorrectly put the first
            # migration not the last one. As we don't support anything
            # earlier than the last migration before 5.3, this will always
            # be the right answer
            composer_revision = '20171229105614-4_3-blueprint-repo.js'
        if composer_revision and self._premium_enabled:
            composer_revision = re.sub(r".*\n", '', composer_revision)
        self._validate_snapshot()
        with Postgres(self._config) as postgres:
            # Temporarily allow DB client-cert access while restoring
            # files, then revoke it again.
            utils.sudo(ALLOW_DB_CLIENT_CERTS_SCRIPT)
            self._restore_files_to_manager()
            utils.sudo(DENY_DB_CLIENT_CERTS_SCRIPT)
            self._service_management = \
                json.loads(postgres.get_service_management())
            with self._pause_services():
                self._restore_db(postgres, schema_revision, stage_revision,
                                 composer_revision)
                self._restore_hash_salt()
                self._encrypt_secrets(postgres)
                self._encrypt_rabbitmq_passwords(postgres)
                self._possibly_update_encryption_key()
            self._generate_new_rest_token()
            self._restart_rest_service()
            self._restart_stage_service()
            self._restore_credentials(postgres)
            self._restore_amqp_vhosts_and_users()
            self._restore_agents()
            self._restore_deployment_envs()
            self._restore_scheduled_executions()
            self._restore_inter_deployment_dependencies()
            self._update_roles_and_permissions()
            self._update_deployment_statuses()
            self._update_node_instance_indices()
            self._set_default_user_profile_flags()
        if self._restore_certificates:
            self._restore_certificate()
    finally:
        self._trigger_post_restore_commands()
        ctx.logger.debug('Removing temp dir: {0}'.format(self._tempdir))
        shutil.rmtree(self._tempdir)