def save_new_settings(self, settings, image):
    """
    Build and tag a sentinel image whose labels carry the current
    compose settings (and, when available, info about current images).
    """
    self.rotate_sentinel_images()

    label_pairs = [(self.compose_label, settings)]
    # best effort: images info is optional and host errors are ignored
    with contextlib.suppress(host_errors):
        info = self.get_images_info()
        if info:
            label_pairs.append((self.images_info_label, info))

    label_clause = ' '.join('{0}={1}'.format(*pair) for pair in label_pairs)
    dockerfile = 'FROM {image}\nLABEL {labels}\n'.format(
        image=image or 'scratch',
        labels=label_clause,
    )

    command = 'echo {dockerfile} | docker build --tag {tag} -'.format(
        dockerfile=shlex_quote(dockerfile),
        tag=self.current_settings_tag,
    )
    try:
        fabricio.run(command)
    except host_errors as error:
        fabricio.log(
            'WARNING: {error}'.format(error=error),
            output=sys.stderr,
            color=colors.red,
        )
def save_new_settings(self, configuration, image):
    """
    Build and tag a sentinel image whose labels carry the current
    configuration and, when obtainable, the image digests bucket.
    """
    self.rotate_sentinel_images()

    labels = [(self.configuration_label, b64encode(configuration).decode())]
    # digests are best effort: skip the label on any host error
    try:
        digests = self._get_digests(self.images)
        bucket = json.dumps(digests, sort_keys=True)
        labels.append((self.digests_label, b64encode(bucket.encode()).decode()))
    except fabricio.host_errors:
        pass

    dockerfile = 'FROM {image}\nLABEL {labels}\n'.format(
        image=image or 'scratch',
        labels=' '.join('{0}={1}'.format(*label) for label in labels),
    )
    command = 'echo {dockerfile} | docker build --tag {tag} -'.format(
        dockerfile=shlex_quote(dockerfile),
        tag=self.current_settings_tag,
    )
    try:
        fabricio.run(command)
    except fabricio.host_errors as error:
        fabricio.log(
            'WARNING: {error}'.format(error=error),
            output=sys.stderr,
            color=colors.red,
        )
def update(self, tag=None, force=False):
    """
    start new Docker container if necessary
    """
    # force arrives as a string from the fab command line
    if not self.container.update(tag=tag, force=strtobool(force)):
        fabricio.log('No changes detected, update skipped.')
def update(self, force=False, tag=None, registry=None):
    """
    Update the PostgreSQL container.

    Creates a fresh database when no data directory exists yet, pushes
    local postgresql.conf / pg_hba.conf to the remote data dir, then
    delegates to the parent update. A changed main config forces a
    container restart; a changed pg_hba.conf alone is applied with a
    HUP signal (config reload) instead.

    :param force: force container update even without changes
    :param tag: optional image tag
    :param registry: optional registry
    :return: True if the container was updated, False otherwise
    """
    if not files.exists(
        os.path.join(self.data, 'PG_VERSION'),
        use_sudo=True,
    ):
        fabricio.log('PostgreSQL database not found, creating new...')
        self.run(tag=tag, registry=registry)
        time.sleep(10)  # wait until all data prepared during first start
    # fix: use context managers so the config file handles are closed
    # (bare open(...).read() leaked the descriptors)
    with open(self.postgresql_conf) as postgresql_conf:
        main_config_changed = self.update_config(
            content=postgresql_conf.read(),
            path=os.path.join(self.data, 'postgresql.conf'),
        )
    with open(self.pg_hba_conf) as pg_hba_conf:
        hba_config_changed = self.update_config(
            content=pg_hba_conf.read(),
            path=os.path.join(self.data, 'pg_hba.conf'),
        )
    force = force or main_config_changed
    updated = super(PostgresqlContainer, self).update(
        force=force,
        tag=tag,
        registry=registry,
    )
    if not updated and hba_config_changed:
        self.signal('HUP')  # reload configs
    return updated
def set_master_info(self):
    """
    Record the current host as the cluster master in the shared
    multiprocessing data, aborting first if any parallel task failed.
    """
    if self.multiprocessing_data.exception is not None:
        # fix: grammatical error in abort message ("due an" -> "due to an")
        fab.abort('Task aborted due to an exception: {exception}'.format(
            exception=self.multiprocessing_data.exception,
        ))
    fabricio.log('Found master: {host}'.format(host=fab.env.host))
    self.multiprocessing_data.master = fab.env.host
def set_master_info(self):
    """
    Record the current host as the cluster master in the shared
    multiprocessing data, aborting first if any parallel task failed.
    """
    if self.multiprocessing_data.exception is not None:
        # fix: grammatical error in abort message ("due an" -> "due to an")
        fab.abort('Task aborted due to an exception: {exception}'.format(
            exception=self.multiprocessing_data.exception,
        ))
    fabricio.log('Found master: {host}'.format(host=fab.env.host))
    self.multiprocessing_data.master = fab.env.host
def update(self, force=False, tag=None, registry=None):
    # Replace the running container with a fresh one from the (possibly
    # updated) image; returns False when no update was needed, True after
    # a successful swap. Statement order matters: obsolete backup cleanup
    # must happen before the current container is renamed into its place.
    if not force:
        try:
            current_image_id = self.image.id
        except RuntimeError:  # current container not found
            pass
        else:
            new_image = self.__class__.image[registry:tag]
            if current_image_id == new_image.id:
                # same image id => nothing to deploy
                fabricio.log('No change detected, update skipped.')
                return False
    new_container = self.fork(name=self.name)
    obsolete_container = self.get_backup_container()
    try:
        obsolete_image = obsolete_container.image
    except RuntimeError:
        # no backup container yet, nothing to clean up
        pass
    else:
        obsolete_container.delete()
        obsolete_image.delete(ignore_errors=True)
    try:
        # keep the currently running container as the new backup
        self.rename(obsolete_container.name)
    except RuntimeError:
        # current container missing: nothing to stop, just start the new one
        pass
    else:
        self.stop()
    new_container.run(tag=tag, registry=registry)
    return True
def is_manager(self):
    # Return True when this host is a Docker Swarm manager that also
    # pulled the image successfully. Runs once per host in parallel;
    # the finally block coordinates the hosts through shared
    # multiprocessing primitives.
    try:
        if self.pull_errors.get(fab.env.host, False):
            # image pull failed here, so this host cannot lead the update
            return False
        is_manager = fabricio.run(
            "docker info 2>&1 | grep 'Is Manager:'",
            use_cache=True,
        ).endswith('true')
        if is_manager:
            self.manager_found.set()
        return is_manager
    except host_errors as error:
        fabricio.log(
            'WARNING: {error}'.format(error=error),
            output=sys.stderr,
            color=colors.red,
        )
        return False
    finally:
        # the last host to finish verifies that some manager was found;
        # counter and event are reset for the next invocation round
        with self.is_manager_call_count.get_lock():
            self.is_manager_call_count.value += 1
            if self.is_manager_call_count.value >= len(fab.env.all_hosts):
                if not self.manager_found.is_set():
                    msg = 'Service manager with pulled image was not found'
                    raise ServiceError(msg)
                self.manager_found.clear()
                self.is_manager_call_count.value = 0
def save_new_settings(self, configuration, image):
    """
    Persist current configuration (and image digests when obtainable)
    as labels on a freshly built sentinel image.
    """
    self.rotate_sentinel_images()

    settings_labels = [
        (self.configuration_label, b64encode(configuration).decode()),
    ]
    try:
        digests = self._get_digests(self.images)
        serialized = json.dumps(digests, sort_keys=True)
        settings_labels.append(
            (self.digests_label, b64encode(serialized.encode()).decode()),
        )
    except fabricio.host_errors:
        # digests are optional; ignore host failures
        pass

    label_clause = ' '.join(
        '{0}={1}'.format(name, value) for name, value in settings_labels
    )
    dockerfile = 'FROM {image}\nLABEL {labels}\n'.format(
        image=image or 'scratch',
        labels=label_clause,
    )
    build_command = 'echo {dockerfile} | docker build --tag {tag} -'.format(
        dockerfile=shlex_quote(dockerfile),
        tag=self.current_settings_tag,
    )
    try:
        fabricio.run(build_command)
    except fabricio.host_errors as error:
        fabricio.log(
            'WARNING: {error}'.format(error=error),
            output=sys.stderr,
            color=colors.red,
        )
def _task(*args, **kwargs):
    """Invoke the wrapped task, or log a skip notice when no host is set."""
    if not fab.env.get('host_string', False):
        message = (
            "'{func}' execution was skipped due to no host provided "
            "(command: {command})"
        ).format(func=func.__name__, command=fab.env.command)
        fabricio.log(message)
        return None
    return func(*args, **kwargs)
def _task(*args, **kwargs):
    """Invoke the wrapped task only when Fabric has a current host."""
    host_is_set = fab.env.get('host_string', False)
    if host_is_set:
        return func(*args, **kwargs)
    fabricio.log(
        "'{func}' execution was skipped due to no host provided "
        "(command: {command})".format(
            func=func.__name__,
            command=fab.env.command,
        )
    )
def pull_image(self, *args, **kwargs):
    """Pull the image; on host error mark this host as a non-manager."""
    try:
        return super(ManagedService, self).pull_image(*args, **kwargs)
    except fabricio.host_errors as error:
        self.managers[fab.env.host] = False
        warning = 'WARNING: {error}'.format(error=error)
        fabricio.log(warning, output=sys.stderr, color=colors.red)
def pull_image(self, *args, **kwargs):
    """Pull the image (if any); on host error record the pull failure."""
    try:
        if self.image:
            return super(_Base, self).pull_image(*args, **kwargs)
    except host_errors as error:
        self.pull_errors[fab.env.host] = True
        warning = 'WARNING: {error}'.format(error=error)
        fabricio.log(warning, output=sys.stderr, color=colors.red)
def create_db(self, tag=None, registry=None):
    """Create a new PostgreSQL database by running the image once."""
    fabricio.log('PostgreSQL database not found, creating new...')
    image = self.image[registry:tag]
    # official PostgreSQL image executes 'postgres initdb' before
    # any 'postgres' command (see /docker-entrypoint.sh),
    # therefore if you use image other then official, you should
    # implement your own `create_db()`
    image.run('postgres --version', options=self.safe_options, quiet=False)
def update(self, tag=None, force=False):
    """
    update service to a new version
    """
    if not self.service.update(
        tag=tag,
        registry=self.registry,
        force=utils.strtobool(force),
    ):
        fabricio.log('Host does not require update, update skipped.')
def update(self, tag=None, force=False):
    """
    update service to a new version
    """
    # force arrives as a string from the fab command line
    changed = self.service.update(
        tag=tag,
        registry=self.host_registry,
        account=self.account,
        force=utils.strtobool(force),
    )
    if not changed:
        fabricio.log('No changes detected, update skipped.')
def update(self, tag=None, force=False):
    """
    update service to a new version
    """
    with self.remote_host():
        result = self.service.update(
            tag=tag,
            registry=self.host_registry,
            account=self.account,
            force=utils.strtobool(force),
        )
        # only an explicit False means "nothing changed"
        if result is False:
            fabricio.log('No changes detected, update skipped.')
def create_db(self, tag=None, registry=None, account=None):
    """
    Official PostgreSQL Docker image executes 'postgres initdb' before any
    command starting with 'postgres' (see /docker-entrypoint.sh), therefore
    if you use custom image, you probably have to implement your own
    `create_db()`
    """
    fabricio.log('PostgreSQL database not found, creating new...')
    image = self.image[registry:tag:account]
    # 'postgres --version' triggers initdb and exits (see docstring)
    image.run('postgres --version', options=self.safe_options, quiet=False)
def create_db(self, tag=None, registry=None, account=None):
    """
    Official PostgreSQL Docker image executes 'postgres initdb' before any
    command starting with 'postgres' (see /docker-entrypoint.sh), therefore
    if you use custom image, you probably have to implement your own
    `create_db()`
    """
    fabricio.log('PostgreSQL database not found, creating new...')
    # any command starting with 'postgres' creates the DB first
    self.image[registry:tag:account].run(
        'postgres --version',
        quiet=False,
        options=self.safe_options,
    )
def update_recovery_config(self, tag=None, registry=None, account=None):
    # Decide this host's role (master vs. standby) and update
    # recovery.conf accordingly; returns True when the file changed.
    # Runs in parallel on all hosts, coordinated through shared
    # multiprocessing events/locks.
    db_exists = self.db_exists()
    recovery_conf_file = os.path.join(self.pg_data, 'recovery.conf')
    if db_exists:
        self.multiprocessing_data.db_exists = True
        if not files.exists(recovery_conf_file, use_sudo=self.sudo):
            # master founded
            self.set_master_info()
            return False
    fabricio.log('Waiting for master info ({seconds} seconds)...'.format(
        seconds=self.pg_recovery_wait_for_master_seconds,
    ))
    self.master_obtained.wait(self.pg_recovery_wait_for_master_seconds)
    if not self.master_obtained.is_set():
        if db_exists and not self.pg_recovery_master_promotion_enabled:
            fab.abort(
                'Database exists but master not found. This probably '
                'means master failure. New master promotion disabled '
                'by default, but can be enabled by setting attribute '
                '\'pg_recovery_master_promotion_enabled\' to True.'
            )
        self.master_lock.acquire()
        if not self.master_obtained.is_set():
            if db_exists:
                # promote this host: retire old recovery.conf
                # NOTE(review): master_lock is never released on this
                # return path (nor on the one below) — presumably
                # intentional so peers block until the master is known;
                # confirm master_obtained is set elsewhere
                fabricio.move_file(
                    path_from=recovery_conf_file,
                    path_to=recovery_conf_file + '.backup',
                    sudo=self.sudo,
                )
                self.set_master_info()
                return True
            elif not self.multiprocessing_data.db_exists:
                # no data anywhere in the cluster: become master
                self.set_master_info()
                return False
        self.master_lock.release()
        # block until a master announces itself
        self.master_obtained.wait()
    if not db_exists:
        # standby with no data yet: clone from the elected master
        self.copy_data_from_master(
            tag=tag,
            registry=registry,
            account=account,
        )
    return self.update_config(
        content=self.get_recovery_config(),
        path=os.path.join(self.pg_data, 'recovery.conf'),
    )
def update_recovery_config(self, tag=None, registry=None, account=None):
    # Decide this host's role (master vs. standby) and update
    # recovery.conf accordingly; returns True when the file changed.
    # Variant of the sibling implementation with sudo hard-coded on.
    db_exists = self.db_exists()
    recovery_conf_file = os.path.join(self.pg_data, 'recovery.conf')
    if db_exists:
        self.multiprocessing_data.db_exists = True
        if not files.exists(recovery_conf_file, use_sudo=True):
            # master founded
            self.set_master_info()
            return False
    fabricio.log('Waiting for master info ({seconds} seconds)...'.format(
        seconds=self.pg_recovery_wait_for_master_seconds,
    ))
    self.master_obtained.wait(self.pg_recovery_wait_for_master_seconds)
    if not self.master_obtained.is_set():
        if db_exists and not self.pg_recovery_master_promotion_enabled:
            fab.abort(
                'Database exists but master not found. This probably '
                'means master failure. New master promotion disabled '
                'by default, but can be enabled by setting attribute '
                '\'pg_recovery_master_promotion_enabled\' to True.')
        self.master_lock.acquire()
        if not self.master_obtained.is_set():
            if db_exists:
                # promote this host: retire old recovery.conf
                # NOTE(review): master_lock is never released on this
                # return path (nor on the one below) — presumably
                # intentional so peers block until the master is known;
                # confirm master_obtained is set elsewhere
                fabricio.move_file(
                    path_from=recovery_conf_file,
                    path_to=recovery_conf_file + '.backup',
                    sudo=True,
                )
                self.set_master_info()
                return True
            elif not self.multiprocessing_data.db_exists:
                # no data anywhere in the cluster: become master
                self.set_master_info()
                return False
        self.master_lock.release()
        # block until a master announces itself
        self.master_obtained.wait()
    if not db_exists:
        # standby with no data yet: clone from the elected master
        self.copy_data_from_master(
            tag=tag,
            registry=registry,
            account=account,
        )
    recovery_config = self.get_recovery_config()
    return self.update_config(
        content=recovery_config,
        path=os.path.join(self.pg_data, 'recovery.conf'),
    )
def update_config(self, content, path):
    """
    Upload *content* to remote *path* when it differs from the current
    remote file, keeping a `.backup` copy of the previous version.

    Returns True when the file was updated, False when unchanged.
    """
    remote_copy = six.BytesIO()
    if files.exists(path, use_sudo=self.sudo):
        fab.get(remote_path=path, local_path=remote_copy, use_sudo=self.sudo)
    if content == remote_copy.getvalue():
        fabricio.log('{path} not changed'.format(path=path))
        return False
    fabricio.move_file(
        path_from=path,
        path_to=path + '.backup',
        sudo=self.sudo,
        ignore_errors=True,
    )
    fab.put(six.BytesIO(content), path, use_sudo=self.sudo, mode='0644')
    fabricio.log('{path} updated'.format(path=path))
    return True
def update_config(content, path):
    """
    Upload *content* to remote *path* when it differs from the current
    remote file, keeping a `.backup` copy of the previous version.

    Returns True when the file was updated, False when unchanged.
    """
    remote_copy = six.BytesIO()
    if files.exists(path, use_sudo=True):
        fab.get(remote_path=path, local_path=remote_copy, use_sudo=True)
    if content == remote_copy.getvalue():
        fabricio.log('{path} not changed'.format(path=path))
        return False
    fabricio.move(
        path_from=path,
        path_to=path + '.backup',
        sudo=True,
        ignore_errors=True,
    )
    fab.put(six.BytesIO(content), path, use_sudo=True, mode='0644')
    fabricio.log('{path} updated'.format(path=path))
    return True
def is_manager(self, raise_manager_error=True):
    """
    Check (with per-host caching) whether the current host is a service
    manager; once every host has been checked and none qualifies, raise
    ManagerNotFoundError (unless raise_manager_error is False).
    """
    host = fab.env.host
    is_manager = self.managers.get(host)
    try:
        if is_manager is None:
            is_manager = self.managers[host] = self._is_manager()
    except fabricio.host_errors as error:
        is_manager = self.managers[host] = False
        fabricio.log(
            'WARNING: {error}'.format(error=error),
            output=sys.stderr,
            color=colors.red,
        )
    finally:
        if raise_manager_error:
            every_host_checked = len(self.managers) >= len(fab.env.all_hosts)
            if every_host_checked and not any(self.managers.values()):
                msg = 'service manager not found or it failed to pull image'
                raise ManagerNotFoundError(msg)
    return is_manager
def _task(*args, **kwargs):
    """Run the wrapped task, logging a skip notice when no host is set."""
    if not fab.env.get('host_string', False):
        fabricio.log('task `{task}` skipped (no host provided)'.format(
            task=fab.env.command,
        ))
        return None
    return task(*args, **kwargs)