def start_ckan(self, production=False, log_syslog=False, paster_reload=True,
               interactive=False):
    """
    Start the apache server or paster serve

    :param log_syslog: A flag to redirect all container logs to host's syslog
    :param production: True for apache, False for paster serve + debug on
    :param paster_reload: Instruct paster to watch for file changes
    :param interactive: Attach to the web container instead of detaching
    """
    self.stop_ckan()

    address = self.address or '127.0.0.1'
    port = self.port
    # in prod we always use log_syslog driver
    log_syslog = True if self.always_prod else log_syslog

    production = production or self.always_prod
    # We only override the site URL with the docker URL on three conditions
    override_site_url = (self.address is None
                         and not is_boot2docker()
                         and not self.site_url)
    command = ['/scripts/web.sh', str(production), str(override_site_url),
               str(paster_reload)]

    # XXX nasty hack, remove this once we have a lessc command
    # for users (not just for building our preload image)
    if not production:
        css = self.target + '/ckan/ckan/public/base/css'
        if not exists(css + '/main.debug.css'):
            from shutil import copyfile
            copyfile(css + '/main.css', css + '/main.debug.css')

    ro = {
        self.target: '/project',
        scripts.get_script_path('datapusher.sh'): '/scripts/datapusher.sh'
    }

    if not is_boot2docker():
        ro[self.datadir + '/venv'] = '/usr/lib/ckan'

    datapusher = self.needs_datapusher()
    if datapusher:
        run_container(
            self._get_container_name('datapusher'),
            'datacats/web',
            '/scripts/datapusher.sh',
            ro=ro,
            volumes_from=(self._get_container_name('venv')
                          if is_boot2docker() else None),
            log_syslog=log_syslog)

    while True:
        self._create_run_ini(port, production)
        try:
            self._run_web_container(port, command, address,
                                    log_syslog=log_syslog,
                                    datapusher=datapusher,
                                    interactive=interactive)
            if not is_boot2docker():
                self.address = address
        except PortAllocatedError:
            port = self._next_port(port)
            continue
        break

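# Hedged usage sketch (not part of the original source): how start_ckan is
# typically driven, mirroring the reload_ command further below. Environment.load,
# write, web_address and wait_for_web_available are all used elsewhere in this
# section; the environment directory and site name here are illustrative
# assumptions only.
def _example_start_ckan_flow():
    environment = Environment.load('./myproject', 'primary')
    environment.stop_supporting_containers()
    environment.start_supporting_containers()
    environment.start_ckan(production=False, paster_reload=True)
    write('Starting web server at {0} ...'.format(environment.web_address()))
    environment.wait_for_web_available()
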
def _run_web_container(self, port, command):
    """
    Start web container on port with command
    """
    if is_boot2docker():
        ro = {}
        volumes_from = 'datacats_venv_' + self.name
    else:
        ro = {self.datadir + '/venv': '/usr/lib/ckan'}
        volumes_from = None

    run_container(
        name='datacats_web_' + self.name,
        image='datacats/web',
        rw={self.datadir + '/files': '/var/www/storage'},
        ro=dict({
            self.target: '/project/',
            self.datadir + '/run/development.ini': '/project/development.ini',
            WEB: '/scripts/web.sh'},
            **ro),
        links={'datacats_solr_' + self.name: 'solr',
               'datacats_postgres_' + self.name: 'db'},
        volumes_from=volumes_from,
        command=command,
        port_bindings={
            5000: port if is_boot2docker() else ('127.0.0.1', port)},
        )

def create_virtualenv(srcdir, datadir, preload_image, get_container_name):
    """
    Populate venv from preloaded image
    """
    try:
        if docker.is_boot2docker():
            docker.data_only_container(get_container_name("venv"),
                                       ["/usr/lib/ckan"])
            img_id = docker.web_command(
                "/bin/mv /usr/lib/ckan/ /usr/lib/ckan_original",
                image=preload_image,
                commit=True
            )
            docker.web_command(
                command="/bin/cp -a /usr/lib/ckan_original/. /usr/lib/ckan/.",
                volumes_from=get_container_name("venv"),
                image=img_id,
            )
            docker.remove_image(img_id)
            return

        docker.web_command(
            command="/bin/cp -a /usr/lib/ckan/. /usr/lib/ckan_target/.",
            rw={datadir + "/venv": "/usr/lib/ckan_target"},
            image=preload_image,
        )
    finally:
        rw = ({datadir + "/venv": "/usr/lib/ckan"}
              if not docker.is_boot2docker() else {})
        volumes_from = (get_container_name("venv")
                        if docker.is_boot2docker() else None)
        # fix venv permissions
        docker.web_command(
            command="/bin/chown -R --reference=/project /usr/lib/ckan",
            rw=rw,
            volumes_from=volumes_from,
            ro={srcdir: "/project"},
        )

def _two_to_one(datadir):
    """After this command, your environment will be converted to format
version {} and will not work with Datacats versions 1.0.0 and later.
This format version doesn't support multiple sites, and after this only
your "primary" site will be usable, though other sites will be maintained
if you wish to do a migration back to a version which supports multisite.

Would you like to continue the migration? (y/n) [n]:"""
    _, env_name = _split_path(datadir)

    print 'Making sure that containers are stopped...'
    # New-style names
    remove_container('datacats_web_{}_primary'.format(env_name))
    remove_container('datacats_postgres_{}_primary'.format(env_name))
    remove_container('datacats_solr_{}_primary'.format(env_name))

    print 'Doing conversion...'

    if exists(path_join(datadir, '.version')):
        os.remove(path_join(datadir, '.version'))

    to_move = (['files', 'passwords.ini', 'run', 'solr'] +
               (['postgres'] if not is_boot2docker() else []))

    web_command(
        command=['/scripts/migrate.sh',
                 '/project/data/sites/primary',
                 '/project/data'] + to_move,
        ro={scripts.get_script_path('migrate.sh'): '/scripts/migrate.sh'},
        rw={datadir: '/project/data'}
    )

    pgdata_name = 'datacats_pgdata_{}_primary'.format(env_name)
    if is_boot2docker() and inspect_container(pgdata_name):
        rename_container(pgdata_name, 'datacats_pgdata_{}'.format(env_name))

    print 'Doing cleanup...'

    with open(path_join(datadir, 'project-dir')) as pd:
        datacats_env_location = path_join(pd.read(), '.datacats-environment')

    cp = SafeConfigParser()
    cp.read(datacats_env_location)
    # We need to move the port OUT of site_primary section and INTO datacats
    cp.set('datacats', 'port', cp.get('site_primary', 'port'))
    cp.remove_section('site_primary')

    with open(datacats_env_location, 'w') as config:
        cp.write(config)

    cp = SafeConfigParser()
    cp.read(path_join(datadir, 'passwords.ini'))
    # This isn't needed in this version
    cp.remove_option('passwords', 'beaker_session_secret')

    with open(path_join(datadir, 'passwords.ini'), 'w') as config:
        cp.write(config)

def _run_web_container(self, port, command, address='127.0.0.1',
                       log_syslog=False, datapusher=True):
    """
    Start web container on port with command
    """
    if is_boot2docker():
        ro = {}
        volumes_from = self._get_container_name('venv')
    else:
        ro = {self.datadir + '/venv': '/usr/lib/ckan'}
        volumes_from = None

    links = {
        self._get_container_name('solr'): 'solr',
        self._get_container_name('postgres'): 'db'
    }
    links.update({self._get_container_name(container): container
                  for container in self.extra_containers})

    if datapusher:
        if 'datapusher' not in self.containers_running():
            raise DatacatsError(container_logs(
                self._get_container_name('datapusher'), "all", False, False))
        links[self._get_container_name('datapusher')] = 'datapusher'

    try:
        run_container(
            name=self._get_container_name('web'),
            image='datacats/web',
            rw={self.sitedir + '/files': '/var/www/storage',
                self.sitedir + '/run/development.ini':
                    '/project/development.ini'},
            ro=dict({
                self.target: '/project/',
                scripts.get_script_path('web.sh'): '/scripts/web.sh',
                scripts.get_script_path('adjust_devini.py'):
                    '/scripts/adjust_devini.py'},
                **ro),
            links=links,
            volumes_from=volumes_from,
            command=command,
            port_bindings={
                5000: port if is_boot2docker() else (address, port)},
            log_syslog=log_syslog
        )
    except APIError as e:
        if '409' in str(e):
            raise DatacatsError('Web container already running. '
                                'Please stop_web before running.')
        else:
            raise

def start_postgres_and_solr(self):
    """
    run the DB and search containers
    """
    # complicated because postgres needs hard links to
    # work on its data volume. see issue #5
    if is_boot2docker():
        data_only_container('datacats_pgdata_' + self.name,
                            ['/var/lib/postgresql/data'])
        rw = {}
        volumes_from = 'datacats_pgdata_' + self.name
    else:
        rw = {self.datadir + '/postgres': '/var/lib/postgresql/data'}
        volumes_from = None

    # users are created when data dir is blank so we must pass
    # all the user passwords as environment vars
    run_container(
        name='datacats_postgres_' + self.name,
        image='datacats/postgres',
        environment=self.passwords,
        rw=rw,
        volumes_from=volumes_from)
    run_container(
        name='datacats_solr_' + self.name,
        image='datacats/solr',
        rw={self.datadir + '/solr': '/var/lib/solr'},
        ro={self.target + '/schema.xml': '/etc/solr/conf/schema.xml'})

def run_command(self, command, db_links=False, rw_venv=False, rw_project=False,
                rw=None, ro=None, clean_up=False, stream_output=None):
    rw = {} if rw is None else dict(rw)
    ro = {} if ro is None else dict(ro)

    ro.update(self._proxy_settings())

    if is_boot2docker():
        volumes_from = self._get_container_name('venv')
    else:
        volumes_from = None

    venvmount = rw if rw_venv else ro
    venvmount[self.datadir + '/venv'] = '/usr/lib/ckan'
    projectmount = rw if rw_project else ro
    projectmount[self.target] = '/project'

    if db_links:
        self._create_run_ini(self.port, production=False, output='run.ini')
        links = {
            self._get_container_name('solr'): 'solr',
            self._get_container_name('postgres'): 'db',
        }
        ro[self.sitedir + '/run/run.ini'] = '/project/development.ini'
    else:
        links = None

    return web_command(command=command, ro=ro, rw=rw, links=links,
                       volumes_from=volumes_from, clean_up=clean_up,
                       commit=True, stream_output=stream_output)

def create_directories(datadir, sitedir, srcdir=None):
    """
    Create expected directories in datadir, sitedir
    and optionally srcdir
    """
    # It's possible that the datadir already exists
    # (we're making a secondary site)
    if not path.isdir(datadir):
        os.makedirs(datadir, mode=0o700)
    try:
        # This should take care of the 'site' subdir if needed
        os.makedirs(sitedir, mode=0o700)
    except OSError:
        raise DatacatsError("Site already exists.")

    # venv isn't site-specific, the rest are.
    if not docker.is_boot2docker():
        if not path.isdir(datadir + '/venv'):
            os.makedirs(datadir + '/venv')
        os.makedirs(sitedir + '/postgres')

    os.makedirs(sitedir + '/solr')
    os.makedirs(sitedir + '/files')
    os.makedirs(sitedir + '/run')

    if srcdir:
        os.makedirs(srcdir)

def __init__(self, name, target, datadir, site_name, ckan_version=None,
             port=None, deploy_target=None, site_url=None, always_prod=False,
             extension_dir='ckan', address=None, remote_server_key=None,
             extra_containers=None):
    self.name = name
    self.target = target
    self.datadir = datadir
    self.extension_dir = extension_dir
    self.ckan_version = ckan_version
    # This is the site that all commands will operate on.
    self.site_name = site_name
    self.port = int(port if port else self._choose_port())
    self.address = address if not is_boot2docker() else None
    self.deploy_target = deploy_target
    self.remote_server_key = remote_server_key
    self.site_url = site_url
    self.always_prod = always_prod
    self.sites = None
    # Used by the no-init-db functionality
    if extra_containers:
        self.extra_containers = extra_containers
    else:
        self.extra_containers = []

def run_command(self, command, db_links=False, rw_venv=False, rw_project=False,
                rw=None, ro=None, clean_up=False):
    rw = {} if rw is None else dict(rw)
    ro = {} if ro is None else dict(ro)

    ro.update(self._proxy_settings())

    if is_boot2docker():
        volumes_from = 'datacats_venv_' + self.name
    else:
        volumes_from = None

    venvmount = rw if rw_venv else ro
    venvmount[self.datadir + '/venv'] = '/usr/lib/ckan'
    projectmount = rw if rw_project else ro
    projectmount[self.target] = '/project'

    if db_links:
        self._create_run_ini(self.port, production=False, output='run.ini')
        links = {
            'datacats_solr_' + self.name: 'solr',
            'datacats_postgres_' + self.name: 'db',
        }
        ro[self.datadir + '/run/run.ini'] = '/project/development.ini'
    else:
        links = None

    return web_command(command=command, ro=ro, rw=rw, links=links,
                       volumes_from=volumes_from, clean_up=clean_up)

def interactive_shell(self, command=None, paster=False):
    """
    launch interactive shell session with all writable volumes

    :param command: list of strings to execute instead of bash
    """
    if not exists(self.target + '/.bash_profile'):
        # this file is required for activating the virtualenv
        self.create_bash_profile()

    if not command:
        command = []
    use_tty = sys.stdin.isatty() and sys.stdout.isatty()

    if is_boot2docker():
        venv_volumes = ['--volumes-from', 'datacats_venv_' + self.name]
    else:
        venv_volumes = ['-v', self.datadir + '/venv:/usr/lib/ckan:rw']

    self._create_run_ini(self.port, production=False, output='run.ini')
    self._create_run_ini(self.port, production=True, output='test.ini',
                         source='ckan/test-core.ini', override_site_url=False)

    script = SHELL
    if paster:
        script = PASTER
        if command and command != ['help'] and command != ['--help']:
            command += ['--config=/project/development.ini']
        command = [self.extension_dir] + command

    proxy_settings = self._proxy_settings()
    if proxy_settings:
        venv_volumes += [
            '-v',
            self.datadir + '/run/proxy-environment:/etc/environment:ro'
        ]

    # FIXME: consider switching this to dockerpty
    # using subprocess for docker client's interactive session
    return subprocess.call([
        DOCKER_EXE, 'run',
        ] + (['--rm'] if not environ.get('CIRCLECI', False) else []) + [
        '-it' if use_tty else '-i',
        ] + venv_volumes + [
        '-v', self.target + ':/project:rw',
        '-v', self.datadir + '/files:/var/www/storage:rw',
        '-v', script + ':/scripts/shell.sh:ro',
        '-v', PASTER_CD + ':/scripts/paster_cd.sh:ro',
        '-v', self.datadir + '/run/run.ini:/project/development.ini:ro',
        '-v', self.datadir + '/run/test.ini:/project/ckan/test-core.ini:ro',
        '--link', 'datacats_solr_' + self.name + ':solr',
        '--link', 'datacats_postgres_' + self.name + ':db',
        '--hostname', self.name,
        'datacats/web', '/scripts/shell.sh'] + command)

def start_supporting_containers(sitedir, srcdir, passwords,
                                get_container_name, extra_containers,
                                log_syslog=False):
    """
    Start all supporting containers (containers required for CKAN to
    operate) if they aren't already running, along with some extra
    containers specified by the user
    """
    if docker.is_boot2docker():
        docker.data_only_container(get_container_name('pgdata'),
                                   ['/var/lib/postgresql/data'])
        rw = {}
        volumes_from = get_container_name('pgdata')
    else:
        rw = {sitedir + '/postgres': '/var/lib/postgresql/data'}
        volumes_from = None

    running = set(containers_running(get_container_name))
    needed = set(extra_containers).union({'postgres', 'solr'})

    if not needed.issubset(running):
        stop_supporting_containers(get_container_name, extra_containers)

        # users are created when data dir is blank so we must pass
        # all the user passwords as environment vars
        # XXX: postgres entrypoint magic
        docker.run_container(
            name=get_container_name('postgres'),
            image='datacats/postgres',
            environment=passwords,
            rw=rw,
            volumes_from=volumes_from,
            log_syslog=log_syslog)
        docker.run_container(
            name=get_container_name('solr'),
            image='datacats/solr',
            rw={sitedir + '/solr': '/var/lib/solr'},
            ro={srcdir + '/schema.xml': '/etc/solr/conf/schema.xml'},
            log_syslog=log_syslog)

        for container in extra_containers:
            # We don't know a whole lot about the extra containers so we're
            # just gonna have to mount /project and /datadir r/o even if
            # they're not needed for ease of implementation.
            docker.run_container(
                name=get_container_name(container),
                image=EXTRA_IMAGE_MAPPING[container],
                ro={
                    sitedir: '/datadir',
                    srcdir: '/project'
                },
                log_syslog=log_syslog)

def web_address(self):
    """
    Return the url of the web server or None if not running
    """
    port = self._current_web_port()
    address = self.address or '127.0.0.1'
    if port is None:
        return None
    return 'http://{0}:{1}/'.format(
        address if address and not is_boot2docker() else docker_host(),
        port)

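# Illustrative example (assumed values, not from the source): with address
# 127.0.0.1 and the current web container bound to port 5105, web_address()
# returns 'http://127.0.0.1:5105/'; under boot2docker the docker_host()
# address is substituted for the local one.
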
def data_complete(self):
    """
    Return True if all the expected datadir files are present
    """
    if (not isdir(self.datadir + '/files')
            or not isdir(self.datadir + '/run')
            or not isdir(self.datadir + '/search')):
        return False
    if is_boot2docker():
        return True
    return (isdir(self.datadir + '/venv')
            and isdir(self.datadir + '/data'))

def create_directories(self, create_project_dir=True):
    """
    Call once for new projects to create the initial project directories.
    """
    makedirs(self.datadir, mode=0o700)
    makedirs(self.datadir + '/search')
    if not is_boot2docker():
        makedirs(self.datadir + '/venv')
        makedirs(self.datadir + '/data')
    makedirs(self.datadir + '/files')
    makedirs(self.datadir + '/run')

    if create_project_dir:
        makedirs(self.target)

def data_complete(datadir, sitedir, get_container_name):
    """
    Return True if the directories and containers we're expecting
    are present in datadir, sitedir and containers
    """
    if any(not path.isdir(sitedir + x)
           for x in ("/files", "/run", "/solr")):
        return False

    if docker.is_boot2docker():
        # Inspect returns None if the container doesn't exist.
        return all(docker.inspect_container(get_container_name(x))
                   for x in ("pgdata", "venv"))

    return path.isdir(datadir + "/venv") and path.isdir(sitedir + "/postgres")

def _create_run_ini(self, port, production, output='development.ini',
                    source='development.ini', override_site_url=True):
    """
    Create run/development.ini in datadir with debug and site_url overridden
    and with correct db passwords inserted
    """
    cp = SafeConfigParser()
    try:
        cp.read([self.target + '/' + source])
    except ConfigParserError:
        raise DatacatsError('Error reading development.ini')

    cp.set('DEFAULT', 'debug', 'false' if production else 'true')

    if self.site_url:
        site_url = self.site_url
    else:
        if is_boot2docker():
            web_address = socket.gethostbyname(docker_host())
        else:
            web_address = self.address
        site_url = 'http://{}:{}'.format(web_address, port)

    if override_site_url:
        cp.set('app:main', 'ckan.site_url', site_url)

    cp.set('app:main', 'sqlalchemy.url',
           'postgresql://*****:*****@db:5432/ckan'
           .format(self.passwords['CKAN_PASSWORD']))
    cp.set('app:main', 'ckan.datastore.read_url',
           'postgresql://*****:*****@db:5432/ckan_datastore'
           .format(self.passwords['DATASTORE_RO_PASSWORD']))
    cp.set('app:main', 'ckan.datastore.write_url',
           'postgresql://*****:*****@db:5432/ckan_datastore'
           .format(self.passwords['DATASTORE_RW_PASSWORD']))
    cp.set('app:main', 'solr_url', 'http://solr:8080/solr')
    cp.set('app:main', 'beaker.session.secret',
           self.passwords['BEAKER_SESSION_SECRET'])

    if not isdir(self.sitedir + '/run'):
        makedirs(self.sitedir + '/run')  # upgrade old datadir

    with open(self.sitedir + '/run/' + output, 'w') as runini:
        cp.write(runini)

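# Rough sketch (an assumption, not generated output) of what the run ini written
# above contains for a non-production start on 127.0.0.1:5000; credentials are
# shown redacted, exactly as they are in the connection strings above.
#
#     [DEFAULT]
#     debug = true
#
#     [app:main]
#     ckan.site_url = http://127.0.0.1:5000
#     sqlalchemy.url = postgresql://*****:*****@db:5432/ckan
#     ckan.datastore.read_url = postgresql://*****:*****@db:5432/ckan_datastore
#     ckan.datastore.write_url = postgresql://*****:*****@db:5432/ckan_datastore
#     solr_url = http://solr:8080/solr
#     beaker.session.secret = *****
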
def purge_data(self, which_sites=None, never_delete=False):
    """
    Remove uploaded files, postgres db, solr index, venv
    """
    # Default to the set of all sites
    if not which_sites:
        which_sites = self.sites

    datadirs = []
    boot2docker = is_boot2docker()

    if which_sites:
        if self.target:
            cp = SafeConfigParser()
            cp.read([self.target + '/.datacats-environment'])

        for site in which_sites:
            if boot2docker:
                remove_container(self._get_container_name('pgdata'))
            else:
                datadirs += [site + '/postgres']
            # Always rm the site dir & solr & files
            datadirs += [site, site + '/files', site + '/solr']
            if self.target:
                cp.remove_section('site_' + site)
            self.sites.remove(site)

        if self.target:
            with open(self.target + '/.datacats-environment', 'w') as conf:
                cp.write(conf)

    datadirs = ['sites/' + datadir for datadir in datadirs]

    if not self.sites and not never_delete:
        datadirs.append('venv')

    web_command(
        command=['/scripts/purge.sh'] +
        ['/project/data/' + d for d in datadirs],
        ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
        rw={self.datadir: '/project/data'},
    )

    if not self.sites and not never_delete:
        shutil.rmtree(self.datadir)

def purge_data(self):
    """
    Remove uploaded files, postgres db, solr index, venv
    """
    datadirs = ['files', 'solr']
    if is_boot2docker():
        remove_container('datacats_pgdata_' + self.name)
        remove_container('datacats_venv_' + self.name)
    else:
        datadirs += ['postgres', 'venv']

    web_command(
        command=['/scripts/purge.sh'] +
        ['/project/data/' + d for d in datadirs],
        ro={PURGE: '/scripts/purge.sh'},
        rw={self.datadir: '/project/data'},
    )
    shutil.rmtree(self.datadir)

def create(opts):
    """Create a new environment

Usage:
  datacats create [-bin] [--interactive] [-s NAME] [--address=IP] [--syslog]
                  [--ckan=CKAN_VERSION] [--no-datapusher] [--site-url SITE_URL]
                  [--no-init-db] ENVIRONMENT_DIR [PORT]

Options:
  --address=IP            Address to listen on (Linux-only)
  --ckan=CKAN_VERSION     Use CKAN version CKAN_VERSION [default: 2.3]
  -b --bare               Bare CKAN site with no example extension
  -i --image-only         Create the environment but don't start containers
  --interactive           Doesn't detach from the web container
  --no-datapusher         Don't install/enable ckanext-datapusher
  --no-init-db            Don't initialize the database. Useful for importing CKANs.
  -n --no-sysadmin        Don't prompt for an initial sysadmin user account
  -s --site=NAME          Pick a site to create [default: primary]
  --site-url SITE_URL     The site_url to use in API responses
                          (e.g. http://example.org:{port}/)
  --syslog                Log to the syslog

ENVIRONMENT_DIR is a path for the new environment directory. The last
part of this path will be used as the environment name.
"""
    if opts['--address'] and is_boot2docker():
        raise DatacatsError('Cannot specify address on boot2docker.')
    return create_environment(
        environment_dir=opts['ENVIRONMENT_DIR'],
        port=opts['PORT'],
        create_skin=not opts['--bare'],
        start_web=not opts['--image-only'],
        create_sysadmin=not opts['--no-sysadmin'],
        site_name=opts['--site'],
        ckan_version=opts['--ckan'],
        address=opts['--address'],
        log_syslog=opts['--syslog'],
        datapusher=not opts['--no-datapusher'],
        site_url=opts['--site-url'],
        interactive=opts['--interactive'],
        init_db=not opts['--no-init-db'],
    )

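# Hedged examples of invoking the command above, following its Usage block;
# the directory name, port and address values are illustrative assumptions:
#
#     datacats create myproject
#     datacats create --ckan=2.3 --no-datapusher -n myproject 5001
#     datacats create --address=192.168.1.10 --site-url=http://example.org:{port}/ myproject
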
def create_virtualenv(self):
    """
    Populate venv directory from preloaded image
    """
    if is_boot2docker():
        data_only_container('datacats_venv_' + self.name, ['/usr/lib/ckan'])
        img_id = web_command(
            '/bin/mv /usr/lib/ckan/ /usr/lib/ckan_original',
            image=self._preload_image(),
            commit=True,
        )
        web_command(
            command='/bin/cp -a /usr/lib/ckan_original/. /usr/lib/ckan/.',
            volumes_from='datacats_venv_' + self.name,
            image=img_id,
        )
        remove_image(img_id)
    else:
        web_command(
            command='/bin/cp -a /usr/lib/ckan/. /usr/lib/ckan_target/.',
            rw={self.datadir + '/venv': '/usr/lib/ckan_target'},
            image=self._preload_image())

def _one_to_two(datadir):
    """After this command, your environment will be converted to format
version {} and will only work with datacats versions 1.0.0 and later.
This migration is necessary to support multiple sites within the same
environment. Your current site will be kept and will be named "primary".

Would you like to continue the migration? (y/n) [n]:"""
    new_site_name = 'primary'
    split = _split_path(datadir)

    print 'Making sure that containers are stopped...'
    env_name = split[1]
    # Old-style names on purpose! We need to stop old containers!
    remove_container('datacats_web_' + env_name)
    remove_container('datacats_solr_' + env_name)
    remove_container('datacats_postgres_' + env_name)

    print 'Doing conversion...'
    # Begin the actual conversion
    to_move = (['files', 'passwords.ini', 'run', 'solr'] +
               (['postgres'] if not is_boot2docker() else []))

    # Make a primary site
    site_path = path_join(datadir, 'sites', new_site_name)
    if not exists(site_path):
        makedirs(site_path)

    web_command(
        command=['/scripts/migrate.sh',
                 '/project/data',
                 '/project/data/sites/' + new_site_name] + to_move,
        ro={scripts.get_script_path('migrate.sh'): '/scripts/migrate.sh'},
        rw={datadir: '/project/data'},
        clean_up=True
    )

    if is_boot2docker():
        rename_container('datacats_pgdata_' + env_name,
                         'datacats_pgdata_' + env_name + '_' + new_site_name)

    # Lastly, grab the project directory and update the ini file
    with open(path_join(datadir, 'project-dir')) as pd:
        project = pd.read()

    cp = SafeConfigParser()
    config_loc = path_join(project, '.datacats-environment')
    cp.read([config_loc])
    new_section = 'site_' + new_site_name
    cp.add_section(new_section)

    # Ports need to be moved into the new section
    port = cp.get('datacats', 'port')
    cp.remove_option('datacats', 'port')
    cp.set(new_section, 'port', port)

    with open(config_loc, 'w') as config:
        cp.write(config)

    # Make a session secret for it (make it per-site)
    cp = SafeConfigParser()
    config_loc = path_join(site_path, 'passwords.ini')
    cp.read([config_loc])
    # Generate a new secret
    cp.set('passwords', 'beaker_session_secret', generate_password())

    with open(config_loc, 'w') as config:
        cp.write(config)

    with open(path_join(datadir, '.version'), 'w') as f:
        f.write('2')

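# Illustrative before/after (the port value is an assumption) of the
# .datacats-environment edit performed above: the port option moves out of
# [datacats] and into the new per-site section.
#
#     before:
#         [datacats]
#         port = 5105
#
#     after:
#         [datacats]
#
#         [site_primary]
#         port = 5105
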
def _run_web_container(self, port, command, address, log_syslog=False,
                       datapusher=True, interactive=False):
    """
    Start web container on port with command
    """
    if is_boot2docker():
        ro = {}
        volumes_from = self._get_container_name('venv')
    else:
        ro = {self.datadir + '/venv': '/usr/lib/ckan'}
        volumes_from = None

    links = {
        self._get_container_name('solr'): 'solr',
        self._get_container_name('postgres'): 'db'
    }
    links.update({self._get_container_name(container): container
                  for container in self.extra_containers})

    if datapusher:
        if 'datapusher' not in self.containers_running():
            raise DatacatsError(container_logs(
                self._get_container_name('datapusher'), "all", False, False))
        links[self._get_container_name('datapusher')] = 'datapusher'

    ro = dict({
        self.target: '/project/',
        scripts.get_script_path('web.sh'): '/scripts/web.sh',
        scripts.get_script_path('adjust_devini.py'):
            '/scripts/adjust_devini.py'},
        **ro)
    rw = {
        self.sitedir + '/files': '/var/www/storage',
        self.sitedir + '/run/development.ini': '/project/development.ini'
    }

    try:
        if not interactive:
            run_container(
                name=self._get_container_name('web'),
                image='datacats/web',
                rw=rw,
                ro=ro,
                links=links,
                volumes_from=volumes_from,
                command=command,
                port_bindings={
                    5000: port if is_boot2docker() else (address, port)},
                log_syslog=log_syslog
            )
        else:
            # FIXME: share more code with interactive_shell
            if is_boot2docker():
                switches = ['--volumes-from',
                            self._get_container_name('pgdata'),
                            '--volumes-from',
                            self._get_container_name('venv')]
            else:
                switches = []
            switches += ['--volume={}:{}:ro'.format(vol, ro[vol])
                         for vol in ro]
            switches += ['--volume={}:{}'.format(vol, rw[vol])
                         for vol in rw]
            links = ['--link={}:{}'.format(link, links[link])
                     for link in links]
            args = ['docker', 'run', '-it', '--name',
                    self._get_container_name('web'),
                    '-p', '{}:5000'.format(port) if is_boot2docker()
                    else '{}:{}:5000'.format(address, port)] + \
                switches + links + ['datacats/web', ] + command
            subprocess.call(args)
    except APIError as e:
        if '409' in str(e):
            raise DatacatsError('Web container already running. '
                                'Please stop_web before running.')
        else:
            raise

def reload_(environment, opts):
    """Reload environment source and configuration

Usage:
  datacats reload [-b] [-p|--no-watch] [--syslog] [-s NAME]
                  [--site-url=SITE_URL] [--address=IP] [ENVIRONMENT [PORT]]
  datacats reload -r [-b] [--syslog] [-s NAME] [--address=IP]
                  [--site-url=SITE_URL] [ENVIRONMENT]

Options:
  --address=IP          Address to listen on (Linux-only)
  --site-url=SITE_URL   The site_url to use in API responses. Can use Python
                        template syntax to insert the port and address
                        (e.g. http://example.org:{port}/)
  -b --background       Don't wait for response from web server
  --no-watch            Do not automatically reload templates and .py files on change
  -p --production       Reload with apache and debug=false
  -r --remote           Reload DataCats.com cloud instance
  -s --site=NAME        Specify a site to reload [default: primary]
  --syslog              Log to the syslog

ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
    if opts['--address'] and is_boot2docker():
        raise DatacatsError('Cannot specify address on boot2docker.')

    environment.require_data()
    environment.stop_ckan()

    if opts['PORT'] or opts['--address'] or opts['--site-url']:
        if opts['PORT']:
            environment.port = int(opts['PORT'])
        if opts['--address']:
            environment.address = opts['--address']
        if opts['--site-url']:
            site_url = opts['--site-url']
            # TODO: Check it against a regex or use urlparse
            try:
                site_url = site_url.format(address=environment.address,
                                           port=environment.port)
                environment.site_url = site_url
                environment.save_site(False)
            except (KeyError, IndexError, ValueError) as e:
                raise DatacatsError('Could not parse site_url: {}'.format(e))
        environment.save()

    for container in environment.extra_containers:
        require_extra_image(EXTRA_IMAGE_MAPPING[container])

    environment.stop_supporting_containers()
    environment.start_supporting_containers()

    environment.start_ckan(
        production=opts['--production'],
        paster_reload=not opts['--no-watch'],
        log_syslog=opts['--syslog'])

    write('Starting web server at {0} ...'.format(environment.web_address()))
    if opts['--background']:
        write('\n')
        return

    try:
        environment.wait_for_web_available()
    finally:
        write('\n')

def interactive_shell(self, command=None, paster=False, detach=False):
    """
    launch interactive shell session with all writable volumes

    :param command: list of strings to execute instead of bash
    """
    if not exists(self.target + '/.bash_profile'):
        # this file is required for activating the virtualenv
        self.create_bash_profile()

    if not command:
        command = []
    use_tty = sys.stdin.isatty() and sys.stdout.isatty()

    background = environ.get('CIRCLECI', False) or detach

    if is_boot2docker():
        venv_volumes = ['--volumes-from', self._get_container_name('venv')]
    else:
        venv_volumes = ['-v', self.datadir + '/venv:/usr/lib/ckan:rw']

    self._create_run_ini(self.port, production=False, output='run.ini')
    self._create_run_ini(self.port, production=True, output='test.ini',
                         source='ckan/test-core.ini', override_site_url=False)

    script = scripts.get_script_path('shell.sh')
    if paster:
        script = scripts.get_script_path('paster.sh')
        if command and command != ['help'] and command != ['--help']:
            command += ['--config=/project/development.ini']
        command = [self.extension_dir] + command

    proxy_settings = self._proxy_settings()
    if proxy_settings:
        venv_volumes += [
            '-v',
            self.sitedir + '/run/proxy-environment:/etc/environment:ro'
        ]

    links = {
        self._get_container_name('solr'): 'solr',
        self._get_container_name('postgres'): 'db'
    }
    links.update({
        self._get_container_name(container): container
        for container in self.extra_containers
    })

    link_params = []

    for link in links:
        link_params.append('--link')
        link_params.append(link + ':' + links[link])

    if 'datapusher' in self.containers_running():
        link_params.append('--link')
        link_params.append(
            self._get_container_name('datapusher') + ':datapusher')

    # FIXME: consider switching this to dockerpty
    # using subprocess for docker client's interactive session
    return subprocess.call(
        [
            DOCKER_EXE, 'run',
        ] + (['--rm'] if not background else []) + [
            '-t' if use_tty else '',
            '-d' if detach else '-i',
        ] + venv_volumes + [
            '-v', self.target + ':/project:rw',
            '-v', self.sitedir + '/files:/var/www/storage:rw',
            '-v', script + ':/scripts/shell.sh:ro',
            '-v', scripts.get_script_path('paster_cd.sh') +
            ':/scripts/paster_cd.sh:ro',
            '-v', self.sitedir + '/run/run.ini:/project/development.ini:ro',
            '-v', self.sitedir +
            '/run/test.ini:/project/ckan/test-core.ini:ro'
        ] + link_params +
        ['--hostname', self.name,
         'datacats/web', '/scripts/shell.sh'] + command)

def init(opts, no_install=False, quiet=False):
    """Initialize a purged environment or copied environment directory

Usage:
  datacats init [-in] [--syslog] [-s NAME] [--address=IP] [--interactive]
                [--site-url SITE_URL] [ENVIRONMENT_DIR [PORT]] [--no-init-db]

Options:
  --address=IP            Address to listen on (Linux-only)
  --interactive           Don't detach from the web container
  -i --image-only         Create the environment but don't start containers
  --no-init-db            Don't initialize the database. Useful for importing other CKANs
  -n --no-sysadmin        Don't prompt for an initial sysadmin user account
  -s --site=NAME          Pick a site to initialize [default: primary]
  --site-url SITE_URL     The site_url to use in API responses
                          (e.g. http://example.org:{port}/)
  --syslog                Log to the syslog

ENVIRONMENT_DIR is an existing datacats environment directory. Defaults to '.'
"""
    if opts['--address'] and is_boot2docker():
        raise DatacatsError('Cannot specify address on boot2docker.')

    environment_dir = opts['ENVIRONMENT_DIR']
    port = opts['PORT']
    address = opts['--address']
    start_web = not opts['--image-only']
    create_sysadmin = not opts['--no-sysadmin']
    site_name = opts['--site']
    site_url = opts['--site-url']
    interactive = opts['--interactive']
    init_db = not opts['--no-init-db']

    environment_dir = abspath(environment_dir or '.')
    log_syslog = opts['--syslog']

    environment = Environment.load(environment_dir, site_name)
    if address:
        environment.address = address
    if port:
        environment.port = int(port)
    if site_url:
        environment.site_url = site_url

    try:
        if environment.sites and site_name in environment.sites:
            raise DatacatsError('Site named {0} already exists.'
                                .format(site_name))
        # There are a couple of steps we can/must skip if we're making a sub-site only
        making_full_environment = not environment.data_exists()

        if not quiet:
            write('Creating environment {0}/{1} '
                  'from existing environment directory "{0}"'
                  .format(environment.name, environment.site_name))
        steps = [
            lambda: environment.create_directories(create_project_dir=False)] + ([
                environment.save,
                environment.create_virtualenv
            ] if making_full_environment else []) + [
            environment.save_site,
            environment.start_supporting_containers,
            environment.fix_storage_permissions,
        ]

        for fn in steps:
            fn()
            if not quiet:
                write('.')
        if not quiet:
            write('\n')
    except:
        if not quiet:
            print
        raise

    return finish_init(environment, start_web, create_sysadmin,
                       log_syslog=log_syslog, do_install=not no_install,
                       quiet=quiet, site_url=site_url,
                       interactive=interactive, init_db=init_db)

def purge_data(self, which_sites=None, never_delete=False):
    """
    Remove uploaded files, postgres db, solr index, venv
    """
    # Default to the set of all sites
    if not exists(self.datadir + '/.version'):
        format_version = 1
    else:
        with open(self.datadir + '/.version') as f:
            format_version = int(f.read().strip())

    if format_version == 1:
        print 'WARNING: Defaulting to old purge for version 1.'
        datadirs = ['files', 'solr']
        if is_boot2docker():
            remove_container('datacats_pgdata_{}'.format(self.name))
            remove_container('datacats_venv_{}'.format(self.name))
        else:
            datadirs += ['postgres', 'venv']

        web_command(
            command=['/scripts/purge.sh'] +
            ['/project/data/' + d for d in datadirs],
            ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
            rw={self.datadir: '/project/data'},
        )
        shutil.rmtree(self.datadir)
    elif format_version == 2:
        if not which_sites:
            which_sites = self.sites

        datadirs = []
        boot2docker = is_boot2docker()

        if which_sites:
            if self.target:
                cp = SafeConfigParser()
                cp.read([self.target + '/.datacats-environment'])

            for site in which_sites:
                if boot2docker:
                    remove_container(self._get_container_name('pgdata'))
                else:
                    datadirs += [site + '/postgres']
                # Always rm the site dir & solr & files
                datadirs += [site, site + '/files', site + '/solr']
                if self.target:
                    cp.remove_section('site_' + site)
                self.sites.remove(site)

            if self.target:
                with open(self.target + '/.datacats-environment', 'w') as conf:
                    cp.write(conf)

        datadirs = ['sites/' + datadir for datadir in datadirs]

        if not self.sites and not never_delete:
            datadirs.append('venv')

        web_command(
            command=['/scripts/purge.sh'] +
            ['/project/data/' + d for d in datadirs],
            ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
            rw={self.datadir: '/project/data'},
        )

        if not self.sites and not never_delete:
            shutil.rmtree(self.datadir)
    else:
        raise DatacatsError(
            'Unknown format version {}'.format(format_version))

def interactive_shell(self, command=None, paster=False, detach=False):
    """
    launch interactive shell session with all writable volumes

    :param command: list of strings to execute instead of bash
    """
    if not exists(self.target + '/.bash_profile'):
        # this file is required for activating the virtualenv
        self.create_bash_profile()

    if not command:
        command = []
    use_tty = sys.stdin.isatty() and sys.stdout.isatty()

    background = environ.get('CIRCLECI', False) or detach

    if is_boot2docker():
        venv_volumes = ['--volumes-from', self._get_container_name('venv')]
    else:
        venv_volumes = ['-v', self.datadir + '/venv:/usr/lib/ckan:rw']

    self._create_run_ini(self.port, production=False, output='run.ini')
    self._create_run_ini(self.port, production=True, output='test.ini',
                         source='ckan/test-core.ini', override_site_url=False)

    script = scripts.get_script_path('shell.sh')
    if paster:
        script = scripts.get_script_path('paster.sh')
        if command and command != ['help'] and command != ['--help']:
            command += ['--config=/project/development.ini']
        command = [self.extension_dir] + command

    proxy_settings = self._proxy_settings()
    if proxy_settings:
        venv_volumes += [
            '-v',
            self.sitedir + '/run/proxy-environment:/etc/environment:ro'
        ]

    links = {self._get_container_name('solr'): 'solr',
             self._get_container_name('postgres'): 'db'}
    links.update({self._get_container_name(container): container
                  for container in self.extra_containers})

    link_params = []

    for link in links:
        link_params.append('--link')
        link_params.append(link + ':' + links[link])

    # FIXME: consider switching this to dockerpty
    # using subprocess for docker client's interactive session
    return subprocess.call([
        DOCKER_EXE, 'run',
        ] + (['--rm'] if not background else []) + [
        '-t' if use_tty else '',
        '-d' if detach else '-i',
        ] + venv_volumes + [
        '-v', self.target + ':/project:rw',
        '-v', self.sitedir + '/files:/var/www/storage:rw',
        '-v', script + ':/scripts/shell.sh:ro',
        '-v', scripts.get_script_path('paster_cd.sh') +
        ':/scripts/paster_cd.sh:ro',
        '-v', self.sitedir + '/run/run.ini:/project/development.ini:ro',
        '-v', self.sitedir + '/run/test.ini:/project/ckan/test-core.ini:ro'
        ] + link_params +
        (['--link', self._get_container_name('datapusher') + ':datapusher']
         if self.needs_datapusher() else []) +
        ['--hostname', self.name,
         'datacats/web', '/scripts/shell.sh'] + command)

def reload_(environment, opts):
    """Reload environment source and configuration

Usage:
  datacats reload [-b] [-p|--no-watch] [--syslog] [-s NAME]
                  [--site-url=SITE_URL] [-i] [--address=IP] [ENVIRONMENT [PORT]]
  datacats reload -r [-b] [--syslog] [-s NAME] [--address=IP]
                  [--site-url=SITE_URL] [-i] [ENVIRONMENT]

Options:
  --address=IP          Address to listen on (Linux-only)
  -i --interactive      Calls out to docker via the command line, allowing
                        for interactivity with the web image.
  --site-url=SITE_URL   The site_url to use in API responses. Can use Python
                        template syntax to insert the port and address
                        (e.g. http://example.org:{port}/)
  -b --background       Don't wait for response from web server
  --no-watch            Do not automatically reload templates and .py files on change
  -p --production       Reload with apache and debug=false
  -s --site=NAME        Specify a site to reload [default: primary]
  --syslog              Log to the syslog

ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
    if opts['--interactive']:
        # We can't wait for the server if we're tty'd
        opts['--background'] = True

    if opts['--address'] and is_boot2docker():
        raise DatacatsError('Cannot specify address on boot2docker.')

    environment.require_data()
    environment.stop_ckan()

    if opts['PORT'] or opts['--address'] or opts['--site-url']:
        if opts['PORT']:
            environment.port = int(opts['PORT'])
        if opts['--address']:
            environment.address = opts['--address']
        if opts['--site-url']:
            site_url = opts['--site-url']
            # TODO: Check it against a regex or use urlparse
            try:
                site_url = site_url.format(address=environment.address,
                                           port=environment.port)
                environment.site_url = site_url
                environment.save_site(False)
            except (KeyError, IndexError, ValueError) as e:
                raise DatacatsError('Could not parse site_url: {}'.format(e))
        environment.save()

    for container in environment.extra_containers:
        require_extra_image(EXTRA_IMAGE_MAPPING[container])

    environment.stop_supporting_containers()
    environment.start_supporting_containers()

    environment.start_ckan(
        production=opts['--production'],
        paster_reload=not opts['--no-watch'],
        log_syslog=opts['--syslog'],
        interactive=opts['--interactive'])

    write('Starting web server at {0} ...'.format(environment.web_address()))
    if opts['--background']:
        write('\n')
        return

    try:
        environment.wait_for_web_available()
    finally:
        write('\n')

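# Hedged examples of invoking the command above, following its Usage block;
# the environment name and port are illustrative assumptions:
#
#     datacats reload myproject
#     datacats reload --production --syslog myproject 80
#     datacats reload -i myproject    # attach to the web container interactively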