Example #1
	def move_to(self, dst):
		if not os.path.exists(dst):
			self._logger.debug("creating %s" % dst)
			os.makedirs(dst)
		
		for config in ['postgresql.conf', 'pg_ident.conf', 'pg_hba.conf']:
			old_config = os.path.join(self.path, config)
			new_config = os.path.join(dst, config)
			if os.path.exists(old_config):
				self._logger.debug('Moving %s' % config)
				shutil.move(old_config, new_config)
			elif os.path.exists(new_config):
				self._logger.debug('%s is already in place. Skipping.' % config)
			else:
				raise BaseException('Postgresql config file not found: %s' % old_config)
			rchown(DEFAULT_USER, new_config)

		# The following block needs revision
		#self._make_symlinks(dst)
		self._patch_sysconfig(dst)
		
		self.path = dst
		
		self._logger.debug("configuring pid")
		conf = PostgresqlConf.find(self)
		conf.pid_file = os.path.join(dst, 'postmaster.pid')
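
The examples on this page all call an rchown() helper that is not shown. Below is a minimal sketch of what such a helper could look like, assuming it resolves a user name to uid/gid and recursively chowns the given path; the real implementation may differ.

import os
import pwd

def rchown(user, path):
	# Resolve the user name to a numeric uid/gid pair.
	pw = pwd.getpwnam(user)
	os.chown(path, pw.pw_uid, pw.pw_gid)
	# If path is a directory, chown everything beneath it as well;
	# for a plain file, os.walk() yields nothing and only path itself is chowned.
	for root, dirs, files in os.walk(path):
		for name in dirs + files:
			os.chown(os.path.join(root, name), pw.pw_uid, pw.pw_gid)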
Example #2
	def move_to(self, dst, move_files=True):
		new_cluster_dir = os.path.join(dst, STORAGE_DATA_DIR)
		
		if not os.path.exists(dst):
			self._logger.debug('Creating directory structure for postgresql cluster: %s' % dst)
			os.makedirs(dst)
		
		if move_files:
			source = self.path 
			if not os.path.exists(self.path):
				source = self.default_path
				self._logger.debug('data_directory in postgresql.conf points to non-existing location, using %s instead' % source)
			if source != new_cluster_dir:
				self._logger.debug("copying cluster files from %s into %s" % (source, new_cluster_dir))
				shutil.copytree(source, new_cluster_dir)	
		self._logger.debug("changing directory owner to %s" % self.user)	
		rchown(self.user, dst)
		
		self._logger.debug("Changing postgres user`s home directory")
		if disttool.is_redhat_based():
			#looks like ubuntu doesn`t need this
			system2([USERMOD, '-d', new_cluster_dir, self.user]) 
			
		self.path = new_cluster_dir
	
		return new_cluster_dir
Example #3
    def create(self, dst):
        if not os.path.exists(dst):
            self._logger.debug("Creating directory structure for mongodb files: %s" % dst)
            os.makedirs(dst)

        self._logger.debug("changing directory owner to %s" % self.user)
        rchown(self.user, dst)
        self.path = dst

        return dst
Example #4
def get_pidfile(port=DEFAULT_PORT):

	pid_file = os.path.join(DEFAULT_PID_DIR, 'redis-server.%s.pid' % port)
	# Fix for Ubuntu 10.04: the pid file may not exist yet, so create it
	if not os.path.exists(pid_file):
		open(pid_file, 'w').close()
	rchown('redis', pid_file)
	return pid_file
Example #5
 def args(self):
     s = ["--fork"]
     if self.configpath:
         s.append("--config=%s" % self.configpath)
     if self.dbpath:
         s.append("--dbpath=%s" % self.dbpath)
     if self.port:
         s.append("--port=%s" % self.port)
     if self.keyfile and os.path.exists(self.keyfile):
         rchown(DEFAULT_USER, self.keyfile)
         s.append("--keyFile=%s" % self.keyfile)
     return s
Example #6
 def _prepare_arbiter(self, rs_name):
     if os.path.isdir(ARBITER_DATA_DIR):
         shutil.rmtree(ARBITER_DATA_DIR)
     self._logger.debug("Creating datadir for arbiter: %s" % ARBITER_DATA_DIR)
     os.makedirs(ARBITER_DATA_DIR)
     rchown(DEFAULT_USER, ARBITER_DATA_DIR)
     self._logger.debug("Preparing arbiter's config file")
     self.arbiter_conf.dbpath = ARBITER_DATA_DIR
     self.arbiter_conf.replSet = rs_name
     self.arbiter_conf.shardsvr = True
     self.arbiter_conf.port = ARBITER_DEFAULT_PORT
     self.arbiter_conf.logpath = ARBITER_LOG_PATH
Example #7
	def apply_private_ssh_key(self, source_path=None):
		source_path = source_path or self.private_key_path
		if not os.path.exists(source_path):
			self._logger.error('Cannot apply private ssh key: source %s not found' % source_path)
		else:
			if not os.path.exists(self.ssh_dir):
				os.makedirs(self.ssh_dir)
				rchown(self.name, self.ssh_dir)
				
			dst = os.path.join(self.ssh_dir, 'id_rsa')
			shutil.copyfile(source_path, dst)
			os.chmod(dst, 0400)
			rchown(self.name, dst)
Example #8
	def apply_public_ssh_key(self, source_path=None):
		source_path = source_path or self.public_key_path 
		if not os.path.exists(self.ssh_dir):
			os.makedirs(self.ssh_dir)
			rchown(self.name, self.ssh_dir)
		
		pub_key = read_file(source_path, logger=self._logger)
		path = os.path.join(self.ssh_dir, 'authorized_keys')
		keys = read_file(path, logger=self._logger) if os.path.exists(path) else ''
		
		if not keys or pub_key not in keys:
			write_file(path, data='\n%s %s\n' % (pub_key, self.name), mode='a', logger=self._logger)
			rchown(self.name, path)
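
Examples 7 and 8 also rely on read_file() and write_file() helpers. Minimal sketches under assumed semantics: read_file() returns the file contents (None on failure), write_file() accepts a mode flag so 'a' appends, and the logger is only used to report errors.

def read_file(path, logger=None):
	try:
		with open(path) as fp:
			return fp.read()
	except IOError, e:
		if logger:
			logger.error('Cannot read %s: %s' % (path, e))
		return None

def write_file(path, data='', mode='w', logger=None):
	try:
		with open(path, mode) as fp:
			fp.write(data)
	except IOError, e:
		if logger:
			logger.error('Cannot write %s: %s' % (path, e))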
Example #9
	def move_to(self, dst, move_files=True):
		new_db_path = os.path.join(dst, os.path.basename(self.db_path))

		if not os.path.exists(dst):
			LOG.debug('Creating directory structure for redis db files: %s' % dst)
			os.makedirs(dst)

		if move_files and os.path.exists(os.path.dirname(self.db_path)) and os.path.isfile(self.db_path):
			LOG.debug("copying db file %s into %s" % (self.db_path, dst))
			shutil.copyfile(self.db_path, new_db_path)

		LOG.debug("changing directory owner to %s" % self.user)
		rchown(self.user, dst)
		self.db_path = new_db_path
		return new_db_path
Example #10
def make_symlinks(source_dir, dst_dir, username='******'):
	# Vital hack for getting the CentOS init script to work
	for obj in ['base', 'PG_VERSION', 'postmaster.pid']:
		
		src = os.path.join(source_dir, obj)
		dst = os.path.join(dst_dir, obj) 
		
		if os.path.islink(dst):
			os.unlink(dst)
		elif os.path.isdir(dst):
			shutil.rmtree(dst)
		elif os.path.exists(dst):
			# rmtree() fails on regular files such as PG_VERSION and postmaster.pid
			os.remove(dst)
			
		os.symlink(src, dst)
		
		if os.path.exists(src):
			rchown(username, dst)	
Example #11
    def _prepare_config_server(self):
        self._logger.debug("Preparing config server")
        if not os.path.exists(CONFIG_SERVER_DATA_DIR):
            os.makedirs(CONFIG_SERVER_DATA_DIR)
        rchown(DEFAULT_USER, CONFIG_SERVER_DATA_DIR)
        """
		configsvr changes the default port and turns on the diaglog, 
		a log that keeps every action the config database performs 
		in a replayable format, just in case.
		For mongo 1.8+ use --port 27019 and --journal (instead of --diaglog). 
		Journaling gives mostly the same effect as the diaglog with better performance.
		P.S. Assume that mongodb roles Scalr will be build on x64 platform only
		Wchich means journal option by default will be on.
		"""
        self.config_server_conf.configsvr = True
        self.config_server_conf.port = CONFIG_SERVER_DEFAULT_PORT
        self.config_server_conf.logpath = CONFIG_SERVER_LOG_PATH
Example #12
 def start(cls):
     if not cls.is_running():
         cls._logger.info("Starting %s process" % MONGOS)
         args = [
             MONGOS,
             "--fork",
             "--logpath",
             ROUTER_LOG_PATH,
             "--configdb",
             "mongo-0-0:%s" % CONFIG_SERVER_DEFAULT_PORT,
         ]
         if cls.keyfile and os.path.exists(cls.keyfile):
             rchown(DEFAULT_USER, cls.keyfile)
             args.append("--keyFile=%s" % cls.keyfile)
         system2(args, close_fds=True, preexec_fn=os.setsid)
         # Note: is_running must be called; a bare reference to the method is always truthy
         wait_until(lambda: cls.is_running(), timeout=MAX_START_TIMEOUT)
         wait_until(lambda: cls.get_cli().has_connection, timeout=MAX_START_TIMEOUT)
         cls._logger.debug("%s process has been started." % MONGOS)
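
The wait_until() helper polled above is not shown either. A minimal sketch, assuming it evaluates a callable until it returns a truthy value or the timeout expires; the poll interval and the exception type are assumptions.

import time

def wait_until(predicate, timeout=60, sleep=1):
	deadline = time.time() + timeout
	# Poll until the predicate holds or we run out of time.
	while not predicate():
		if time.time() >= deadline:
			raise Exception('Timeout (%s seconds) expired' % timeout)
		time.sleep(sleep)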
Example #13
    def move_mysqldir_to(self, storage_path):
        LOG.info("Moving mysql dir to %s" % storage_path)
        for directive, dirname in (
            ("mysqld/log_bin", os.path.join(storage_path, STORAGE_BINLOG)),
            ("mysqld/datadir", os.path.join(storage_path, STORAGE_DATA_DIR) + "/"),
        ):

            dest = os.path.dirname(dirname)
            if os.path.isdir(dest):
                LOG.info("No need to move %s to %s: already in place." % (directive, dest))
            else:
                os.makedirs(dest)

                raw_value = self.my_cnf.get(directive)
                LOG.debug("directive %s:%s" % (directive, raw_value))
                if raw_value:
                    src_dir = os.path.dirname(raw_value + "/") + "/"
                    LOG.debug("source path: %s" % src_dir)
                    if os.path.isdir(src_dir) and src_dir != dest:
                        try:
                            if not system2((software.which("selinuxenabled"),), raise_exc=False)[2]:
                                if not system2((software.which("getsebool"), "mysqld_disable_trans"), raise_exc=False)[
                                    2
                                ]:
                                    LOG.debug("Make SELinux rule for rsync")
                                    system2((software.which("setsebool"), "-P", "mysqld_disable_trans", "1"))
                        except LookupError:
                            pass

                        LOG.info("Copying mysql directory '%s' to '%s'", src_dir, dest)
                        rsync = filetool.Rsync().archive()
                        rsync.source(src_dir).dest(dest).exclude(["ib_logfile*"])
                        system2(str(rsync), shell=True)
            self.my_cnf.set(directive, dirname)

            rchown("mysql", dest)
            # Adding rules to apparmor config
            if disttool.is_debian_based():
                _add_apparmor_rules(dest)
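
Several examples index into the value returned by system2(): [1] for stderr, [2] for the exit code. A minimal sketch of such a helper, assuming it returns an (out, err, returncode) tuple and that raise_exc controls whether a non-zero exit raises; close_fds and preexec_fn are passed through to subprocess, and logger is accepted only for parity with the call sites.

import subprocess

def system2(args, shell=False, raise_exc=True, close_fds=False,
		preexec_fn=None, logger=None):
	# Run the command and capture both output streams.
	p = subprocess.Popen(args, shell=shell, stdout=subprocess.PIPE,
			stderr=subprocess.PIPE, close_fds=close_fds, preexec_fn=preexec_fn)
	out, err = p.communicate()
	if raise_exc and p.returncode:
		raise Exception('%s exited with code %s: %s' % (args, p.returncode, err))
	return out, err, p.returncode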
Example #14
	def on_DbMsr_CreateBackup(self, message):
		# TODO: think about moving most of this into the Postgresql class
		tmpdir = backup_path = None
		try:
			# Get databases list
			psql = PSQL(user=self.postgresql.root_user.name)
			databases = psql.list_pg_databases()
			if 'template0' in databases:
				databases.remove('template0')
			
			
			op = operation(name=self._op_backup, phases=[{
				'name': self._phase_backup
			}])
			op.define()			
			
			with op.phase(self._phase_backup):
			
				if not os.path.exists(self._tmp_path):
					os.makedirs(self._tmp_path)
					
				# Defining archive name and path
				backup_filename = time.strftime('%Y-%m-%d-%H:%M:%S')+'.tar.gz'
				backup_path = os.path.join(self._tmp_path, backup_filename)
				
				# Creating archive 
				backup = tarfile.open(backup_path, 'w:gz')
	
				# Dump all databases
				self._logger.info("Dumping all databases")
				tmpdir = tempfile.mkdtemp(dir=self._tmp_path)		
				rchown(self.postgresql.root_user.name, tmpdir)

				def _single_backup(db_name):
					dump_path = tmpdir + os.sep + db_name + '.sql'
					pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
					su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
					err = system2(su_args)[1]
					if err:
						raise HandlerError('Error while dumping database %s: %s' % (db_name, err))
					backup.add(dump_path, os.path.basename(dump_path))	

				make_backup_steps(databases, op, _single_backup)						

				backup.close()
				
				with op.step(self._step_upload_to_cloud_storage):
					# Creating list of full paths to archive chunks
					if os.path.getsize(backup_path) > BACKUP_CHUNK_SIZE:
						parts = [os.path.join(tmpdir, file) for file in split(backup_path, backup_filename, BACKUP_CHUNK_SIZE, tmpdir)]
					else:
						parts = [backup_path]
					sizes = [os.path.getsize(file) for file in parts]
						
					cloud_storage_path = self._platform.scalrfs.backups(BEHAVIOUR)
					self._logger.info("Uploading backup to cloud storage (%s)", cloud_storage_path)
					trn = transfer.Transfer()
					cloud_files = trn.upload(parts, cloud_storage_path)
					self._logger.info("Postgresql backup uploaded to cloud storage under %s/%s", 
									cloud_storage_path, backup_filename)
			
			result = list(dict(path=path, size=size) for path, size in zip(cloud_files, sizes))
			op.ok(data=result)
				
			# Notify Scalr
			self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
				db_type = BEHAVIOUR,
				status = 'ok',
				backup_parts = result
			))
						
		except (Exception, BaseException), e:
			self._logger.exception(e)
			
			# Notify Scalr about error
			self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
				db_type = BEHAVIOUR,
				status = 'error',
				last_error = str(e)
			))
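
The split() helper used in the backup step above is likewise not shown. A minimal sketch, assuming it cuts the archive into chunks of at most chunk_size bytes under dst_dir and returns the chunk file names; the numeric suffix scheme is an assumption.

import os

def split(file_path, base_name, chunk_size, dst_dir):
	chunk_names = []
	index = 0
	with open(file_path, 'rb') as source:
		while True:
			data = source.read(chunk_size)
			if not data:
				break
			# Write each chunk under dst_dir and remember its bare name.
			chunk_name = '%s.%03d' % (base_name, index)
			with open(os.path.join(dst_dir, chunk_name), 'wb') as chunk:
				chunk.write(data)
			chunk_names.append(chunk_name)
			index += 1
	return chunk_names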
Example #15
	def on_init(self):
		# Temporary fix for the starting-after-rebundle issue
		if not os.path.exists(PG_SOCKET_DIR):
			os.makedirs(PG_SOCKET_DIR)
			rchown(user='******', path=PG_SOCKET_DIR)
			
		bus.on("host_init_response", self.on_host_init_response)
		bus.on("before_host_up", self.on_before_host_up)
		bus.on("before_reboot_start", self.on_before_reboot_start)
		bus.on("before_reboot_finish", self.on_before_reboot_finish)
		
		if self._cnf.state == ScalarizrState.BOOTSTRAPPING:
			self._insert_iptables_rules()
			
			if disttool.is_redhat_based():
				checkmodule_paths = software.whereis('checkmodule')
				semodule_package_paths = software.whereis('semodule_package')
				semodule_paths = software.whereis('semodule')

				if all((checkmodule_paths, semodule_package_paths, semodule_paths)):
					filetool.write_file('/tmp/sshkeygen.te',
								SSH_KEYGEN_SELINUX_MODULE, logger=self._logger)

					self._logger.debug('Compiling SELinux policy for ssh-keygen')
					system2((checkmodule_paths[0], '-M', '-m', '-o',
							'/tmp/sshkeygen.mod', '/tmp/sshkeygen.te'), logger=self._logger)

					self._logger.debug('Building SELinux package for ssh-keygen')
					system2((semodule_package_paths[0], '-o', '/tmp/sshkeygen.pp',
							'-m', '/tmp/sshkeygen.mod'), logger=self._logger)

					self._logger.debug('Loading ssh-keygen SELinux package')
					system2((semodule_paths[0], '-i', '/tmp/sshkeygen.pp'), logger=self._logger)

		if self._cnf.state == ScalarizrState.RUNNING:

			storage_conf = Storage.restore_config(self._volume_config_path)
			storage_conf['tags'] = self.postgres_tags
			self.storage_vol = Storage.create(storage_conf)
			if not self.storage_vol.mounted():
				self.storage_vol.mount()
			
			self.postgresql.service.start()
			self.accept_all_clients()
			
			self._logger.debug("Checking presence of Scalr's PostgreSQL root user.")
			root_password = self.root_password
			
			if not self.postgresql.root_user.exists():
				self._logger.debug("Scalr's PostgreSQL root user does not exist. Recreating")
				self.postgresql.root_user = self.postgresql.create_user(ROOT_USER, root_password)
			else:
				try:
					self.postgresql.root_user.check_system_password(root_password)
					self._logger.debug("Scalr's root PgSQL user is present. Password is correct.")				
				except ValueError:
					self._logger.warning("Scalr's root PgSQL user was changed. Recreating.")
					self.postgresql.root_user.change_system_password(root_password)
					
			if self.is_replication_master:
				# ALTER ROLE cannot be executed in a read-only transaction
				self._logger.debug("Checking password for pg_role scalr.")
				if not self.postgresql.root_user.check_role_password(root_password):
					self._logger.warning("Scalr's root PgSQL role was changed. Recreating.")
					self.postgresql.root_user.change_role_password(root_password)