Example 1
class Restore():
	def __init__(self, rbd=None, snap=None):
		self.ceph = Ceph(None).backup
		self.tmp_dir = None
		self.dev = None

		self.rbd = rbd
		self.snap = snap
Example 2
    def __get_ceph_storage(self):
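        # Build a {storage name: Ceph client} map for every storage entry of type 'rbd'.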
        result = {}
        for storage in self.px.storage.get():
            if storage['type'] != 'rbd':
                continue
            name = storage['storage']
            endpoint = self.__get_ceph_endpoint(name)

            result[name] = Ceph(storage['pool'], endpoint=endpoint)
        return result
Example 3
def initialize(self):
    self.__marathon = MarathonClient(options.marathon_masters,
                                     options.marathon_port)
    self.__ceph = Ceph(options.ceph_api_host, options.ceph_api_port)
Example 4
def backup_az(az_domain, backup_az_domain, ceph_host, backup_ceph_host):
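    # Fetch ceph.conf and the keyring from both the primary and the backup Ceph
    # host, update the Cinder Ceph backend (volume_backend_name, iscsi_server_ip),
    # run the volume_backend_name.sh helper and restart the cinder services.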
    # get ceph conf and keyring
    LOG.info("connect to ceph: host=%s" % ceph_host)
    ceph = Ceph(ceph_host=ceph_host,
                ceph_user=CEPH_USER,
                ceph_key_file=CEPH_KEYPAIR)

    LOG.info("get %s from %s" % (CEPH_CONF, ceph_host))
    ceph.get_file(LOCAL_CEPH_PATH + "/" + CEPH_CONF,
                  REMOTE_CEPH_PATH + "/" + CEPH_CONF)

    LOG.info("get %s from %s" % (CEPH_KEYRING, ceph_host))
    ceph.get_file(LOCAL_CEPH_PATH + "/" + CEPH_KEYRING,
                  REMOTE_CEPH_PATH + "/" + CEPH_KEYRING)
    ceph.close()

    # get backup ceph conf and keyring
    LOG.info("connect to backup_ceph: host=%s" % backup_ceph_host)
    backup_ceph = Ceph(ceph_host=backup_ceph_host,
                       ceph_user=CEPH_USER,
                       ceph_key_file=CEPH_KEYPAIR)

    LOG.info("get %s from %s" % (CEPH_BACKUP_CONF, backup_ceph_host))
    backup_ceph.get_file(LOCAL_CEPH_PATH + "/" + CEPH_BACKUP_CONF,
                         REMOTE_CEPH_PATH + "/" + CEPH_BACKUP_CONF)

    LOG.info("get %s from %s" % (CEPH_BACKUP_KEYRING, backup_ceph_host))
    backup_ceph.get_file(LOCAL_CEPH_PATH + "/" + CEPH_BACKUP_KEYRING,
                         REMOTE_CEPH_PATH + "/" + CEPH_BACKUP_KEYRING)
    backup_ceph.close()

    backend = Backend()
    # update volume_backend_name
    volume_backend_name = CEPH_VOLUME_PREFIX + ":" + az_domain + ":" + backup_az_domain
    LOG.info("ceph storage backend update: volume_backend_name = %s" %
             volume_backend_name)
    backend.update_ceph_param("volume_backend_name", volume_backend_name)

    # update iscsi_server_ip
    LOG.info("ceph storage backend update:iscsi_server_ip=%s" % ceph_host)
    backend.update_ceph_param("iscsi_server_ip", ceph_host)
    # backend.commit()
    '''
    update_params = {}
    volume_backend_name = CEPH_VOLUME_PREFIX+":"+az_domain+":"+backup_az_domain
    update_params["volume_backend_name"] = volume_backend_name
    update_params["iscsi_server_ip"] = ceph_host
    backend.update_ceph_params(update_params)
    '''
    # set volume_type key
    # volume_type=VOLUME_TYPE_PREFIX+"@"+az_domain
    shell_file = CURRENT_PATH + "/script/volume_backend_name.sh"
    # os.system("/bin/bash " + shell_file + " " + volume_type + " " + volume_backend_name)
    os.system("/bin/bash " + shell_file + " " + az_domain + " " +
              backup_az_domain)

    # restart services
    restart_component("cinder", "cinder-volume")
    restart_component("cinder", "cinder-backup")
Example 5
# Imports assumed by this snippet; Ceph and Log are project-local helpers.
import os
import tempfile
import time

import dateutil.parser
import sh


class Restore():
	def __init__(self, rbd=None, snap=None):
		self.ceph = Ceph(None).backup
		self.tmp_dir = None
		self.dev = None

		self.rbd = rbd
		self.snap = snap

	def list_mapped(self):
		return self.ceph.get_mapped()

	def ls(self):
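		# With no image selected, list backup images (skipping 'restore-' clones);
		# otherwise list the snapshots of self.rbd together with their creation date.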
		result = list()
		if self.rbd is None:
			for i in self.ceph.ls():
				if i.startswith('restore-'):
					continue
				split = i.split(';')
				result.append({
					'ident': split[2],
					'disk': split[1],
					'uuid': i,
				})
		else:
			for i in self.ceph.snap(self.rbd):
				split = i.split(';')
				creation = dateutil.parser.parse(split[3])
				result.append({
					'creation': creation,
					'uuid': i,
				})
		return result

	def mount_rbd(self, kpartx):
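		# Mount the mapped device on self.tmp_dir; when a partition table was
		# detected, map the partitions with kpartx first and only auto-mount if
		# exactly one partition is found.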
		part = self.dev
		if kpartx is True:
			maps = sh.Command('kpartx')('-av', self.dev)
			for mapped in maps:
				mapped = mapped.rstrip()
				Log.info(mapped)

			nbd = self.dev.split('/')[2]

			# len(..) == 2 -> only one partition is found
			if len(maps.split('\n')) != 2:
				Log.info('You can now:')
				Log.info('\tmount /dev/mapper/%spX %s' % (nbd, self.tmp_dir))
				Log.info('\t# Inspect %s and look at your files' % (self.tmp_dir,))
				return
			part = '/dev/mapper/%sp1' % (nbd,)

		time.sleep(0.5)
		try:
			sh.Command('mount')(part, self.tmp_dir)
			Log.info('Please find your files in %s' % (self.tmp_dir,))
			return self.tmp_dir
		except Exception as e:
			Log.warn('mount %s %s failed' % (part, self.tmp_dir))

	def mount(self):
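		# Protect the snapshot, clone it, map the clone, detect whether the device
		# holds a partition table or a bare filesystem, then mount it under a
		# temporary directory.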
		Log.info('Mapping %s@%s ..' % (self.rbd, self.snap))
		for i in self.ceph.get_mapped():
			if i['parent_image'] != self.rbd or i['parent_snap'] != self.snap:
				continue
			Log.info('Already mapped on %s, and possibly mounted on %s' % (i['dev'], i['mountpoint']))
			return i['mountpoint']

		self.ceph.protect(self.rbd, self.snap)
		clone = self.ceph.clone(self.rbd, self.snap)
		self.dev = self.ceph.map(clone)

		if self.dev is None:
			Log.error('Cannot map %s (cloned from %s@%s)' % (clone, self.rbd, self.snap))
			return

		kpartx = False
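		# Probe the device with wipefs: a dos/gpt signature means a partition
		# table (handled via kpartx), while a bare xfs gets its log zeroed first.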
		for part in sh.Command('wipefs')('-p', self.dev):
			if part.startswith('#'):
				continue
			part = part.rstrip()
			part_type = part.split(',')[3]
			if part_type in ('dos', 'gpt'):
				kpartx = True
				break
			if part_type == 'xfs':
				# Dirty hack
				# We need to zero the xfs logs
				# However, a full xfs_repair can be quite long
				# As the zero log is really fast, 30sec should
				# be enough
				try:
					sh.Command('timeout')('30', 'xfs_repair', '-L', self.dev)
				except:
					# If xfs_repair timed out, an
					# Exception is thrown. Do not care.
					pass

		self.tmp_dir = tempfile.mkdtemp()
		try:
			return self.mount_rbd(kpartx)
		except:
			pass

	def umount(self):
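		# Reverse of mount(): unmount and remove the temporary directory, drop the
		# kpartx mappings, unmap the device, delete the clone and unprotect the
		# snapshot.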
		Log.info('Unmapping %s@%s ..' % (self.rbd, self.snap))
		for i in self.ceph.get_mapped():
			if i['parent_image'] != self.rbd or i['parent_snap'] != self.snap:
				continue
			Log.info('%s@%s currently mapped on %s' % (self.rbd, self.snap, i['dev']))
			if i['mountpoint'] is not None:
				try:
					sh.Command('umount')(i['mountpoint'])
				except:
					Log.warn('Cannot umount %s, maybe someone is using it ?' % (i['mountpoint'],))
					continue
				os.rmdir(i['mountpoint'])
			sh.Command('kpartx')('-dv', i['dev'])
			self.ceph.unmap(i['dev'])
			self.ceph.rm(i['image'])
			self.ceph.unprotect(self.rbd, self.snap)
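
A minimal usage sketch for the Restore class above; the image and snapshot names are placeholders, and Ceph/Log come from the surrounding project:

# Hypothetical usage: list available backup images, then mount one snapshot.
restore = Restore()
for entry in restore.ls():
	Log.info(entry['uuid'])

# The rbd/snap identifiers below are placeholders for real values taken from ls().
restore = Restore(rbd='<backup-image-uuid>', snap='<snapshot-uuid>')
mountpoint = restore.mount()
if mountpoint is not None:
	Log.info('files available under %s' % (mountpoint,))
restore.umount()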
Example 6
def initialize(self):
    self.__ceph = Ceph()