def volume_create(req):
    """Create a new volume, optionally cloning it from an existing one.

    req: decoded plugin request with 'Name' and an optional 'Opts' dict
    that may carry a 'from' key naming a source volume to clone.
    Returns a dict with 'Err' ('' on success).
    """
    log.error('Request: %s', req)
    name = req['Name']
    if '@' in name:
        return {'Err': '"@" is illegal in a volume name'}
    # BUGFIX: 'Opts' can be present but empty, or present without a 'from'
    # key (e.g. `docker volume create -o foo=bar`); the old opts['from']
    # raised a KeyError in that case.
    opts = req.get('Opts') or {}
    clonefrom = opts.get('from')
    volpath = join(VOLUMES_PATH, name)
    if clonefrom is None:
        # volume already exists?
        if name in [v['Name'] for v in list_volumes()['Volumes']]:
            return {'Err': ''}
        try:
            btrfs.Subvolume(volpath).create()
        except CalledProcessError as e:
            return {'Err': e.stderr.decode()}
        except OSError as e:
            return {'Err': e.strerror}
        except Exception as e:
            return {'Err': str(e)}
        return {'Err': ''}
    # clone path: snapshot the source volume into the new volume path
    sourcepath = join(VOLUMES_PATH, clonefrom)
    volume = btrfs.Subvolume(sourcepath)
    if not volume.exists():
        return {'Err': 'No source volume'}
    volume.snapshot(volpath)
    return {'Err': ''}
def snapshot_restore(req):
    """Snapshot a volume and overwrite it with the specified snapshot.

    'Name' may be a full snapshot name (volume@timestamp) or a bare volume
    name, in which case the most recent snapshot is used. An optional
    'Target' overrides the volume to restore into.
    """
    snapshot_name = req['Name']
    target_name = req.get('Target')
    if '@' not in snapshot_name:
        # A bare volume name was given: restore its most recent snapshot.
        prefix = snapshot_name + '@'
        candidates = sorted(
            s for s in os.listdir(SNAPSHOTS_PATH) if s.startswith(prefix))
        if not candidates:
            return {'Err': ''}
        snapshot_name = candidates[-1]
    snapshot = btrfs.Subvolume(join(SNAPSHOTS_PATH, snapshot_name))
    # default target is the volume the snapshot was taken from
    target_name = target_name or snapshot_name.split('@')[0]
    target_path = join(VOLUMES_PATH, target_name)
    volume = btrfs.Subvolume(target_path)
    if not snapshot.exists():
        return {'Err': 'No such snapshot'}
    res = {'Err': ''}
    if volume.exists():
        # keep a read-only backup of the current state before replacing it
        stamp = datetime.now().strftime(DTFORMAT)
        backup_name = '{}@{}'.format(target_name, stamp)
        volume.snapshot(join(SNAPSHOTS_PATH, backup_name), readonly=True)
        res['VolumeBackup'] = backup_name
        volume.delete()
    snapshot.snapshot(target_path)
    return res
def snapshot_restore():
    """Snapshot a volume and overwrite it with the specified snapshot.

    Bottle handler: reads the snapshot (or bare volume) name from the JSON
    request body and returns a JSON-encoded result.
    """
    snapshot_name = jsonloads(request.body.read())['Name']
    if '@' not in snapshot_name:
        # Only a volume name was supplied: pick its most recent snapshot.
        prefix = snapshot_name + '@'
        matching = sorted(
            s for s in os.listdir(SNAPSHOTS_PATH) if s.startswith(prefix))
        if not matching:
            return json.dumps({'Err': ''})
        snapshot_name = matching[-1]
    snapshot = btrfs.Subvolume(join(SNAPSHOTS_PATH, snapshot_name))
    volume_name = snapshot_name.split('@')[0]
    volume_path = join(VOLUMES_PATH, volume_name)
    volume = btrfs.Subvolume(volume_path)
    if not snapshot.exists():
        return json.dumps({'Err': 'No such snapshot'})
    res = {'Err': ''}
    if volume.exists():
        # back up the live volume (read-only) before overwriting it
        stamped_name = '{}@{}'.format(volume_name,
                                      datetime.now().isoformat())
        volume.snapshot(join(SNAPSHOTS_PATH, stamped_name), readonly=True)
        res['VolumeBackup'] = stamped_name
        volume.delete()
    snapshot.snapshot(volume_path)
    return json.dumps(res)
def snapshot_send(req):
    """Send a snapshot to a remote host over ssh, incrementally if possible.

    The last sent snapshot is remembered by adding a suffix with the target
    host (volume@timestamp@host), so the next send of the same volume can
    use it as a parent for an incremental `btrfs send -p`.
    Returns a dict with 'Err' ('' on success).
    """
    test = req.get('Test', False)
    snapshot_name = req['Name']
    snapshot_path = join(SNAPSHOTS_PATH, snapshot_name)
    remote_host = req['Host']
    remote_snapshots = SNAPSHOTS_PATH if not test else TEST_REMOTE_PATH
    # take the latest snapshot suffixed with the target host
    sent_snapshots = sorted([
        s for s in os.listdir(SNAPSHOTS_PATH)
        if len(s.split('@')) == 3
        and s.split('@')[0] == snapshot_name.split('@')[0]
        and s.split('@')[2] == remote_host
    ])
    latest = sent_snapshots[-1] if len(sent_snapshots) > 0 else None
    if latest and len(latest.rsplit('@')) == 3:
        # strip the host suffix to recover the plain snapshot name
        latest = latest.rsplit('@', 1)[0]
    parent = '-p "{}"'.format(join(SNAPSHOTS_PATH, latest)) if latest else ''
    port = os.getenv("SSH_PORT", '1122')
    # needed by a current issue with send
    run('btrfs filesystem sync "{}"'.format(SNAPSHOTS_PATH), shell=True)
    cmd = ('btrfs send {parent} "{snapshot_path}"'
           ' | ssh -p {port} {remote_host} "btrfs receive {remote_snapshots}"')
    try:
        log.info(cmd.format(**locals()))
        run(cmd.format(**locals()),
            shell=True, check=True, stdout=PIPE, stderr=PIPE)
    except CalledProcessError as e:
        # log.warn is deprecated in favor of log.warning
        log.warning(
            'Failed using parent %s. Sending full snapshot %s '
            '(stdout: %s, stderr: %s)',
            latest, snapshot_path, e.stdout, e.stderr)
        parent = ''
        try:
            # remove a possibly half-received snapshot on the remote side,
            # then retry with a full (non-incremental) send
            rmcmd = (
                'ssh -p {port} {remote_host} '
                '"btrfs subvolume delete {remote_snapshots}/{snapshot_name}"')
            log.info(rmcmd.format(**locals()))
            run(rmcmd.format(**locals()), shell=True, stdout=PIPE, stderr=PIPE)
            log.info(cmd.format(**locals()))
            run(cmd.format(**locals()),
                shell=True, check=True, stdout=PIPE, stderr=PIPE)
        except CalledProcessError as e:
            log.error(
                'Failed sending full snapshot '
                '(stdout: %s, stderr: %s)', e.stdout, e.stderr)
            return {'Err': e.stderr.decode()}
    # remember this send with a host-suffixed read-only snapshot
    btrfs.Subvolume(snapshot_path).snapshot(
        '{}@{}'.format(snapshot_path, remote_host), readonly=True)
    for old_snapshot in sent_snapshots:
        # BUGFIX: was `.delete` (attribute access, never called) on a bare
        # snapshot name (a path relative to the CWD) — old send markers
        # were never actually removed. Delete them under SNAPSHOTS_PATH.
        btrfs.Subvolume(join(SNAPSHOTS_PATH, old_snapshot)).delete()
    return {'Err': ''}
def test_send(self):
    """We can send a snapshot incrementally to another host."""
    # create a volume with a file
    name = PREFIX_TEST_VOLUME + uuid.uuid4().hex
    path = join(VOLUMES_PATH, name)
    self.create_a_volume_with_a_file(name)

    def take_snapshot():
        # snapshot the volume; return (snapshot name, local snapshot path)
        resp = self.app.post('/VolumeDriver.Snapshot',
                             json.dumps({'Name': name}))
        snap = json.loads(resp.body.decode())['Snapshot']
        return snap, join(SNAPSHOTS_PATH, snap)

    def send(snap):
        # send the snapshot (to the same host with another name);
        # return the "remote" path it lands at
        self.app.post(
            '/VolumeDriver.Snapshot.Send',
            json.dumps({'Name': snap, 'Host': 'localhost', 'Test': True}))
        return join(TEST_REMOTE_PATH, snap)

    def check_same_content(local, remote):
        # the sent copy must contain exactly the same file content
        with open(join(local, 'foobar')) as x:
            with open(join(remote, 'foobar')) as y:
                self.assertEqual(x.read(), y.read())

    snapshot, snapshot_path = take_snapshot()
    remote_path = send(snapshot)
    check_same_content(snapshot_path, remote_path)
    # change files in the master volume
    with open(join(path, 'foobar'), 'w') as f:
        f.write('changed foobar')
    # send again to the other volume
    snapshot2, snapshot2_path = take_snapshot()
    remote_path2 = send(snapshot2)
    check_same_content(snapshot2_path, remote_path2)
    # check the second snapshot is a child of the first one
    self.assertEqual(
        btrfs.Subvolume(remote_path).show()['UUID'],
        btrfs.Subvolume(remote_path2).show()['Parent UUID'])
def volume_get():
    """Return the name and mountpoint of an existing volume (JSON)."""
    name = jsonloads(request.body.read())['Name']
    path = join(VOLUMES_PATH, name)
    if btrfs.Subvolume(path).exists():
        return json.dumps(
            {'Volume': {'Name': name, 'Mountpoint': path}, 'Err': ''})
    return json.dumps({'Err': '{}: no such volume'.format(path)})
def volume_remove(req):
    """Delete the named volume; report an error if it does not exist."""
    name = req['Name']
    subvolume = btrfs.Subvolume(join(VOLUMES_PATH, name))
    try:
        subvolume.delete()
    except Exception:
        log.error('%s: no such volume', name)
        return {'Err': '{}: no such volume'.format(name)}
    return {'Err': ''}
def volume_remove():
    """Delete the named volume; report an error if it does not exist (JSON)."""
    name = jsonloads(request.body.read())['Name']
    subvolume = btrfs.Subvolume(join(VOLUMES_PATH, name))
    try:
        subvolume.delete()
    except Exception:
        log.error('%s: no such volume', name)
        return json.dumps({'Err': '{}: no such volume'.format(name)})
    return json.dumps({'Err': ''})
def volume_list():
    """List existing volumes, skipping the metadata db and non-subvolumes."""
    names = []
    for entry in os.listdir(VOLUMES_PATH):
        if entry == 'metadata.db':
            continue
        if btrfs.Subvolume(join(VOLUMES_PATH, entry)).exists():
            names.append(entry)
    return json.dumps({'Volumes': [{'Name': n} for n in names], 'Err': ''})
def snapshot_delete(req):
    """Delete the named snapshot, if it exists."""
    path = join(SNAPSHOTS_PATH, req['Name'])
    if not os.path.exists(path):
        return {'Err': 'No such snapshot'}
    try:
        btrfs.Subvolume(path).delete()
    except Exception as e:
        log.error("Error deleting snapshot: %s", str(e))
        return {'Err': str(e)}
    return {'Err': ''}
def snapshot_delete():
    """Delete the named snapshot, if it exists (JSON handler)."""
    path = join(SNAPSHOTS_PATH, jsonloads(request.body.read())['Name'])
    if not os.path.exists(path):
        return json.dumps({'Err': 'No such snapshot'})
    try:
        btrfs.Subvolume(path).delete()
    except Exception as e:
        log.error("Error deleting snapshot: %s", str(e))
        return json.dumps({'Err': str(e)})
    return json.dumps({'Err': ''})
def volume_create():
    """Create a new btrfs subvolume for the requested volume name (JSON)."""
    name = jsonloads(request.body.read())['Name']
    if '@' in name:
        return json.dumps({'Err': '"@" is illegal in the name of the volume'})
    volpath = join(VOLUMES_PATH, name)
    # volume already exists?
    if name in [v['Name'] for v in json.loads(volume_list())['Volumes']]:
        return json.dumps({'Err': ''})
    try:
        btrfs.Subvolume(volpath).create()
    except OSError as e:
        # .strerror only exists on OSError
        return json.dumps({'Err': e.strerror})
    except Exception as e:
        # BUGFIX: the old handler read e.strerror on *any* Exception, which
        # raised AttributeError for non-OSError failures. Report str(e).
        return json.dumps({'Err': str(e)})
    return json.dumps({'Err': ''})
def volume_snapshot():
    """snapshot a volume in the SNAPSHOTS dir"""
    name = jsonloads(request.body.read())['Name']
    path = join(VOLUMES_PATH, name)
    if not os.path.exists(path):
        return json.dumps({'Err': 'No such volume: {}'.format(name)})
    # snapshot names carry the creation time after the '@'
    timestamped = '{}@{}'.format(name, datetime.now().isoformat())
    target = join(SNAPSHOTS_PATH, timestamped)
    try:
        btrfs.Subvolume(path).snapshot(target, readonly=True)
    except Exception as e:
        log.error("Error creating snapshot: %s", str(e))
        return json.dumps({'Err': str(e)})
    return json.dumps({'Err': '', 'Snapshot': timestamped})
def volume_snapshot(req):
    """snapshot a volume in the SNAPSHOTS dir"""
    name = req['Name']
    path = join(VOLUMES_PATH, name)
    if not os.path.exists(path):
        return {'Err': 'No such volume: {}'.format(name)}
    # snapshot names carry the creation time after the '@'
    stamp = datetime.now().strftime(DTFORMAT)
    timestamped = '{}@{}'.format(name, stamp)
    try:
        btrfs.Subvolume(path).snapshot(
            join(SNAPSHOTS_PATH, timestamped), readonly=True)
    except Exception as e:
        log.error("Error creating snapshot: %s", str(e))
        return {'Err': str(e)}
    return {'Err': '', 'Snapshot': timestamped}
def snapshot_clone(req):
    """Create a new volume as clone from another.

    req: dict with 'Name' (source volume) and 'Target' (new volume name).
    Returns {'Err': ''} plus 'VolumeCloned' on success.
    """
    volumename = req['Name']
    targetname = req.get('Target')
    if not targetname:
        # BUGFIX: a missing 'Target' previously crashed inside join() with
        # a TypeError instead of returning a proper error to the caller.
        return {'Err': 'Target name required'}
    volumepath = join(VOLUMES_PATH, volumename)
    targetpath = join(VOLUMES_PATH, targetname)
    volume = btrfs.Subvolume(volumepath)
    res = {'Err': ''}
    if volume.exists():
        # clone
        volume.snapshot(targetpath)
        res['VolumeCloned'] = targetname
    else:
        res['Err'] = 'No such volume'
    return res
def volume_create(req):
    """Create a btrfs subvolume for the requested volume name."""
    name = req['Name']
    if '@' in name:
        return {'Err': '"@" is illegal in a volume name'}
    # creating an already-existing volume is a silent no-op
    existing = [v['Name'] for v in list_volumes()['Volumes']]
    if name in existing:
        return {'Err': ''}
    try:
        btrfs.Subvolume(join(VOLUMES_PATH, name)).create()
    except CalledProcessError as e:
        return {'Err': e.stderr.decode()}
    except OSError as e:
        return {'Err': e.strerror}
    except Exception as e:
        return {'Err': str(e)}
    return {'Err': ''}
def snapshot_clone():
    """Create a new volume as clone from another.

    Expects a JSON body with 'Name' (source volume) and 'Target' (new
    volume name); returns a JSON-encoded result.
    """
    params = jsonloads(request.body.read())
    volume_name = params['Name']
    target_name = params.get('Target')
    if not target_name:
        # BUGFIX: a missing 'Target' previously crashed inside join() with
        # a TypeError instead of returning a proper error to the caller.
        return json.dumps({'Err': 'Target name required'})
    volume_path = join(VOLUMES_PATH, volume_name)
    target_path = join(VOLUMES_PATH, target_name)
    volume = btrfs.Subvolume(volume_path)
    res = {'Err': ''}
    if volume.exists():
        # clone
        volume.snapshot(target_path)
        res['VolumeCloned'] = target_name
    else:
        res['Err'] = 'No such volume'
    return json.dumps(res)
def snapshots_purge():
    """Purge snapshots with a retention pattern (see cli help)."""
    params = jsonloads(request.body.read())
    volume_name = params['Name']
    dryrun = params.get('Dryrun', False)
    # convert the pattern to minutes, check validity and reorder
    units = {
        'm': 1,
        'h': 60,
        'd': 60 * 24,
        'w': 60 * 24 * 7,
        'y': 60 * 24 * 365
    }
    try:
        pattern = sorted(
            int(i[:-1]) * units[i[-1]] for i in params['Pattern'].split(':'))
        # BUGFIX: was an `assert`, which is silently stripped under -O
        if len(pattern) < 2:
            raise ValueError('pattern needs at least two terms')
    except Exception:
        # BUGFIX: was a bare `except:` (also caught SystemExit and
        # KeyboardInterrupt)
        log.error("Invalid purge pattern: %s", params['Pattern'])
        return json.dumps({'Err': 'Invalid purge pattern'})
    # snapshots related to the volume, more recents first
    snapshots = (s for s in os.listdir(SNAPSHOTS_PATH)
                 if s.startswith(volume_name + '@'))
    try:
        for snapshot in compute_purges(snapshots, pattern, datetime.now()):
            if dryrun:
                log.info('(Dry run) Would delete snapshot {}'.format(snapshot))
            else:
                btrfs.Subvolume(join(SNAPSHOTS_PATH, snapshot)).delete()
                log.info('Deleted snapshot {}'.format(snapshot))
    except Exception as e:
        # BUGFIX: generic exceptions have no .strerror attribute; reporting
        # would itself raise AttributeError. Use str(e).
        log.error("Error purging snapshots: %s", str(e))
        return json.dumps({'Err': str(e)})
    return json.dumps({'Err': ''})
def volumepath(name):
    """Return the path of the named volume, or None if it does not exist."""
    candidate = join(VOLUMES_PATH, name)
    return candidate if btrfs.Subvolume(candidate).exists() else None
def cleanup(self):
    """clean-up test volumes and snapshots before each test"""
    for base in (VOLUMES_PATH, SNAPSHOTS_PATH, TEST_REMOTE_PATH):
        # delete every subvolume matching the test prefix glob
        pattern = join(base, PREFIX_TEST_VOLUME) + '*'
        btrfs.Subvolume(pattern).delete(check=False)
def snapshots_purge():
    """Purge snapshots with a retention pattern (see cli help)."""
    params = jsonloads(request.body.read())
    volume_name = params['Name']
    dryrun = params.get('Dryrun', False)
    # pattern terms are converted to minutes
    units = {'m': 1, 'h': 60, 'd': 60*24, 'w': 60*24*7, 'y': 60*24*365}
    try:
        pattern = sorted(int(i[:-1])*units[i[-1]]
                         for i in params['Pattern'].split(':'))
        # BUGFIX: was `assert`, which is silently stripped under -O, inside
        # a bare `except:` (also caught SystemExit/KeyboardInterrupt)
        if len(pattern) < 2:
            raise ValueError('pattern needs at least two terms')
        max_age = pattern[-1]
    except Exception:
        log.error("Invalid purge pattern: %s", params['Pattern'])
        return json.dumps({'Err': 'Invalid purge pattern'})
    # snapshots related to the volume, more recents first
    candidates = sorted((s for s in os.listdir(SNAPSHOTS_PATH)
                         if s.startswith(volume_name + '@')), reverse=True)
    now = datetime.now()
    # Names and ages (in minutes) kept in two aligned lists.
    # Example ages: [30, 70, 90, 150, 210, ..., 4000]
    # BUGFIX: snapshots with an unparseable date were skipped in the age
    # list but kept in the name list, so later deletions indexed by age
    # position could delete the WRONG snapshot. Filter both together.
    snapshots = []
    snapshots_age = []
    for s in candidates:
        try:
            age = int((now - datetime.strptime(
                s.split('@')[1],
                "%Y-%m-%dT%H:%M:%S.%f")).total_seconds()) // 60
        except Exception:
            log.info("Skipping purge of %s with invalid date format", s)
            continue
        snapshots.append(s)
        snapshots_age.append(age)
    if not snapshots:
        return json.dumps({'Err': ''})
    # pattern = 60:180:3600 -> age segments = [(60, 180), (180, 3600)]
    try:
        for age_segment in [(pattern[i], pattern[i+1])
                            for i in range(len(pattern) - 1)]:
            last_timeframe = -1
            for i, age in enumerate(snapshots_age):
                # If the age is outside the age_segment, delete nothing.
                # The chained comparison means ages beyond the segment end
                # are only skipped while that end is still below max_age,
                # so the last segment also handles the very old snapshots.
                if age <= age_segment[0] or age >= age_segment[1] < max_age:
                    continue
                # Snapshots in the same timeframe share age // segment start
                # (e.g. 70//60 == 90//60).
                timeframe = age // age_segment[0]
                # delete if we already had a snapshot in the same timeframe
                # or if the snapshot is very old
                if timeframe == last_timeframe or age > max_age:
                    snapshot = snapshots[i]
                    if dryrun:
                        log.info('(Dry run) Would delete snapshot {}'
                                 .format(snapshot))
                    else:
                        btrfs.Subvolume(
                            join(SNAPSHOTS_PATH, snapshot)).delete()
                        log.info('Deleted snapshot {}'.format(snapshot))
                last_timeframe = timeframe
    except Exception as e:
        # BUGFIX: generic exceptions have no .strerror; use str(e)
        log.error("Error purging snapshots: %s", str(e))
        return json.dumps({'Err': str(e)})
    return json.dumps({'Err': ''})
def cleanup_snapshots():
    """Remove every test snapshot (matched by the test-volume prefix glob)."""
    glob_pattern = join(SNAPSHOTS_PATH, PREFIX_TEST_VOLUME) + '*'
    btrfs.Subvolume(glob_pattern).delete(check=False)