def test_overrides_are_not_saved(self):
    cfg = ConfigStruct('/home/mycfg', options={'one': 1, 'two': 2})
    cfg.options.might_prefer(one='cage match', two=None)
    cfg.save()
    with open('/home/mycfg') as fh:
        body = fh.read().strip()
    assert 'two = 2' in body and 'one = 1' in body

def test_my_added_items(self):
    cfg = ConfigStruct('/home/mycfg', options={})
    self.mfs.add_entries({'/home/mycfg': '[options]\nfancy = True\n'})
    cfg.options.shoes = 'unlaced'
    cfg.save()
    with open('/home/mycfg') as fh:
        body = fh.read().strip()
    assert 'fancy = True' in body and 'shoes = unlaced' in body

def test_their_added_items(self):
    cfg = ConfigStruct('/home/mycfg', options={})
    self.mfs.add_entries({'/home/mycfg': '[options]\nfancy = True\nshoes = laced'})
    cfg.options.fancy = 'MINE, DAMMIT!'
    cfg.save()
    with open('/home/mycfg') as fh:
        body = fh.read().strip()
    assert 'fancy = MINE, DAMMIT!' in body and 'shoes = laced' in body

def test_default_logging(self, capsys):
    cfg = ConfigStruct('/home/mycfg', 'options')
    cfg.configure_basic_logging('me')
    main_logger = logging.getLogger('me')
    child_logger = main_logger.getChild('runt')
    other_logger = logging.getLogger('stranger')
    main_logger.info('main info')
    child_logger.info('child info')
    other_logger.info('other info')
    other_logger.warning('other warn')  # warning() replaces the deprecated warn()
    out, err = capsys.readouterr()
    assert ('main info' in err and 'child info' in err and
            'other warn' in err and 'other info' not in err)

def test_default_logging_options(self):
    cfg = ConfigStruct('/home/mycfg', 'options')
    cfg.save()
    with open('/home/mycfg') as fh:
        body = fh.read().strip()
    assert 'log_level = info' in body and 'log_file = STDERR' in body

def test_with_overrides(self):
    cfg = ConfigStruct('/home/mycfg', options={'one': 1, 'two': 2})
    cfg.options.might_prefer(one='cage match', two=None)
    assert cfg.options.one == 'cage match' and cfg.options.two == 2

def test_choose_theirs(self):
    self.mfs.add_entries({'/home/mycfg': '[options]\nfancy = True\n'})
    cfg = ConfigStruct('/home/mycfg', options={'fancy': False})
    assert cfg.options.fancy

def test_with_defaults(self):
    cfg = ConfigStruct('/home/mycfg', options={'one': 1})
    assert cfg.options.one == 1
    cfg.save()
    with open('/home/mycfg') as fh:
        assert fh.read().strip() == '[options]\none = 1'

def test_repr_with_overrides(self):
    cfg = ConfigStruct('/home/mycfg', options={'stuff': 'nonsense'})
    cfg.options.might_prefer(fancy=True)
    rc = repr(cfg)
    assert 'nonsense' in rc and 'fancy' in rc

def test_repr(self):
    cfg = ConfigStruct('/home/mycfg', options={'stuff': 'nonsense'})
    assert repr(cfg) == '''{'options': {'stuff': 'nonsense'}}'''

def test_empty_save(self):
    cfg = ConfigStruct('/home/mycfg')
    cfg.save()
    assert os.path.getsize('/home/mycfg') == 0

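# A hedged sketch of the fixture these tests appear to depend on: `self.mfs`
# and its add_entries() call suggest the mockfs library, so a setup/teardown
# pair along these lines (pytest method names, not from the original, and
# assuming `import mockfs` at module level) would supply the fake filesystem.
def setup_method(self, method):
    self.mfs = mockfs.replace_builtins()  # swap open()/os helpers for an in-memory filesystem

def teardown_method(self, method):
    mockfs.restore_builtins()  # restore the real filesystem builtins
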
def main(config_file, region, log_level, log_file, concurrency,
         selection_string, accumulation_string, reduction_string, command, s3_uri):
    '''Perform simple listing, collating, or deleting of many S3 objects at the same time.

    Examples:

    \b
    List empty objects:
        s3workers list --select 'size == 0' s3://mybucket/myprefix

    \b
    Report total of all non-empty objects:
        s3workers list --select 'size > 0' --reduce 'accumulator += size' s3://mybucket/myprefix

    \b
    Total size grouped by MD5:
        s3workers list --accumulator '{}' --reduce 'v=accumulator.get(md5,0)+size; accumulator[md5]=v' s3://mybucket/myprefix
    '''  # noqa: E501
    config = ConfigStruct(config_file, 'options', options=DEFAULTS)
    opts = config.options
    # Let command-line options take temporary precedence when values are provided.
    opts.might_prefer(region=region, log_level=log_level, log_file=log_file,
                      concurrency=concurrency)
    config.configure_basic_logging(__name__)
    logger = logging.getLogger(__name__)

    # Split "s3://bucket/prefix" (scheme optional) into bucket name and key prefix.
    s3_uri = re.sub(r'^(s3:)?/+', '', s3_uri)
    items = s3_uri.split('/', 1)
    bucket_name = items[0]
    prefix = items[1] if len(items) > 1 else ''

    conn = s3.connect_to_region(opts.region) if opts.region else connect_s3()
    bucket = conn.get_bucket(bucket_name)

    progress = S3KeyProgress()
    reducer = None

    if reduction_string:
        # With a reducer, each listed key also feeds the running accumulator.
        reducer = Reducer(reduction_string, accumulation_string)

        def key_dumper(key):
            accumulator = reducer.reduce(key.name, key.size, key.md5, key.last_modified)
            progress.write('%s %10d %s %s => %s',
                           key.last_modified, key.size, key.md5, key.name, accumulator)
    else:
        def key_dumper(key):
            progress.write('%s %10d %s %s', key.last_modified, key.size, key.md5, key.name)

    def key_deleter(key):
        progress.write('DELETING: %s %10d %s %s',
                       key.last_modified, key.size, key.md5, key.name)
        key.delete()

    selector = compile(selection_string, '<select>', 'eval') if selection_string else None
    handler = key_deleter if command == 'delete' else key_dumper

    manager = Manager(opts.concurrency)
    manager.start_workers()

    logger.info('Preparing %d jobs for %d workers', len(SHARDS), manager.worker_count)

    # Break the listing into one job per single-character prefix shard.
    for shard in SHARDS:
        manager.add_work(S3ListJob(bucket, prefix + shard, selector, handler, progress.report))

    manager.wait_for_workers()
    progress.finish()

    if reducer:
        click.echo('Final accumulator value: ' + str(reducer.accumulator))
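
# A minimal sketch of a script entry point, assuming `main` is decorated as a
# click command (@click.command plus @click.option/@click.argument declarations)
# elsewhere in this module; click then parses sys.argv into the parameters above.
if __name__ == '__main__':
    main()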