def apply(ctx, conf_dir, auto_approve, dataset, exclude_dataset, mode, backup_dataset):
    """Apply local dataset and table configuration to BigQuery.

    Adds, changes, and destroys datasets first, then tables for each locally
    configured dataset, finally printing a summary of applied counts.

    :param ctx: click context; ``ctx.obj`` supplies ``parallelism``, ``project``,
        ``credential_file``, ``color`` and ``debug``.
    :param conf_dir: directory holding the local dataset/table configuration.
    :param auto_approve: accepted but not yet implemented (see TODO below).
    :param dataset: dataset filter passed to listing helpers.
    :param exclude_dataset: dataset exclusion filter passed to listing helpers.
    :param mode: migration mode forwarded to ``TableAction``.
    :param backup_dataset: backup dataset id forwarded to ``TableAction``.
    """
    # TODO Impl auto-approve option
    add_counts, change_counts, destroy_counts = [], [], []
    with ThreadPoolExecutor(max_workers=ctx.obj['parallelism']) as e:
        dataset_action = DatasetAction(e,
                                       project=ctx.obj['project'],
                                       credential_file=ctx.obj['credential_file'],
                                       no_color=not ctx.obj['color'],
                                       debug=ctx.obj['debug'])
        # NOTE(review): if this is concurrent.futures.as_completed, it yields
        # Future objects, which are always truthy, so the `if d` filter is a
        # no-op — confirm whether a project-local as_completed (yielding
        # results, possibly None) is in scope instead.
        source_datasets = [d for d in as_completed(dataset_action.list_datasets(
            dataset, exclude_dataset)) if d]
        target_datasets = list_local_datasets(conf_dir, dataset, exclude_dataset)
        echo('------------------------------------------------------------------------')
        echo()

        # Phase 1: dataset-level add/change/destroy.
        fs = []
        add_count, add_fs = dataset_action.add(source_datasets, target_datasets)
        add_counts.append(add_count)
        fs.extend(add_fs)
        change_count, change_fs = dataset_action.change(source_datasets, target_datasets)
        change_counts.append(change_count)
        fs.extend(change_fs)
        destroy_count, destroy_fs = dataset_action.destroy(source_datasets, target_datasets)
        destroy_counts.append(destroy_count)
        fs.extend(destroy_fs)
        # BUG FIX: a bare `as_completed(fs)` statement builds a generator that
        # is never iterated, so it did NOT wait for the dataset operations to
        # finish. Drain it so all dataset mutations complete before the table
        # phase starts.
        for _ in as_completed(fs):
            pass

        # Phase 2: table-level add/change/destroy per locally configured dataset.
        fs = []
        for d in target_datasets:
            target_tables = list_local_tables(conf_dir, d.dataset_id)
            if target_tables is None:
                # No local table configuration for this dataset — skip it.
                continue
            table_action = TableAction(e, d.dataset_id,
                                       migration_mode=mode,
                                       backup_dataset_id=backup_dataset,
                                       project=ctx.obj['project'],
                                       credential_file=ctx.obj['credential_file'],
                                       no_color=not ctx.obj['color'],
                                       debug=ctx.obj['debug'])
            source_tables = [t for t in as_completed(table_action.list_tables()) if t]
            if target_tables or source_tables:
                echo('------------------------------------------------------------------------')
                echo()
                add_count, add_fs = table_action.add(source_tables, target_tables)
                add_counts.append(add_count)
                fs.extend(add_fs)
                change_count, change_fs = table_action.change(source_tables, target_tables)
                change_counts.append(change_count)
                fs.extend(change_fs)
                destroy_count, destroy_fs = table_action.destroy(source_tables, target_tables)
                destroy_counts.append(destroy_count)
                fs.extend(destroy_fs)
        # BUG FIX (same as above): actually wait for the table operations.
        for _ in as_completed(fs):
            pass

    if not any(chain.from_iterable([add_counts, change_counts, destroy_counts])):
        echo(msg.MESSAGE_SUMMARY_NO_CHANGE)
        echo()
    else:
        echo(msg.MESSAGE_APPLY_SUMMARY.format(
            sum(add_counts), sum(change_counts), sum(destroy_counts)))
        echo()
def plan(ctx, conf_dir, detailed_exitcode, dataset, exclude_dataset):
    """Show the changes that ``apply`` would make, without executing them.

    Prints planned add/change/destroy counts for datasets and then for the
    tables of each locally configured dataset, followed by a summary. When
    *detailed_exitcode* is set and changes are pending, exits with status 2.

    :param ctx: click context; ``ctx.obj`` supplies ``parallelism``, ``project``,
        ``credential_file``, ``color`` and ``debug``.
    :param conf_dir: directory holding the local dataset/table configuration.
    :param detailed_exitcode: exit with code 2 when any change is planned.
    :param dataset: dataset filter passed to listing helpers.
    :param exclude_dataset: dataset exclusion filter passed to listing helpers.
    """
    echo(msg.MESSAGE_PLAN_HEADER)
    add_counts = []
    change_counts = []
    destroy_counts = []
    with ThreadPoolExecutor(max_workers=ctx.obj['parallelism']) as executor:
        dataset_action = DatasetAction(executor,
                                       project=ctx.obj['project'],
                                       credential_file=ctx.obj['credential_file'],
                                       no_color=not ctx.obj['color'],
                                       debug=ctx.obj['debug'])
        listed = dataset_action.list_datasets(dataset, exclude_dataset)
        source_datasets = [ds for ds in as_completed(listed) if ds]
        target_datasets = list_local_datasets(conf_dir, dataset, exclude_dataset)
        echo('------------------------------------------------------------------------')
        echo()

        # Dataset-level plan.
        add_counts.append(dataset_action.plan_add(source_datasets, target_datasets))
        change_counts.append(dataset_action.plan_change(source_datasets, target_datasets))
        destroy_counts.append(dataset_action.plan_destroy(source_datasets, target_datasets))

        # Table-level plan for every dataset that has local table configuration.
        for target_dataset in target_datasets:
            target_tables = list_local_tables(conf_dir, target_dataset.dataset_id)
            if target_tables is None:
                continue
            table_action = TableAction(executor, target_dataset.dataset_id,
                                       project=ctx.obj['project'],
                                       credential_file=ctx.obj['credential_file'],
                                       no_color=not ctx.obj['color'],
                                       debug=ctx.obj['debug'])
            source_tables = [tbl for tbl in as_completed(table_action.list_tables()) if tbl]
            if target_tables or source_tables:
                echo('------------------------------------------------------------------------')
                echo()
                add_counts.append(table_action.plan_add(source_tables, target_tables))
                change_counts.append(table_action.plan_change(source_tables, target_tables))
                destroy_counts.append(table_action.plan_destroy(source_tables, target_tables))

    has_changes = any(chain.from_iterable([add_counts, change_counts, destroy_counts]))
    if has_changes:
        echo(msg.MESSAGE_PLAN_SUMMARY.format(
            sum(add_counts), sum(change_counts), sum(destroy_counts)))
        echo()
        if detailed_exitcode:
            sys.exit(2)
    else:
        echo(msg.MESSAGE_SUMMARY_NO_CHANGE)
        echo()
def apply_destroy(ctx, conf_dir, auto_approve, dataset, exclude_dataset):
    """Destroy managed BigQuery resources: tables first, then their datasets.

    For every locally configured dataset, destroys its remote tables, then
    destroys the datasets that exist both remotely and locally
    (``intersection_destroy``), finally printing a summary of destroyed counts.

    :param ctx: click context; ``ctx.obj`` supplies ``parallelism``, ``project``,
        ``credential_file``, ``color`` and ``debug``.
    :param conf_dir: directory holding the local dataset/table configuration.
    :param auto_approve: accepted but not yet implemented (see TODO below).
    :param dataset: dataset filter passed to listing helpers.
    :param exclude_dataset: dataset exclusion filter passed to listing helpers.
    """
    # TODO Impl auto-approve option
    destroy_counts = []
    with ThreadPoolExecutor(max_workers=ctx.obj['parallelism']) as e:
        dataset_action = DatasetAction(e,
                                       project=ctx.obj['project'],
                                       credential_file=ctx.obj['credential_file'],
                                       no_color=not ctx.obj['color'],
                                       debug=ctx.obj['debug'])
        source_datasets = [d for d in as_completed(dataset_action.list_datasets(
            dataset, exclude_dataset)) if d]
        target_datasets = list_local_datasets(conf_dir, dataset, exclude_dataset)
        echo('------------------------------------------------------------------------')
        echo()

        # Phase 1: destroy all remote tables of each locally configured dataset.
        fs = []
        for d in target_datasets:
            table_action = TableAction(e, d.dataset_id,
                                       project=ctx.obj['project'],
                                       credential_file=ctx.obj['credential_file'],
                                       no_color=not ctx.obj['color'],
                                       debug=ctx.obj['debug'])
            source_tables = [t for t in as_completed(table_action.list_tables()) if t]
            if source_tables:
                echo('------------------------------------------------------------------------')
                echo()
                # Empty target list: every remote table in the dataset is destroyed.
                destroy_count, destroy_fs = table_action.destroy(source_tables, [])
                destroy_counts.append(destroy_count)
                fs.extend(destroy_fs)
        # BUG FIX: a bare `as_completed(fs)` statement builds a generator that
        # is never iterated, so it did NOT wait for the table destroys. Drain
        # it so datasets are emptied before the dataset destroys run.
        for _ in as_completed(fs):
            pass

        # Phase 2: destroy the datasets present both remotely and locally.
        fs = []
        destroy_count, destroy_fs = dataset_action.intersection_destroy(
            source_datasets, target_datasets)
        destroy_counts.append(destroy_count)
        fs.extend(destroy_fs)
        # BUG FIX (same as above): actually wait for the dataset destroys.
        for _ in as_completed(fs):
            pass

    if not any(destroy_counts):
        echo(msg.MESSAGE_SUMMARY_NO_CHANGE)
        echo()
    else:
        echo(msg.MESSAGE_APPLY_DESTROY_SUMMARY.format(sum(destroy_counts)))
        echo()