def main(args):
  """Main function which runs worker.

  Logs a banner for the round, optionally warns about blacklisted
  submissions, sanity-checks GPU access via nvidia-docker, then builds an
  EvaluationWorker from the parsed CLI args and runs it until done.

  Args:
    args: parsed command-line arguments (argparse Namespace or similar).
  """
  banner_title = '## Starting evaluation of round {0} ##'.format(args.round_name)
  width = len(banner_title)
  solid_row = '#' * width
  hollow_row = '##' + ' ' * (width - 2) + '##'
  # Assemble the multi-line banner in one pass; the leading/trailing ''
  # entries reproduce the surrounding newlines of the original message.
  logging.info('\n'.join([
      '', solid_row, solid_row, hollow_row, banner_title,
      solid_row, solid_row, hollow_row, '']))
  if args.blacklisted_submissions:
    logging.warning('BLACKLISTED SUBMISSIONS: %s', args.blacklisted_submissions)
  random.seed()
  # Fail fast if the GPU runtime is broken before doing any real work.
  logging.info('Running nvidia-docker to ensure that GPU works')
  shell_call(['docker', 'run', '--runtime=nvidia', '--rm', 'nvidia/cuda',
              'nvidia-smi'])
  storage_client = eval_lib.CompetitionStorageClient(
      args.project_id, args.storage_bucket)
  datastore_client = eval_lib.CompetitionDatastoreClient(
      args.project_id, args.round_name)
  eval_worker = EvaluationWorker(
      worker_id=args.worker_id,
      storage_client=storage_client,
      datastore_client=datastore_client,
      storage_bucket=args.storage_bucket,
      round_name=args.round_name,
      dataset_name=args.dataset_name,
      blacklisted_submissions=args.blacklisted_submissions,
      num_defense_shards=args.num_defense_shards)
  eval_worker.run_work()
def main(args):
  """Main function which runs worker.

  Prints a startup banner, warns if any submissions are blacklisted,
  verifies that nvidia-docker can see the GPU, then constructs and runs
  an EvaluationWorker configured from *args*.

  Args:
    args: parsed command-line arguments (argparse Namespace or similar).
  """
  title = "## Starting evaluation of round {0} ##".format(args.round_name)
  hashes = "#" * len(title)
  side_walls = "##" + " " * (len(title) - 2) + "##"
  # Empty first/last entries give the banner its surrounding blank lines.
  banner_lines = ["", hashes, hashes, side_walls, title,
                  hashes, hashes, side_walls, ""]
  logging.info("\n".join(banner_lines))
  if args.blacklisted_submissions:
    logging.warning("BLACKLISTED SUBMISSIONS: %s",
                    args.blacklisted_submissions)
  random.seed()
  # Smoke-test the GPU runtime up front so a broken driver surfaces early.
  logging.info("Running nvidia-docker to ensure that GPU works")
  docker_cmd = ["docker", "run", "--runtime=nvidia", "--rm",
                "nvidia/cuda", "nvidia-smi"]
  shell_call(docker_cmd)
  eval_worker = EvaluationWorker(
      worker_id=args.worker_id,
      storage_client=eval_lib.CompetitionStorageClient(
          args.project_id, args.storage_bucket),
      datastore_client=eval_lib.CompetitionDatastoreClient(
          args.project_id, args.round_name),
      storage_bucket=args.storage_bucket,
      round_name=args.round_name,
      dataset_name=args.dataset_name,
      blacklisted_submissions=args.blacklisted_submissions,
      num_defense_shards=args.num_defense_shards,
  )
  eval_worker.run_work()
def main(args):
  """Main function which runs master.

  Chooses the dataset size (limited vs. full), constructs an
  EvaluationMaster from the parsed CLI args, and dispatches to the
  master operation named by ``args.command``. Unknown commands print
  usage help instead of raising.

  Args:
    args: parsed command-line arguments (argparse Namespace or similar).
  """
  if args.blacklisted_submissions:
    logging.warning("BLACKLISTED SUBMISSIONS: %s",
                    args.blacklisted_submissions)
  if args.limited_dataset:
    # Small fixed dataset for quick runs: 3 batches of 10 images.
    logging.info("Using limited dataset: 3 batches * 10 images")
    max_dataset_num_images = 30
    batch_size = 10
  else:
    logging.info("Using full dataset. Batch size: %d", DEFAULT_BATCH_SIZE)
    max_dataset_num_images = None  # no cap on dataset size
    batch_size = DEFAULT_BATCH_SIZE
  random.seed()
  print("\nRound: {0}\n".format(args.round_name))
  eval_master = EvaluationMaster(
      storage_client=eval_lib.CompetitionStorageClient(
          args.project_id, args.storage_bucket),
      datastore_client=eval_lib.CompetitionDatastoreClient(
          args.project_id, args.round_name),
      round_name=args.round_name,
      dataset_name=args.dataset_name,
      blacklisted_submissions=args.blacklisted_submissions,
      results_dir=args.results_dir,
      num_defense_shards=args.num_defense_shards,
      verbose=args.verbose,
      batch_size=batch_size,
      max_dataset_num_images=max_dataset_num_images,
  )
  # Dispatch table maps each CLI command to the EvaluationMaster method
  # that implements it.
  command_handlers = {
      "attack": eval_master.prepare_attacks,
      "defense": eval_master.prepare_defenses,
      "cleanup_defenses": eval_master.cleanup_defenses,
      "results": eval_master.compute_results,
      "status": eval_master.show_status,
      "cleanup_datastore": eval_master.cleanup_datastore,
      "cleanup_failed_attacks": eval_master.cleanup_failed_attacks,
      "cleanup_attacks_with_zero_images":
          eval_master.cleanup_attacks_with_zero_images,
  }
  handler = command_handlers.get(args.command)
  if handler is not None:
    handler()
  else:
    print("Invalid command: ", args.command)
    print("")
    print(USAGE)