        type=int,
        help='Turn on interpolation and give a number of frames of output')
    parser.add_argument(
        '--versions', default=1, type=int,
        help='how many versions to make [put formatter in filename if > 1]')
    parser.add_argument('--length', default=None, type=int,
                        help='Length of generated vector list')
    args = parser.parse_args()

    template_dict = {}
    array_to_image = load_image_function(args.renderer + ".render")
    render_parts = args.renderer.split('.')
    template_dict["RENDERER"] = render_parts[-1]

    if args.random_seed is not None:
        print("Setting random seed: ", args.random_seed)
        random.seed(args.random_seed)
        np.random.seed(args.random_seed)
        template_dict["SEED"] = args.random_seed
    else:
        template_dict["SEED"] = None
    template_dict["SIZE"] = args.size

    if args.outfile == "None":
        args.outfile = None
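    # Illustrative guess, not this script's confirmed behavior: the
    # RENDERER/SEED/SIZE entries collected in template_dict above are
    # presumably substituted into the output filename, e.g. an outfile
    # template such as "out_{RENDERER}_{SEED}_{SIZE}.png" could be filled
    # in with:
    #
    #   outfile = args.outfile.format(**template_dict)
    #
    # The actual substitution is assumed to happen elsewhere in this script.
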
def main():
    global good_enough, max_dry_period, render_size
    global sigma, alpha

    parser = argparse.ArgumentParser(description="shape optimization")
    parser.add_argument('--input-array', default=None,
                        help="path to .npy file used as the initial array")
    parser.add_argument('--outdir', default=None,
                        help="directory for saved outputs")
    parser.add_argument('--catlog', default=None,
                        help="read/write to category log file")
    parser.add_argument(
        '--maxcats', default=1, type=int,
        help="maximum entries in catlog before category is blacklisted")
    parser.add_argument('--target-class', default=None,
                        help='which target classes to optimize')
    parser.add_argument('--show-name', default=False, action='store_true',
                        help="show imagenet classname and exit")
    parser.add_argument('--show-friendly-name', default=False, action='store_true',
                        help="show friendly imagenet classname and exit")
    parser.add_argument("--renderer", default="lines1",
                        help="renderer with image drawing function")
    parser.add_argument("--networks", default="train1",
                        help="comma separated list of networks")
    parser.add_argument('--random-seed', default=None, type=int,
                        help='Use a specific random seed (for repeatability)')
    parser.add_argument(
        '--random-head', default=None, type=int,
        help='Add N steps of random initializations of head data (cur 2)')
    parser.add_argument('--header-length', default=2, type=int,
                        help='The length of the header (used for random-head)')
    parser.add_argument('--early-stop', default=None,
                        help='early stop number (good enough)')
    parser.add_argument('--max-attempts', default=30, type=int,
                        help='stop if no improvement for n cycles')
    parser.add_argument('--num-lines', default=17, type=int,
                        help='Number of lines to use')
    parser.add_argument('--render-size', default=None, type=int,
                        help='Size to render during testing')
    parser.add_argument('--num-pop', default=100, type=int,
                        help='Population size')
    parser.add_argument('--alpha-scale', default=1, type=float,
                        help='scale learning rate')
    parser.add_argument('--sigma-scale', default=1, type=float,
                        help='Scale random noise added each cycle')
    parser.add_argument('--init-step', default=4, type=int,
                        help='Init step')
    parser.add_argument('--max-iterations', default=1000, type=int,
                        help='Maximum iterations')
    args = parser.parse_args()

    # apply arguments
    outdir = args.outdir
    if args.early_stop is not None and args.early_stop.lower() != "none":
        good_enough = float(args.early_stop)
    max_dry_period = args.max_attempts
    print("Threshold is {} attempts to {}".format(max_dry_period, good_enough))
    if args.render_size is not None:
        render_size = args.render_size
        print("Overriding render_size to {}".format(render_size))

    class_mapping = open_class_mapping()
    if args.target_class is None or args.target_class == "none":
        imagenet_indexes = None
    else:
        imagenet_indexes = get_class_index_list(class_mapping, args.target_class)

    # scale alpha and/or sigma
    if args.sigma_scale != 1:
        old_sigma = sigma
        sigma *= args.sigma_scale
        print("Scaling sigma {}x from {} to {}".format(
            args.sigma_scale, old_sigma, sigma))
    if args.alpha_scale != 1:
        old_alpha = alpha
        alpha *= args.alpha_scale
        print("Scaling alpha {}x from {} to {}".format(
            args.alpha_scale, old_alpha, alpha))

    # let's get to it
    if imagenet_indexes is not None:
        if args.networks == "vggface":
            with open("labels.json") as json_data:
                label_index = imagenet_indexes[0]
                d = json.load(json_data)
                # TODO: maybe handle multiples here
                categories = []
                if args.show_name or args.show_friendly_name:
                    print(d[label_index].strip())
                    sys.exit(0)
                if label_index < len(d):
                    categories.append(d[label_index].strip())
                else:
                    categories.append("face_{:04d}".format(int(label_index)))
        else:
            categories = []
            for imagenet_index in imagenet_indexes:
                if args.show_name:
                    print(get_class_fullname(class_mapping, imagenet_index))
                    sys.exit(0)
                if args.show_friendly_name:
                    print(get_class_label(class_mapping, imagenet_index))
                    sys.exit(0)
                class_label = get_class_label(class_mapping, imagenet_index)
                if class_label is not None:
                    categories.append(class_label)
                else:
                    categories.append("category_{:04d}".format(int(imagenet_index)))
        print("----> Processing {}".format(categories))

    # make output directory if needed
    if outdir and not os.path.exists(outdir):
        os.makedirs(outdir)

    # setup models
    active_models = get_active_models_from_arg(args.networks)
    array_to_image = load_image_function(args.renderer + ".render")

    if args.random_seed:
        print("Setting random seed: ", args.random_seed)
        random.seed(args.random_seed)
        np.random.seed(args.random_seed)
        # TODO: not do this or maybe there is a tf2 way?
        tf.compat.v1.set_random_seed(args.random_seed)

    if args.catlog == "none":
        args.catlog = None
    if imagenet_indexes is None and args.catlog is not None:
        read_category_blacklist(args.catlog, args.maxcats)

    if imagenet_indexes is None:
        objective_fn = get_optimization_function_noindex(
            active_models, array_to_image, render_size)
    else:
        objective_fn = get_optimization_function(
            active_models, imagenet_indexes, array_to_image, render_size)

    # optimize(outdir, objective_fn, args.num_pop, 1000)
    if args.input_array is not None:
        initial_array = np.load(args.input_array)
        print("loaded data from: {}".format(args.input_array))
    else:
        initial_array = None

    # print("RANDOM HEAD {}".format(args.random_head))
    optimize(outdir, array_to_image, objective_fn,
             iterations=args.max_iterations,
             numpop=args.num_pop,
             preview_size=render_size,
             num_lines=args.num_lines,
             initial_array=initial_array,
             init_step=args.init_step,
             rand_head=args.random_head,
             head_length=args.header_length)

    if imagenet_indexes is None and args.catlog is not None:
        append_category_blacklist(args.catlog)
        '--interpolate', default=None, type=int,
        help='Turn on interpolation and give a number of frames of output')
    parser.add_argument(
        '--versions', default=1, type=int,
        help='how many versions to make [put formatter in filename if > 1]')
    parser.add_argument('--length', default=None, type=int,
                        help='Length of generated vector list')
    args = parser.parse_args()

    array_to_image = load_image_function(args.image_function)

    if args.random_seed:
        print("Setting random seed: ", args.random_seed)
        random.seed(args.random_seed)
        np.random.seed(args.random_seed)
        # for i in range(args.random_seed):
        #     n = np.random.uniform()

    if args.input_glob is None:
        files = ["(random)"]
    else:
        files = real_glob(args.input_glob)
        print("Found {} files in glob {}".format(len(files), args.input_glob))
        if len(files) == 0:
            print("No files to process")