def check_lazy_initialize(args, dmodel, smile_offsets):
    # debug: don't load anything...
    # return dmodel, smile_offsets

    # first get model ready
    if dmodel is None and (args.model is not None or args.model_file is not None):
        print('Finding saved model...')
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type)

    # get attributes
    if smile_offsets is None and args.anchor_offset is not None:
        offsets = get_json_vectors(args.anchor_offset)
        dim = len(offsets[0])
        offset_indexes = args.anchor_indexes.split(",")
        offset_vector = offset_from_string(offset_indexes[0], offsets, dim)
        for n in range(1, len(offset_indexes)):
            offset_vector += offset_from_string(offset_indexes[n], offsets, dim)
        smile_offsets = [offset_vector]

    return dmodel, smile_offsets
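# --- Hedged usage sketch (not part of the original source) -----------------
# check_lazy_initialize is written so a render loop can call it every pass:
# the model and offsets load once, after which both branches are cheap no-ops.
# The zoo model name and offsets file below are placeholders.
def _example_lazy_init():
    import argparse
    fake_args = argparse.Namespace(
        model="celeba_64",             # hypothetical zoo model name
        model_file=None,
        model_type=None,
        anchor_offset="offsets.json",  # hypothetical json vector file
        anchor_indexes="5,39")
    dmodel, smile_offsets = None, None
    for _frame in range(3):
        # loads on the first pass, then returns the cached objects unchanged
        dmodel, smile_offsets = check_lazy_initialize(fake_args, dmodel, smile_offsets)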
def main(cliargs):
    parser = argparse.ArgumentParser(description="Plot model samples")
    # models are only for seeds-image
    parser.add_argument("--model", dest='model', type=str, default=None,
                        help="name of model in plat zoo")
    parser.add_argument("--model-file", dest='model_file', type=str, default=None,
                        help="path to the saved model")
    parser.add_argument("--model-type", dest='model_type', type=str, default=None,
                        help="the type of model (usually inferred from filename)")
    parser.add_argument("--model-interface", dest='model_interface', type=str, default=None,
                        help="class interface for model (usually inferred from model-type)")
    parser.add_argument('--build-annoy', dest='build_annoy', default=False, action='store_true')
    parser.add_argument("--jsons", type=str, default=None,
                        help="Comma separated list of json arrays")
    parser.add_argument('--dataset', dest='dataset', default=None,
                        help="Source dataset.")
    parser.add_argument('--dataset-image', dest='dataset_image', default=None,
                        help="use image as source dataset")
    parser.add_argument("--dataset-offset", dest='dataset_offset', type=int, default=0,
                        help="dataset offset to skip")
    parser.add_argument("--dataset-max", type=int, default=None,
                        help="maximum number of dataset entries to use")
    parser.add_argument('--seeds-image', dest='seeds_image', default=None,
                        help="image source of seeds")
    parser.add_argument('--drop-seeds', dest='drop_seeds', default=False, action='store_true')
    parser.add_argument('--annoy-index', dest='annoy_index', default=None,
                        help="Annoy index.")
    parser.add_argument('--split', dest='split', default="all",
                        help="Which split to use from the dataset (train/nontrain/valid/test/any).")
    parser.add_argument("--image-size", dest='image_size', type=int, default=64,
                        help="size of (offset) images")
    parser.add_argument("--z-dim", dest='z_dim', type=int, default=100,
                        help="z dimension")
    parser.add_argument('--outdir', dest='outdir', default="neighborgrids",
                        help="Output dir for neighborgrids.")
    parser.add_argument('--outfile', dest='outfile', default="index_{:03d}.png",
                        help="Output file (template) for neighborgrids.")
    parser.add_argument("--outgrid-width", dest='outgrid_width', type=int, default=5,
                        help="width of output grid")
    parser.add_argument("--outgrid-height", dest='outgrid_height', type=int, default=3,
                        help="height of output grid")
    parser.add_argument('--range', dest='range', default="0",
                        help="Range of indexes to run.")
    args = parser.parse_args(cliargs)

    # check for model download first
    if args.model is not None:
        zoo.check_model_download(args.model)

    encoded = json_list_to_array(args.jsons)
    # print(encoded.shape)
    if args.build_annoy:
        aindex = build_annoy_index(encoded, args.annoy_index)
        sys.exit(0)

    # open annoy index and spit out some neighborgrids
    aindex = load_annoy_index(args.annoy_index, args.z_dim)

    if args.dataset is not None:
        anchor_images = get_anchor_images(args.dataset, args.split,
                                          offset=args.dataset_offset,
                                          numanchors=args.dataset_max,
                                          unit_scale=False)
        image_size = anchor_images.shape[2]

    # dataset_image requires image_size
    if args.dataset_image is not None:
        image_size = args.image_size
        _, _, anchor_images = anchors_from_image(args.dataset_image,
                                                 image_size=(image_size, image_size),
                                                 unit_scale=False)
        if args.dataset_offset > 0:
            anchor_images = anchor_images[args.dataset_offset:]
        if args.dataset_max is not None:
            anchor_images = anchor_images[:args.dataset_max]

    # wrap in list() so the range survives len() and indexing on python 3
    r = list(map(int, args.range.split(",")))

    core_dataset_size = len(anchor_images)
    if len(encoded) != core_dataset_size:
        print("Warning: {} vectors and {} images".format(len(encoded), core_dataset_size))

    if args.seeds_image is not None:
        image_size = args.image_size
        _, _, extra_images = anchors_from_image(args.seeds_image,
                                                image_size=(image_size, image_size),
                                                unit_scale=False)
        net_inputs = (extra_images / 255.0).astype('float32')
        print('Loading saved model')
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type,
                                args.model_interface)
        image_vectors = dmodel.encode_images(net_inputs)
        num_extras = len(extra_images)
        encoded = np.concatenate((encoded, image_vectors), axis=0)
        anchor_images = np.concatenate((anchor_images, extra_images), axis=0)
        # for now, override given range
        r = [core_dataset_size, core_dataset_size + num_extras]

    print(anchor_images.shape)

    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    if len(r) == 1:
        r = [r[0], r[0] + 1]

    num_out_cells = args.outgrid_width * args.outgrid_height
    for i in range(r[0], r[1]):
        if i < core_dataset_size:
            # will find the N nearest neighbors
            neighbors = aindex.get_nns_by_item(i, num_out_cells, include_distances=True)
            file_num = i
        else:
            if args.drop_seeds:
                # just the N nearest neighbors
                neighbors = aindex.get_nns_by_vector(encoded[i], num_out_cells,
                                                     include_distances=True)
            else:
                # original seed + (N-1) nearest neighbors
                neighbors = aindex.get_nns_by_vector(encoded[i], num_out_cells - 1,
                                                     include_distances=True)
                neighbors[0].append(i)
                neighbors[1].append(0)
            file_num = i - core_dataset_size
        g = neighbors_to_rfgrid(neighbors[0], encoded, anchor_images, image_size,
                                args.outgrid_width, args.outgrid_height)
        out_template = "{}/{}".format(args.outdir, args.outfile)
        g.save(out_template.format(file_num))
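# --- Hedged usage sketch (not part of the original source) -----------------
# The tool implies a two-phase workflow, and the index-building phase ends in
# sys.exit(), so the phases are separate runs. File names and the model name
# below are placeholders.
def _example_neighborgrids():
    # Phase 1: build the Annoy index from previously encoded vectors.
    try:
        main(["--jsons", "encoded.json", "--annoy-index", "vectors.ann",
              "--build-annoy"])
    except SystemExit:
        pass
    # Phase 2: query the index, appending encoded seed images to the search set.
    main(["--jsons", "encoded.json", "--annoy-index", "vectors.ann",
          "--dataset-image", "mosaic.png", "--seeds-image", "seeds.png",
          "--model", "celeba_64", "--outdir", "grids"])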
def sample(parser, context, args):
    parser.add_argument("--interface", dest='model_class', type=str, default=None,
                        help="class encapsulating model")
    parser.add_argument('--preload-model', default=False, action='store_true',
                        help="Load the model first before starting processing")
    parser.add_argument("--model", dest='model', type=str, default=None,
                        help="name of model in plat zoo")
    parser.add_argument("--model-file", dest='model_file', type=str, default=None,
                        help="path to the saved model")
    parser.add_argument("--model-type", dest='model_type', type=str, default=None,
                        help="the type of model (usually inferred from filename)")
    parser.add_argument("--model-interface", dest='model_interface', type=str, default=None,
                        help="class interface for model (usually inferred from model-type)")
    parser.add_argument("--rows", type=int, default=3,
                        help="number of rows of samples to display")
    parser.add_argument("--cols", type=int, default=7,
                        help="number of columns of samples to display")
    parser.add_argument("--outfile", dest='save_path', type=str,
                        default="plat_%DATE%_%MODEL%_%SEQ%.png",
                        help="where to save the generated samples")
    parser.add_argument('--fan', dest='fan', default=False, action='store_true')
    parser.add_argument('--analogy', dest='analogy', default=False, action='store_true')
    parser.add_argument('--global-offset', dest='global_offset', default=None,
                        help="use json file as source of global offsets")
    parser.add_argument('--global-indices', dest='global_indices', default=None, type=str,
                        help="offset indices to apply globally")
    parser.add_argument('--global-scale', dest='global_scale', default=1.0, type=float,
                        help="scaling factor for global offset")
    parser.add_argument('--global-ramp', dest='global_ramp', default=False, action='store_true',
                        help="ramp global effect with z-step")
    parser.add_argument('--anchor-offset', dest='anchor_offset', default=None,
                        help="use json file as source of each anchor's offsets")
    parser.add_argument('--anchor-offset-x', dest='anchor_offset_x', default="5", type=str,
                        help="which indices to combine for x offset")
    parser.add_argument('--anchor-offset-y', dest='anchor_offset_y', default="39", type=str,
                        help="which indices to combine for y offset")
    parser.add_argument('--anchor-offset-x-minscale', dest='anchor_offset_x_minscale',
                        default=0, type=float, help="scaling factor for min x offset")
    parser.add_argument('--anchor-offset-y-minscale', dest='anchor_offset_y_minscale',
                        default=0, type=float, help="scaling factor for min y offset")
    parser.add_argument('--anchor-offset-x-maxscale', dest='anchor_offset_x_maxscale',
                        default=2.0, type=float, help="scaling factor for max x offset")
    parser.add_argument('--anchor-offset-y-maxscale', dest='anchor_offset_y_maxscale',
                        default=2.0, type=float, help="scaling factor for max y offset")
    parser.add_argument('--anchor-wave', dest='anchor_wave', default=False, action='store_true',
                        help="interpret anchor offsets as wave parameters")
    parser.add_argument('--radial-wave', dest='radial_wave', default=False, action='store_true',
                        help="anchor-wave mode is radial")
    parser.add_argument('--clip-wave', dest='clip_wave', default=False, action='store_true',
                        help="anchor-wave mode is clipped (don't wrap)")
    parser.add_argument('--anchor-noise', dest='anchor_noise', default=False, action='store_true',
                        help="interpret anchor offsets as noise parameters")
    parser.add_argument('--anchor-jsons', dest='anchor_jsons', default=False,
                        help="json file of paths in n dimensions")
    parser.add_argument('--gradient', dest='gradient', default=False, action='store_true')
    parser.add_argument('--linear', dest='linear', default=False, action='store_true')
    parser.add_argument('--gaussian', dest='gaussian', default=False, action='store_true')
    parser.add_argument('--uniform', dest='uniform', default=False, action='store_true',
                        help="Random prior is uniform [-1,1] (not gaussian)")
    parser.add_argument('--tight', dest='tight', default=False, action='store_true')
    parser.add_argument("--seed", type=int, default=None,
                        help="Optional random seed")
    parser.add_argument('--chain', dest='chain', default=False, action='store_true')
    parser.add_argument('--encircle', dest='encircle', default=False, action='store_true')
    parser.add_argument('--partway', dest='partway', type=float, default=None)
    parser.add_argument("--spacing", type=int, default=1,
                        help="spacing of mine grid; rows,cols must be a multiple of spacing, plus 1")
    parser.add_argument('--anchors', dest='anchors', default=False, action='store_true',
                        help="use reconstructed images instead of random ones")
    parser.add_argument('--anchor-glob', dest='anchor_glob', default=None,
                        help="use file glob source of anchors")
    parser.add_argument('--anchor-directory', dest='anchor_dir', default=None,
                        help="monitor directory for anchors")
    parser.add_argument('--watch', dest='watch', default=False, action='store_true',
                        help="monitor anchor-directory indefinitely")
    parser.add_argument('--anchor-image', dest='anchor_image', default=None,
                        help="use image as source of anchors")
    parser.add_argument('--anchor-vectors', dest='anchor_vectors', default=None,
                        help="use json file as source of anchors")
    parser.add_argument('--invert-anchors', dest='invert_anchors', default=False,
                        action='store_true', help="Use antipode of given anchors.")
    parser.add_argument("--numanchors", type=int, default=None,
                        help="number of anchors to generate")
    parser.add_argument('--dataset', dest='dataset', default=None,
                        help="Dataset for anchors.")
    parser.add_argument("--with-labels", dest='with_labels', default=False, action='store_true',
                        help="use labels for conditioning information")
    parser.add_argument("--clone-label", dest='clone_label', type=int, default=None,
                        help="clone given label (used with --with-labels)")
    parser.add_argument('--color-convert', dest='color_convert', default=False,
                        action='store_true', help="Convert source dataset to color from grayscale.")
    parser.add_argument('--split', dest='split', default="all",
                        help="Which split to use from the dataset (train/nontrain/valid/test/any).")
    parser.add_argument("--offset", type=int, default=0,
                        help="data offset to skip")
    parser.add_argument("--stepsize", type=int, default=1,
                        help="data step size from offset")
    parser.add_argument("--allowed", dest='allowed', type=str, default=None,
                        help="Only allow whitelisted labels L1,L2,...")
    parser.add_argument("--prohibited", dest='prohibited', type=str, default=None,
                        help="Exclude blacklisted labels L1,L2,...")
    parser.add_argument('--passthrough', dest='passthrough', default=False, action='store_true',
                        help="Use originals instead of reconstructions")
    parser.add_argument('--shoulders', dest='shoulders', default=False, action='store_true',
                        help="Append anchors to left/right columns")
    parser.add_argument('--encoder', dest='encoder', default=False, action='store_true',
                        help="Output dataset as encoded vectors")
    parser.add_argument("--batch-size", dest='batch_size', type=int, default=64,
                        help="batch size when encoding vectors")
    parser.add_argument("--image-size", dest='image_size', type=int, default=64,
                        help="size of (offset) images")
    parser.add_argument('--anchor-image-template', dest='anchor_image_template', default=None,
                        help="template for anchor image filename")
    parser.add_argument('--outfile-template', dest='save_path_template', default=None,
                        help="template for save path filename")
    parser.add_argument('--multistrip', dest='multistrip', default=None, type=int,
                        help="update anchor-offset-x for each entry in anchor-offset")
    parser.add_argument('--range', dest='range', default=None,
                        help="low,high integer range for template run")
    parser.add_argument('--z-step', dest='z_step', default=0.01, type=float,
                        help="variable that gets stepped each template step")
    parser.add_argument('--z-initial', dest='z_initial', default=0.0, type=float,
                        help="initial value of variable stepped each template step")
    args = parser.parse_args(args)

    # check for model download first
    if args.model is not None:
        zoo.check_model_download(args.model)

    if args.seed is not None:
        np.random.seed(args.seed)
        random.seed(args.seed)

    dmodel = None
    if args.preload_model:
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type,
                                args.model_interface)

    z_range = None
    range_data = None
    event_handler = AnchorFileHandler()
    cur_z_step = args.z_initial

    barename = None
    if args.anchor_image:
        basename = os.path.basename(args.anchor_image)
        barename = os.path.splitext(basename)[0]

    if args.anchor_dir:
        event_handler.setup(args, dmodel, args.save_path, cur_z_step)
        for f in sorted(os.listdir(args.anchor_dir)):
            full_path = os.path.join(args.anchor_dir, f)
            if os.path.isfile(full_path):
                event_handler.process(full_path)
        if args.watch:
            print("Watching anchor directory {}".format(args.anchor_dir))
            observer = Observer()
            observer.schedule(event_handler, path=args.anchor_dir, recursive=False)
            observer.start()
            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                observer.stop()
            observer.join()
    elif args.anchor_jsons:
        event_handler.setup(args, dmodel, args.save_path, cur_z_step)
        event_handler.process(args.anchor_image)
    elif args.range is not None:
        # TODO: migrate this case to event handler like anchor_jsons above
        # wrap in list() so the range survives reuse on python 3
        z_range = list(map(int, args.range.split(",")))
        z_step = args.z_step
        cur_z_step = args.z_initial
        if z_range is not None:
            template_low, template_high = z_range
            for i in range(template_low, template_high + 1):
                if args.anchor_image_template is not None:
                    cur_anchor_image = args.anchor_image_template.format(i)
                else:
                    cur_anchor_image = args.anchor_image
                cur_save_path = args.save_path_template.format(i)
                dmodel = run_with_args(args, dmodel, cur_anchor_image, cur_save_path,
                                       cur_z_step, range_data=range_data)
                cur_z_step += z_step
    else:
        run_with_args(args, dmodel, args.anchor_image, args.save_path, cur_z_step, barename)
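# --- Hedged usage sketch (not part of the original source) -----------------
# sample() is a subcommand body: the CLI dispatcher normally hands in the
# parser and the remaining argv. A direct call looks like this; the model
# name and output path are placeholders.
def _example_sample_grid():
    import argparse
    p = argparse.ArgumentParser(description="plat sample")
    # render a 3x7 grid of random samples from the prior
    sample(p, None, ["--model", "celeba_64", "--rows", "3", "--cols", "7",
                     "--outfile", "samples.png"])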
def run_with_args(args, dmodel, cur_anchor_image, cur_save_path, cur_z_step,
                  cur_basename="basename", range_data=None, template_dict={}):
    # note: the mutable template_dict default is shared across calls
    anchor_images = None
    anchor_labels = None
    if args.anchors:
        allowed = None
        prohibited = None
        include_targets = False
        if args.allowed:
            include_targets = True
            allowed = list(map(int, args.allowed.split(",")))
        if args.prohibited:
            include_targets = True
            prohibited = list(map(int, args.prohibited.split(",")))
        anchor_images = get_anchor_images(args.dataset, args.split, args.offset,
                                          args.stepsize, args.numanchors, allowed,
                                          prohibited, args.image_size, args.color_convert,
                                          include_targets=include_targets)
        if args.with_labels:
            anchor_labels = get_anchor_labels(args.dataset, args.split, args.offset,
                                              args.stepsize, args.numanchors)

    if args.anchor_glob is not None:
        files = plat.sampling.real_glob(args.anchor_glob)
        if args.offset > 0:
            files = files[args.offset:]
        if args.stepsize > 1:
            files = files[::args.stepsize]
        if args.numanchors is not None:
            files = files[:args.numanchors]
        anchor_images = anchors_from_filelist(files)
        print("Read {} images from {} files".format(len(anchor_images), len(files)))
        if len(anchor_images) == 0:
            print("No images, cannot continue")
            sys.exit(0)

    if cur_anchor_image is not None:
        _, _, anchor_images = anchors_from_image(cur_anchor_image,
                                                 image_size=(args.image_size, args.image_size))
        if args.offset > 0:
            anchor_images = anchor_images[args.offset:]
        if args.stepsize > 0:
            anchor_images = anchor_images[::args.stepsize]
        if args.numanchors is not None:
            anchor_images = anchor_images[:args.numanchors]

    # at this point we can make a dummy anchor_labels if we need
    if anchor_images is not None and anchor_labels is None:
        anchor_labels = [None] * len(anchor_images)

    if args.passthrough:
        # determine final filename string
        image_size = anchor_images[0].shape[1]
        save_path = plat.sampling.emit_filename(cur_save_path, {}, args)
        print("Preparing image file {}".format(save_path))
        img = grid2img(anchor_images, args.rows, args.cols, not args.tight)
        img.save(save_path)
        sys.exit(0)

    if dmodel is None:
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type,
                                args.model_interface)

    embedded = None
    if anchor_images is not None:
        x_queue = anchor_images[:]
        c_queue = anchor_labels[:]
        anchors = None
        # print("========> ENCODING {} at a time".format(args.batch_size))
        while len(x_queue) > 0:
            cur_x = x_queue[:args.batch_size]
            cur_c = c_queue[:args.batch_size]
            x_queue = x_queue[args.batch_size:]
            c_queue = c_queue[args.batch_size:]
            encoded = dmodel.encode_images(cur_x, cur_c)
            try:
                emb_l = dmodel.embed_labels(cur_c)
            except AttributeError:
                emb_l = [None] * args.batch_size
            if anchors is None:
                anchors = encoded
                embedded = emb_l
            else:
                anchors = np.concatenate((anchors, encoded), axis=0)
                embedded = np.concatenate((embedded, emb_l), axis=0)
        # anchors = dmodel.encode_images(anchor_images)
    elif args.anchor_vectors is not None:
        anchors = get_json_vectors(args.anchor_vectors)
    else:
        anchors = None

    if args.invert_anchors:
        anchors = -1 * anchors

    if args.encoder:
        if anchors is not None:
            plat.sampling.output_vectors(anchors, args.save_path)
        else:
            plat.sampling.stream_output_vectors(dmodel, args.dataset, args.split,
                                                args.save_path, batch_size=args.batch_size)
        sys.exit(0)

    global_offset = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        offsets = get_json_vectors(args.anchor_offset)
        if args.anchor_wave:
            anchors = plat.sampling.anchors_wave_offsets(anchors, offsets,
                args.rows, args.cols, args.spacing, args.radial_wave,
                args.clip_wave, cur_z_step, args.anchor_offset_x,
                args.anchor_offset_x_minscale, args.anchor_offset_x_maxscale)
        elif args.anchor_noise:
            anchors = plat.sampling.anchors_noise_offsets(anchors, offsets,
                args.rows, args.cols, args.spacing, cur_z_step,
                args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale,
                args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale)
        elif range_data is not None:
            anchors = plat.sampling.anchors_json_offsets(anchors, offsets,
                args.rows, args.cols, args.spacing, cur_z_step,
                args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale,
                args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale,
                range_data)
        else:
            anchors = plat.sampling.anchors_from_offsets(anchors[0], offsets,
                args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale,
                args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale)

    if args.global_offset is not None:
        offsets = get_json_vectors(args.global_offset)
        if args.global_ramp:
            offsets = cur_z_step * offsets
        global_offset = plat.sampling.get_global_offset(offsets, args.global_indices,
                                                        args.global_scale)

    z_dim = dmodel.get_zdim()

    # I don't remember what partway/encircle do, so they are not handling the chain layout.
    # This handles the case (at least) of mines with random anchors.
    if (args.partway is not None) or args.encircle or (anchors is None):
        srows = (args.rows // args.spacing) + 1
        scols = (args.cols // args.spacing) + 1
        rand_anchors = plat.sampling.generate_latent_grid(z_dim, rows=srows, cols=scols,
            fan=False, gradient=False, spherical=False, gaussian=False,
            anchors=None, anchor_images=None, mine=False, chain=False,
            spacing=args.spacing, analogy=False, rand_uniform=args.uniform)
        if args.partway is not None:
            l = len(rand_anchors)
            clipped_anchors = anchors[:l]
            anchors = (1.0 - args.partway) * rand_anchors + args.partway * clipped_anchors
        elif args.encircle:
            anchors = surround_anchors(srows, scols, anchors, rand_anchors)
        else:
            anchors = rand_anchors

    z = plat.sampling.generate_latent_grid(z_dim, args.rows, args.cols, args.fan,
        args.gradient, not args.linear, args.gaussian, anchors, anchor_images,
        True, args.chain, args.spacing, args.analogy)
    if global_offset is not None:
        z = z + global_offset

    template_dict["BASENAME"] = cur_basename
    # emb_l = None
    # emb_l = [None] * len(z)
    if args.clone_label is not None:
        emb_l = np.tile(embedded[args.clone_label], [len(z), 1])
    else:
        emb_l = plat.sampling.generate_latent_grid(z_dim, args.rows, args.cols,
            args.fan, args.gradient, not args.linear, args.gaussian, embedded,
            anchor_images, True, args.chain, args.spacing, args.analogy)
    # TODO: maybe not the best way to check if labels are valid
    # if anchor_labels is None or anchor_labels[0] is None:
    #     emb_l = [None] * len(z)

    plat.sampling.grid_from_latents(z, dmodel, args.rows, args.cols, anchor_images,
        args.tight, args.shoulders, cur_save_path, args, args.batch_size,
        template_dict=template_dict, emb_l=emb_l)
    return dmodel
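# --- Hedged illustration (not part of the original source) -----------------
# The --partway branch above is a plain linear interpolation between random
# anchors and the user-supplied ones; a standalone numpy sketch:
def _example_partway_blend():
    import numpy as np
    rand_anchors = np.random.normal(size=(4, 100))
    user_anchors = np.random.normal(size=(4, 100))
    partway = 0.25  # 0.0 = all random, 1.0 = all user anchors
    blended = (1.0 - partway) * rand_anchors + partway * user_anchors
    assert blended.shape == (4, 100)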
def canvas(parser, context, args):
    parser = argparse.ArgumentParser(description="Plot model samples")
    parser.add_argument("--model", dest='model', type=str, default=None,
                        help="name of model in plat zoo")
    parser.add_argument("--model-file", dest='model_file', type=str, default=None,
                        help="path to the saved model")
    parser.add_argument("--model-type", dest='model_type', type=str, default=None,
                        help="the type of model (usually inferred from filename)")
    parser.add_argument("--model-interface", dest='model_interface', type=str, default=None,
                        help="class interface for model (usually inferred from model-type)")
    parser.add_argument("--width", type=int, default=512,
                        help="width of canvas to render in pixels")
    parser.add_argument("--height", type=int, default=512,
                        help="height of canvas to render in pixels")
    parser.add_argument("--rows", type=int, default=3,
                        help="number of rows of anchors")
    parser.add_argument("--cols", type=int, default=3,
                        help="number of columns of anchors")
    parser.add_argument("--xmin", type=int, default=0,
                        help="min x in virtual space")
    parser.add_argument("--xmax", type=int, default=100,
                        help="max x in virtual space")
    parser.add_argument("--ymin", type=int, default=0,
                        help="min y in virtual space")
    parser.add_argument("--ymax", type=int, default=100,
                        help="max y in virtual space")
    parser.add_argument("--outfile", dest='save_path', type=str,
                        default="canvas_%DATE%_%MODEL%_%SEQ%.png",
                        help="where to save the generated samples")
    parser.add_argument("--seed", type=int, default=None,
                        help="Optional random seed")
    parser.add_argument('--do-check-bounds', dest='do_check_bounds', default=False,
                        action='store_true', help="clip to drawing bounds")
    parser.add_argument('--anchor-image', dest='anchor_image', default=None,
                        help="use image as source of anchors")
    parser.add_argument('--anchor-mine', dest='anchor_mine', default=None,
                        help="use image as single source of mine coordinates")
    parser.add_argument('--random-mine', dest='random_mine', default=False,
                        action='store_true',
                        help="use random sampling as source of mine coordinates")
    parser.add_argument('--additive', dest='additive', default=False, action='store_true',
                        help="use additive compositing")
    parser.add_argument('--mask-name', dest='mask_name', default=None,
                        help="prefix name for alpha mask to use (full/rounded/hex)")
    parser.add_argument('--mask-layout', dest='mask_layout', default=None,
                        help="use image as source of mine grid points")
    parser.add_argument('--mask-scale', dest='mask_scale', default=1.0, type=float,
                        help="Scale mask layout (squeeze)")
    parser.add_argument('--mask-width', dest='mask_width', type=int, default=15,
                        help="width for computed mask")
    parser.add_argument('--mask-height', dest='mask_height', type=int, default=15,
                        help="height for computed mask")
    parser.add_argument('--mask-radius', dest='mask_radius', default=None, type=float,
                        help="radius for computed mask")
    parser.add_argument('--layout', dest='layout', default=None,
                        help="layout json file")
    parser.add_argument('--layout-scale', dest='layout_scale', default=1, type=int,
                        help="Scale layout")
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=100,
                        help="number of images to decode at once")
    parser.add_argument('--passthrough', dest='passthrough', default=False, action='store_true',
                        help="Use originals instead of reconstructions")
    parser.add_argument('--anchor-offset', dest='anchor_offset', default=None,
                        help="use json file as source of each anchor's offsets")
    parser.add_argument('--anchor-offset-a', dest='anchor_offset_a', default="42", type=str,
                        help="which indices to combine for offset a")
    parser.add_argument('--anchor-offset-b', dest='anchor_offset_b', default="31", type=str,
                        help="which indices to combine for offset b")
    parser.add_argument("--image-size", dest='image_size', type=int, default=64,
                        help="size of (offset) images")
    parser.add_argument('--global-offset', dest='global_offset', default=None,
                        help="use json file as source of global offsets")
    parser.add_argument('--global-indices', dest='global_indices', default=None, type=str,
                        help="offset indices to apply globally")
    parser.add_argument('--global-scale', dest='global_scale', default=1.0, type=float,
                        help="scaling factor for global offset")
    args = parser.parse_args(args)

    template_dict = {}
    if args.seed:
        np.random.seed(args.seed)
        random.seed(args.seed)

    global_offset = None
    if args.global_offset is not None:
        offsets = get_json_vectors(args.global_offset)
        global_offset = plat.sampling.get_global_offset(offsets, args.global_indices,
                                                        args.global_scale)

    anchor_images = None
    if args.anchor_image is not None:
        _, _, anchor_images = anchors_from_image(args.anchor_image,
                                                 image_size=(args.image_size, args.image_size))
    elif args.anchor_mine is not None:
        _, _, anchor_images = anchors_from_image(args.anchor_mine,
                                                 image_size=(args.image_size, args.image_size))
        basename = os.path.basename(args.anchor_mine)
        template_dict["BASENAME"] = os.path.splitext(basename)[0]

    anchors = None
    if not args.passthrough:
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type,
                                args.model_interface)
        workq = anchor_images[:]
        anchors_list = []
        while len(workq) > 0:
            print("Processing {} anchors".format(args.batch_size))
            curq = workq[:args.batch_size]
            workq = workq[args.batch_size:]
            cur_anchors = dmodel.encode_images(curq)
            for c in cur_anchors:
                anchors_list.append(c)
        anchors = np.asarray(anchors_list)

    if anchors is None:
        anchors = np.random.normal(loc=0, scale=1, size=(args.cols * args.rows, 100))

    anchor_offsets = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        anchor_offsets = get_json_vectors(args.anchor_offset)

    canvas = Canvas(args.width, args.height, args.xmin, args.xmax,
                    args.ymin, args.ymax, args.mask_name, args.image_size,
                    args.do_check_bounds)

    workq = []
    do_hex = True

    if args.layout:
        with open(args.layout) as json_file:
            layout_data = json.load(json_file)
        xy = np.array(layout_data["xy"])
        grid_size = layout_data["size"]
        roots = layout_data["r"]
        if "s" in layout_data:
            s = layout_data["s"]
        else:
            s = None
        for i, pair in enumerate(xy):
            x = pair[0] * canvas.canvas_xmax / grid_size[0]
            y = pair[1] * canvas.canvas_ymax / grid_size[1]
            # (assumes "s" is present; a layout without it would fail here)
            a = (pair[0] + 0.5 * s[i]) / float(grid_size[0])
            b = (pair[1] + 0.5 * s[i]) / float(grid_size[1])
            r = roots[i]
            if s is None:
                scale = args.layout_scale
            else:
                scale = s[i] * args.layout_scale
            # print("Placing {} at {}, {} because {},{} and {}, {}".format(scale, x, y, canvas.canvas_xmax, canvas.canvas_ymax, grid_size[0], grid_size[1]))
            if args.passthrough:
                output_image = anchor_images[r]
                canvas.place_image(output_image, x, y, args.additive, scale=scale)
            else:
                if args.anchor_mine is not None or args.random_mine:
                    z = create_mine_canvas(args.rows, args.cols, b, a, anchors)
                elif anchor_offsets is not None:
                    z = apply_anchor_offsets(anchors[r], anchor_offsets, a, b,
                                             args.anchor_offset_a, args.anchor_offset_b)
                else:
                    z = anchors[r]
                if global_offset is not None:
                    z = z + global_offset
                # print("Storing {},{} with {}".format(x, y, len(z)))
                workq.append({"z": z, "x": x, "y": y, "s": scale})
    elif args.mask_layout or args.mask_radius:
        if args.mask_layout:
            rawim = imread(args.mask_layout)
            if len(rawim.shape) == 2:
                im_height, im_width = rawim.shape
                mask_layout = rawim
            else:
                im_height, im_width, _ = rawim.shape
                mask_layout = rawim[:, :, 0]
        else:
            im_height, im_width = args.mask_height, args.mask_width
            mask_layout = make_mask_layout(im_height, im_width, args.mask_radius)
        for xpos in range(im_width):
            for ypos in range(im_height):
                a = float(xpos) / (im_width - 1)
                if do_hex and ypos % 2 == 0:
                    a = a + 0.5 / (im_width - 1)
                x = args.mask_scale * canvas.xmax * a
                b = float(ypos) / (im_height - 1)
                y = args.mask_scale * canvas.ymax * b
                if not mask_layout[ypos][xpos] > 128:
                    pass
                elif args.passthrough:
                    output_image = anchor_images[0]
                    canvas.place_image(output_image, x, y, args.additive)
                else:
                    if len(anchors) == 1 or anchor_offsets is not None:
                        z = apply_anchor_offsets(anchors[0], anchor_offsets, a, b,
                                                 args.anchor_offset_a, args.anchor_offset_b)
                    else:
                        z = create_mine_canvas(args.rows, args.cols, b, a, anchors)
                    if global_offset is not None:
                        z = z + global_offset
                    workq.append({"z": z, "x": x, "y": y, "s": 1.0})

    while len(workq) > 0:
        curq = workq[:args.batch_size]
        workq = workq[args.batch_size:]
        latents = [e["z"] for e in curq]
        images = dmodel.sample_at(np.array(latents))
        for i in range(len(curq)):
            # print("Placing {},{} with {}".format(curq[i]["x"], curq[i]["y"], len(latents)))
            canvas.place_image(images[i], curq[i]["x"], curq[i]["y"],
                               args.additive, scale=curq[i]["s"])
            # print("Placed")

    template_dict["SIZE"] = args.image_size
    outfile = plat.sampling.emit_filename(args.save_path, template_dict, args)
    canvas.save(outfile)
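# --- Hedged illustration (not part of the original source) -----------------
# The shape of the layout json consumed above, inferred from the fields the
# code reads ("xy", "size", "r", and the optional "s"); the values are invented.
def _example_layout_json():
    import json
    layout = {
        "size": [4, 4],           # virtual grid the xy coordinates live in
        "xy": [[0, 0], [2, 1]],   # placement cell for each tile
        "r": [0, 5],              # anchor index ("root") used for each tile
        "s": [1, 2],              # per-tile scale factor (optional key)
    }
    with open("layout.json", "w") as f:
        json.dump(layout, f)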
def canvas(parser, context, args):
    parser = argparse.ArgumentParser(description="Plot model samples")
    parser.add_argument("--model", dest='model', type=str, default=None,
                        help="name of model in plat zoo")
    parser.add_argument("--model-file", dest='model_file', type=str, default=None,
                        help="path to the saved model")
    parser.add_argument("--model-type", dest='model_type', type=str, default=None,
                        help="the type of model (usually inferred from filename)")
    parser.add_argument("--model-interface", dest='model_interface', type=str, default=None,
                        help="class interface for model (usually inferred from model-type)")
    parser.add_argument("--width", type=int, default=512,
                        help="width of canvas to render in pixels")
    parser.add_argument("--height", type=int, default=512,
                        help="height of canvas to render in pixels")
    parser.add_argument("--rows", type=int, default=3,
                        help="number of rows of anchors")
    parser.add_argument("--cols", type=int, default=3,
                        help="number of columns of anchors")
    parser.add_argument("--xmin", type=int, default=0,
                        help="min x in virtual space")
    parser.add_argument("--xmax", type=int, default=100,
                        help="max x in virtual space")
    parser.add_argument("--ymin", type=int, default=0,
                        help="min y in virtual space")
    parser.add_argument("--ymax", type=int, default=100,
                        help="max y in virtual space")
    parser.add_argument("--outfile", dest='save_path', type=str,
                        default="canvas_%DATE%_%MODEL%_%SEQ%.png",
                        help="where to save the generated samples")
    parser.add_argument("--seed", type=int, default=None,
                        help="Optional random seed")
    parser.add_argument('--do-check-bounds', dest='do_check_bounds', default=False,
                        action='store_true', help="clip to drawing bounds")
    parser.add_argument('--background-image', dest='background_image', default=None,
                        help="use image as initial background")
    parser.add_argument('--anchor-image', dest='anchor_image', default=None,
                        help="use image as source of anchors")
    parser.add_argument('--anchor-mine', dest='anchor_mine', default=None,
                        help="use image as single source of mine coordinates")
    parser.add_argument('--anchor-canvas', dest='anchor_canvas', default=False,
                        action='store_true', help="anchor image from canvas")
    parser.add_argument('--random-mine', dest='random_mine', default=False,
                        action='store_true',
                        help="use random sampling as source of mine coordinates")
    parser.add_argument('--additive', dest='additive', default=False, action='store_true',
                        help="use additive compositing")
    parser.add_argument('--mask-name', dest='mask_name', default=None,
                        help="prefix name for alpha mask to use (full/rounded/hex)")
    parser.add_argument('--mask-layout', dest='mask_layout', default=None,
                        help="use image as source of mine grid points")
    parser.add_argument('--mask-scale', dest='mask_scale', default=1.0, type=float,
                        help="Scale mask layout (squeeze)")
    parser.add_argument('--mask-width', dest='mask_width', type=int, default=15,
                        help="width for computed mask")
    parser.add_argument('--mask-height', dest='mask_height', type=int, default=15,
                        help="height for computed mask")
    parser.add_argument('--mask-radius', dest='mask_radius', default=None, type=float,
                        help="radius for computed mask")
    parser.add_argument('--layout', dest='layout', default=None,
                        help="layout json file")
    parser.add_argument('--layout-scale', dest='layout_scale', default=1, type=int,
                        help="Scale layout")
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=100,
                        help="number of images to decode at once")
    parser.add_argument('--passthrough', dest='passthrough', default=False, action='store_true',
                        help="Use originals instead of reconstructions")
    parser.add_argument('--anchor-offset', dest='anchor_offset', default=None,
                        help="use json file as source of each anchor's offsets")
    parser.add_argument('--anchor-offset-a', dest='anchor_offset_a', default="42", type=str,
                        help="which indices to combine for offset a")
    parser.add_argument('--anchor-offset-b', dest='anchor_offset_b', default="31", type=str,
                        help="which indices to combine for offset b")
    parser.add_argument("--image-size", dest='image_size', type=int, default=64,
                        help="size of (offset) images")
    parser.add_argument('--global-offset', dest='global_offset', default=None,
                        help="use json file as source of global offsets")
    parser.add_argument('--global-indices', dest='global_indices', default=None, type=str,
                        help="offset indices to apply globally")
    parser.add_argument('--global-scale', dest='global_scale', default=1.0, type=float,
                        help="scaling factor for global offset")
    args = parser.parse_args(args)

    template_dict = {}
    if args.seed:
        np.random.seed(args.seed)
        random.seed(args.seed)

    global_offset = None
    if args.global_offset is not None:
        offsets = get_json_vectors(args.global_offset)
        global_offset = plat.sampling.get_global_offset(offsets, args.global_indices,
                                                        args.global_scale)

    anchor_images = None
    if args.anchor_image is not None:
        _, _, anchor_images = anchors_from_image(args.anchor_image,
                                                 image_size=(args.image_size, args.image_size))
    elif args.anchor_mine is not None:
        _, _, anchor_images = anchors_from_image(args.anchor_mine,
                                                 image_size=(args.image_size, args.image_size))
        basename = os.path.basename(args.anchor_mine)
        template_dict["BASENAME"] = os.path.splitext(basename)[0]

    anchors = None
    if not args.passthrough:
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type,
                                args.model_interface)
        workq = anchor_images[:]
        anchors_list = []
        while len(workq) > 0:
            print("Processing {} anchors".format(args.batch_size))
            curq = workq[:args.batch_size]
            workq = workq[args.batch_size:]
            cur_anchors = dmodel.encode_images(curq)
            for c in cur_anchors:
                anchors_list.append(c)
        anchors = np.asarray(anchors_list)

    if anchors is None:
        anchors = np.random.normal(loc=0, scale=1, size=(args.cols * args.rows, 100))

    anchor_offsets = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        anchor_offsets = get_json_vectors(args.anchor_offset)

    canvas = Canvas(args.width, args.height, args.xmin, args.xmax,
                    args.ymin, args.ymax, args.mask_name, args.image_size,
                    args.do_check_bounds)
    if args.background_image is not None:
        canvas.set_background(args.background_image)

    workq = []
    do_hex = True

    if args.layout:
        with open(args.layout) as json_file:
            layout_data = json.load(json_file)
        xy = np.array(layout_data["xy"])
        grid_size = layout_data["size"]
        roots = layout_data["r"]
        if "s" in layout_data:
            s = layout_data["s"]
        else:
            s = None
        for i, pair in enumerate(xy):
            x = pair[0] * canvas.canvas_xmax / grid_size[0]
            y = pair[1] * canvas.canvas_ymax / grid_size[1]
            # (assumes "s" is present; a layout without it would fail here)
            a = (pair[0] + 0.5 * s[i]) / float(grid_size[0])
            b = (pair[1] + 0.5 * s[i]) / float(grid_size[1])
            r = roots[i]
            if s is None:
                scale = args.layout_scale
            else:
                scale = s[i] * args.layout_scale
            # print("Placing {} at {}, {} because {},{} and {}, {}".format(scale, x, y, canvas.canvas_xmax, canvas.canvas_ymax, grid_size[0], grid_size[1]))
            if args.passthrough:
                output_image = anchor_images[r]
                canvas.place_image(output_image, x, y, args.additive, scale=scale)
            else:
                if args.anchor_mine is not None or args.random_mine:
                    z = create_mine_canvas(args.rows, args.cols, b, a, anchors)
                elif anchor_offsets is not None:
                    z = apply_anchor_offsets(anchors[r], anchor_offsets, a, b,
                                             args.anchor_offset_a, args.anchor_offset_b)
                else:
                    z = anchors[r]
                if global_offset is not None:
                    z = z + global_offset
                # print("Storing {},{} with {}".format(x, y, len(z)))
                workq.append({"z": z, "x": x, "y": y, "s": scale})
    elif args.mask_layout or args.mask_radius:
        if args.mask_layout:
            rawim = imread(args.mask_layout)
            if len(rawim.shape) == 2:
                im_height, im_width = rawim.shape
                mask_layout = rawim
            else:
                im_height, im_width, _ = rawim.shape
                mask_layout = rawim[:, :, 0]
        else:
            im_height, im_width = args.mask_height, args.mask_width
            mask_layout = make_mask_layout(im_height, im_width, args.mask_radius)
        for xpos in range(im_width):
            for ypos in range(im_height):
                a = float(xpos) / (im_width - 1)
                if do_hex and ypos % 2 == 0:
                    a = a + 0.5 / (im_width - 1)
                x = args.mask_scale * canvas.xmax * a
                b = float(ypos) / (im_height - 1)
                y = args.mask_scale * canvas.ymax * b
                if not mask_layout[ypos][xpos] > 128:
                    pass
                elif args.passthrough:
                    if args.anchor_canvas:
                        cur_anchor_image = canvas.get_anchor(x, y, args.image_size)
                    else:
                        cur_anchor_image = anchor_images[0]
                    canvas.place_image(cur_anchor_image, x, y, args.additive, None)
                else:
                    if args.anchor_canvas:
                        cur_anchor_image = canvas.get_anchor(x, y, args.image_size)
                        zs = dmodel.encode_images([cur_anchor_image])
                        z = zs[0]
                    elif len(anchors) == 1 or anchor_offsets is not None:
                        z = apply_anchor_offsets(anchors[0], anchor_offsets, a, b,
                                                 args.anchor_offset_a, args.anchor_offset_b)
                    else:
                        z = create_mine_canvas(args.rows, args.cols, b, a, anchors)
                    if global_offset is not None:
                        z = z + global_offset
                    workq.append({"z": z, "x": x, "y": y, "s": None})

    while len(workq) > 0:
        curq = workq[:args.batch_size]
        workq = workq[args.batch_size:]
        latents = [e["z"] for e in curq]
        images = dmodel.sample_at(np.array(latents))
        for i in range(len(curq)):
            # print("Placing {},{} with {}".format(curq[i]["x"], curq[i]["y"], len(latents)))
            canvas.place_image(images[i], curq[i]["x"], curq[i]["y"],
                               args.additive, scale=curq[i]["s"])
            # print("Placed")

    template_dict["SIZE"] = args.image_size
    outfile = plat.sampling.emit_filename(args.save_path, template_dict, args)
    canvas.save(outfile)
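# --- Hedged usage sketch (not part of the original source) -----------------
# The flags added in this version: seed the canvas with a background image and
# re-encode patches of the evolving canvas itself as anchors. Model name and
# file paths are placeholders.
def _example_canvas_background():
    import argparse
    p = argparse.ArgumentParser(description="plat canvas")
    canvas(p, None, ["--model", "celeba_64",
                     "--background-image", "backdrop.png",
                     "--anchor-canvas", "--mask-radius", "6.0",
                     "--outfile", "canvas_out.png"])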
def sample(parser, context, args):
    parser.add_argument('--preload-model', default=False, action='store_true',
                        help="Load the model first before starting processing")
    parser.add_argument("--model", dest='model', type=str, default=None,
                        help="name of model")
    parser.add_argument("--model-file", dest='model_file', type=str, default=None,
                        help="path to the saved model")
    parser.add_argument("--model-type", dest='model_type', type=str, default=None,
                        help="the type of model (usually inferred from filename)")
    parser.add_argument("--model-interface", dest='model_interface', type=str, default=None,
                        help="class interface for model (usually inferred from model-type)")
    parser.add_argument("--rows", type=int, default=3,
                        help="number of rows of samples to display")
    parser.add_argument("--cols", type=int, default=7,
                        help="number of columns of samples to display")
    parser.add_argument("--outfile", dest='save_path', type=str,
                        default="plat_%DATE%_%MODEL%_%SEQ%.png",
                        help="where to save the generated samples")
    parser.add_argument('--fan', dest='fan', default=False, action='store_true')
    parser.add_argument('--analogy', dest='analogy', default=False, action='store_true')
    parser.add_argument('--global-offset', dest='global_offset', default=None,
                        help="use json file as source of global offsets")
    parser.add_argument('--global-indices', dest='global_indices', default=None, type=str,
                        help="offset indices to apply globally")
    parser.add_argument('--global-scale', dest='global_scale', default=1.0, type=float,
                        help="scaling factor for global offset")
    parser.add_argument('--global-ramp', dest='global_ramp', default=False, action='store_true',
                        help="ramp global effect with z-step")
    parser.add_argument('--anchor-offset', dest='anchor_offset', default=None,
                        help="use json file as source of each anchor's offsets")
    parser.add_argument('--anchor-offset-x', dest='anchor_offset_x', default="5", type=str,
                        help="which indices to combine for x offset")
    parser.add_argument('--anchor-offset-y', dest='anchor_offset_y', default="39", type=str,
                        help="which indices to combine for y offset")
    parser.add_argument('--anchor-offset-x-minscale', dest='anchor_offset_x_minscale',
                        default=0, type=float, help="scaling factor for min x offset")
    parser.add_argument('--anchor-offset-y-minscale', dest='anchor_offset_y_minscale',
                        default=0, type=float, help="scaling factor for min y offset")
    parser.add_argument('--anchor-offset-x-maxscale', dest='anchor_offset_x_maxscale',
                        default=2.0, type=float, help="scaling factor for max x offset")
    parser.add_argument('--anchor-offset-y-maxscale', dest='anchor_offset_y_maxscale',
                        default=2.0, type=float, help="scaling factor for max y offset")
    parser.add_argument('--anchor-wave', dest='anchor_wave', default=False, action='store_true',
                        help="interpret anchor offsets as wave parameters")
    parser.add_argument('--radial-wave', dest='radial_wave', default=False, action='store_true',
                        help="anchor-wave mode is radial")
    parser.add_argument('--clip-wave', dest='clip_wave', default=False, action='store_true',
                        help="anchor-wave mode is clipped (don't wrap)")
    parser.add_argument('--anchor-noise', dest='anchor_noise', default=False, action='store_true',
                        help="interpret anchor offsets as noise parameters")
    parser.add_argument('--anchor-jsons', dest='anchor_jsons', default=False,
                        help="json file of paths in n dimensions")
    parser.add_argument('--gradient', dest='gradient', default=False, action='store_true')
    parser.add_argument('--linear', dest='linear', default=False, action='store_true')
    parser.add_argument('--gaussian', dest='gaussian', default=False, action='store_true')
    parser.add_argument('--uniform', dest='uniform', default=False, action='store_true',
                        help="Random prior is uniform [-1,1] (not gaussian)")
    parser.add_argument('--tight', dest='tight', default=False, action='store_true')
    parser.add_argument("--seed", type=int, default=None,
                        help="Optional random seed")
    parser.add_argument('--chain', dest='chain', default=False, action='store_true')
    parser.add_argument('--encircle', dest='encircle', default=False, action='store_true')
    parser.add_argument('--partway', dest='partway', type=float, default=None)
    parser.add_argument("--spacing", type=int, default=1,
                        help="spacing of mine grid; rows,cols must be a multiple of spacing, plus 1")
    parser.add_argument('--anchors', dest='anchors', default=False, action='store_true',
                        help="use reconstructed images instead of random ones")
    parser.add_argument('--anchor-glob', dest='anchor_glob', default=None,
                        help="use file glob source of anchors")
    parser.add_argument('--anchor-directory', dest='anchor_dir', default=None,
                        help="monitor directory for anchors")
    parser.add_argument('--watch', dest='watch', default=False, action='store_true',
                        help="monitor anchor-directory indefinitely")
    parser.add_argument('--anchor-image', dest='anchor_image', default=None,
                        help="use image as source of anchors")
    parser.add_argument("--channels", type=int, default=3,
                        help="number of channels on input images read")
    parser.add_argument('--anchor-vectors', dest='anchor_vectors', default=None,
                        help="use json file as source of anchors")
    parser.add_argument('--invert-anchors', dest='invert_anchors', default=False,
                        action='store_true', help="Use antipode of given anchors.")
    parser.add_argument("--numanchors", type=int, default=None,
                        help="number of anchors to generate")
    parser.add_argument('--dataset', dest='dataset', default=None,
                        help="Dataset for anchors.")
    parser.add_argument("--with-labels", dest='with_labels', default=False, action='store_true',
                        help="use labels for conditioning information")
    parser.add_argument("--clone-label", dest='clone_label', type=int, default=None,
                        help="clone given label (used with --with-labels)")
    parser.add_argument('--color-convert', dest='color_convert', default=False,
                        action='store_true', help="Convert source dataset to color from grayscale.")
    parser.add_argument('--split', dest='split', default="all",
                        help="Which split to use from the dataset (train/nontrain/valid/test/any).")
    parser.add_argument("--offset", type=int, default=0,
                        help="data offset to skip")
    parser.add_argument("--stepsize", type=int, default=1,
                        help="data step size from offset")
    parser.add_argument("--allowed", dest='allowed', type=str, default=None,
                        help="Only allow whitelisted labels L1,L2,...")
    parser.add_argument("--prohibited", dest='prohibited', type=str, default=None,
                        help="Exclude blacklisted labels L1,L2,...")
    parser.add_argument('--passthrough', dest='passthrough', default=False, action='store_true',
                        help="Use originals instead of reconstructions")
    parser.add_argument('--shoulders', dest='shoulders', default=False, action='store_true',
                        help="Append anchors to left/right columns")
    parser.add_argument('--encoder', dest='encoder', default=False, action='store_true',
                        help="Output dataset as encoded vectors")
    parser.add_argument('--write-anchors', dest='write_anchors', default=False,
                        action='store_true', help="save anchors in anchors.json")
    parser.add_argument("--batch-size", dest='batch_size', type=int, default=64,
                        help="batch size when encoding vectors")
    parser.add_argument("--image-size", dest='image_size', type=int, default=64,
                        help="size of (offset) images")
    parser.add_argument('--anchor-image-template', dest='anchor_image_template', default=None,
                        help="template for anchor image filename")
    parser.add_argument('--outfile-template', dest='save_path_template', default=None,
                        help="template for save path filename")
    parser.add_argument('--multistrip', dest='multistrip', default=None, type=int,
                        help="update anchor-offset-x for each entry in anchor-offset")
    parser.add_argument('--range', dest='range', default=None,
                        help="low,high integer range for template run")
    parser.add_argument('--z-step', dest='z_step', default=0.01, type=float,
                        help="variable that gets stepped each template step")
    parser.add_argument('--z-initial', dest='z_initial', default=0.0, type=float,
                        help="initial value of variable stepped each template step")
    args = parser.parse_args(args)

    dmodel = None
    if args.preload_model:
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type,
                                args.model_interface)

    z_range = None
    range_data = None
    event_handler = AnchorFileHandler()
    cur_z_step = args.z_initial

    barename = None
    if args.anchor_image:
        basename = os.path.basename(args.anchor_image)
        barename = os.path.splitext(basename)[0]

    if args.anchor_dir:
        event_handler.setup(args, dmodel, args.save_path, cur_z_step)
        for f in sorted(os.listdir(args.anchor_dir)):
            full_path = os.path.join(args.anchor_dir, f)
            if os.path.isfile(full_path):
                event_handler.process(full_path)
        if args.watch:
            print("Watching anchor directory {}".format(args.anchor_dir))
            observer = Observer()
            observer.schedule(event_handler, path=args.anchor_dir, recursive=False)
            observer.start()
            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                observer.stop()
            observer.join()
    elif args.anchor_jsons:
        event_handler.setup(args, dmodel, args.save_path, cur_z_step)
        event_handler.process(args.anchor_image)
    elif args.range is not None:
        # TODO: migrate this case to event handler like anchor_jsons above
        # wrap in list() so the range survives reuse on python 3
        z_range = list(map(int, args.range.split(",")))
        z_step = args.z_step
        cur_z_step = args.z_initial
        if z_range is not None:
            template_low, template_high = z_range
            for i in range(template_low, template_high + 1):
                if args.anchor_image_template is not None:
                    cur_anchor_image = args.anchor_image_template.format(i)
                else:
                    cur_anchor_image = args.anchor_image
                cur_save_path = args.save_path_template.format(i)
                dmodel = run_with_args(args, dmodel, cur_anchor_image, cur_save_path,
                                       cur_z_step, range_data=range_data)
                cur_z_step += z_step
    else:
        run_with_args(args, dmodel, args.anchor_image, args.save_path, cur_z_step, barename)
def run_with_args(args, dmodel, cur_anchor_image, cur_save_path, cur_z_step,
                  cur_basename="basename", range_data=None, template_dict={}):
    # note: the mutable template_dict default is shared across calls
    anchor_images = None
    anchor_labels = None
    if args.anchors:
        allowed = None
        prohibited = None
        include_targets = False
        if args.allowed:
            include_targets = True
            allowed = list(map(int, args.allowed.split(",")))
        if args.prohibited:
            include_targets = True
            prohibited = list(map(int, args.prohibited.split(",")))
        anchor_images = get_anchor_images(args.dataset, args.split, args.offset,
                                          args.stepsize, args.numanchors, allowed,
                                          prohibited, args.image_size, args.color_convert,
                                          include_targets=include_targets)
        if args.with_labels:
            anchor_labels = get_anchor_labels(args.dataset, args.split, args.offset,
                                              args.stepsize, args.numanchors)

    if args.anchor_glob is not None:
        files = plat.sampling.real_glob(args.anchor_glob)
        if args.offset > 0:
            files = files[args.offset:]
        if args.stepsize > 1:
            files = files[::args.stepsize]
        if args.numanchors is not None:
            files = files[:args.numanchors]
        anchor_images = anchors_from_filelist(files, args.channels)
        print("Read {} images from {} files".format(len(anchor_images), len(files)))
        print("First 5 files: ", files[:5])
        if len(anchor_images) == 0:
            print("No images, cannot continue")
            sys.exit(0)

    if cur_anchor_image is not None:
        # _, _, anchor_images = anchors_from_image(cur_anchor_image, channels=args.channels, image_size=(args.image_size, args.image_size))
        anchor_images = anchors_from_filelist([cur_anchor_image], channels=args.channels)
        if args.offset > 0:
            anchor_images = anchor_images[args.offset:]
        if args.stepsize > 0:
            anchor_images = anchor_images[::args.stepsize]
        if args.numanchors is not None:
            anchor_images = anchor_images[:args.numanchors]

    # at this point we can make a dummy anchor_labels if we need
    if anchor_images is not None and anchor_labels is None:
        anchor_labels = [None] * len(anchor_images)

    if args.passthrough:
        # determine final filename string
        image_size = anchor_images[0].shape[1]
        save_path = plat.sampling.emit_filename(cur_save_path, {}, args)
        print("Preparing image file {}".format(save_path))
        img = grid2img(anchor_images, args.rows, args.cols, not args.tight)
        img.save(save_path)
        sys.exit(0)

    if dmodel is None:
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type,
                                args.model_interface)

    if args.seed is not None:
        print("Setting random seed to ", args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)
    else:
        np.random.seed(None)
        random.seed(None)

    embedded = None
    if anchor_images is not None:
        x_queue = anchor_images[:]
        c_queue = anchor_labels[:]
        anchors = None
        # print("========> ENCODING {} at a time".format(args.batch_size))
        while len(x_queue) > 0:
            cur_x = x_queue[:args.batch_size]
            cur_c = c_queue[:args.batch_size]
            x_queue = x_queue[args.batch_size:]
            c_queue = c_queue[args.batch_size:]
            # TODO: remove vestiges of conditional encode/decode
            # encoded = dmodel.encode_images(cur_x, cur_c)
            encoded = dmodel.encode_images(cur_x)
            try:
                emb_l = dmodel.embed_labels(cur_c)
            except AttributeError:
                emb_l = [None] * args.batch_size
            if anchors is None:
                anchors = encoded
                embedded = emb_l
            else:
                anchors = np.concatenate((anchors, encoded), axis=0)
                embedded = np.concatenate((embedded, emb_l), axis=0)
        # anchors = dmodel.encode_images(anchor_images)
    elif args.anchor_vectors is not None:
        anchors = get_json_vectors(args.anchor_vectors)
        # print("Read vectors: ", anchors.shape)
        vsize = anchors.shape[-1]
        anchors = anchors.reshape([-1, vsize])
        print("Read vectors: ", anchors.shape)
    else:
        anchors = None

    if args.invert_anchors:
        anchors = -1 * anchors

    if args.encoder:
        if anchors is not None:
            plat.sampling.output_vectors(anchors, args.save_path)
        else:
            plat.sampling.stream_output_vectors(dmodel, args.dataset, args.split,
                                                args.save_path, batch_size=args.batch_size)
        sys.exit(0)

    global_offset = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        offsets = get_json_vectors_list(args.anchor_offset)
        if args.anchor_wave:
            anchors = plat.sampling.anchors_wave_offsets(anchors, offsets,
                args.rows, args.cols, args.spacing, args.radial_wave,
                args.clip_wave, cur_z_step, args.anchor_offset_x,
                args.anchor_offset_x_minscale, args.anchor_offset_x_maxscale)
        elif args.anchor_noise:
            anchors = plat.sampling.anchors_noise_offsets(anchors, offsets,
                args.rows, args.cols, args.spacing, cur_z_step,
                args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale,
                args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale)
        elif range_data is not None:
            anchors = plat.sampling.anchors_json_offsets(anchors, offsets,
                args.rows, args.cols, args.spacing, cur_z_step,
                args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale,
                args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale,
                range_data)
        else:
            anchors = plat.sampling.anchors_from_offsets(anchors[0], offsets,
                args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale,
                args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale)

    if args.global_offset is not None:
        offsets = get_json_vectors(args.global_offset)
        if args.global_ramp:
            offsets = cur_z_step * offsets
        global_offset = plat.sampling.get_global_offset(offsets, args.global_indices,
                                                        args.global_scale)

    z_dim = dmodel.get_zdim()

    # I don't remember what partway/encircle do, so they are not handling the chain layout.
    # This handles the case (at least) of mines with random anchors.
    if (args.partway is not None) or args.encircle or (anchors is None):
        srows = (args.rows // args.spacing) + 1
        scols = (args.cols // args.spacing) + 1
        rand_anchors = plat.sampling.generate_latent_grid(z_dim, rows=srows, cols=scols,
            fan=False, gradient=False, spherical=False, gaussian=False,
            anchors=None, anchor_images=None, mine=False, chain=False,
            spacing=args.spacing, analogy=False, rand_uniform=args.uniform)
        if args.partway is not None:
            l = len(rand_anchors)
            clipped_anchors = anchors[:l]
            anchors = (1.0 - args.partway) * rand_anchors + args.partway * clipped_anchors
        elif args.encircle:
            anchors = surround_anchors(srows, scols, anchors, rand_anchors)
        else:
            anchors = rand_anchors

    z = plat.sampling.generate_latent_grid(z_dim, args.rows, args.cols, args.fan,
        args.gradient, not args.linear, args.gaussian, anchors, anchor_images,
        True, args.chain, args.spacing, args.analogy)
    if args.write_anchors:
        plat.sampling.output_vectors(anchors, "anchors.json")
    if global_offset is not None:
        z = z + global_offset

    template_dict["BASENAME"] = cur_basename
    # emb_l = None
    # emb_l = [None] * len(z)
    embedded_labels = None
    # TODO: this could be more elegant
    if embedded is not None and embedded[0] is not None:
        if args.clone_label is not None:
            embedded_labels = np.tile(embedded[args.clone_label], [len(z), 1])
        else:
            embedded_labels = plat.sampling.generate_latent_grid(z_dim, args.rows,
                args.cols, args.fan, args.gradient, not args.linear, args.gaussian,
                embedded, anchor_images, True, args.chain, args.spacing, args.analogy)
    # TODO: maybe not the best way to check if labels are valid
    # if anchor_labels is None or anchor_labels[0] is None:
    #     emb_l = [None] * len(z)

    plat.sampling.grid_from_latents(z, dmodel, args.rows, args.cols, anchor_images,
        args.tight, args.shoulders, cur_save_path, args, args.batch_size,
        template_dict=template_dict, emb_l=embedded_labels)
    return dmodel
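# --- Hedged usage sketch (not part of the original source) -----------------
# The options added in this version: read anchors with an explicit channel
# count and also dump the encoded anchor vectors to anchors.json alongside
# the rendered grid. Model name and glob are placeholders.
def _example_write_anchors():
    import argparse
    p = argparse.ArgumentParser(description="plat sample")
    sample(p, None, ["--model", "celeba_64",
                     "--anchor-glob", "anchors/*.png", "--channels", "4",
                     "--write-anchors", "--rows", "2", "--cols", "2",
                     "--outfile", "grid.png"])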