def create_network():
    # Rendering size; `figure_size` and `batch_size` come from module scope
    t_size = tf.placeholder_with_default(figure_size, [])
    nets = []
    for k in range(batch_size):
        # Give each CPPN its own variable scope so the weights don't collide
        with tf.variable_scope(f"CPPN_layer_{k}"):
            nets.append(cppn(t_size, normalize=True))
    return tf.concat(nets, axis=0)
def create_network(BS):
    # Variant with an explicit batch size; `t_size` is the module-level
    # size placeholder defined alongside the session
    nets = []
    for k in range(BS):
        with tf.variable_scope(f"CPPN_layer_{k}"):
            nets.append(cppn(t_size, normalize=False))
    net = tf.concat(nets, axis=0)
    return net
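# Usage sketch, not from the source: `create_network(BS)` reads the
# module-level `t_size` placeholder, so one has to be published first.
# All sizes here are illustrative assumptions.
def demo_create_network():
    global t_size
    t_size = tf.placeholder_with_default(224, [])
    net = create_network(BS=4)  # -> [4, H, W, 3] batch of CPPN images
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        return sess.run(net, {t_size: 128})  # render the batch at 128x128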
def render_set(
        channel,
        n_iter,
        prefix,
        starting_pos=None,
        force=False,
        objective=None,
):
    f_model = os.path.join(save_model_dest, channel + f"_{prefix}.npy")
    f_image = os.path.join(save_image_dest, channel + f"_{prefix}.png")

    # Skip work that has already been rendered, unless forced
    if os.path.exists(f_model) and not force:
        return True

    print("Starting", channel, prefix)
    obj = objective

    # Add this to "sharpen" the image... too much and it gets crazy
    #obj += 0.001*objectives.total_variation()

    sess = create_session()
    t_size = tf.placeholder_with_default(size_n, [])
    param_f = lambda: cppn(t_size)

    T = render.make_vis_T(
        model,
        obj,
        param_f=param_f,
        transforms=[],
        optimizer=optimizer,
    )
    tf.global_variables_initializer().run()

    # Assign the starting weights
    if starting_pos is not None:
        for v, x in zip(tf.trainable_variables(), starting_pos):
            sess.run(tf.assign(v, x))

    for i in tqdm(range(n_iter)):
        _, loss = sess.run([
            T("vis_op"),
            T("loss"),
        ])

    # Save trained variables
    train_vars = sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    params = np.array(sess.run(train_vars), object)
    save(params, f_model)

    # Save final image
    images = T("input").eval({t_size: 600})
    img = images[0]
    sess.close()

    imsave(f_image, img)
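# Warm-start sketch (assumed wiring, not from the source): re-render a
# channel starting from weights that an earlier run stored via `save(...)`.
# `load` is the project's numpy-loading helper; the channel and prefix
# names are illustrative.
def demo_warm_start():
    prev = load(os.path.join(save_model_dest, "mixed4a_base.npy"))
    render_set(
        "mixed4a",
        n_iter=512,
        prefix="warm",
        starting_pos=prev,
        objective=objectives.channel("mixed4a", 0),
    )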
def render_set(n, channel):
    print("Starting", channel, n)
    obj = objectives.channel(channel, n)

    # Add this to "sharpen" the image... too much and it gets crazy
    #obj += 0.001*objectives.total_variation()

    sess = create_session()
    t_size = tf.placeholder_with_default(size_n, [])

    f_model = os.path.join(save_model_dest, channel + f"_{n}.npy")

    T = render.make_vis_T(
        model,
        obj,
        param_f=lambda: cppn(t_size),
        transforms=[],
        optimizer=optimizer,
    )
    tf.global_variables_initializer().run()
    train_vars = sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

    if not os.path.exists(f_model):
        for i in tqdm(range(training_steps)):
            _, loss = sess.run([
                T("vis_op"),
                T("loss"),
            ])

        # Save trained variables
        params = np.array(sess.run(train_vars), object)
        save(params, f_model)
    else:
        params = load(f_model)

    # Save final image
    feed_dict = dict(zip(train_vars, params))
    feed_dict[t_size] = image_size
    images = T("input").eval(feed_dict)
    img = images[0]
    sess.close()

    f_image = os.path.join(save_image_dest, channel + f"_{n}.jpg")
    imageio.imwrite(f_image, img)
    print(f"Saved to {f_image}")
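# Sweep sketch (assumption, not from the source): the `render_set(n, channel)`
# variant above caches its model file, so sweeping the first few channels of
# a layer is a simple loop. The layer name is an illustrative lucid-style
# identifier.
def demo_render_layer(channel="mixed4a", n_channels=8):
    for n in range(n_channels):
        render_set(n, channel)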
model_cutoff = 80
extension = 'png'

beats_per_frame = 1
sigma_weight = 1 / 3.0
exag = 0.005
bpm = 127
fps = 30

save_dest = "results/interpolation_smooth"
os.system(f'mkdir -p {save_dest}')

sess = create_session()
t_size = tf.placeholder_with_default(200, [])
t_image = cppn(t_size)
train_vars = sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

# avconv -y -r 30 -i "%08d.png" -b:v 2400k ../wiggle6.mp4


def render_params(params, size=224):
    feed_dict = dict(zip(train_vars, params))
    feed_dict[t_size] = size
    return sess.run(t_image, feed_dict)[0]


f_models = sorted(glob.glob("results/smooth_models/*000*.npy"))
print(f"Loading models. Found {len(f_models)} total.")
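# Frame-dump sketch (assumption, not from the source): `render_params` maps
# one saved weight set to an image, so producing numbered frames for the
# avconv command above could look roughly like this. `load` is assumed to be
# the same model-loading helper used by the renderer earlier.
def demo_write_frames():
    for frame_n, f_model in enumerate(f_models):
        params = load(f_model)
        frame = render_params(params, size=400)
        f_out = os.path.join(save_dest, f"{frame_n:08d}.{extension}")
        imageio.imwrite(f_out, frame)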
def build_sprites(local_root='.',
                  graph_version=None,
                  model_loader=None,
                  vis=None,
                  layers=None,
                  vis_filename='vis.js'):
    if graph_version is None:
        raise ValueError("graph_version cannot be None")
    if model_loader is None:
        raise ValueError("model_loader cannot be None")

    graph_version_path = os.path.join(local_root, graph_version)
    if not os.path.isdir(graph_version_path):
        raise ValueError(
            "No graph vis directory: {}".format(graph_version_path))

    if vis is None:
        vis = {}
    update_dict_from_json(json_path=os.path.join(graph_version_path,
                                                 vis_filename),
                          updatee=vis)

    graph_steps = vis.get('steps', [])
    if len(graph_steps) == 0:
        print("no graph instances")
        return

    for graph_step in graph_steps:
        graph_step_dir = os.path.join(graph_version_path, graph_step)
        image_dir = os.path.join(graph_step_dir, 'sprites')
        image_consumed_dir = os.path.join(graph_step_dir, 'sprites_consumed')
        image_scum_dir = os.path.join(graph_step_dir, 'sprites_scum')
        sprite_map_dir = os.path.join(graph_step_dir, 'spritemaps')
        log_path = os.path.join(graph_step_dir, 'losses.csv')

        # `sprites_scum` is deliberately not created here; it is only picked
        # up below if it already exists
        if not os.path.isdir(graph_step_dir):
            os.mkdir(graph_step_dir)
        if not os.path.isdir(image_dir):
            os.mkdir(image_dir)
        if not os.path.isdir(image_consumed_dir):
            os.mkdir(image_consumed_dir)
        if not os.path.isdir(sprite_map_dir):
            os.mkdir(sprite_map_dir)

        sprite_dirs = [
            d for d in [image_dir, image_consumed_dir, image_scum_dir]
            if os.path.isdir(d)
        ]

        # graph step specific config
        step_vis = deepcopy(vis)
        update_dict_from_json(json_path=os.path.join(graph_step_dir,
                                                     vis_filename),
                              updatee=step_vis)

        max_index = step_vis.get('max_index', 2048)
        scale = step_vis.get('scale', 64)
        thresholds = step_vis.get('thresholds', [64])
        vis_loss = step_vis.get('loss', {})

        batch_id = get_next_batch_id(loss_log_path=log_path)

        # drives off the model json - as it might be customised
        # NB: this overrides the `layers` argument passed to build_sprites
        graph_model = get_graph_model(graph_version=graph_version,
                                      model_loader=model_loader)
        layers = graph_model['layers']

        # if not None and not empty, only build sprites for these
        # layers/indexes
        if 'target_layers' in vis:
            target_layers = vis['target_layers']
            layers = [
                layer for layer in layers
                if target_layers is None or len(target_layers) == 0
                or layer['index'] in target_layers
            ]
        target_indexes = vis.get('target_indexes', [])

        use_cppn = 'param' in vis and vis['param'] == 'cppn'

        # load existing sprite details
        existing_sprite_details = get_existing_sprite_details(
            sprite_dirs=sprite_dirs, scale=scale)

        print("\nBUILDING SPRITES: graph_version={} steps={}".format(
            graph_version, graph_step))
        print(" layers={}".format([layer['index'] for layer in layers]))

        for layer in layers:
            layer_name = layer['name']
            layer_index = layer['index']
            adam = layer['adam']
            transform_id = layer['transform_id']

            model = None
            optimizer = tf.train.AdamOptimizer(adam)
            transforms = get_transforms(transform_id)

            existing_layer_sprites = existing_sprite_details[
                layer_index] if layer_index in existing_sprite_details else []

            try:
                print("\nLAYER: {}\n".format(layer))
                num_processed = 0
                for index in range(0, max_index):
                    # check for abort in vis files
                    vf_abort = check_abort(dirs=[
                        os.path.join(graph_version_path, vis_filename),
                        os.path.join(graph_step_dir, vis_filename)
                    ])
                    if len(vf_abort) > 0:
                        print("\nDetected abort in vis files: {}".format(
                            vf_abort))
                        return

                    # check any target indexes
                    if not (target_indexes is None or len(target_indexes) == 0
                            or index in target_indexes):
                        continue

                    existing_index_sprite_thresholds = existing_layer_sprites[
                        index] if index in existing_layer_sprites else []

                    # calculate work to do: skip if all thresholds already
                    # exist
                    thresholds_to_generate = [
                        t for t in thresholds
                        if t not in existing_index_sprite_thresholds
                    ]
                    if len(thresholds_to_generate) == 0:
                        continue

                    # can start from an existing threshold
                    max_existing_threshold = max(
                        existing_index_sprite_thresholds
                    ) if len(existing_index_sprite_thresholds) > 0 else None

                    if max_existing_threshold is not None and \
                            max_existing_threshold <= min(thresholds_to_generate):
                        threshold_start = max_existing_threshold + 1
                        img_path = [
                            ip for ip in [
                                get_image_path(sd, layer_index, index,
                                               max_existing_threshold, scale)
                                for sd in sprite_dirs
                            ] if os.path.isfile(ip)
                        ][0]
                        with Image.open(img_path) as im:
                            im.load()

                        # make array
                        im_1 = np.array(im)
                        # add dummy batch dimension
                        im_2 = np.expand_dims(im_1, axis=0)
                        # rescale pixel values to below 1.0
                        init_val = im_2.astype(np.float32) / 256

                        param_f = lambda: lucid_images.image(
                            scale, fft=False, init_val=init_val)
                    elif use_cppn:
                        threshold_start = 0
                        adam = 0.00055
                        optimizer = tf.train.AdamOptimizer(adam)
                        param_f = lambda: param.cppn(scale)
                    else:
                        threshold_start = 0
                        param_f = lambda: param.image(
                            scale, fft=True, decorrelate=True)

                    # drop the model regularly (forces a reload below)
                    if num_processed % 100 == 0:
                        print("Reloading model ...")
                        model = None
                        num_processed = 0
                    if model is None:
                        model = model_loader(graph_step)
                        model.load_graphdef()

                    # start the feature
                    print("\nFEATURE: {}:{}\n".format(layer['name'], index))
                    log_item = {
                        "batch_id": batch_id,
                        "timestamp": current_milli_time(),
                        "scale": scale,
                        "adam": adam,
                        "transforms": transform_id,
                        "layer": layer_index,
                        "index": index
                    }

                    visualizations = []
                    try:
                        visualization = get_visualizations_and_losses(
                            model,
                            objectives.channel(layer_name, index),
                            transforms=transforms,
                            param_f=param_f,
                            optimizer=optimizer,
                            threshold_start=threshold_start,
                            thresholds=thresholds,
                            visualization_index=index,
                            visualization_layer=layer_index,
                            minimum_loss=vis_loss.get(
                                'minimum_loss_threshold', 0),
                            num_bins=vis_loss.get('num_bins', 0),
                            max_bin_hits=vis_loss.get('max_bin_hits', 0),
                            bin_factor=vis_loss.get('bin_factor', 0),
                            loss_logger=lambda l, t, s: loss_logger(
                                log_file=log_path,
                                item=log_item,
                                threshold=t,
                                loss=l,
                                status=s))
                        num_processed = num_processed + 1
                        if len(visualization) == 0:
                            continue

                        # check losses
                        losses = [v[2] for v in visualization]
                        print("\nLOSSES: feature={}:{}; {}\n".format(
                            layer_index, index, losses))
                        visualizations.append(visualization)
                    finally:
                        if len(visualizations) > 0:
                            store_visualizations_and_losses(
                                visualizations,
                                output_dir=image_dir,
                                scale=scale)
            except ValueError as e:
                msg = "{}".format(e)
                if 'slice index' in msg and 'out of bounds' in msg:
                    print("Closing layer: slice index out of bounds: {}".
                          format(e))
                else:
                    raise e
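# Invocation sketch (assumed, not from the source): `build_sprites` expects a
# graph-version directory under `local_root` plus a loader that returns a
# lucid-style model (anything exposing `load_graphdef()`) for a given step.
# Every literal below is illustrative.
def demo_build_sprites(my_model_loader):
    build_sprites(
        local_root='graphs',
        graph_version='v1',
        model_loader=my_model_loader,
        vis={'steps': ['step_0000'], 'param': 'cppn'},
    )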