def deepdream_video(
        video, iter_n=10, octave_n=4, octave_scale=1.4,
        end="inception_4c/output", clip=True, network="bvlc_googlenet",
        frame_rate=24, verbose=False):
    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

    print("Extracting video...")
    _extract_video(video)

    output_dir = _output_video_dir(video)
    images = listdir(output_dir)

    print("Dreaming...")
    for image in images:
        image = "{}/{}".format(output_dir, image)
        img = np.float32(img_open(image))
        img = _deepdream(
            net, img, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip, verbose=verbose)
        img_fromarray(np.uint8(img)).save(image)

    print("Creating dream video...")
    _create_video(video, frame_rate)
    print("Dream video created.")
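A minimal usage sketch for deepdream_video, assuming the module-level helpers it relies on (_select_network, _extract_video, _output_video_dir, _create_video, _deepdream) are available; the input file name and layer choice below are hypothetical.

# Hypothetical example: dream every extracted frame of a clip and
# re-encode it at 30 fps, targeting a deeper GoogLeNet layer.
deepdream_video(
    "clips/beach.mp4",          # hypothetical input video path
    iter_n=10,
    octave_n=4,
    end="inception_5a/output",  # any blob name reported by list_layers()
    frame_rate=30)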
def list_layers(network="bvlc_googlenet"):
    # Load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
    net = Classifier(NET_FN, PARAM_FN, mean=CAFFE_MEAN,
                     channel_swap=CHANNEL_SWAP)
    # Return the blob (layer) names so callers can pick one as `end`;
    # the bare expression in the original had no effect.
    return net.blobs.keys()
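A short sketch showing how the returned blob names can be used to choose a target layer; it assumes list_layers returns net.blobs.keys() as rewritten above.

# Print every blob name in the default bvlc_googlenet model; any of
# these strings can be passed as the `end` argument of deepdream().
for layer in list_layers():
    print(layer)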
def make_i2v_with_caffe(net_path, param_path, tag_path=None):
    mean = np.array([164.76139251, 167.47864617, 181.13838569])
    net = Classifier(
        net_path, param_path, mean=mean, channel_swap=(2, 1, 0))

    if tag_path is not None:
        tags = json.loads(open(tag_path, 'r').read())
        assert(len(tags) == 1539)
        return CaffeI2V(net, tags)
    else:
        return CaffeI2V(net)
def deepdream(
        img_path, zoom=True, scale_coefficient=0.05, irange=100, iter_n=10,
        octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True,
        network="bvlc_googlenet", gif=False, reverse=False, duration=0.1,
        loop=False, gpu=False, gpuid=0, verbose=False):
    img = np.float32(img_open(img_path))
    s = scale_coefficient
    h, w = img.shape[:2]

    if gpu:
        print("Enabling GPU {}...".format(gpuid))
        set_device(gpuid)
        set_mode_gpu()

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

    img_pool = [img_path]

    # Save settings used in a log file
    logging.info((
        "{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, "
        "octave_n={}, octave_scale={}, end={}, clip={}, network={}, gif={}, "
        "reverse={}, duration={}, loop={}").format(
            img_path, zoom, scale_coefficient, irange, iter_n, octave_n,
            octave_scale, end, clip, network, gif, reverse, duration, loop))

    print("Dreaming...")
    for i in xrange(irange):
        img = _deepdream(
            net, img, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip, verbose=verbose)
        img_fromarray(np.uint8(img)).save("{}_{}.jpg".format(img_path, i))
        if gif:
            img_pool.append("{}_{}.jpg".format(img_path, i))
        print("Dream {} saved.".format(i))
        if zoom:
            img = affine_transform(
                img, [1 - s, 1 - s, 1], [h * s / 2, w * s / 2, 0], order=1)

    if gif:
        print("Creating gif...")
        frames = None
        if reverse:
            frames = [img_open(f) for f in img_pool[::-1]]
        else:
            frames = [img_open(f) for f in img_pool]
        writeGif(
            "{}.gif".format(img_path), frames, duration=duration, repeat=loop)
        print("gif created.")
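A hedged usage sketch for the deepdream entry point above; the image path is hypothetical, and _deepdream/_select_network are assumed to be defined in the same module.

# Hypothetical example: 20 zooming dream passes on one image, collected
# into a looping GIF. Frames are written next to the source file as
# sky.jpg_0.jpg, sky.jpg_1.jpg, ...
deepdream(
    "sky.jpg",                 # hypothetical input image
    irange=20,
    end="inception_4c/output",
    gif=True,
    duration=0.08,
    loop=True,
    gpu=True, gpuid=0)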
def make_i2v_with_caffe(net_path, param_path, tag_path=None,
                        threshold_path=None):
    mean = np.array([164.76139251, 167.47864617, 181.13838569])
    net = Classifier(
        net_path, param_path, mean=mean, channel_swap=(2, 1, 0))

    kwargs = {}
    if tag_path is not None:
        tags = json.loads(open(tag_path, 'r').read())
        assert(len(tags) == 1539)
        kwargs['tags'] = tags

    if threshold_path is not None:
        fscore_threshold = np.load(threshold_path)['threshold']
        kwargs['threshold'] = fscore_threshold

    return CaffeI2V(net, **kwargs)
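A usage sketch for the threshold-aware variant above; the file names are illustrative stand-ins for an illustration2vec tag-prediction release, not paths guaranteed by this code.

# Illustrative file names; substitute the paths of your own
# illustration2vec model files. The threshold file must be an .npz
# archive containing a 'threshold' array, as read by np.load above.
illust2vec = make_i2v_with_caffe(
    "illust2vec_tag.prototxt",           # network definition (hypothetical path)
    "illust2vec_tag_ver200.caffemodel",  # trained weights (hypothetical path)
    tag_path="tag_list.json",            # the 1539 tag names
    threshold_path="tag_thresholds.npz") # hypothetical threshold file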
def deepdream(
    img_path,
    zoom=True,
    scale_coefficient=0.05,
    irange=100,
    iter_n=10,
    octave_n=4,
    octave_scale=1.4,
    end="inception_4c/output",
    clip=True,
    network="bvlc_googlenet",
    gif=False,
    reverse=False,
    duration=0.1,
    loop=False,
    gpu=False,
    gpuid=0,
):
    img = np.float32(img_open(img_path))
    s = scale_coefficient
    h, w = img.shape[:2]

    if gpu:
        tqdm.write("Enabling GPU {}...".format(gpuid))
        set_device(gpuid)
        set_mode_gpu()

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
    net = Classifier(NET_FN, PARAM_FN, mean=CAFFE_MEAN,
                     channel_swap=CHANNEL_SWAP)

    img_pool = [img_path]

    # Save settings used in a log file
    logging.info(
        ("{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, "
         "octave_n={}, octave_scale={}, end={}, clip={}, network={}, gif={}, "
         "reverse={}, duration={}, loop={}").format(
            img_path, zoom, scale_coefficient, irange, iter_n, octave_n,
            octave_scale, end, clip, network, gif, reverse, duration, loop,
        ))

    tqdm.write("Dreaming... (ctrl-C to wake up early)")
    for i in tqdm(
        range(irange),
        bar_format="{l_bar:>20}{bar:10}{r_bar}",
        desc="Total",
        position=-1,
        unit="dream",
    ):
        img = _deepdream(
            net, img, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip,
        )
        img_fromarray(np.uint8(img)).save("{}_{}_{:03d}.jpg".format(
            path.splitext(img_path)[0],
            re.sub('[\\/:"*?<>|]+', "_", end), i))
        if gif:
            img_pool.append("{}_{}_{:03d}.jpg".format(
                path.splitext(img_path)[0],
                re.sub('[\\/:"*?<>|]+', "_", end), i))
        tqdm.write("Dream {} of {} saved.".format(i + 1, irange))
        if zoom:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                img = affine_transform(img, [1 - s, 1 - s, 1],
                                       [h * s / 2, w * s / 2, 0], order=1)

    if gif:
        tqdm.write("Creating gif...")
        frames = None
        if reverse:
            frames = [img_open(f) for f in img_pool[::-1]]
        else:
            frames = [img_open(f) for f in img_pool]
        frames[0].save(
            "{}.gif".format(img_path),
            save_all=True,
            optimize=True,
            append_images=frames[1:],
            loop=0,
        )
        tqdm.write("gif created.")
def deepdream(q, dreamname, toepath, img_path, modegpu=True, gpudevice=0,
              zoom=True, scale_coefficient=0.05, irange=5, iter_n=10,
              octave_n=4, octave_scale=1.4, end="inception_4c/output",
              clip=True, network="bvlc_googlenet", gif=False, reverse=False,
              duration=0.1, loop=False):
    # logging.debug('Starting')
    if modegpu:
        caffe.set_mode_gpu()
        caffe.set_device(gpudevice)
        print("GPU mode [device id: {}]".format(gpudevice))
        print("using GPU, but you'd still better make a cup of coffee")
    else:
        caffe.set_mode_cpu()
        print("using CPU...")

    img = np.float32(img_open(toepath + "actualframe.jpg"))
    s = scale_coefficient
    h, w = img.shape[:2]

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(
        network, toepath)
    net = Classifier(NET_FN, PARAM_FN, mean=CAFFE_MEAN,
                     channel_swap=CHANNEL_SWAP)

    img_pool = [img_path, ]

    # Save settings used in a log file
    '''
    logging.info("{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, octave_n={}, octave_scale={}, end={},"\
        "clip={}, network={}, gif={}, reverse={}, duration={}, loop={}".format(
            img_path, zoom, scale_coefficient, irange, iter_n, octave_n,
            octave_scale, end, clip, network, gif, reverse, duration, loop))
    '''

    print("Dreaming...")
    for i in range(irange):
        img = _deepdream(net, img, iter_n=iter_n, octave_n=octave_n,
                         octave_scale=octave_scale, end=end, clip=clip)
        img_fromarray(np.uint8(img)).save("{}_{}.jpg".format(
            img_path + dreamname, i))
        if gif:
            img_pool.append("{}_{}.jpg".format(img_path + dreamname, i))
        print("Dream layer depth {} saved.".format(i))
        print("{}_{}.jpg".format(img_path + dreamname, i))
        # Report the finished frame (index, file name) back through the queue
        q.put([i, "{}_{}.jpg".format(img_path + dreamname, i)])
        if zoom:
            img = affine_transform(img, [1 - s, 1 - s, 1],
                                   [h * s / 2, w * s / 2, 0], order=1)

    if gif:
        frames = None
        if reverse:
            frames = [img_open(f) for f in img_pool[::-1]]
        else:
            frames = [img_open(f) for f in img_pool]
        writeGif("{}.gif".format(img_path), frames, duration=duration,
                 repeat=loop)
        print("gif created.")

    print("Wake up")
    print("¡¡Awake!!")
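A sketch of how the queue-based variant above might be driven from another process; the paths, dream name, and the use of multiprocessing are assumptions for illustration, not part of the original code.

# Hypothetical driver: run the dream in a worker process and consume
# (frame_index, file_name) pairs from the queue as they are produced.
from multiprocessing import Process, Queue

q = Queue()
worker = Process(
    target=deepdream,
    args=(q, "dream01", "/project/toe/", "/project/frames/"),  # hypothetical paths
    kwargs={"irange": 5, "end": "inception_4c/output"})
worker.start()
for _ in range(5):                 # one message per dream iteration (irange=5)
    i, frame_file = q.get()
    print("frame {} ready: {}".format(i, frame_file))
worker.join()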