def _publish_to_s3(local_path, keyname, bucket, thumb_bucket):
    # Upload one image to S3 under `keyname`, make it world-readable,
    # log the key, and generate its thumbnail. Extracted because the
    # before/after upload passages were byte-for-byte duplicates.
    key = bucket.new_key(keyname)
    key.set_contents_from_filename(local_path)
    key.set_acl('public-read')
    mydebugmsg("new key name = [" + keyname + "]")
    create_thumbnail(local_path, keyname, thumb_bucket)


def dream_that_image(before, after, layer, seed, filehash, iteration):
    """Run one guided deep-dream pass and publish the result.

    Dreams over the image at `before` (guided by the image at `seed`,
    targeting CNN layer `layer`), writes the dreamed image to `after`,
    uploads both images (plus thumbnails) to S3 keyed by `filehash`,
    and tweets the public URL of the dreamed image.

    Parameters:
        before    -- path of the input image file
        after     -- path where the dreamed output image is written
        layer     -- CNN layer name passed to BatCountry (end=...)
        seed      -- path of the guide image
        filehash  -- MD5 hash of the original file content, used as the S3 key stem
        iteration -- dream number; appended to the "after" key name

    NOTE(review): relies on module-level globals (DREAMMODEL, VERBOSITY,
    beforebucket, afterbucket, before/after_thumbnails_bucket,
    after_bucket_name, s3, mydebugmsg, create_thumbnail,
    tweet_the_nightmare) that are defined elsewhere in the file.
    """
    # dreaming...
    mydebugmsg("Dreaming dream #" + str(iteration))
    mydebugmsg("before = [" + before + "]")
    mydebugmsg("after = [" + after + "]")

    bc = BatCountry(DREAMMODEL)
    features = bc.prepare_guide(Image.open(seed), end=layer)
    image = bc.dream(np.float32(Image.open(before)), end=layer,
                     iter_n=20, objective_fn=BatCountry.guided_objective,
                     objective_features=features, verbose=VERBOSITY)
    bc.cleanup()

    # write the output image to file
    result = Image.fromarray(np.uint8(image))
    result.save(after)

    # Save both the input image and output image to S3 using the MD5 hash
    # of the original file content as the key name.
    _publish_to_s3(before, filehash + ".jpg", beforebucket,
                   before_thumbnails_bucket)

    # keyname should look like hashvalue.1.jpg
    keyname = filehash + "." + str(iteration) + ".jpg"
    _publish_to_s3(after, keyname, afterbucket, after_thumbnails_bucket)

    photo_after_url = "https://{}.{}/{}".format(after_bucket_name,
                                                s3.server_name(), keyname)
    tweet_the_nightmare(photo_after_url)
    mydebugmsg("url for tweepy = " + photo_after_url)
    mydebugmsg("------------------------------------------")
    return
# Guided deep-dream CLI script. Example:
# --image initial_images/clouds.jpg \
# --guide-image initial_images/seed_images/starry_night.jpg \
# --output examples/output/seeded/clouds_and_starry_night.jpg

# import the necessary packages
from batcountry import BatCountry
from PIL import Image
import numpy as np
import argparse

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-b", "--base-model", required=True, help="base model path")
ap.add_argument("-l", "--layer", type=str, default="inception_4c/output",
                help="layer of CNN to use")
ap.add_argument("-i", "--image", required=True, help="path to base image")
ap.add_argument("-g", "--guide-image", required=True, help="path to guide image")
ap.add_argument("-o", "--output", required=True, help="path to output image")
args = ap.parse_args()

# we can't stop here...
bc = BatCountry(args.base_model)
features = bc.prepare_guide(Image.open(args.guide_image), end=args.layer)
image = bc.dream(np.float32(Image.open(args.image)), end=args.layer,
                 iter_n=20, objective_fn=BatCountry.guided_objective,
                 objective_features=features)
# Release the network resources; the sibling script (L4) does this and
# this one originally did not -- fixed for consistency.
bc.cleanup()

# write the output image to file
result = Image.fromarray(np.uint8(image))
result.save(args.output)
# Interactive debug script: run one guided dream and drop into pdb
# before cleanup so the raw `image` array can be inspected.
from batcountry import BatCountry
import numpy as np
from PIL import Image  # explicit import; wildcard `from PIL import *` replaced
import pdb

bc = BatCountry("/home/dylan/caffe/models/bvlc_googlenet")
features = bc.prepare_guide(Image.open('./guide.jpg'),
                            end='inception_5b/5x5_reduce')
image = bc.dream(np.float32(Image.open('./image.jpg')),
                 end='inception_5b/5x5_reduce', iter_n=20,
                 objective_fn=BatCountry.guided_objective,
                 objective_features=features)
pdb.set_trace()
bc.cleanup()
# Guided deep-dream command-line tool. Example invocation:
# --guide-image initial_images/seed_images/starry_night.jpg \
# --output examples/output/seeded/clouds_and_starry_night.jpg

# import the necessary packages
from batcountry import BatCountry
from PIL import Image
import numpy as np
import argparse

# build the CLI: model path, target layer, base image, guide image, output path
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--base-model", required=True,
                    help="base model path")
parser.add_argument("-l", "--layer", type=str,
                    default="inception_4c/output",
                    help="layer of CNN to use")
parser.add_argument("-i", "--image", required=True,
                    help="path to base image")
parser.add_argument("-g", "--guide-image", required=True,
                    help="path to guide image")
parser.add_argument("-o", "--output", required=True,
                    help="path to output image")
args = parser.parse_args()

# we can't stop here...
bc = BatCountry(args.base_model)
guide_features = bc.prepare_guide(Image.open(args.guide_image),
                                  end=args.layer)
base_pixels = np.float32(Image.open(args.image))
image = bc.dream(base_pixels,
                 end=args.layer,
                 iter_n=20,
                 objective_fn=BatCountry.guided_objective,
                 objective_features=guide_features)
bc.cleanup()

# write the output image to file
Image.fromarray(np.uint8(image)).save(args.output)
#!/usr/bin/python
# Guided deep-dream over positional arguments.
# dream.py <path_to_guide_image> <path_to_source_image> <path_to_save_image>
# ./dream ./guide.jpg ./in.jpg ./out.jpg
import sys
from batcountry import BatCountry
from PIL import Image
import numpy as np

# Fail with a usage message instead of a bare IndexError when the
# argument count is wrong.
if len(sys.argv) != 4:
    sys.exit("usage: dream.py <path_to_guide_image> "
             "<path_to_source_image> <path_to_save_image>")

guide = sys.argv[1]
imgin = sys.argv[2]
imgout = sys.argv[3]

bc = BatCountry("/opt/caffe/models/bvlc_googlenet")
features = bc.prepare_guide(Image.open(guide))
image = bc.dream(np.float32(Image.open(imgin)), iter_n=20,
                 objective_fn=BatCountry.guided_objective,
                 objective_features=features)
bc.cleanup()

result = Image.fromarray(np.uint8(image))
result.save(imgout)
#!/usr/bin/python
# Run a guided deep-dream pass driven by three positional arguments.
# dream.py <path_to_guide_image> <path_to_source_image> <path_to_save_image>
# ./dream ./guide.jpg ./in.jpg ./out.jpg
import sys
from batcountry import BatCountry
from PIL import Image
import numpy as np

guide_path = sys.argv[1]
source_path = sys.argv[2]
output_path = sys.argv[3]

dreamer = BatCountry("/opt/caffe/models/bvlc_googlenet")
guide_features = dreamer.prepare_guide(Image.open(guide_path))
dreamed = dreamer.dream(
    np.float32(Image.open(source_path)),
    iter_n=20,
    objective_fn=BatCountry.guided_objective,
    objective_features=guide_features,
)
dreamer.cleanup()

# convert the float array back to an 8-bit image and save it
Image.fromarray(np.uint8(dreamed)).save(output_path)
# Rename model file so inference script can pick it up search_dir = args.base_model files = filter(os.path.isfile, glob.glob(search_dir + "*.caffemodel")) files.sort(key=lambda x: os.path.getmtime(x)) model_file = files[-1] shutil.move(model_file, args.base_model+'/bvlc_googlenet.caffemodel') # we can't stop here... if args.classtoshow: bc = BatCountry(args.base_model, deploy_path='/data/model_cache/deploy_class.prototxt') else: bc = BatCountry(args.base_model) for layer in args.layer: if args.guide: features = bc.prepare_guide(Image.open(args.guide), end=layer) image = bc.dream(np.float32(Image.open(args.image)), end=layer, iter_n=args.iteration_count, objective_fn=BatCountry.guided_objective, objective_features=features,) elif args.mixlayer: mixed_features = bc.prepare_guide(Image.open(args.image), end=args.mixlayer) image = bc.dream(np.float32(Image.open(args.image)), end=layer, iter_n=args.iteration_count, objective_fn=BatCountry.guided_objective, objective_features=mixed_features, ) elif args.classtoshow: octaves = [ { 'layer':'loss3/classifier_zzzz', 'iter_n':190, 'start_sigma':2.5, 'end_sigma':0.78,