Code example #1
    def iterate(self):
        THIS_DIR = os.getcwd()
        os.chdir(os.path.join(ANNEX_DIR, self.base_path))

        try:
            iter_num = len(self.getAssetsByTagName(ASSET_TAGS['DLXDD_DD']))

            bc = BatCountry(
                os.path.join(getConfig('caffe_root'), "models",
                             "bvlc_googlenet"))
            img = bc.dream(
                np.float32(self.get_image(file_name="dream_%d.jpg" %
                                          iter_num)))
            bc.cleanup()

            os.chdir(THIS_DIR)

            iter_num += 1
            dream = Image.fromarray(np.uint8(img))
            asset_path = self.addAsset(
                None, "dream_%d.jpg" % iter_num,
                tags=[ASSET_TAGS['DLXDD_DD']],
                description="deep dream iteration")

            if asset_path is not None:
                dream.save(os.path.join(ANNEX_DIR, asset_path))
                return True

        except Exception as e:
            print "ERROR ON ITERATION:"
            print e, type(e)

        return False
Code example #2
def dream_that_image(before, after, layer, seed, filehash, iteration):

    # dreaming...
    mydebugmsg("Dreaming dream #" + str(iteration))
    mydebugmsg("before = [" + before + "]")
    mydebugmsg("after  = [" + after + "]")

    bc = BatCountry(DREAMMODEL)
    features = bc.prepare_guide(Image.open(seed), end=layer)
    image = bc.dream(np.float32(Image.open(before)),
                     end=layer,
                     iter_n=20,
                     objective_fn=BatCountry.guided_objective,
                     objective_features=features,
                     verbose=VERBOSITY)

    bc.cleanup()

    #
    # write the output image to file
    #

    result = Image.fromarray(np.uint8(image))
    result.save(after)

    #
    # Save both the input image and output image to S3 using the MD5 hash of the original file content as the key name
    #

    keyname = filehash + ".jpg"
    key = beforebucket.new_key(keyname)

    key.set_contents_from_filename(before)
    key.set_acl('public-read')

    mydebugmsg("new key name = [" + keyname + "]")

    create_thumbnail(before, keyname, before_thumbnails_bucket)

    #
    # keyname should look like hashvalue.1.jpg
    #

    keyname = filehash + "." + str(iteration) + ".jpg"
    key = afterbucket.new_key(keyname)

    key.set_contents_from_filename(after)
    key.set_acl('public-read')

    mydebugmsg("new key name = [" + keyname + "]")

    create_thumbnail(after, keyname, after_thumbnails_bucket)

    photo_after_url = "https://{}.{}/{}".format(after_bucket_name,
                                                s3.server_name(), keyname)
    tweet_the_nightmare(photo_after_url)
    mydebugmsg("url for tweepy = " + photo_after_url)

    mydebugmsg("------------------------------------------")
    return
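dream_that_image() above is a fragment: DREAMMODEL, VERBOSITY, the S3 bucket objects, mydebugmsg(), create_thumbnail(), and tweet_the_nightmare() are all defined elsewhere in its project. The sketch below shows one way that module-level setup could look, assuming the buckets come from boto 2's S3 API; every name, path, and bucket name here is a placeholder guess, not code from the original project.

# Hypothetical setup assumed by dream_that_image(); all values are placeholders.
import boto
from PIL import Image

DREAMMODEL = "/opt/caffe/models/bvlc_googlenet"   # assumed model directory
VERBOSITY = True                                   # assumed verbosity flag

s3 = boto.connect_s3()                             # credentials from the environment
before_bucket_name = "dreams-before"               # placeholder bucket names
after_bucket_name = "dreams-after"
beforebucket = s3.get_bucket(before_bucket_name)
afterbucket = s3.get_bucket(after_bucket_name)
before_thumbnails_bucket = s3.get_bucket("dreams-before-thumbs")
after_thumbnails_bucket = s3.get_bucket("dreams-after-thumbs")

def mydebugmsg(msg):
    # stand-in for the project's debug logger
    print(msg)

def create_thumbnail(path, keyname, bucket):
    # stand-in: build a small thumbnail and upload it under the same key name
    thumb = Image.open(path)
    thumb.thumbnail((256, 256))
    thumb_path = "/tmp/thumb_" + keyname
    thumb.save(thumb_path)
    key = bucket.new_key(keyname)
    key.set_contents_from_filename(thumb_path)
    key.set_acl('public-read')

def tweet_the_nightmare(url):
    # stand-in: the real project presumably posts the URL via tweepy
    mydebugmsg("would tweet: " + url)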
Code example #3
File: demo_vis.py  Project: crowsonkb/bat-country
import numpy as np
import argparse
from batcountry import BatCountry
from PIL import Image

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-b", "--base-model", required=True, help="base model path")
ap.add_argument("-l",
                "--layer",
                type=str,
                default="conv2/3x3",
                help="layer of CNN to use")
ap.add_argument("-i", "--image", required=True, help="path to base image")
ap.add_argument("-v",
                "--vis",
                required=True,
                help="path to output directory for visualizations")
args = ap.parse_args()

# we can't stop here...
bc = BatCountry(args.base_model)
(image, visualizations) = bc.dream(np.float32(Image.open(args.image)),
                                   end=args.layer,
                                   visualize=True)

# loop over the visualizations
for (k, vis) in visualizations:
    # write the visualization to file
    outputPath = "{}/{}.jpg".format(args.vis, k)
    result = Image.fromarray(np.uint8(vis))
    result.save(outputPath)
Code example #4
File: bat_example.py  Project: n800sau/roborep
import os
from batcountry import BatCountry
import numpy as np
from PIL import Image

bc = BatCountry(os.path.expanduser("~/install/caffe/models/bvlc_googlenet"))
image = bc.dream(np.float32(Image.open("cat.1.jpg")))
bc.cleanup()

result = Image.fromarray(np.uint8(image))
result.save("output.jpg")
Code example #5
ap.add_argument("-s",
                "--octave-scale",
                required=False,
                type=float,
                help="Scale of the octaves (default = 1.4)")
ap.add_argument("-i", "--image", required=True, help="path to image file")
ap.add_argument("-o",
                "--output",
                required=True,
                help="path to output directory")
args = ap.parse_args()

# filter warnings, initialize bat country, and grab the layer names of
# the CNN
warnings.filterwarnings("ignore")
bc = BatCountry(args.base_model, args.proto, args.caffe_model)
layers = bc.layers()

# extract the filename and extension of the input image
filename = args.image[args.image.rfind("/") + 1:]
(filename, ext) = filename.rsplit(".", 1)

# loop over the layers -- VISUALIZING ALL LAYERS of the model
for (i, layer) in enumerate(layers):
    # perform visualizing using the current layer
    print("[INFO] processing layer `{}` {}/{}".format(layer, i + 1,
                                                      len(layers)))

    try:
        # pass the image through the network
        image = bc.dream(np.float32(Image.open(args.image)),
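                         # -- assumed continuation: the excerpt above is cut off
                         # mid-call; the remaining argument, the save step, and
                         # the except clause below are a sketch, not the
                         # original script's code --
                         end=layer)

        # save this layer's dream to the output directory, replacing "/" in the
        # layer name so it forms a valid filename
        result = Image.fromarray(np.uint8(image))
        result.save("{}/{}_{}.{}".format(args.output, filename,
                                         layer.replace("/", "_"), ext))

    except KeyError:
        # not every layer can be used as a dream endpoint; skip the bad ones
        print("[ERROR] cannot use layer `{}` -- skipping".format(layer))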
Code example #6
#!/usr/bin/python
import sys
from batcountry import BatCountry
from PIL import Image
import numpy as np

# dream.py <path_to_guide_image> <path_to_source_image> <path_to_save_image>
# ./dream ./guide.jpg ./in.jpg ./out.jpg
guide = sys.argv[1]
imgin = sys.argv[2]
imgout = sys.argv[3]

bc = BatCountry("/opt/caffe/models/bvlc_googlenet")
features = bc.prepare_guide(Image.open(guide))
image = bc.dream(
    np.float32(Image.open(imgin)),
    iter_n=20,
    objective_fn=BatCountry.guided_objective,
    objective_features=features,
)
bc.cleanup()
result = Image.fromarray(np.uint8(image))
result.save(imgout)
Code example #7
File: inference.py  Project: jtoy/caffe_inception
ap.add_argument("--guide", help="Image to guide deep dream")
ap.add_argument("--mixlayer", help="Layer to mix")
ap.add_argument("--classtoshow", help="Specific image to show")
args = ap.parse_args()
if args.output is None:
    args.output = "/data/output/" + str(int(time.time())) + ".jpg"

# Rename model file so inference script can pick it up
search_dir = args.base_model
files = sorted(filter(os.path.isfile,
                      glob.glob(os.path.join(search_dir, "*.caffemodel"))),
               key=os.path.getmtime)
model_file = files[-1]
shutil.move(model_file,
            os.path.join(args.base_model, "bvlc_googlenet.caffemodel"))
# we can't stop here...
if args.classtoshow:
    bc = BatCountry(args.base_model,
                    deploy_path='/data/model_cache/deploy_class.prototxt')
else:
    bc = BatCountry(args.base_model)

for layer in args.layer:
    if args.guide:
        features = bc.prepare_guide(Image.open(args.guide), end=layer)
        image = bc.dream(np.float32(Image.open(args.image)),
                         end=layer,
                         iter_n=args.iteration_count,
                         objective_fn=BatCountry.guided_objective,
                         objective_features=features)

    elif args.mixlayer:
        mixed_features = bc.prepare_guide(Image.open(args.image),
                                          end=args.mixlayer)
        image = bc.dream(np.float32(Image.open(args.image)),
                         end=layer,
                         iter_n=args.iteration_count,
                         objective_fn=BatCountry.guided_objective,
                         objective_features=mixed_features)
Code example #8
ap.add_argument("-g",
                "--guide-image",
                required=False,
                help="path to guide image")
ap.add_argument("-o", "--output", required=False, help="path to output image")
ap.add_argument("-t", "--test", required=False, help="not used right now")
ap.add_argument("-p",
                "--patch-model",
                required=False,
                help="path to patch model")
args = ap.parse_args()
if args.output is None:
    args.output = "/data/output/" + str(int(time.time())) + ".jpg"

# we can't stop here...
bc = BatCountry(args.base_model, patch_model=args.patch_model)
if args.guide_image is None:
    if args.layer is None:
        args.layer = "conv2/3x3"
    image = bc.dream(np.float32(Image.open(args.image)), end=args.layer)
else:
    if args.layer is None:
        args.layer = "inception_4c/output"
    features = bc.prepare_guide(Image.open(args.guide_image), end=args.layer)
    image = bc.dream(
        np.float32(Image.open(args.image)),
        end=args.layer,
        iter_n=20,
        objective_fn=BatCountry.guided_objective,
        objective_features=features,
    )
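The excerpt stops right after the dream call, so args.output (computed near the top) is never used in what is shown. A likely continuation, mirroring the save step from the other examples on this page; this is an assumption rather than code from the project:

# assumed continuation: convert the dreamed array back to an image and write it
# to the output path computed earlier
result = Image.fromarray(np.uint8(image))
result.save(args.output)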