Example #1
def deepdream_video(
        video, iter_n=10, octave_n=4, octave_scale=1.4,
        end="inception_4c/output", clip=True, network="bvlc_googlenet",
        frame_rate=24):

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

    print("Extracting video...")
    _extract_video(video)

    output_dir = _output_video_dir(video)
    images = listdir(output_dir)

    print("Dreaming...")
    for image in images:
        image = "{}/{}".format(output_dir, image)
        img = np.float32(img_open(image))
        img = _deepdream(
            net, img, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip)
        img_fromarray(np.uint8(img)).save(image)

    print("Creating dream video...")
    _create_video(video, frame_rate)
    print("Dream video created.")
Example #2
def deepdream_video(
        video, iter_n=10, octave_n=4, octave_scale=1.4,
        end="inception_4c/output", clip=True, network="bvlc_googlenet",
        frame_rate=24, verbose=False):

  # Select, load DNN model
  NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
  net = Classifier(
      NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

  print("Extracting video...")
  _extract_video(video)

  output_dir = _output_video_dir(video)
  images = listdir(output_dir)

  print("Dreaming...")
  for image in images:
    image = "{}/{}".format(output_dir, image)
    img = np.float32(img_open(image))
    img = _deepdream(
        net, img, iter_n=iter_n, octave_n=octave_n,
        octave_scale=octave_scale, end=end, clip=clip, verbose=verbose)
    img_fromarray(np.uint8(img)).save(image)

  print("Creating dream video...")
  _create_video(video, frame_rate)
  print("Dream video created.")
Example #3
def deepdream(
        img_path, zoom=True, scale_coefficient=0.05, irange=100, iter_n=10,
        octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True,
        network="bvlc_googlenet", gif=False, reverse=False, duration=0.1,
        loop=False, gpu=False, gpuid=0, verbose=False):
  img = np.float32(img_open(img_path))
  s = scale_coefficient
  h, w = img.shape[:2]

  if gpu:
    print("Enabling GPU {}...".format(gpuid))
    set_device(gpuid)
    set_mode_gpu()

  # Select, load DNN model
  NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
  net = Classifier(
      NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

  img_pool = [img_path]

  # Save settings used in a log file
  logging.info((
      "{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, "
      "octave_n={}, octave_scale={}, end={}, clip={}, network={}, gif={}, "
      "reverse={}, duration={}, loop={}").format(
          img_path, zoom, scale_coefficient, irange, iter_n, octave_n,
          octave_scale, end, clip, network, gif, reverse, duration, loop))

  print("Dreaming...")
  for i in xrange(irange):
    img = _deepdream(
        net, img, iter_n=iter_n, octave_n=octave_n,
        octave_scale=octave_scale, end=end, clip=clip, verbose=verbose)
    img_fromarray(np.uint8(img)).save("{}_{}.jpg".format(
        img_path, i))
    if gif:
      img_pool.append("{}_{}.jpg".format(img_path, i))
    print("Dream {} saved.".format(i))
    if zoom:
      img = affine_transform(
          img, [1 - s, 1 - s, 1], [h * s / 2, w * s / 2, 0], order=1)
  if gif:
    print("Creating gif...")
    frames = None
    if reverse:
      frames = [img_open(f) for f in img_pool[::-1]]
    else:
      frames = [img_open(f) for f in img_pool]
    writeGif(
        "{}.gif".format(img_path), frames, duration=duration,
        repeat=loop)
    print("gif created.")
Example #4
def deepdream(
        img_path, zoom=True, scale_coefficient=0.05, irange=100, iter_n=10,
        octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True,
        network="bvlc_googlenet", loc="", gif=False, reverse=False, duration=0.1,
        loop=False, gpu=False, gpuid=0):
    img = np.float32(img_open(img_path))
    s = scale_coefficient
    h, w = img.shape[:2]

    if gpu:
        print("Enabling GPU {}...".format(gpuid))
        set_device(gpuid)
        set_mode_gpu()

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network, loc)
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

    img_pool = [img_path]

    # Save settings used in a log file
    logging.info((
        "{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, "
        "octave_n={}, octave_scale={}, end={}, clip={}, network={}, gif={}, "
        "reverse={}, duration={}, loop={}").format(
            img_path, zoom, scale_coefficient, irange, iter_n, octave_n,
            octave_scale, end, clip, network, gif, reverse, duration, loop))

    print("Dreaming...")
    for i in xrange(irange):
        img = _deepdream(
            net, img, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip)
        img_fromarray(np.uint8(img)).save("{}_{}.jpg".format(
            img_path, i))
        if gif:
            img_pool.append("{}_{}.jpg".format(img_path, i))
        print("Dream {} saved.".format(i))
        if zoom:
            img = affine_transform(
                img, [1-s, 1-s, 1], [h*s/2, w*s/2, 0], order=1)
    if gif:
        print("Creating gif...")
        frames = None
        if reverse:
            frames = [img_open(f) for f in img_pool[::-1]]
        else:
            frames = [img_open(f) for f in img_pool]
        writeGif(
            "{}.gif".format(img_path), frames, duration=duration,
            repeat=loop)
        print("gif created.")
Example #5
def deepdream(
        img_path, zoom=True, scale_coefficient=0.05, irange=100, iter_n=10,
        octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True):
    img = np.float32(img_open(img_path))
    s = scale_coefficient
    h, w = img.shape[:2]

    # Load DNN model
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

    print("Dreaming...")
    for i in xrange(irange):
        img = _deepdream(
            net, img, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip)
        img_fromarray(np.uint8(img)).save("{}_{}.jpg".format(
            img_path, i))
        print("Dream {} saved.".format(i))
        if zoom:
            img = affine_transform(
                img, [1-s, 1-s, 1], [h*s/2, w*s/2, 0], order=1)
Example #6
def deepdream(
    img_path,
    zoom=True,
    scale_coefficient=0.05,
    irange=100,
    iter_n=10,
    octave_n=4,
    octave_scale=1.4,
    end="inception_4c/output",
    clip=True,
    network="bvlc_googlenet",
    gif=False,
    reverse=False,
    duration=0.1,
    loop=False,
    gpu=False,
    gpuid=0,
):
    img = np.float32(img_open(img_path))
    s = scale_coefficient
    h, w = img.shape[:2]

    if gpu:
        tqdm.write("Enabling GPU {}...".format(gpuid))
        set_device(gpuid)
        set_mode_gpu()

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
    net = Classifier(NET_FN,
                     PARAM_FN,
                     mean=CAFFE_MEAN,
                     channel_swap=CHANNEL_SWAP)

    img_pool = [img_path]

    # Save settings used in a log file
    logging.info(
        ("{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, "
         "octave_n={}, octave_scale={}, end={}, clip={}, network={}, gif={}, "
         "reverse={}, duration={}, loop={}").format(
             img_path,
             zoom,
             scale_coefficient,
             irange,
             iter_n,
             octave_n,
             octave_scale,
             end,
             clip,
             network,
             gif,
             reverse,
             duration,
             loop,
         ))

    tqdm.write("Dreaming... (ctrl-C to wake up early)")
    for i in tqdm(
            range(irange),
            bar_format="{l_bar:>20}{bar:10}{r_bar}",
            desc="Total",
            position=-1,
            unit="dream",
    ):
        img = _deepdream(
            net,
            img,
            iter_n=iter_n,
            octave_n=octave_n,
            octave_scale=octave_scale,
            end=end,
            clip=clip,
        )
        img_fromarray(np.uint8(img)).save("{}_{}_{:03d}.jpg".format(
            path.splitext(img_path)[0], re.sub('[\\/:"*?<>|]+', "_", end), i))
        if gif:
            img_pool.append("{}_{}_{:03d}.jpg".format(
                path.splitext(img_path)[0], re.sub('[\\/:"*?<>|]+', "_", end),
                i))
        tqdm.write("Dream {} of {} saved.".format(i + 1, irange))
        if zoom:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                img = affine_transform(img, [1 - s, 1 - s, 1],
                                       [h * s / 2, w * s / 2, 0],
                                       order=1)
    if gif:
        tqdm.write("Creating gif...")
        frames = None
        if reverse:
            frames = [img_open(f) for f in img_pool[::-1]]
        else:
            frames = [img_open(f) for f in img_pool]
        frames[0].save(
            "{}.gif".format(img_path),
            save_all=True,
            optimize=True,
            append_images=frames[1:],
            loop=0,
        )
        tqdm.write("gif created.")
Example #7
def main(img_name, octaves=4, octave_scale=1.4, iterations=10, jitter=32,
         step_size=1.5, layer='inception_4c/output', gpu=0, go_deeper=50,
         scale_coefficient=0.05):
    path = os.getcwd()
    
    gif_mode = 0
    
    if not os.path.exists(path+"\\gifsicle.exe"):
        print "Can't process GIFs, gifsicle.exe not found!"
        exit(1)
    if os.system("mkdir dreams"): #create a remporary file for image processing/storing
        print "temporary output folder already exists, deleting exisisting one.."
        os.system("rm -r dreams")
        os.system("mkdir dreams")
    #Use gifsicle to get frames from the gif and put it in an temporary output folder
    if not os.path.exists(img_name):
        print "No input file found!"
        exit(1)
    if img_name[-3:]=='gif':
        gif_mode = 1
        os.system("gifsicle --explode -U "+img_name+" --output frame")
        os.system("ren frame.* frame.*.jpg")
    
        if os.system("move *.jpg dreams"):
            print "Can't move files!"
            exit(1)
    # else: go-deeper mode (go_deeper param, default 50)
    
    if gpu:
        caffe.set_mode_gpu()
    else:
        print "You are using CPU mode, this might take some time"
       
    
    model_path = './caffe/models/bvlc_googlenet/' # substitute your path here
    net_fn   = model_path + 'deploy.prototxt'
    param_fn = model_path + 'bvlc_googlenet.caffemodel'
    
    """
    Other models : ( need to change default end param if you are going to use this )
    
    model_path = './caffe/models/vgg_face_caffe/' 
    net_fn   = model_path + 'VGG_FACE_deploy.prototxt'
    param_fn = model_path + 'VGG_FACE.caffemodel'
    #(example end params: 'conv1_1','conv1_2','pool1','conv2_1','conv2_2','pool2','conv3_1','conv3_2','conv3_3','pool3','conv4_1','conv4_2','conv4_3','pool4','conv5_1','conv5_2','conv5_3','pool5')
    
    model_path = './caffe/models/finetune_flickr_style/' 
    net_fn   = model_path + 'deploy.prototxt'
    param_fn = model_path + 'finetune_flickr_style.caffemodel'
    #(example end params: 'conv1','pool1','norm1','conv2','pool2','norm2','conv3','conv4','conv5','pool5')
    """
    
    # Patching model to be able to compute gradients.
    # Note that you can also manually add "force_backward: true" line to "deploy.prototxt".
    model = caffe.io.caffe_pb2.NetParameter()
    text_format.Merge(open(net_fn).read(), model)
    model.force_backward = True
    
    open('tmp.prototxt', 'w').write(str(model))

    net = caffe.Classifier('tmp.prototxt', param_fn,
                           mean = np.float32([104.0, 116.0, 122.0]), # ImageNet mean, training set dependent
                           channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB
    
    
    if gif_mode:
        cnt = 0
        dr = path+"\\dreams"
        total = len(os.listdir(dr))-1
        for i in os.listdir(dr):
            if i.endswith(".jpg"):
                frame_name =  dr+"\\"+i
                img = (PIL.Image.open(frame_name))
                img = img.convert('RGB')
                #print(img.format, img.size, img.mode) 
                dream_img = deepdream(net, np.array(img),iter_n=iterations,octave_n=octaves,octave_scale=octave_scale,end=layer,jitter=jitter,step_size=step_size)
                dream_img = img_fromarray(np.uint8(dream_img)).convert('P', palette=PIL.Image.ADAPTIVE)
                dream_img.save(dr+"\\dreamimg"+str(i)+".gif")
                os.system("rm dreams\\"+i)
                cnt+=1
                print str(cnt)+" frames completed out of "+str(total)
    else:  # go-deeper mode: takes a single JPG image and creates dreams of itself.
        img = (PIL.Image.open(path+"\\"+img_name))
        img = img.convert('RGB')
        img = np.array(img)
        frame = img
        h, w = frame.shape[:2]
        s = scale_coefficient # scale coefficient

        for i in xrange(go_deeper):
            frame = deepdream(net, frame)
            PIL.Image.fromarray(np.uint8(frame)).convert('P', palette=PIL.Image.ADAPTIVE).save(path+"\\"+"dreams\\%04d.gif"%i)
            frame = nd.affine_transform(frame, [1-s,1-s,1], [h*s/2,w*s/2,0], order=1)
            print str(i)+" frames completed out of "+str(go_deeper)
            


    os.system("gifsicle --loop=0 dreams/*.gif > "+img_name[:-4]+"-dream.gif");
    os.system("rm -r dreams");
    print "File saved as "+img_name[:-4]+"-dream.gif"
    print "Done!"
    exit(0)
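
A hedged usage sketch for main(). As written it is Windows-oriented (it expects gifsicle.exe beside the script and shells out to ren/move) and runs under Python 2; the input file names below are hypothetical:

# GIF input: every frame is extracted, dreamed, and reassembled
# into cat-dream.gif.
main("cat.gif", octaves=4, iterations=10, layer='inception_4c/output', gpu=1)

# Single JPEG input: "go deeper" mode, repeatedly dreaming and zooming
# into the image for go_deeper frames.
main("cat.jpg", go_deeper=50, scale_coefficient=0.05)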
Example #8
def deepdream(
		q, dreamname,toepath, img_path, modegpu=True, gpudevice = 0, zoom=True, scale_coefficient=0.05, irange=5, iter_n=10,
		octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True,
		network="bvlc_googlenet", gif=False, reverse=False, duration=0.1,
		loop=False):
	#logging.debug('Starting')
	if modegpu:
		caffe.set_mode_gpu()
		caffe.set_device(gpudevice)
		print("GPU mode [device id: {}]".format(gpudevice))
		print("using GPU, but you'd still better make a cup of coffee")
	else:
		caffe.set_mode_cpu()
		print("using CPU...")

	img = np.float32(img_open(toepath+"actualframe.jpg"))

	s = scale_coefficient
	h, w = img.shape[:2]

	# Select, load DNN model
	NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network, toepath)
	net = Classifier(
		NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

	img_pool = [img_path,]

	# Save settings used in a log file
	'''
	logging.info("{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, octave_n={}, octave_scale={}, end={},"\
			"clip={}, network={}, gif={}, reverse={}, duration={}, loop={}".format(
		img_path, zoom, scale_coefficient, irange, iter_n, octave_n, octave_scale, end, clip, network, gif, reverse,
		duration, loop))
	'''
	print("Dreaming...")
	for i in range(irange):
		img = _deepdream(
			net, img, iter_n=iter_n, octave_n=octave_n,
			octave_scale=octave_scale, end=end, clip=clip)
		img_fromarray(np.uint8(img)).save("{}_{}.jpg".format(
			img_path+dreamname, i))
		if gif:
			img_pool.append("{}_{}.jpg".format(img_path+dreamname, i))
		
		print("Dream layer depth {} saved.".format(i))
		print("{}_{}.jpg".format(img_path+dreamname, i))
		
		q.put([i, "{}_{}.jpg".format(img_path+dreamname, i)])

		if zoom:
			img = affine_transform(
				img, [1-s, 1-s, 1], [h*s/2, w*s/2, 0], order=1)
	if gif:
		frames = None
		if reverse:
			frames = [img_open(f) for f in img_pool[::-1]]
		else:
			frames = [img_open(f) for f in img_pool]
		writeGif(
			"{}.gif".format(img_path), frames, duration=duration,
			repeat=loop)
		print("gif created.")
	print("Weak up")
	print("¡¡Awake!!")
Example #9
def deepdream(q,
              dreamname,
              toepath,
              img_path,
              modegpu=True,
              gpudevice=0,
              zoom=True,
              scale_coefficient=0.05,
              irange=5,
              iter_n=10,
              octave_n=4,
              octave_scale=1.4,
              end="inception_4c/output",
              clip=True,
              network="bvlc_googlenet",
              gif=False,
              reverse=False,
              duration=0.1,
              loop=False):
    #logging.debug('Starting')
    if modegpu:
        caffe.set_mode_gpu()
        caffe.set_device(gpudevice)
        print("GPU mode [device id: {}]".format(gpudevice))
        print("using GPU, but you'd still better make a cup of coffee")
    else:
        caffe.set_mode_cpu()
        print("using CPU...")

    img = np.float32(img_open(toepath + "actualframe.jpg"))

    s = scale_coefficient
    h, w = img.shape[:2]

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(
        network, toepath)
    net = Classifier(NET_FN,
                     PARAM_FN,
                     mean=CAFFE_MEAN,
                     channel_swap=CHANNEL_SWAP)

    img_pool = [
        img_path,
    ]

    # Save settings used in a log file
    '''
	logging.info("{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, octave_n={}, octave_scale={}, end={},"\
			"clip={}, network={}, gif={}, reverse={}, duration={}, loop={}".format(
		img_path, zoom, scale_coefficient, irange, iter_n, octave_n, octave_scale, end, clip, network, gif, reverse,
		duration, loop))
	'''
    print("Dreaming...")
    for i in range(irange):
        img = _deepdream(net,
                         img,
                         iter_n=iter_n,
                         octave_n=octave_n,
                         octave_scale=octave_scale,
                         end=end,
                         clip=clip)
        img_fromarray(np.uint8(img)).save("{}_{}.jpg".format(
            img_path + dreamname, i))
        if gif:
            img_pool.append("{}_{}.jpg".format(img_path + dreamname, i))

        print("Dream layer depth {} saved.".format(i))
        print("{}_{}.jpg".format(img_path + dreamname, i))

        q.put([i, "{}_{}.jpg".format(img_path + dreamname, i)])

        if zoom:
            img = affine_transform(img, [1 - s, 1 - s, 1],
                                   [h * s / 2, w * s / 2, 0],
                                   order=1)
    if gif:
        frames = None
        if reverse:
            frames = [img_open(f) for f in img_pool[::-1]]
        else:
            frames = [img_open(f) for f in img_pool]
        writeGif("{}.gif".format(img_path),
                 frames,
                 duration=duration,
                 repeat=loop)
        print("gif created.")
    print("Weak up")
    print("¡¡Awake!!")