def handle_requests():
    # Flask POST endpoint: decodes a base64 JPEG frame from the form data,
    # blends it with the previous dreamed frame for the same client 'guid',
    # runs a single deepdream pass, and returns the result as a data URI.
    # NOTE(review): relies on module globals `request`, `net`, `deepdream`,
    # `lastFrames` (per-client frame cache) — confirm they are initialised
    # before the server starts handling traffic.
    global lastFrames
    print 'handling request'
    if request.method == 'POST':
        if 'buffer' in request.form:
            # print request.form
            # img = np.float32(PIL.Image.open(BytesIO(base64.b64decode(request.form['buffer'].partition('base64, ')[2]))))
            # img = np.float32(PIL.Image.open(StringIO(request.form['buffer'])))
            # print request.form['buffer']
            # Strip the data-URI prefix, then decode the JPEG into a float array.
            frame = np.float32(PIL.Image.open(BytesIO(base64.b64decode(request.form['buffer'].partition('data:image/jpeg;base64,')[2]))))
            h, w = frame.shape[:2]
            # First frame seen from this client: start the blend from black.
            if not request.form['guid'] in lastFrames:
                lastFrames[request.form['guid']] = np.zeros(frame.shape)
            # Temporal blend of the new frame with the previous dreamed frame.
            # NOTE(review): weights sum to 1.2, so brightness can drift upward
            # across successive frames — confirm this is intentional.
            frame = np.add(frame * 0.9, lastFrames[request.form['guid']] * 0.3)
            # frame -= lastFrames[request.environ['REMOTE_ADDR']]
            # only in dreams
            # NOTE(review): net.blobs.keys()[42] hard-codes the target layer by
            # position — fragile if the model definition changes.
            frame = deepdream(net, frame, iter_n=1, octave_n=5, end=net.blobs.keys()[42])
            # Remember this dreamed frame for the next request from this guid.
            lastFrames[request.form['guid']] = frame
            # if lastFrames != None:
            #     frame = np.add(frame, lastFrames[request.environ['REMOTE_ADDR']])
            # import code
            # code.interact(local=locals())
            # PIL.Image.fromarray(np.uint8(frame)).save(filename)
            # Re-encode the dreamed frame as JPEG and return it as a data URI.
            buf = StringIO()
            pil = PIL.Image.fromarray(np.uint8(frame))
            pil.save(buf, format='jpeg')
            buf.seek(0)
            return 'data:image/jpeg;base64,' + base64.b64encode(buf.getvalue())
        else:
            return 'no data'
    else:
        return 'what is this'
def deepDreamMaker(killProcess, inputImg, outputLoc, args, progressBarQueue = None, previewImgQueue=None):
    """Load an image, run deepdream over it with the given settings, and save the result.

    killProcess      -- shared flag consulted by deepdream to allow early abort
    inputImg         -- path (or file object) of the source image
    outputLoc        -- destination handed to saveDream
    args             -- dict of dream parameters (iterations, octaves, ...)
    progressBarQueue -- optional queue for progress updates
    previewImgQueue  -- optional queue for preview images
    """
    # Decode to RGB and promote to float32 for the dream pass.
    src = PIL.Image.open(inputImg).convert("RGB")
    pixels = np.float32(src)

    # Run Google's deepdream with the user-supplied knobs.
    result = deepdream(
        killProcess, net, pixels,
        args["iterations"], args["octaves"], args["octaveScale"],
        args["layers"], True,
        progressBarQueue, previewImgQueue,
        args["jitter"], args["stepSize"])

    saveDream(result, outputLoc)
def main(args):
    """Dream over a directory of .jpg frames, guiding each frame by the previous output.

    Resumes from the first frame whose output file does not yet exist; when
    resuming, the objective is guided by the last processed input frame.
    """
    input_dir = args.input_dir
    output_dir = args.output_dir
    layer = args.layer
    amplify = args.amplify
    model_dir = args.model_dir
    prototxt = args.prototxt
    caffemodel = args.caffemodel

    # Create the output directory, tolerating that it already exists.
    # FIX: the original bare `except: pass` swallowed every error (including
    # permission failures); only ignore OSError when the directory is there.
    try:
        os.makedirs(output_dir)
    except OSError:
        if not os.path.isdir(output_dir):
            raise

    net, model = make_net(model_dir=model_dir, prototxt=prototxt, caffemodel=caffemodel)

    # verify model name provided
    if layer not in net.blobs.keys():
        sys.stderr.write('Invalid model name: %s' % (layer,) + '\n')
        sys.stderr.write('Valid models are:' + repr(net.blobs.keys()) + '\n')
        sys.exit(1)

    ###
    files = sorted(v for v in os.listdir(input_dir) if v.endswith('.jpg'))

    # scan existing output images: resume at the first missing one
    i = 0
    while i < len(files):
        f = files[i]
        output_file = os.path.join(output_dir, f)
        if not os.path.exists(output_file):
            break
        i += 1

    if i > 0:
        # make guide function from last image
        guide_image = np.float32(PIL.Image.open(os.path.join(input_dir, files[i-1])))
        objective = make_objective_guided(net, layer, guide_image)
    else:
        # make initial L2 guide function
        objective = objective_L2

    # start next images
    check2 = nperf.nperf(interval = 30.0, maxcount = (len(files) - i) * amplify)
    if os.getenv('USE_CUDA'):
        perf_tag2 = '[cuda] framedream'
    else:
        perf_tag2 = '[cpu] framedream'

    print('####################################')
    print('# loop starts from', i)
    print('####################################')
    while i < len(files):
        f = files[i]
        input_file = os.path.join(input_dir, f)
        output_file = os.path.join(output_dir, f)
        frame = np.float32(PIL.Image.open(input_file))
        print("processing:", f, frame.shape, layer, amplify)
        for short_i in xrange(amplify):
            frame = deepdream(net, frame, end=layer, objective=objective)
            check2(perf_tag2)
        # use this frame as guide image for next iteration
        objective = make_objective_guided(net, layer, frame)
        PIL.Image.fromarray(np.uint8(frame)).save(output_file)
        i += 1
# Render one dreamed image per network blob, resumable across sessions.
# NOTE(review): depends on `net`, `img`, `amplify`, `output_dir`,
# `blacklist_layers`, `objective_L2` defined earlier in the file — confirm.
check2 = nperf.nperf(interval = 30.0, maxcount = (len(net.blobs.keys()) * amplify))
i = 1
for layer in net.blobs.keys():
    # Only blobs that correspond to actual named layers are dreamable.
    if layer in net._layer_names:
        try:
            # '/' is not filesystem-safe, so flatten it for the filename.
            output_file = '%03d_%s.jpg' % (i, layer.replace('/', '_'),)
            # skip if already created in prev session
            if os.path.exists(os.path.join(output_dir,output_file)):
                i += 1
                continue
            if layer in blacklist_layers:
                # NOTE(review): blacklisted layers do not advance `i`, so the
                # numeric prefixes shift whenever the blacklist changes —
                # confirm this interacts sanely with the resume check above.
                print('skip:', layer)
                continue
            print('layer:', layer, output_file)
            # Always dream from a fresh copy of the source image.
            frame = img.copy()
            for amplify_i in xrange(amplify):
                frame = deepdream(net, frame, end=layer, objective=objective_L2)
                check2("dreamlayers")
            PIL.Image.fromarray(np.uint8(frame)).save(os.path.join(output_dir,output_file))
            i += 1
        except KeyboardInterrupt:
            sys.exit(1)
        except:
            # NOTE(review): bare except — deliberately best-effort per layer;
            # failures are printed and the scan continues with the next blob.
            print_exc()
make_catalogue(output_dir)
# vim: set sw=4 sts=4 ts=8 et ft=python
# see ya on the other side frame = img h, w = frame.shape[:2] s = args.scaleCoef # scale coefficient # run all blobs, adopted from script by Cranial_Vault if args.blob == 'all': PIL.Image.fromarray(np.uint8(frame)).save(framepath+'/source.'+ext) j = 0 for blob in net.blobs.keys(): safeblob = blob.replace('/', '-') try: # if we've already generated this image, then don't bother if not os.path.exists(framepath+'/'+safeblob+'.'+ext): frame = deepdream(net, img, end=blob) PIL.Image.fromarray(np.uint8(frame)).save(framepath+'/'+safeblob+'.'+ext) print j, str(blob) else: print 'skipping', blob, 'because the output file already exists' except ValueError as err: print 'ValueError:', str(blob), err pass except KeyError as err: print 'KeyError:', str(blob), err else: safeblob = args.blob.replace('/', '-') for i in xrange(args.iterations): # save the original as 000.ext and hallucinations as 00i.ext # this also checks the save path so that we don't crash after 1 deepdream
# Default optimisation target for the --layer option.
layer = 'inception_4c/output'

if __name__ == "__main__":
    import argparse

    # Command-line front end: dream over one image repeatedly, zooming in a
    # little between iterations to produce a "fly-in" frame sequence.
    parser = argparse.ArgumentParser(description='Computer dreams')
    parser.add_argument('image', metavar='IMAGE', help='the image to dream about')
    parser.add_argument('--model', dest='model', default='bvlc_googlenet', help='the model to use')
    parser.add_argument('--layer', dest='layer', default=layer, help='the layer to optimise')
    parser.add_argument('--scale', dest='scale', type=float, default=0.05, help='the scale coefficient')
    parser.add_argument('--iterations', dest='iterations', type=int, default=100, help='the number of iterations to run')
    parser.add_argument('--output', dest='output', default='output', help='the directory to output frames to')
    args = parser.parse_args()

    net = loadnet(args.model)
    frame = np.float32(PIL.Image.open(args.image))
    h, w = frame.shape[:2]
    s = args.scale
    for i in xrange(args.iterations):
        # Dream, save the frame, then zoom toward the centre for the next pass.
        frame = deepdream(net, frame, args.layer)
        saveimage(frame, os.path.join(args.output, "frame-%04d" % i))
        shrink = [1-s, 1-s, 1]
        recentre = [h*s/2, w*s/2, 0]
        frame = nd.affine_transform(frame, shrink, recentre, order=1)
# NOTE(review): this chunk opens mid-statement — the opening
# `parser.add_argument('--model', dest='model',` line is outside this view.
                    default='bvlc_googlenet', help='the model to use')
parser.add_argument('--output', dest='output',
                    default='output', help='the directory to output results to')
args = parser.parse_args()
# Load the source image and derive the output filename stem from it.
image = np.float32(PIL.Image.open(args.image))
(name, ext) = os.path.splitext(os.path.basename(args.image))
model = loadmodel(args.model)
net = loadnet(args.model)
# Dream every usable layer of the model into its own output image.
for layer in model.layer:
    # Seem to be invalid somehow
    if layer.type in ["Dropout", "ReLU"]:
        continue
    # Cause crashes
    if layer.type in ["Softmax", "InnerProduct"]:
        continue
    # NOTE(review): pool == 1 is presumably average pooling in the caffe
    # proto enum — confirm against the model's .proto definition.
    if layer.type == "Pooling" and layer.pooling_param.pool == 1:
        continue
    frame = deepdream(net, image, layer.name)
    # Output name: <image-stem>_<layer-name-with-slashes-dashed>
    saveimage(
        frame,
        os.path.join(args.output, name + '_' + '-'.join(layer.name.split('/'))))
if __name__ == "__main__":
    import argparse

    # Command-line front end: dream once per usable layer of the model and
    # save each result under a name derived from the layer.
    parser = argparse.ArgumentParser(description='Computer dreams')
    parser.add_argument('image', metavar='IMAGE', help='the image to dream about')
    parser.add_argument('--model', dest='model', default='bvlc_googlenet', help='the model to use')
    parser.add_argument('--output', dest='output', default='output', help='the directory to output results to')
    args = parser.parse_args()

    image = np.float32(PIL.Image.open(args.image))
    (name, ext) = os.path.splitext(os.path.basename(args.image))
    model = loadmodel(args.model)
    net = loadnet(args.model)

    def _should_skip(layer):
        # Layers that are invalid for dreaming or crash the dream pass.
        if layer.type in ["Dropout", "ReLU"]:
            return True
        if layer.type in ["Softmax", "InnerProduct"]:
            return True
        return layer.type == "Pooling" and layer.pooling_param.pool == 1

    for layer in model.layer:
        if _should_skip(layer):
            continue
        frame = deepdream(net, image, layer.name)
        # Output name: <image-stem>_<layer-name-with-slashes-dashed>
        out_name = name + '_' + '-'.join(layer.name.split('/'))
        saveimage(frame, os.path.join(args.output, out_name))
def main(args):
    """Iteratively deepdream a single image, zooming in between iterations.

    Writes numbered JPEGs (0001.jpg, 0002.jpg, ...) into args.output_dir and
    resumes from the first frame index not already present on disk.
    """
    input_file = args.input_file
    output_dir = args.output_dir
    layer = args.layer
    iterations = args.iterations
    scale = args.scale
    guide = args.guide
    model_dir = args.model_dir
    prototxt = args.prototxt
    caffemodel = args.caffemodel

    # Create the output directory, tolerating that it already exists.
    # FIX: the original bare `except: pass` swallowed every error (including
    # permission failures); only ignore OSError when the directory is there.
    try:
        os.makedirs(output_dir)
    except OSError:
        if not os.path.isdir(output_dir):
            raise

    print("Processing file: " + input_file)
    print("Iterations = %s" % iterations)
    print("Scale = %s" % scale)
    print("Model = %s" % layer)

    net, model = make_net(model_dir=model_dir, prototxt=prototxt, caffemodel=caffemodel)
    #prototxt = os.path.join(output_dir, 'prototxt')
    #open(prototxt, 'w').write(str(model))

    # verify model name provided
    if layer not in net.blobs.keys():
        sys.stderr.write('Invalid model name: %s' % (layer,) + '\n')
        sys.stderr.write('Valid models are:' + repr(net.blobs.keys()) + '\n')
        sys.exit(1)

    if guide:
        # Steer the dream toward features of the guide image.
        guide_image = np.float32(PIL.Image.open(guide))
        objective = make_objective_guided(net, layer, guide_image)
    else:
        objective = objective_L2

    ###
    def output_fn(i):
        # Frame i's path, zero-padded so the files sort correctly.
        return os.path.join(output_dir, '%04d.jpg' % (i,))

    # Resume: find the first frame index not yet rendered.
    frame = None
    i = 1
    while i < iterations + 1:
        if not os.path.exists(output_fn(i)):
            break
        i += 1

    if i > 1:
        # Continue from the last rendered frame, re-applying the zoom step.
        frame = np.float32(PIL.Image.open(output_fn(i-1)))
        h, w = frame.shape[:2]
        frame = nd.affine_transform(frame, [1-scale,1-scale,1], [h*scale/2,w*scale/2,0], order=1)
    else:
        frame = np.float32(PIL.Image.open(input_file))
        h, w = frame.shape[:2]
        PIL.Image.fromarray(np.uint8(frame)).save(output_fn(1))

    # start next images
    check2 = nperf.nperf(interval = 30.0, maxcount = (iterations - i + 1))
    if os.getenv('USE_CUDA'):
        perf_tag2 = '[cuda] fastdream'
    else:
        perf_tag2 = '[cpu] fastdream'

    print('####################################')
    print('# loop starts from', i)
    print('####################################')
    while i <= iterations:
        frame = deepdream(net, frame, end=layer, objective=objective)
        PIL.Image.fromarray(np.uint8(frame)).save(output_fn(i))
        # affine transform (zoom-in) before feed as next step input
        frame = nd.affine_transform(frame, [1 - scale, 1 - scale, 1],
                                    [h * scale / 2, w * scale / 2, 0], order=1)
        check2(perf_tag2)
        i += 1
# NOTE(review): this chunk opens mid-statement — the opening
# `parser.add_argument('--layer', dest='layer',` line is outside this view.
                    default=layer, help='the layer to optimise')
parser.add_argument('--scale', dest='scale', type=float,
                    default=0.05, help='the scale coefficient')
parser.add_argument('--iterations', dest='iterations', type=int,
                    default=100, help='the number of iterations to run')
parser.add_argument('--output', dest='output',
                    default='output', help='the directory to output frames to')
args = parser.parse_args()
frame = np.float32(PIL.Image.open(args.image))
net = loadnet(args.model)
h, w = frame.shape[:2]
s = args.scale
for i in xrange(args.iterations):
    # Dream, save the frame, then zoom toward the centre so the next
    # iteration "flies into" the image.
    frame = deepdream(net, frame, args.layer)
    saveimage(frame, os.path.join(args.output, "frame-%04d" % i))
    frame = nd.affine_transform(frame, [1 - s, 1 - s, 1],
                                [h * s / 2, w * s / 2, 0], order=1)