예제 #1
0
def deepdream(
        img_path, zoom=True, scale_coefficient=0.05, irange=100, iter_n=10,
        octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True,
        network="bvlc_googlenet", loc="", gif=False, reverse=False, duration=0.1,
        loop=False, gpu=False, gpuid=0):
    """Run iterated deep-dream on the image at ``img_path``.

    Each iteration feeds the previous dream back through the network and
    saves the result as ``<img_path>_<i>.jpg``.  Optionally zooms in a
    little between iterations (``zoom``/``scale_coefficient``) and/or
    assembles all saved frames into an animated gif at the end.

    ``network``/``loc`` select the Caffe model via ``_select_network``;
    ``gpu``/``gpuid`` switch Caffe to GPU mode before loading it.
    """
    img = np.float32(img_open(img_path))
    s = scale_coefficient
    h, w = img.shape[:2]

    if gpu:
        print("Enabling GPU {}...".format(gpuid))
        set_device(gpuid)
        set_mode_gpu()

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network, loc)
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

    img_pool = [img_path]

    # Save settings used in a log file
    logging.info((
        "{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, "
        "octave_n={}, octave_scale={}, end={}, clip={}, network={}, gif={}, "
        "reverse={}, duration={}, loop={}").format(
            img_path, zoom, scale_coefficient, irange, iter_n, octave_n,
            octave_scale, end, clip, network, gif, reverse, duration, loop))

    print("Dreaming...")
    # Was ``xrange`` (Python 2 only); ``range`` is equivalent here and
    # works on Python 3, which other parts of this file already require.
    for i in range(irange):
        img = _deepdream(
            net, img, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip)
        # Compute the frame path once instead of rebuilding the string twice.
        frame_path = "{}_{}.jpg".format(img_path, i)
        img_fromarray(np.uint8(img)).save(frame_path)
        if gif:
            img_pool.append(frame_path)
        print("Dream {} saved.".format(i))
        if zoom:
            # Shrink by (1-s) and shift so successive dreams zoom in
            # toward the center of the image.
            img = affine_transform(
                img, [1-s, 1-s, 1], [h*s/2, w*s/2, 0], order=1)
    if gif:
        print("Creating gif...")
        # Single load loop; reversal is just a different path order.
        paths = img_pool[::-1] if reverse else img_pool
        frames = [img_open(f) for f in paths]
        writeGif(
            "{}.gif".format(img_path), frames, duration=duration,
            repeat=loop)
        print("gif created.")
예제 #2
0
def deepdream_video(
        video, iter_n=10, octave_n=4, octave_scale=1.4,
        end="inception_4c/output", clip=True, network="bvlc_googlenet",
        frame_rate=24):
    """Deep-dream every extracted frame of ``video`` in place, then
    reassemble the dreamed frames into a new video at ``frame_rate``.
    """

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

    print("Extracting video...")
    _extract_video(video)

    output_dir = _output_video_dir(video)

    print("Dreaming...")
    for name in listdir(output_dir):
        # Dream each frame and overwrite it with the result.
        frame_path = "{}/{}".format(output_dir, name)
        frame = np.float32(img_open(frame_path))
        dreamed = _deepdream(
            net, frame, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip)
        img_fromarray(np.uint8(dreamed)).save(frame_path)

    print("Creating dream video...")
    _create_video(video, frame_rate)
    print("Dream video created.")
예제 #3
0
def get_png_photo(
    png_factor: int = 9
) -> typing.Tuple[typing.Optional[Image], typing.List[str]]:
    """Get image from web camera.
    apt-get install fswebcam
    """
    # Capture to a unique temp file so parallel calls can't collide.
    img_path = f"/tmp/{uuid.uuid4().hex}.png"
    cmd = [
        "/usr/bin/fswebcam",
        "-r",
        RESOLUTION,
        "--no-banner",
        "--device",
        f"/dev/{DEVICE}",
        "--png",
        f"{png_factor}",
        img_path,
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    lines = result.stdout.split("\n")
    image = None
    if os.path.exists(img_path):
        # Load the capture, then remove the temp file.
        image = img_open(img_path)
        os.remove(img_path)
    return image, lines
예제 #4
0
    def compare_scenes(self, img_data=None):
        """Compare ``img_data`` (raw image bytes; defaults to the current
        ``self.screen`` capture) against every registered scene, fire the
        scenes-update callback, and return the most accurate scene.
        """
        if img_data is None:
            img_data = self.screen
        # ``with`` guarantees the buffer is closed even when a compare
        # raises; the original only closed it on the success path.
        with BytesIO(img_data) as io_img:
            img = img_open(io_img)
            for scene in self._scenes.values():
                scene.compare(img)

        most_acc_scene = self.most_acc_scene
        self.callback_scenes_update(most_acc_scene)

        return most_acc_scene
예제 #5
0
def avatar(user, **kwargs):
    """Return the URL of ``user``'s earliest avatar.

    Everything after the first ``return`` in the original was unreachable
    dead code (alternate HttpResponse/JPEG implementations), and a stray
    ``'''`` at the end left an unterminated string that broke parsing;
    both have been removed.  ``**kwargs`` is accepted for caller
    compatibility but unused.
    """
    avatar = user.user_avatar.earliest()
    return avatar.avatar.url
예제 #6
0
def deepdream(
        img_path, zoom=True, scale_coefficient=0.05, irange=100, iter_n=10,
        octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True):
    """Iteratively deep-dream the image at ``img_path``.

    Each iteration feeds the previous result back through the network and
    saves it as ``<img_path>_<i>.jpg``.  When ``zoom`` is set, the image is
    shrunk slightly between iterations so successive dreams dive deeper.

    NOTE(review): ``xrange`` below is Python 2 only — confirm the target
    runtime, since other snippets in this file use Python-3-only syntax.
    """
    img = np.float32(img_open(img_path))
    s = scale_coefficient  # fraction of the image trimmed per zoom step
    h, w = img.shape[:2]

    # Load DNN model (NET_FN / PARAM_FN / CAFFE_MEAN / CHANNEL_SWAP are
    # module-level constants defined elsewhere in the file)
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

    print("Dreaming...")
    for i in xrange(irange):
        img = _deepdream(
            net, img, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip)
        img_fromarray(np.uint8(img)).save("{}_{}.jpg".format(
            img_path, i))
        print("Dream {} saved.".format(i))
        if zoom:
            # Shrink by (1-s) and shift so the crop stays centered.
            img = affine_transform(
                img, [1-s, 1-s, 1], [h*s/2, w*s/2, 0], order=1)
예제 #7
0
 def load_from_maker(self, path_dir: str):
     """Rebuild ``self._scenes`` from a Maker project directory.

     Opens each scene's backing image once and loads every feature and
     object crop from it before moving on to the next scene.
     """
     project = Project.open(path_dir)
     scenes = {}
     for name, scene in project.scenes.items():
         scene: MakerSceneModel
         # ``with`` guarantees the file handle is closed even when a
         # load_image/load_data call raises; the original leaked the
         # handle on any error inside the loop.
         with open(scene.img_path, 'rb') as io_img:
             img = img_open(io_img)  # type: Image
             new_scene = M.SceneModel(self._event)
             new_scene.name = scene.name
             for feature in scene.features:
                 new_feature = M.FeatureModel()
                 new_feature.load_data(**feature.data)
                 new_feature.img.load_image(img, *feature.rect)
                 new_scene.features.append(new_feature)
             for object_ in scene.objects:
                 new_object = M.ObjectModel()
                 new_object.load_data(**object_.data)
                 new_object.img.load_image(img, *object_.rect)
                 new_scene.objects.append(new_object)
         scenes[name] = new_scene
     self._scenes = scenes
예제 #8
0
 def load_image(self):
     """Open ``self.file_name`` and store it on ``self.img`` converted
     to the mode configured in ``self._mode_``."""
     raw = img_open(self.file_name)
     self.img = raw.convert(self._mode_)
예제 #9
0
def deepdream(
		q, dreamname,toepath, img_path, modegpu=True, gpudevice = 0, zoom=True, scale_coefficient=0.05, irange=5, iter_n=10,
		octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True,
		network="bvlc_googlenet", gif=False, reverse=False, duration=0.1,
		loop=False):
	"""Iteratively deep-dream ``<toepath>actualframe.jpg``.

	Each iteration is saved as ``<img_path><dreamname>_<i>.jpg`` and pushed
	onto the queue ``q`` as ``[i, path]`` so a consumer can track progress.
	Optionally zooms between iterations and writes an animated gif at the
	end.  ``modegpu``/``gpudevice`` choose Caffe's GPU or CPU mode.
	"""
	#logging.debug('Starting')
	if modegpu:
		caffe.set_mode_gpu()
		caffe.set_device(gpudevice)
		print("GPU mode [device id: {}]".format(gpudevice))
		print("using GPU, but you'd still better make a cup of coffee")
	else:
		caffe.set_mode_cpu()
		print("using CPU...")

	img = np.float32(img_open(toepath+"actualframe.jpg"))

	s = scale_coefficient  # fraction of the image trimmed per zoom step
	h, w = img.shape[:2]

	# Select, load DNN model
	NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network, toepath)
	net = Classifier(
		NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

	img_pool = [img_path,]

	# Settings logging kept disabled (dead string literal) below.
	'''
	logging.info("{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, octave_n={}, octave_scale={}, end={},"\
			"clip={}, network={}, gif={}, reverse={}, duration={}, loop={}".format(
		img_path, zoom, scale_coefficient, irange, iter_n, octave_n, octave_scale, end, clip, network, gif, reverse,
		duration, loop))
	'''
	print("Dreaming...")
	for i in range(irange):
		img = _deepdream(
			net, img, iter_n=iter_n, octave_n=octave_n,
			octave_scale=octave_scale, end=end, clip=clip)
		img_fromarray(np.uint8(img)).save("{}_{}.jpg".format(
			img_path+dreamname, i))
		if gif:
			img_pool.append("{}_{}.jpg".format(img_path+dreamname, i))
		
		print("Dream layer depth {} saved.".format(i))
		print("{}_{}.jpg".format(img_path+dreamname, i))
		
		# Report this frame to the consumer on the other end of the queue.
		q.put([i, "{}_{}.jpg".format(img_path+dreamname, i)])

		if zoom:
			# Shrink by (1-s) and shift so the crop stays centered.
			img = affine_transform(
				img, [1-s, 1-s, 1], [h*s/2, w*s/2, 0], order=1)
	if gif:
		frames = None
		if reverse:
			frames = [img_open(f) for f in img_pool[::-1]]
		else:
			frames = [img_open(f) for f in img_pool]
		writeGif(
			"{}.gif".format(img_path), frames, duration=duration,
			repeat=loop)
		print("gif created.")
	print("Weak up")
	print("¡¡Awake!!")
예제 #10
0
def get_image_last_area() -> Image:
    """Return the last captured area image, loaded from ``PATH_ACTUAL_IMG``."""
    return img_open(PATH_ACTUAL_IMG)
예제 #11
0
def deepdream(q,
              dreamname,
              toepath,
              img_path,
              modegpu=True,
              gpudevice=0,
              zoom=True,
              scale_coefficient=0.05,
              irange=5,
              iter_n=10,
              octave_n=4,
              octave_scale=1.4,
              end="inception_4c/output",
              clip=True,
              network="bvlc_googlenet",
              gif=False,
              reverse=False,
              duration=0.1,
              loop=False):
    """Iteratively deep-dream ``<toepath>actualframe.jpg``.

    Each iteration is saved as ``<img_path><dreamname>_<i>.jpg`` and pushed
    onto the queue ``q`` as ``[i, path]`` so a consumer can track progress.
    Optionally zooms between iterations and writes an animated gif at the
    end.  ``modegpu``/``gpudevice`` choose Caffe's GPU or CPU mode.
    """
    if modegpu:
        caffe.set_mode_gpu()
        caffe.set_device(gpudevice)
        print("GPU mode [device id: {}]".format(gpudevice))
        print("using GPU, but you'd still better make a cup of coffee")
    else:
        caffe.set_mode_cpu()
        print("using CPU...")

    img = np.float32(img_open(toepath + "actualframe.jpg"))

    s = scale_coefficient  # fraction of the image trimmed per zoom step
    h, w = img.shape[:2]

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(
        network, toepath)
    net = Classifier(NET_FN,
                     PARAM_FN,
                     mean=CAFFE_MEAN,
                     channel_swap=CHANNEL_SWAP)

    img_pool = [
        img_path,
    ]

    print("Dreaming...")
    for i in range(irange):
        img = _deepdream(net,
                         img,
                         iter_n=iter_n,
                         octave_n=octave_n,
                         octave_scale=octave_scale,
                         end=end,
                         clip=clip)
        # The frame path was previously rebuilt four times per iteration;
        # compute it once and reuse it.
        frame_path = "{}_{}.jpg".format(img_path + dreamname, i)
        img_fromarray(np.uint8(img)).save(frame_path)
        if gif:
            img_pool.append(frame_path)

        print("Dream layer depth {} saved.".format(i))
        print(frame_path)

        # Report this frame to the consumer on the other end of the queue.
        q.put([i, frame_path])

        if zoom:
            # Shrink by (1-s) and shift so the crop stays centered.
            img = affine_transform(img, [1 - s, 1 - s, 1],
                                   [h * s / 2, w * s / 2, 0],
                                   order=1)
    if gif:
        # Single load loop; reversal is just a different path order.
        paths = img_pool[::-1] if reverse else img_pool
        frames = [img_open(f) for f in paths]
        writeGif("{}.gif".format(img_path),
                 frames,
                 duration=duration,
                 repeat=loop)
        print("gif created.")
    print("Weak up")
    print("¡¡Awake!!")