def handle(self, *args, **options):
        height = 16
        bar = np.zeros((256, height, 3))

        for i in xrange(256):
            t = i / 255.0
            lab0 = np.array([40, 30, -70])
            lab1 = np.array([70, 30, 70])
            lab = t * lab0 + (1 - t) * lab1
            rgb = np.clip(LabColor(*lab).convert_to('rgb').get_value_tuple(), 0, 255)
            for j in xrange(height):
                bar[i, j, :] = rgb / 255.0

        image = numpy_to_pil(bar)
        image.save('bar.png')
        print "Saved to bar.png"
Example 3
def open_multilayer_exr(filename, tonemap=False, thumb_size=0, show_progress=False):
    """
    Load a multilayer OpenEXR file and return a dictionary mapping layer names to
    either numpy float32 arrays (if ``tonemap=False``) or 8-bit PIL images
    (if ``tonemap=True``).

    :param filename: string filename
    :param tonemap: if ``True``, map to sRGB
    :param thumb_size: if nonzero, resize images to have this as their max dimension
    :param show_progress: if ``True``, print info about loading
    """

    if show_progress:
        print "Reading %s: %s layers..." % (filename, len(LAYER_CHANNELS))

    # Open the input file
    f = OpenEXR.InputFile(filename)
    header = f.header()

    # Compute the size
    dw = header['dataWindow']
    cols, rows = dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1

    multilayer = {}

    # load channels
    FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)
    for key, channels in LAYER_CHANNELS.iteritems():
        print "Loading layer %s..." % key
        image = np.empty((rows, cols, 3), dtype=np.float32)
        for (i, c) in enumerate(channels):
            data = f.channel(c, FLOAT)
            image[:, :, i] = np.fromstring(data, dtype=np.float32) \
                .reshape((rows, cols))
        multilayer[key] = image

    #if denoise:
        #for t in ["diff", "gloss", "trans"]:
            #print "Denoising layer %s..." % t
            #multilayer["%s_ind" % t] = denoise_indirect_image(multilayer["%s_ind" % t])

        ## recompute combined image using denoised layer
        #multilayer["combined"] = multilayer["emit"] + multilayer["env"]
        #for t in ["diff", "gloss", "trans"]:
            #multilayer["combined"] += (
                #multilayer["%s_col" % t] * (multilayer["%s_dir" % t] + multilayer["%s_ind" % t])
            #)

    # resize and tonemap
    if tonemap:
        for key, channels in LAYER_CHANNELS.iteritems():
            print "Tonemapping layer %s..." % key
            image = multilayer[key]
            if key == "depth":
                image /= np.max(image[np.isfinite(image)])

            # convert to sRGB PIL
            image = numpy_to_pil(rgb_to_srgb(np.clip(image, 0.0, 1.0)))
            if thumb_size and key != "combined":
                image = ResizeToFit(thumb_size, thumb_size).process(image)
            multilayer[key] = image

    return multilayer
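Note that ``np.fromstring`` is deprecated in recent NumPy releases; ``np.frombuffer`` decodes the same raw channel bytes. A minimal, hedged sketch of reading a single channel on its own (the file path 'scene.exr' and channel name 'R' are placeholders, not values from the code above):

# Hedged sketch: decode a single EXR channel into a 2D float32 array.
import numpy as np
import OpenEXR
import Imath

f = OpenEXR.InputFile('scene.exr')
dw = f.header()['dataWindow']
cols = dw.max.x - dw.min.x + 1
rows = dw.max.y - dw.min.y + 1

FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)
data = f.channel('R', FLOAT)
# np.frombuffer replaces the deprecated np.fromstring used above
channel = np.frombuffer(data, dtype=np.float32).reshape((rows, cols))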
Example 4
def intrinsic_decomposition_task(photo_id, algorithm_id, task_version=0):
    """
    Decompose an image with a given algorithm and set of parameters.  The image
    is resized to fit in a 512 by 512 box.  The resize operation happens in the
    file's storage colorspace (likely sRGB).

    :param photo_id: ``photo.id``

    :param algorithm_id: ``algorithm.id``
    """

    #algorithm, _ = IntrinsicImagesAlgorithm.objects.get_or_create(
    #slug=algorithm_slug, parameters=json.dumps(parameters, sort_keys=True),
    #baseline=algorithm_slug.startswith('baseline_'))

    algorithm = IntrinsicImagesAlgorithm.objects.get(id=algorithm_id)
    parameters = json.loads(algorithm.parameters)

    if task_version != algorithm.task_version:
        print "Version changed (%s --> %s): exiting" % (task_version,
                                                        algorithm.task_version)
        return
    elif not algorithm.active:
        print "Algorithm not active: %s %s: exiting" % (algorithm.slug,
                                                        algorithm.parameters)
        return
    elif IntrinsicImagesDecomposition.objects.filter(
            photo_id=photo_id, algorithm=algorithm).exists():
        print "Already decomposed: photo_id: %s, algorithm: %s %s: exiting" % (
            photo_id, algorithm.slug, algorithm.parameters)
        return

    print 'intrinsic_decomposition_task: photo_id: %s, slug: %s, parameters: %s' % (
        photo_id, algorithm.slug, parameters)

    # download image
    photo = Photo.objects.get(id=photo_id)
    image = ResizeToFit(512, 512).process(photo.open_image(width='orig'))

    # decompose
    import intrinsic.algorithm
    func = getattr(intrinsic.algorithm, algorithm.slug)
    r, s, runtime = func(image, **parameters)
    r = numpy_to_pil(r)
    s = numpy_to_pil(s)

    # save: use atomic so that if the image save fails, the record is not kept
    with transaction.atomic():
        decomposition, _ = IntrinsicImagesDecomposition.objects \
            .get_or_create(photo_id=photo_id, algorithm=algorithm)

        decomposition.runtime = runtime
        save_obj_attr_image(decomposition,
                            attr='reflectance_image',
                            img=r,
                            format='png',
                            save=False)
        save_obj_attr_image(decomposition,
                            attr='shading_image',
                            img=s,
                            format='png',
                            save=False)

        from intrinsic.evaluation import evaluate_error
        update_kwargs = evaluate_error(photo_id, r)
        for k, v in update_kwargs.iteritems():
            setattr(decomposition, k, v)

        decomposition.save()
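The algorithm itself is resolved with ``getattr(intrinsic.algorithm, algorithm.slug)``, so any function that accepts an image plus keyword parameters and returns ``(reflectance, shading, runtime)`` can be registered under a slug. Below is a minimal sketch of such a function; the name ``baseline_constant_shading`` and its ``shading_value`` parameter are hypothetical.

# Hedged sketch of the callable interface intrinsic_decomposition_task expects:
# func(image, **parameters) -> (reflectance, shading, runtime).
# The function name and the 'shading_value' parameter are made up for illustration.
import timeit
import numpy as np

def baseline_constant_shading(image, shading_value=0.5):
    start = timeit.default_timer()
    rgb = np.asarray(image, dtype=np.float64) / 255.0
    s = np.full(rgb.shape[:2], shading_value)        # flat shading layer
    r = rgb.mean(axis=2) / max(shading_value, 1e-6)  # reflectance = intensity / shading
    runtime = timeit.default_timer() - start
    return r, s, runtime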
Example 5
def _run_algorithm(photo, function, slug, parameters={},
                   image_size=512, baseline=False):
    """ Sets up an algorithm in the database, calls ``function``, then stores
    the result in the database """

    if not isinstance(photo, Photo):
        time_start = timeit.default_timer()
        r, s = function(image=photo, **parameters)
        time_end = timeit.default_timer()
        runtime = time_end - time_start
        r, s = [process_layer(x) for x in (r, s)]
        return r, s, runtime

    # ensure a consistent order for parameters
    algorithm, _ = IntrinsicImagesAlgorithm.objects.get_or_create(
        slug=slug, parameters=json.dumps(parameters, sort_keys=True), baseline=baseline)

    if IntrinsicImagesDecomposition.objects.filter(
            photo=photo, algorithm=algorithm).exists():
        print '_run_algorithm: EXISTS: photo %s, algorithm %s, params %s' % (
            photo.id, slug, parameters)
        return
    else:
        print '_run_algorithm: starting: photo %s, algorithm %s, params %s' % (
            photo.id, slug, parameters)

    # load and resize the image here (rather than loading the pre-resized
    # photo thumbnail) to avoid JPEG artifacts
    attr = '_intrinsic_algorithm_photo_%s' % image_size
    if hasattr(photo, attr):
        image = getattr(photo, attr)
    else:
        image = photo.open_image(width='orig')
        image = ResizeToFit(image_size, image_size).process(image)
        setattr(photo, attr, image)

    time_start = timeit.default_timer()
    # r, s: linear numpy arrays
    r, s = function(image=image, **parameters)
    time_end = timeit.default_timer()
    runtime = time_end - time_start

    # r, s: sRGB numpy arrays
    r, s = [process_layer(x) for x in (r, s)]

    # r, s: sRGB PIL images
    reflectance_image = numpy_to_pil(r)
    shading_image = numpy_to_pil(s)

    # save in database
    with transaction.atomic():
        decomposition, _ = IntrinsicImagesDecomposition.objects \
            .get_or_create(photo=photo, algorithm=algorithm)

        # fill in fields
        decomposition.runtime = runtime
        save_obj_attr_image(
            decomposition, attr='reflectance_image',
            img=reflectance_image, format='png', save=False)
        save_obj_attr_image(
            decomposition, attr='shading_image',
            img=shading_image, format='png', save=False)

        # compute error
        from intrinsic.evaluation import evaluate_error
        update_kwargs = evaluate_error(photo.id, reflectance_image)
        for k, v in update_kwargs.iteritems():
            setattr(decomposition, k, v)

        # save to database
        decomposition.save()

    print '_run_algorithm: DONE: photo %s, algorithm %s, params %s, runtime: %s' % (
        photo.id, slug, parameters, runtime)

    return r, s, runtime
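When ``photo`` is not a ``Photo`` instance, ``_run_algorithm`` skips the database path entirely and only times the call and post-processes the two layers, so it can be applied directly to an in-memory image. A hedged usage sketch follows, assuming ``_run_algorithm`` (and its Django dependencies) is importable from the surrounding module; the input array and the ``my_decompose`` function are placeholders:

# Hedged usage sketch of the raw-array code path of _run_algorithm.
# 'my_decompose' stands in for any function returning (reflectance, shading).
import numpy as np

def my_decompose(image, sigma=1.0):
    # trivial placeholder decomposition
    return image.mean(axis=2), np.ones(image.shape[:2])

image = np.random.rand(64, 64, 3)
r, s, runtime = _run_algorithm(image, my_decompose, slug='my_decompose',
                               parameters={'sigma': 1.0})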