def alg_change(self):
    """
    Score each active algorithm by reflectance consistency across
    "light stacks" (groups of photos of the same scene): pairwise LMSE
    between the grayscale reflectance decompositions of every photo
    pair in a stack, averaged per stack, then summarized per algorithm.
    Prints per-algorithm mean/median/std, sorted by mean error.
    """
    algorithms = IntrinsicImagesAlgorithm.objects.filter(active=True) \
        .order_by('slug', '-id')
    # one list of per-stack mean errors per algorithm
    algorithm_errors = {alg: [] for alg in algorithms}
    light_stacks = PhotoLightStack.objects.all()
    for alg in progress_bar(algorithms):
        use_alg = True
        for light_stack in light_stacks:
            photo_ids = light_stack.photos.values_list('id', flat=True)
            decompositions = IntrinsicImagesDecomposition.objects.filter(
                algorithm=alg, photo_id__in=photo_ids)
            # skip algorithms that haven't decomposed every photo in
            # every stack (they can't be compared fairly)
            if len(decompositions) != len(photo_ids):
                use_alg = False
                break
            errors = []
            for d1 in decompositions:
                # grayscale linear reflectance for photo 1
                r1 = open_image(d1.reflectance_image)
                r1 = srgb_to_rgb(np.asarray(r1).astype(float) / 255.0)
                r1 = np.mean(r1, axis=-1)
                for d2 in decompositions:
                    if d1.photo_id == d2.photo_id:
                        continue
                    r2 = open_image(d2.reflectance_image)
                    r2 = srgb_to_rgb(np.asarray(r2).astype(float) / 255.0)
                    r2 = np.mean(r2, axis=-1)
                    # NOTE(review): each unordered pair is scored twice
                    # (d1,d2) and (d2,d1) — harmless if lmse is symmetric
                    errors.append(lmse(r1, r2))
            algorithm_errors[alg].append(np.mean(errors))
        if use_alg:
            # progress output for algorithms with complete coverage
            print alg.slug, alg.id, \
                np.mean(algorithm_errors[alg]), \
                np.median(algorithm_errors[alg]), \
                np.std(algorithm_errors[alg])
    # final summary: only algorithms that covered every light stack
    errors = [(alg, np.mean(errors), np.median(errors), np.std(errors))
              for alg, errors in algorithm_errors.iteritems()
              if len(errors) == len(light_stacks)]
    errors.sort(key=lambda x: x[1])  # ascending by mean error
    for alg, e, m, s in errors:
        print alg.slug, alg.id, e, m, s
def alg_change(self):
    """
    Evaluate each active algorithm's reflectance consistency across
    photo light stacks: average pairwise LMSE between the grayscale
    reflectance images of all photo pairs within each stack, then
    print per-algorithm mean/median/std sorted by mean error.
    (Duplicate of the variant above; kept byte-compatible.)
    """
    algorithms = IntrinsicImagesAlgorithm.objects.filter(active=True) \
        .order_by('slug', '-id')
    # per-algorithm list of per-stack mean errors
    algorithm_errors = {alg: [] for alg in algorithms}
    light_stacks = PhotoLightStack.objects.all()
    for alg in progress_bar(algorithms):
        use_alg = True
        for light_stack in light_stacks:
            photo_ids = light_stack.photos.values_list('id', flat=True)
            decompositions = IntrinsicImagesDecomposition.objects.filter(
                algorithm=alg, photo_id__in=photo_ids)
            # require a decomposition for every photo in the stack
            if len(decompositions) != len(photo_ids):
                use_alg = False
                break
            errors = []
            for d1 in decompositions:
                # grayscale linear-RGB reflectance for the first photo
                r1 = open_image(d1.reflectance_image)
                r1 = srgb_to_rgb(np.asarray(r1).astype(float) / 255.0)
                r1 = np.mean(r1, axis=-1)
                for d2 in decompositions:
                    if d1.photo_id == d2.photo_id:
                        continue
                    r2 = open_image(d2.reflectance_image)
                    r2 = srgb_to_rgb(np.asarray(r2).astype(float) / 255.0)
                    r2 = np.mean(r2, axis=-1)
                    errors.append(lmse(r1, r2))
            algorithm_errors[alg].append(np.mean(errors))
        if use_alg:
            print alg.slug, alg.id, \
                np.mean(algorithm_errors[alg]), \
                np.median(algorithm_errors[alg]), \
                np.std(algorithm_errors[alg])
    # summarize only algorithms that covered all light stacks
    errors = [(alg, np.mean(errors), np.median(errors), np.std(errors))
              for alg, errors in algorithm_errors.iteritems()
              if len(errors) == len(light_stacks)]
    errors.sort(key=lambda x: x[1])
    for alg, e, m, s in errors:
        print alg.slug, alg.id, e, m, s
def function(image, **kwargs):
    """
    Decompose ``image`` with Grosse et al. color Retinex.

    Returns ``(r, s)`` where ``s`` is the shading returned by the
    solver and ``r`` is recomputed as image / shading (shading clamped
    away from zero to avoid division blow-up).
    """
    from intrinsic.algorithm.grosse2009 import intrinsic
    # convert to linear RGB in [0, 255] — the solver's expected range
    rgb = srgb_to_rgb(pil_to_numpy(image)) * 255.0
    mask = np.ones(rgb.shape[0:2], dtype=bool)
    s, r = intrinsic.color_retinex(rgb, mask, **kwargs)
    # re-derive reflectance from the clamped shading channel
    safe_s = np.clip(s, 1e-3, float('inf'))
    r = rgb / safe_s[:, :, np.newaxis]
    return r, s
def function(image, **kwargs):
    """
    Decompose ``image`` with the Bell et al. (2014) intrinsic solver.

    Relies on the free variable ``parameters`` from the enclosing scope
    for solver settings. Returns ``(r, s)``; the solver's third result
    (the decomposition object) is discarded.
    """
    from intrinsic.algorithm.bell2014.solver import IntrinsicSolver
    from intrinsic.algorithm.bell2014.input import IntrinsicInput
    linear_rgb = srgb_to_rgb(pil_to_numpy(image))
    solver_input = IntrinsicInput(image_rgb=linear_rgb)
    solver = IntrinsicSolver(input=solver_input, params=parameters)
    r, s, _decomposition = solver.solve()
    return r, s
def function(image, **kwargs):
    """
    Run the Bell et al. (2014) intrinsic image solver on ``image``.

    ``parameters`` is a free variable resolved in the enclosing scope.
    Returns the ``(reflectance, shading)`` pair; the full decomposition
    object returned by the solver is dropped.
    """
    from intrinsic.algorithm.bell2014.solver import IntrinsicSolver
    from intrinsic.algorithm.bell2014.input import IntrinsicInput
    image_rgb = srgb_to_rgb(pil_to_numpy(image))
    solver = IntrinsicSolver(
        input=IntrinsicInput(image_rgb=image_rgb),
        params=parameters,
    )
    r, s, _ = solver.solve()
    return r, s
def evaluate_error(photo_id, reflectance_image, thresh=0.10, is_sRGB=True):
    """ Evaluate the error for intrinsic image decomposition of a photo.

    :param photo_id: photo being decomposed

    :param reflectance_image: candidate reflectance image (in sRGB space)

    :param thresh: when a user states ``A < B``, we interpret that to mean
        that ``A < B - thresh``.  This must be a positive value in order to
        ensure that a constant reflectance image receives a nonzero error.

    :return: dict corresponding to fields on an
        :class:`intrinsic.models.IntrinsicImagesDecomposition` object.
    """

    # accept a filename, a PIL-like image, or an ndarray; normalize to
    # a float array in [0, 1]
    if isinstance(reflectance_image, basestring):
        reflectance_image = imread(reflectance_image).astype(float) / 255.0
    elif not isinstance(reflectance_image, np.ndarray):
        reflectance_image = np.asarray(reflectance_image).astype(float) / 255.0

    #if reflectance_image.shape[1] != 300:
        #z = 300.0 / reflectance_image.shape[1]
        #reflectance_image = interpolation.zoom(
            #reflectance_image, zoom=(z, z, 1))

    rows, cols, _ = reflectance_image.shape
    if is_sRGB:
        reflectance_image_linear = srgb_to_rgb(reflectance_image)
    else:
        reflectance_image_linear = reflectance_image

    # fetch comparisons: only opaque point pairs with a valid, positively
    # scored darker judgement ("1", "2", or "E")
    comparisons = list(
        IntrinsicPointComparison.objects.filter(
            photo_id=photo_id,
            point1__opaque=True,
            point2__opaque=True,
            darker__isnull=False,
            darker__in=("1", "2", "E"),
            darker_score__isnull=False,
            darker_score__gt=0
        ).select_related('point1')
    )

    # fetch points and compute each point's luminance (channel mean) from
    # the linear reflectance image at its normalized (x, y) location
    points = IntrinsicPoint.objects.filter(photo_id=photo_id)
    point_id_to_l = {
        p.id: np.mean(reflectance_image_linear[int(p.y * rows), int(p.x * cols), :])
        for p in points
    }

    # ratio threshold above which two luminances count as unequal
    eq_thresh = 1.0 + thresh

    # weighted (by darker_score) fraction of comparisons where the image's
    # predicted relation disagrees with the human judgement; None if empty
    def comparison_error(comps):
        error_num = 0.0
        error_den = 0.0
        for c in comps:
            if c.darker not in ('1', '2', 'E'):
                raise ValueError("Unknown value of darker: %s" % c.darker)
            # clamp to avoid division by zero on black pixels
            l1 = max(point_id_to_l[c.point1_id], 1e-10)
            l2 = max(point_id_to_l[c.point2_id], 1e-10)
            if l2 / l1 > eq_thresh:
                r_darker = '1'
            elif l1 / l2 > eq_thresh:
                r_darker = '2'
            else:
                r_darker = 'E'
            if c.darker != r_darker:
                error_num += c.darker_score
            error_den += c.darker_score
        if error_den:
            return error_num / error_den
        else:
            return None

    # return value
    update_kwargs = {
        'error_comparison_thresh': thresh,
    }

    # all errors
    update_kwargs['num'] = len(comparisons)
    if comparisons:
        update_kwargs['mean_error'] = comparison_error(comparisons)
    else:
        update_kwargs['mean_error'] = None

    # all dense errors (closely spaced point pairs)
    comparisons_dense = [c for c in comparisons
                         if c.point1.min_separation < 0.05]
    update_kwargs['num_dense'] = len(comparisons_dense)
    if comparisons_dense:
        update_kwargs['mean_dense_error'] = comparison_error(comparisons_dense)
    else:
        update_kwargs['mean_dense_error'] = None

    # all sparse errors (widely spaced point pairs)
    # NOTE(review): pairs with min_separation exactly 0.05 fall in
    # neither bucket — confirm whether that boundary is intentional
    comparisons_sparse = [c for c in comparisons
                          if c.point1.min_separation > 0.05]
    update_kwargs['num_sparse'] = len(comparisons_sparse)
    if comparisons_sparse:
        update_kwargs['mean_sparse_error'] = comparison_error(comparisons_sparse)
    else:
        update_kwargs['mean_sparse_error'] = None

    # equality errors
    comparisons_eq = [c for c in comparisons if c.darker == "E"]
    update_kwargs['num_eq'] = len(comparisons_eq)
    if comparisons_eq:
        update_kwargs['mean_eq_error'] = comparison_error(comparisons_eq)
    else:
        update_kwargs['mean_eq_error'] = None

    # inequality errors
    comparisons_neq = [c for c in comparisons if c.darker in ("1", "2")]
    update_kwargs['num_neq'] = len(comparisons_neq)
    if comparisons_neq:
        update_kwargs['mean_neq_error'] = comparison_error(comparisons_neq)
    else:
        update_kwargs['mean_neq_error'] = None

    # sum of two split errors; a missing (None) half contributes 0
    if (update_kwargs['mean_eq_error'] is not None or
            update_kwargs['mean_neq_error'] is not None):
        f = lambda x: x if x else 0
        update_kwargs['mean_sum_error'] = (
            f(update_kwargs['mean_eq_error']) +
            f(update_kwargs['mean_neq_error']))
    else:
        update_kwargs['mean_sum_error'] = None

    return update_kwargs
def evaluate_error(photo_id, reflectance_image, thresh=0.10, is_sRGB=True):
    """ Evaluate the error for intrinsic image decomposition of a photo.

    :param photo_id: photo being decomposed

    :param reflectance_image: candidate reflectance image (in sRGB space)

    :param thresh: when a user states ``A < B``, we interpret that to mean
        that ``A < B - thresh``.  This must be a positive value in order to
        ensure that a constant reflectance image receives a nonzero error.

    :return: dict corresponding to fields on an
        :class:`intrinsic.models.IntrinsicImagesDecomposition` object.
    """

    # normalize input (filename / PIL-like / ndarray) to floats in [0, 1]
    if isinstance(reflectance_image, basestring):
        reflectance_image = imread(reflectance_image).astype(float) / 255.0
    elif not isinstance(reflectance_image, np.ndarray):
        reflectance_image = np.asarray(reflectance_image).astype(float) / 255.0

    #if reflectance_image.shape[1] != 300:
        #z = 300.0 / reflectance_image.shape[1]
        #reflectance_image = interpolation.zoom(
            #reflectance_image, zoom=(z, z, 1))

    rows, cols, _ = reflectance_image.shape
    if is_sRGB:
        reflectance_image_linear = srgb_to_rgb(reflectance_image)
    else:
        reflectance_image_linear = reflectance_image

    # fetch comparisons: opaque point pairs with a valid, positively
    # scored human darker judgement
    comparisons = list(
        IntrinsicPointComparison.objects.filter(
            photo_id=photo_id,
            point1__opaque=True,
            point2__opaque=True,
            darker__isnull=False,
            darker__in=("1", "2", "E"),
            darker_score__isnull=False,
            darker_score__gt=0).select_related('point1'))

    # fetch points; map point id -> channel-mean luminance sampled from
    # the linear reflectance image at the point's normalized coordinates
    points = IntrinsicPoint.objects.filter(photo_id=photo_id)
    point_id_to_l = {
        p.id: np.mean(reflectance_image_linear[int(p.y * rows), int(p.x * cols), :])
        for p in points
    }

    # luminance-ratio threshold separating "equal" from "darker"
    eq_thresh = 1.0 + thresh

    # score-weighted disagreement rate between the image's predicted
    # relation and the human label; None when the set is empty
    def comparison_error(comps):
        error_num = 0.0
        error_den = 0.0
        for c in comps:
            if c.darker not in ('1', '2', 'E'):
                raise ValueError("Unknown value of darker: %s" % c.darker)
            # clamp luminances to avoid dividing by zero
            l1 = max(point_id_to_l[c.point1_id], 1e-10)
            l2 = max(point_id_to_l[c.point2_id], 1e-10)
            if l2 / l1 > eq_thresh:
                r_darker = '1'
            elif l1 / l2 > eq_thresh:
                r_darker = '2'
            else:
                r_darker = 'E'
            if c.darker != r_darker:
                error_num += c.darker_score
            error_den += c.darker_score
        if error_den:
            return error_num / error_den
        else:
            return None

    # return value
    update_kwargs = {
        'error_comparison_thresh': thresh,
    }

    # all errors
    update_kwargs['num'] = len(comparisons)
    if comparisons:
        update_kwargs['mean_error'] = comparison_error(comparisons)
    else:
        update_kwargs['mean_error'] = None

    # all dense errors (point pairs closer than the 0.05 separation cutoff)
    comparisons_dense = [
        c for c in comparisons if c.point1.min_separation < 0.05
    ]
    update_kwargs['num_dense'] = len(comparisons_dense)
    if comparisons_dense:
        update_kwargs['mean_dense_error'] = comparison_error(comparisons_dense)
    else:
        update_kwargs['mean_dense_error'] = None

    # all sparse errors (pairs beyond the cutoff)
    # NOTE(review): min_separation == 0.05 lands in neither bucket —
    # confirm the boundary is intended to be exclusive on both sides
    comparisons_sparse = [
        c for c in comparisons if c.point1.min_separation > 0.05
    ]
    update_kwargs['num_sparse'] = len(comparisons_sparse)
    if comparisons_sparse:
        update_kwargs['mean_sparse_error'] = comparison_error(
            comparisons_sparse)
    else:
        update_kwargs['mean_sparse_error'] = None

    # equality errors
    comparisons_eq = [c for c in comparisons if c.darker == "E"]
    update_kwargs['num_eq'] = len(comparisons_eq)
    if comparisons_eq:
        update_kwargs['mean_eq_error'] = comparison_error(comparisons_eq)
    else:
        update_kwargs['mean_eq_error'] = None

    # inequality errors
    comparisons_neq = [c for c in comparisons if c.darker in ("1", "2")]
    update_kwargs['num_neq'] = len(comparisons_neq)
    if comparisons_neq:
        update_kwargs['mean_neq_error'] = comparison_error(comparisons_neq)
    else:
        update_kwargs['mean_neq_error'] = None

    # sum of two split errors; a None half contributes 0
    if (update_kwargs['mean_eq_error'] is not None or
            update_kwargs['mean_neq_error'] is not None):
        f = lambda x: x if x else 0
        update_kwargs['mean_sum_error'] = (f(update_kwargs['mean_eq_error']) +
                                           f(update_kwargs['mean_neq_error']))
    else:
        update_kwargs['mean_sum_error'] = None

    return update_kwargs
def function(image, **kwargs):
    """
    Trivial baseline decomposition: the linear-RGB image itself is the
    reflectance and the shading is constant (all ones, same shape).
    """
    rgb = srgb_to_rgb(pil_to_numpy(image))
    shading = np.ones_like(rgb)
    return rgb, shading
def function(image, **kwargs):
    """
    Baseline decomposition: shading is the per-pixel channel sum of the
    linear-RGB image (clamped away from zero), and reflectance is the
    image divided by that shading.
    """
    rgb = srgb_to_rgb(pil_to_numpy(image))
    channel_sum = np.sum(rgb, axis=-1)
    # clamp so the division below cannot blow up on black pixels
    s = np.clip(channel_sum, 1e-3, float('inf'))
    r = rgb / s[:, :, np.newaxis]
    return r, s