Example #1
    def getDistance(self, pixels1, pixels2):
        lab1 = colour.XYZ_to_Lab(colour.sRGB_to_XYZ(pixels1))
        lab2 = colour.XYZ_to_Lab(colour.sRGB_to_XYZ(pixels2))
        dist = mean_squared_error(pixels1, pixels2)

        #dist = np.sqrt((pixels1[:, 0] - pixels2[:, 0])**2 + (pixels1[:, 1] - pixels2[:, 1])**2 + (pixels1[:, 2] - pixels2[:, 2])**2)
        #dist = np.sum(dist)

        return dist
Example #2
def convert_to_XYZ(img):
    img_data = np.asarray(img)
    r, g, b = [img_data[:, :, i] for i in range(img_data.shape[-1])]
    isAdobeRGB = is_adobe_rgb(img)
    if isAdobeRGB: XYZ = AdobeRGB_to_XYZn(r, g, b)
    else: XYZ = colour.sRGB_to_XYZ(img_data / 255)
    return XYZ
def calculate_EVILS_CLAW(
    RGB_input,
    CLAW_compression=1.0,
    CLAW_identity_limit=None,
    CLAW_maximum_input=None,
    CLAW_maximum_output=None,
):
    XYZ_RGB = colour.sRGB_to_XYZ(RGB_input)
    EVILS_eiY = calculate_EVILS_eiY(XYZ_RGB)
    eiH, eiC, eiY = np.split(calculate_EVILS_eiHCY(EVILS_eiY), 3, axis=-1)

    eiC_max = np.amax(eiC)
    # eiC_n = np.ma.divide(
    #     eiC,
    #     eiC_max
    # ).filled(fill_value=0.0)

    if CLAW_maximum_input is None:
        # Set CLAW_maximum_input to a reasonable portion of the output
        # range. Scalar.
        CLAW_maximum_input = eiC_max
    else:
        CLAW_maximum_input *= eiC_max

    if CLAW_maximum_output is None:
        # Set CLAW_maximum_output from the input to ensure that a majority of
        # values are unchanged. Scalar.
        CLAW_maximum_output = CLAW_maximum_input * 0.6
    else:
        CLAW_maximum_output *= CLAW_maximum_input

    if CLAW_identity_limit is None:
        # Set breakpoint to a reasonably high value to leave a majority
        # of values untouched.
        CLAW_identity_limit = 0.0
    else:
        CLAW_identity_limit *= CLAW_maximum_output

    EVILS_CLAW_ratio = np.where(
        eiC > CLAW_identity_limit,
        power_compression(
            X_input=eiC,
            X_maximum=CLAW_maximum_input,
            Y_maximum=CLAW_maximum_output,
            compression_power=CLAW_compression,
            identity_breakpoint=CLAW_identity_limit,
        ),
        eiC,
    )

    # Chroma scale here is relative to what the adjust_chroma tool expects.
    # So here we assume that maximal chroma is 100%, and simply subtract
    # the normalized output of the input chroma from the compressed signal
    # chroma. This scaling could likely be improved.
    chroma_scalar = EVILS_CLAW_ratio / eiC

    return adjust_chroma(RGB_input, chroma_scalar)
Example #4
def test_de2000() -> None:

    rgb = get_random_01x01x01_with_corners(1000)
    xyz = sRGB_to_XYZ(rgb)
    lab = XYZ_to_Lab(xyz)

    de = delta_E_CIE2000(lab[:-1], lab[1:])
    my_de = dE_2000_jit(lab[:-1], lab[1:])

    assert np.allclose(de, my_de), np.hstack(
        [de.reshape(-1, 1), my_de.reshape(-1, 1)])
Example #5
def rgb_to_k(r, g, b):
    RGB = np.array([r, g, b])
    # Conversion to tristimulus values.
    XYZ = colour.sRGB_to_XYZ(RGB / 255)
    # Conversion to chromaticity coordinates.
    xy = colour.XYZ_to_xy(XYZ)
    # Conversion to correlated colour temperature in K.
    CCT = colour.xy_to_CCT(xy, 'hernandez1999')

    #print(f"{r}{g}{b}")

    return CCT
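
# A minimal, hypothetical usage sketch (not part of the original snippet); it
# assumes `numpy as np` and `colour` are imported as the function requires, and
# the RGB triplet is an arbitrary warm white chosen for illustration.
print(rgb_to_k(255, 244, 229))  # expect a correlated colour temperature in the thousands of kelvin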
Example #6
def set_convertor(name, ill='D65'):
    """ Binds the conversion functions LCH2RGB() and RGB2LCH() to the choosen colour package
    """
    global LCH2RGB, RGB2LCH, convertor, illuminant
    if name not in ['custom', 'colorspacious', 'colourscience']:
        print("Unknown conversion module")
        return
    convertor = name
    illuminant = ill
    if name == 'custom':
        LCH2RGB = lambda L, C, H: XYZ2RGB(Lab2XYZ(LCH2Lab((L, C, H))))
        RGB2LCH = lambda R, G, B: Lab2LCH(XYZ2Lab(RGB2XYZ((R, G, B))))
    if name == 'colorspacious':
        from colorspacious import cspace_convert
        func_LCH2RGB = lambda L, C, H: cspace_convert([L, C, H], {
            "name": "CIELCh",
            "XYZ100_w": ill
        }, "sRGB1")
        func_RGB2LCH = lambda R, G, B: cspace_convert([R, G, B], "sRGB1", {
            "name": "CIELCh",
            "XYZ100_w": ill
        })
    if name == 'colourscience':
        import colour as cs
        cs_ill = cs.ILLUMINANTS['CIE 1931 2 Degree Standard Observer'][ill]
        func_LCH2RGB = lambda L, C, H: cs.XYZ_to_sRGB(
            cs.Lab_to_XYZ(cs.LCHab_to_Lab([L, C, H]), illuminant=cs_ill))
        func_RGB2LCH = lambda R, G, B: cs.Lab_to_LCHab(
            cs.XYZ_to_Lab(cs.sRGB_to_XYZ([R, G, B]), illuminant=cs_ill))
    if name == 'colorspacious' or name == 'colourscience':

        def LCH2RGB(L, C, H):
            if hasattr(L, '__iter__'):
                RGB = np.array(list(map(func_LCH2RGB, L, C, H)))
                R = RGB[:, 0]
                G = RGB[:, 1]
                B = RGB[:, 2]
            else:
                R, G, B = func_LCH2RGB(L, C, H)
            return R, G, B

        def RGB2LCH(R, G, B):
            if hasattr(R, '__iter__'):
                LCH = np.array(list(map(func_RGB2LCH, R, G, B)))
                L = LCH[:, 0]
                C = LCH[:, 1]
                H = LCH[:, 2]
            else:
                L, C, H = func_RGB2LCH(R, G, B)
            return L, C, H

    print("convertor = '%s' (illuminant = '%s')" % (name, illuminant))
Example #7
def image_color_temperature(path):
    #convert image to array of pixels
    array = np.array(image_dominant_color_rgb(path))
    
    #convert to XYZ https://www.colourphil.co.uk/xyz_colour_space.shtml
    XYZ = colour.sRGB_to_XYZ(array / 255)
    
    #don't need the z value
    xy = colour.XYZ_to_xy(XYZ)
    
    #convert to its kelvin temperature
    CCT = colour.xy_to_CCT(xy, 'hernandez1999')
    return CCT
Example #8
def ColorFidelityMetric(rgbX, rgbY):
    M = 0
    Q = 0

    #Reshape to W * H * 3
    rgbX_ = np.reshape(rgbX, (inputImg.shape[0], inputImg.shape[1], 3))
    rgbY_ = np.reshape(rgbY, (inputImg.shape[0], inputImg.shape[1], 3))

    #RGB-->Lab
    XYZ_X = colour.sRGB_to_XYZ(rgbX_)
    XYZ_Y = colour.sRGB_to_XYZ(rgbY_)
    Lab_X = colour.XYZ_to_Lab(XYZ_X)
    Lab_Y = colour.XYZ_to_Lab(XYZ_Y)

    #Compute the FidelityMetric for each window
    for (cropX, cropY) in SlidingWindow(Lab_X, Lab_Y, 1, 8):
        # if the window does not meet our desired window size, ignore it
        if cropX.shape[0] != 8 or cropX.shape[1] != 8:
            continue

        #Crop of the original image
        cropL_X = cropX[:, :, 0].flatten()
        cropa_X = cropX[:, :, 1].flatten()
        cropb_X = cropX[:, :, 2].flatten()

        #Crop of the processed image
        cropL_Y = cropY[:, :, 0].flatten()
        cropa_Y = cropY[:, :, 1].flatten()
        cropb_Y = cropY[:, :, 2].flatten()

        Ql = FidelityMetric(cropL_X, cropL_Y)
        Qa = FidelityMetric(cropa_X, cropa_Y)
        Qb = FidelityMetric(cropb_X, cropb_Y)

        Q += np.sqrt(Ql**2 + Qa**2 + Qb**2)

        M += 1

    return Q / M
Example #9
def main():
    while True:
        time.sleep(1)
        img = get_cap()
        avg_color_rgb = bgr_to_rgb(avg_image_colors(img))
        avg_color_xyz = colour.sRGB_to_XYZ(avg_color_rgb)
        avg_color_xy = colour.XYZ_to_xy(avg_color_xyz)
        avg_color_cct = colour.xy_to_CCT(avg_color_xy, 'hernandez1999')
        avg_img_color_cct_xy = colour.temperature.CCT_to_xy(avg_color_cct)

        print("Average RGB Value: {}, Rounded CCT: {}, Sent xy: {}".format(
            avg_color_rgb, avg_color_cct, avg_img_color_cct_xy))

        set_acs_sample(avg_img_color_cct_xy[0], avg_img_color_cct_xy[1])
Example #10
    def getNaturalness(self, rgb):
        XYZ = colour.sRGB_to_XYZ(rgb)
        LUV = colour.XYZ_to_Luv(XYZ)

        #Compute saturation and hue (hue is converted from 0-2pi radians to degrees in 0-360)
        sat = np.sqrt(LUV[:, 1]**2 + LUV[:, 2]**2) / 100
        hue = np.arctan2(LUV[:, 2], LUV[:, 1])
        hue[hue < 0] += 2 * np.pi

        LHS = np.c_[LUV[:, 0], np.rad2deg(hue), sat]

        #Thresholding the L and S components
        LHS = LHS[np.where((LHS[:, 0] >= 20) & (LHS[:, 0] <= 80)
                           & (LHS[:, 2] >= 0.1))]

        #Calculate the average and pixel count of the saturation values
        skin = LHS[np.where((LHS[:, 1] >= 25) & (LHS[:, 1] <= 70)), 2]
        grass = LHS[np.where((LHS[:, 1] >= 95) & (LHS[:, 1] <= 135)), 2]
        sky = LHS[np.where((LHS[:, 1] >= 185) & (LHS[:, 1] <= 260)), 2]

        if (skin.shape[1] == 0):
            n_skin = 0
            S_skin = 0
        else:
            n_skin = skin.shape[1]
            S_skin = np.mean(skin)

        if (grass.shape[1] == 0):
            n_grass = 0
            S_grass = 0
        else:
            n_grass = grass.shape[1]
            S_grass = np.mean(grass)

        if (sky.shape[1] == 0):
            n_sky = 0
            S_sky = 0
        else:
            n_sky = sky.shape[1]
            S_sky = np.mean(sky)

        #Calculate the local CNI value
        N_skin = np.power(np.exp(-0.5 * ((S_skin - 0.76) / 0.52)**2), 4)
        N_grass = np.exp(-0.5 * ((S_grass - 0.81) / 0.53)**2)
        N_sky = np.exp(-0.5 * ((S_sky - 0.43) / 0.22)**2)

        return (n_skin * N_skin + n_grass * N_grass +
                n_sky * N_sky) / (n_skin + n_grass + n_sky)
Example #11
def BGR2LAB(image):
    """
    Converts OpenCV BGR image to CIE Lab image
    Lab   Scale
    L|    0:100 |
    a| -100:100 |
    b| -100:100 |

    :param image: BGR image
    :return: Lab image
    """
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_srgb = image_rgb.astype(np.float32) / 255
    image_lab = colour.XYZ_to_Lab(colour.sRGB_to_XYZ(image_srgb))

    return image_lab
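
# A minimal, hypothetical usage sketch (not part of the original snippet); a
# tiny synthetic BGR image stands in for a real OpenCV frame, and cv2, numpy
# (as np) and colour are assumed to be imported as the function requires.
bgr = np.array([[[255, 0, 0], [0, 255, 0]],
                [[0, 0, 255], [128, 128, 128]]], dtype=np.uint8)
lab = BGR2LAB(bgr)
print(lab.shape, lab[0, 0])  # (2, 2, 3); L roughly in the 0-100 range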
Example #12
def calculate_input_temp(frame) -> float:
    array_RGB = numpy.array(list(cv2.mean(frame)[0:3][::-1]))

    array_tristimulus = colour.sRGB_to_XYZ(array_RGB / 255)

    array_chromaticity = colour.XYZ_to_xy(array_tristimulus)

    temporary = colour.xy_to_CCT(array_chromaticity, 'hernandez1999')

    if temporary < 1000:
        # print('Warm bound')
        return 1000
    elif temporary > 12000:
        # print('Cold bound')
        return 12000
    return temporary
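
# A minimal, hypothetical usage sketch (not part of the original snippet); a
# flat synthetic BGR frame stands in for a camera capture, and cv2, numpy and
# colour are assumed to be imported as the function requires.
frame = numpy.full((4, 4, 3), (60, 170, 250), dtype=numpy.uint8)  # warm tone in BGR order
print(calculate_input_temp(frame))  # result is clamped to the 1000-12000 K range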
Example #13
def rgb_to_temperature(rgb):
    """ Convert sRGB to temperature in Kelvin.
    First, we convert to tristimulus values.
    Second, we convert to chromaticity coordinates.
    Third, we convert to correlated colour temperature in K using the Hernandez (1999) method.

    Parameters
    ----------
    rgb : List/tuple of rgb values, e.g. [255.0, 235.0, 12.0]

    Returns
    -------
    numeric : CCT (correlated color temperature) in kelvin
    """
    rgb_array = numpy.array(rgb)
    xyz = colour.sRGB_to_XYZ(rgb_array / 255)
    xy = colour.XYZ_to_xy(xyz)
    cct = colour.xy_to_CCT_Hernandez1999(xy)
    return cct
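
# A minimal, hypothetical usage sketch (not part of the original snippet),
# reusing the example triplet from the docstring; numpy and colour are assumed
# to be imported as the function requires.
print(rgb_to_temperature([255.0, 235.0, 12.0]))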
Example #14
def get_main_color(R,G,B):
    sRGB = [R/255,G/255,B/255]
    RGB = np.array(sRGB)
    XYZ = colour.sRGB_to_XYZ(sRGB)
    Lab = colour.XYZ_to_Lab(XYZ)
    LCHab = colour.Lab_to_LCHab(Lab)
    L = int(LCHab[0])
    C = int(LCHab[1])
    ab = int(LCHab[2])
    print([L,C,ab])
    if C<40:# 1
        if L<10:
            return 'black'
        elif L>90:
            return 'white'
        else:
            return 'gray'
    elif ab<45:# 2
        if L<30:
            return 'brown'
        elif L>70:
            return 'pink'
        else:
            return 'red'
    elif ab<75:# 3
        if L<30:
            return 'brown'
        else:
            return 'orange'
    elif ab<105:# 4
        return 'yellow'
    elif ab<210:# 5
        return 'green'
    elif ab<315:# 6
        return 'blue'
    elif ab>330:# 7
        if L>50:
            return 'pink'
        else:
            return 'purple'
    else:
        return 'purple'
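
# A minimal, hypothetical usage sketch (not part of the original snippet); the
# RGB triplet is an arbitrary saturated red, and numpy (as np) and colour are
# assumed to be imported as the function requires.
print(get_main_color(200, 30, 40))  # prints the [L, C, H] list, then likely returns 'red'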
Example #15
    def parse_value(self, value):
        if self.sensor_type == "relay_a":
            value = value[0]
        elif self.sensor_type == "relay_b":
            value = value[1]
        elif self.sensor_type == "acceleration":
            value = value[2]
        elif self.sensor_type == "colour_temp":
            r, g, b, c = [int(x / 257) for x in value]
            RGB = np.array([r, g, b])
            XYZ = colour.sRGB_to_XYZ(RGB / 255)
            xy = colour.XYZ_to_xy(XYZ)
            CCT = colour.xy_to_CCT_Hernandez1999(xy)
            value = CCT

        if isinstance(value, numbers.Number):
            if hasattr(self, "multiplier"):
                value = value * self.multiplier
            if hasattr(self, "offset"):
                value = value + self.offset

        return value
Example #16
def test_converters():
    np.set_printoptions(precision=4)

    hsv = get_random_01x01x01_with_corners(size=10)

    # ground truths
    rgb = HSV_to_RGB(hsv)
    xyz = sRGB_to_XYZ(rgb)
    xyy = XYZ_to_xyY(xyz)
    luv = XYZ_to_Luv(xyz)
    lchuv = Luv_to_LCHuv(luv)
    lab = XYZ_to_Lab(xyz)
    lchab = Lab_to_LCHab(lab)

    for fun, src, trg in [
        [HSV_to_RGB_jit, hsv, rgb],
        [sRGB_to_XYZ_jit, rgb, xyz],
        [xyY_to_XYZ_jit, xyy, xyz],
        [XYZ_to_xyY_jit, xyz, xyy],
        [XYZ_to_Luv_D65_jit, xyz, luv],
        [XYZ_to_Lab_D65_jit, xyz, lab],
        [Luv_to_LCHuv_jit, luv, lchuv],
        [Lab_to_LCHab_jit, lab, lchab],
    ]:
        print(f"testing {fun.__name__}")
        my_trg = fun(src)
        succ = np.allclose(my_trg, trg, equal_nan=True, atol=1e-3)
        if not succ:
            bad_rows = ((
                ~np.isclose(my_trg, trg, equal_nan=True, atol=1e-3)).sum(
                    axis=-1).astype(bool))
            print("FAILED")
            print("output comparison (bad rows)")
            print(np.hstack([my_trg, trg])[bad_rows])
            print("absolute difference (bad rows)")
            print(np.hstack([src, np.abs(my_trg - trg)])[bad_rows])
            assert 0
Example #17
        drawtype='box',
        useblit=True,
        button=[1, 3],  # don't use middle button
        minspanx=5,
        minspany=5,
        spancoords='pixels',
        interactive=True)
    plt.connect('key_press_event', toggle_selector)
    input("Choose ROI")
    roi = toggle_selector.RS.extents  #xmin,xmax,ymin,ymax
    roi = [int(r) for r in roi]

    color = np.array(
        [i[roi[2]:roi[3], roi[0]:roi[1]].mean((0, 1)) / 255 for i in images])
    ##now, convert this into the XYZ domain
    cXYZ = [colour.sRGB_to_XYZ(col) for col in color]
    delE = np.array([[colour.delta_E(c1 * 100, c2) for c2 in XYZ]
                     for c1 in cXYZ])
    bif = np.array([retL[i.argmin()] / 5 for i in delE])

    #Now, extract the voltage from the data files

    #first, pull the appropriate columns

    nameRegex = re.compile(r'[wW](\d{3}).*')
    compound = nameRegex.search(name)[1]

    cmpGex = re.compile('^W' + compound + '.*')

    col1 = [i for i, el in enumerate(voltData.columns) if cmpGex.search(el)][0]
    col2 = col1 + 1
Example #18
import numpy as np
import colour

RGB_r = np.array([1, 0, 0])
RGB_g = np.array([0, 1, 0])
RGB_b = np.array([0, 0, 1])

XYZ_r = colour.sRGB_to_XYZ(RGB_r, apply_EOCF=False)
XYZ_g = colour.sRGB_to_XYZ(RGB_g, apply_EOCF=False)
XYZ_b = colour.sRGB_to_XYZ(RGB_b, apply_EOCF=False)

xyY_r = colour.XYZ_to_xyY(XYZ_r)
xyY_g = colour.XYZ_to_xyY(XYZ_g)
xyY_b = colour.XYZ_to_xyY(XYZ_b)

print(xyY_r)
print(xyY_g)
print(xyY_b)
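
# Note (not part of the original snippet): with the EOCF bypassed the input is
# treated as linear RGB, so the printed xy chromaticities should approximate the
# sRGB / ITU-R BT.709 primaries: R (0.64, 0.33), G (0.30, 0.60), B (0.15, 0.06).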
Example #19
def get_artwork_colors():
    
# Define scope
    scope = 'current_user_playing_track user-read-private user-read-playback-state user-modify-playback-state'

# Obtain Spotify Token (username, client_id, and client_secret will vary per user - for additional information see Spotify API documentation.)
    try:
        token = util.prompt_for_user_token("Chris Penny", client_id='dd41386aabca41aa8dd7ba2f947782b3',client_secret='4bf7eefcbf7241298d92b9f5ad6fe3f0',redirect_uri='http://google.com/')
# Error Case - Remove token cache and replace
    except (AttributeError, JSONDE):
        os.remove(f".cache-{username}")
        token = util.prompt_for_user_token("Chris Penny", client_id='dd41386aabca41aa8dd7ba2f947782b3',client_secret='4bf7eefcbf7241298d92b9f5ad6fe3f0',redirect_uri='http://google.com/')

# Generate spotipy object using Spotify Token
    spot_obj = spotipy.Spotify(auth=token)

# Call spotipy attribute for the currently playing track
    track = spot_obj.current_user_playing_track()
    #print(list(track)) # For Debug

# NOTE: While the current_user_playing_track() attribute results in a dictionary, the album art image URL is not assigned to a key and must be extracted by parsing.
    items = track['item']
    
# Convert dictionary object to string and parse the album art image URL.
    string_items = str(items)
    image_loc = string_items.find('images')
    image_loc_end = string_items.find('width', image_loc)
    image_url = string_items[image_loc+34:image_loc_end-4]

# Use the urllib library to save the image as a temporary file in a sub-directory.
    req.urlretrieve(image_url, "Temporary Image Directory/temp_artwork.jpg")
    #print(image_url) # For Debug

    album_art_test = Image.open("Temporary Image Directory/temp_artwork.jpg").convert("L")
    album_art = Image.open("Temporary Image Directory/temp_artwork.jpg")
#    print(album_art.mode) # For Debug

    histogram = album_art.histogram()
#    print(rgb_data) # For Debug

# Convert image to a numpy array.
    rgb_tuples = np.asarray(album_art)
#    print(rgb_tuples) # For Debug
#    print(type(rgb_tuples.shape)) # For Debug

# Stack with Numpy
    for i in range(0, len(rgb_tuples)):
        if i==0:
            np_stack = rgb_tuples[i]
        if i > 0:
            np_stack = np.vstack((np_stack, rgb_tuples[i]))

#    print(np.shape(np_stack)) # For Debug

    pd_rgb_stack = pd.DataFrame(np_stack, columns = ['r', 'g', 'b'])
    pd_rgb_stack['rgb'] = (pd_rgb_stack['r'].astype(str)) + '_' + (pd_rgb_stack['g'].astype(str)) + '_' + (pd_rgb_stack['b'].astype(str))
    pd_rgb_stack['check_sum'] = pd_rgb_stack[['r', 'b', 'g']].sum(axis=1)
#    print(pd_rgb_stack['check_sum'].head(20)) # For Debug
    check_sum_mean = pd_rgb_stack['check_sum'].mean()
#    print(check_sum_mean) # For Debug


#   Sort by Value Counts to find modal values and return in order of frequency
    pd_rgb_sorted = pd_rgb_stack['rgb'].value_counts().to_frame()
    pd_rgb_sorted.astype(int)
#    print(pd_rgb_sorted.iloc[0]) # For Debug
#    print(pd_rgb_sorted) # For Debug
#    print(pd_rgb_stack['rgb'].head()) # For Debug
#    print(pd_rgb_stack['r']) # For Debug

# Initialize Values for While Loop
    rgb_temp_std = 0
    rgb_temp_sum = 0
    i = -1

    #and (rgb_temp_sum < 50)
# Implement While Loop to ensure selected color is not black/white/greyscale (in which all rgb values are approximately the same)
    while (rgb_temp_std < 15):
        i += 1
        temp = pd_rgb_sorted.index[i]
        parse_1 = temp.find('_')
        parse_2 = temp.find('_', parse_1+1)
        rgb_temp = pd.DataFrame({'r':[temp[0:parse_1]], 'g':[temp[parse_1+1: parse_2]], 'b':[temp[parse_2+1:]]}).astype(int)
        rgb_temp_std = np.std(rgb_temp, 1)[0]
        rgb_temp_sum = rgb_temp['r'] + rgb_temp['g'] + rgb_temp['b']
        rgb_temp_sum = rgb_temp_sum.astype(int)

    rgb_select_1 = rgb_temp
    print('The first selected color is:')
    print(rgb_select_1)

# Write selected rgb values to respective variables for use in distance calculation
    pd_rgb_stack['r_select'] = rgb_select_1['r'][0]
    pd_rgb_stack['g_select'] = rgb_select_1['g'][0]
    pd_rgb_stack['b_select'] = rgb_select_1['b'][0]

# Perform distance calculation (<-- distance formula applied to RGB values)
    pd_rgb_stack['dist_score'] = np.sqrt(np.square(pd_rgb_stack['r'] - pd_rgb_stack['r_select']) + np.square(pd_rgb_stack['g'] - pd_rgb_stack['g_select']) + np.square(pd_rgb_stack['b'] - pd_rgb_stack['b_select']))
#    print(pd_rgb_stack['dist_score'].head(20))
# Inspect Counts
    dist_score_sorted = pd_rgb_stack['dist_score'].value_counts().to_frame()
# Calculate Standard Deviation of Distance Score
    dist_score_std = np.std(pd_rgb_stack['dist_score'])
#    print(dist_score_std)

# Recreate Sorted List on DF
##    pd_rgb_sorted = pd_rgb_stack['rgb'].value_counts().to_frame()
##    pd_rgb_sorted.astype(int)

# Perform left join on pd_rgb_stack to get distance scores in same dataframe
    pd_rgb_merged = pd_rgb_stack.join(pd_rgb_sorted, on = 'rgb', how = 'left', rsuffix = '_count')

# Sort Merged DF and remove duplicates
    pd_rgb_merged = pd_rgb_merged.sort_values(by = ['rgb_count', 'dist_score'], ascending = False)
    pd_rgb_merged['std'] = np.std(pd_rgb_merged[['r','g','b']], axis = 1)
    pd_rgb_merged = pd_rgb_merged.drop_duplicates()
    
# Reset Initial Values for Second While Loop
    rgb_temp_std = 0
    rgb_temp_sum = 0
    temp_dist_score = 0
    j = -1

# Second While Loop
    while (rgb_temp_std < 15 or temp_dist_score < 2*dist_score_std or temp_sum < 120):
        j += 1
        temp = pd_rgb_merged.iloc[j]['rgb']
        temp_r = pd_rgb_merged.iloc[j]['r']
        temp_g = pd_rgb_merged.iloc[j]['g']
        temp_b = pd_rgb_merged.iloc[j]['b']
        temp_dist_score = pd_rgb_merged.iloc[j]['dist_score']
        temp_sum = pd_rgb_merged.iloc[j]['check_sum']
        rgb_temp_std = pd_rgb_merged.iloc[j]['std']

    rgb_select_2 = pd.DataFrame({'r':[temp_r], 'g':[temp_g], 'b':[temp_b]}).astype(int)
    print('The second selected color is:')
    print(rgb_select_2)

# Generate Histograms and Plot
            
    histogram_test = album_art_test.histogram()
    #print(histogram_test)
    r_hist = histogram[1:256]
    r_hist_max = r_hist.index(max(r_hist))
    g_hist = histogram[257:512]
    g_hist_max = g_hist.index(max(g_hist))
    b_hist = histogram[513:768]
    b_hist_max = b_hist.index(max(b_hist))
    rgb_max = [r_hist_max, g_hist_max, b_hist_max]
#    print(rgb_max)

    # Generate Histogram Plot with RGB channels
#    fig, histo = pypl.subplots()
#    histo.plot(r_hist, 'r')
#    histo.plot(g_hist, 'g')
#    histo.plot(b_hist, 'b')
#    histo.plot(histogram_test, 'black')
#    pypl.show() Uncomment to show histogram plot

    # Assuming sRGB encoded colour values.
    RGB = np.array([r_hist_max, g_hist_max, b_hist_max])

    # RGB Values
    rgb_for_conversion = np.vstack((rgb_select_1.values, rgb_select_2.values))
#    rgb_select_1 = rgb_select_1.values
#    rgb_select_2 = rgb_select_2.values

#    rgb_for_conversion = [rgb_select_1, rgb_select_2]


#    print(rgb_for_conversion)
    
    # Conversion to tristimulus values.
    XYZ = colour.sRGB_to_XYZ(rgb_for_conversion / 256)
    print(XYZ)

    # Conversion to chromaticity coordinates.
    xy = colour.XYZ_to_xy(XYZ)
    print(xy)
    return(xy)
Example #20
print(img.shape)  # (height,width,channel)
print(img.size)  # number of pixels
print(img.dtype)  # data type
print(img)  # print the image's numpy array (a 3-dimensional array)
#%%
# URL 1: https://colour.readthedocs.io/en/v0.3.13/generated/colour.sRGB_to_XYZ.html
# URL 2: https://colour.readthedocs.io/en/v0.3.7/colour.html
# URL 3: https://colour.readthedocs.io/en/v0.3.13/generated/colour.Lab_to_LCHab.html?highlight=lch#colour.Lab_to_LCHab
# sRGB_to_XYZ
#import colormath
#from colormath.color_objects import LabColor, XYZColor
#from colormath.color_conversions import convert_color
import colour
import numpy as np
sRGB = [200 / 255, 150 / 255, 100 / 255]
RGB = np.array(sRGB)
XYZ = colour.sRGB_to_XYZ(sRGB)
Lab = colour.XYZ_to_Lab(XYZ)
LCHab = colour.Lab_to_LCHab(Lab)
L = int(LCHab[0])
C = int(LCHab[1])
ab = int(LCHab[2])
#%%
LCHab = ['41', '59', '27']
LCHab = np.array(LCHab)
L = int(LCHab[0])
C = int(LCHab[1])
ab = int(LCHab[2])
#%%
r = 255
g = 89
b = 0
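#%%
# A hypothetical continuation (not part of the original snippet): run the r, g, b
# values defined above through the same sRGB -> Lab -> LCHab chain.
sRGB = np.array([r, g, b]) / 255
LCHab = colour.Lab_to_LCHab(colour.XYZ_to_Lab(colour.sRGB_to_XYZ(sRGB)))
print(LCHab)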
Example #21
def HEX_2_LAB(hex):
    return colour.XYZ_to_Lab(
        colour.sRGB_to_XYZ(colour.notation.HEX_to_RGB(hex)))
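
# A minimal, hypothetical usage sketch (not part of the original snippet); the
# hex string is an arbitrary example and `colour` is assumed to be imported.
print(HEX_2_LAB("#ff8800"))  # CIE Lab triplet for an orange tone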
Example #22
def HEX_2_XYY(hex):
    return colour.XYZ_to_xyY(
        colour.sRGB_to_XYZ(colour.notation.HEX_to_RGB(hex)))
    parser.add_argument('--alpha', type=float, default=1.0)
    parser.add_argument('--gamma', type=float, default=1.0)
    args = parser.parse_args()

    print('load training data')

    with gzip.open(args.training_data_path, 'rb') as f:
        raw, jpeg = pickle.load(f)
    raw = np.array(raw, dtype=np.float32)
    jpeg = np.array(jpeg, dtype=np.float32)

    raw /= 255
    jpeg /= 255

    if args.color_space == 'lab':
        raw = colour.XYZ_to_Lab(colour.sRGB_to_XYZ(raw))
        jpeg = colour.XYZ_to_Lab(colour.sRGB_to_XYZ(jpeg))

    print('load xgboost models')

    with open(args.model_file_path, 'rb') as f:
        models = pickle.load(f)

    print('lattice regression')

    print('define A')

    lattice_size = args.lut_size
    lattice_points = np.linspace(0.0, 1.0, lattice_size)
    A = np.array(
        [p for p in product(lattice_points, lattice_points, lattice_points)],
Example #24
def generate_documentation_plots(output_directory):
    """
    Generates documentation plots.

    Parameters
    ----------
    output_directory : unicode
        Output directory.
    """

    colour.utilities.filter_warnings()

    colour_style()

    np.random.seed(0)

    # *************************************************************************
    # "README.rst"
    # *************************************************************************
    arguments = {
        'tight_layout':
            True,
        'transparent_background':
            True,
        'filename':
            os.path.join(output_directory,
                         'Examples_Plotting_Visible_Spectrum.png')
    }
    plot_visible_spectrum('CIE 1931 2 Degree Standard Observer', **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Examples_Plotting_Illuminant_F1_SD.png')
    plot_single_illuminant_sd('FL1', **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Examples_Plotting_Blackbodies.png')
    blackbody_sds = [
        colour.sd_blackbody(i, colour.SpectralShape(0, 10000, 10))
        for i in range(1000, 15000, 1000)
    ]
    plot_multi_sds(
        blackbody_sds,
        y_label='W / (sr m$^2$) / m',
        use_sds_colours=True,
        normalise_sds_colours=True,
        legend_location='upper right',
        bounding_box=(0, 1250, 0, 2.5e15),
        **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Examples_Plotting_Cone_Fundamentals.png')
    plot_single_cmfs(
        'Stockman & Sharpe 2 Degree Cone Fundamentals',
        y_label='Sensitivity',
        bounding_box=(390, 870, 0, 1.1),
        **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Examples_Plotting_Luminous_Efficiency.png')
    sd_mesopic_luminous_efficiency_function = (
        colour.sd_mesopic_luminous_efficiency_function(0.2))
    plot_multi_sds(
        (sd_mesopic_luminous_efficiency_function,
         colour.PHOTOPIC_LEFS['CIE 1924 Photopic Standard Observer'],
         colour.SCOTOPIC_LEFS['CIE 1951 Scotopic Standard Observer']),
        y_label='Luminous Efficiency',
        legend_location='upper right',
        y_tighten=True,
        margins=(0, 0, 0, .1),
        **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Examples_Plotting_BabelColor_Average.png')
    plot_multi_sds(
        colour.COLOURCHECKERS_SDS['BabelColor Average'].values(),
        use_sds_colours=True,
        title=('BabelColor Average - '
               'Spectral Distributions'),
        **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Examples_Plotting_ColorChecker_2005.png')
    plot_single_colour_checker(
        'ColorChecker 2005', text_parameters={'visible': False}, **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Examples_Plotting_Chromaticities_Prediction.png')
    plot_corresponding_chromaticities_prediction(2, 'Von Kries', 'Bianco',
                                                 **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Examples_Plotting_CCT_CIE_1960_UCS_Chromaticity_Diagram.png')
    plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS(['A', 'B', 'C'],
                                                            **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Examples_Plotting_Chromaticities_CIE_1931_Chromaticity_Diagram.png')
    RGB = np.random.random((32, 32, 3))
    plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931(
        RGB,
        'ITU-R BT.709',
        colourspaces=['ACEScg', 'S-Gamut'],
        show_pointer_gamut=True,
        **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Examples_Plotting_CRI.png')
    plot_single_sd_colour_rendering_index_bars(colour.ILLUMINANTS_SDS['FL2'],
                                               **arguments)

    # *************************************************************************
    # Documentation
    # *************************************************************************
    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_CVD_Simulation_Machado2009.png')
    plot_cvd_simulation_Machado2009(RGB, **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Single_Colour_Checker.png')
    plot_single_colour_checker('ColorChecker 2005', **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Multi_Colour_Checkers.png')
    plot_multi_colour_checkers(['ColorChecker 1976', 'ColorChecker 2005'],
                               **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Single_SD.png')
    data = {
        500: 0.0651,
        520: 0.0705,
        540: 0.0772,
        560: 0.0870,
        580: 0.1128,
        600: 0.1360
    }
    sd = colour.SpectralDistribution(data, name='Custom')
    plot_single_sd(sd, **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Multi_SDs.png')
    data_1 = {
        500: 0.004900,
        510: 0.009300,
        520: 0.063270,
        530: 0.165500,
        540: 0.290400,
        550: 0.433450,
        560: 0.594500
    }
    data_2 = {
        500: 0.323000,
        510: 0.503000,
        520: 0.710000,
        530: 0.862000,
        540: 0.954000,
        550: 0.994950,
        560: 0.995000
    }
    spd1 = colour.SpectralDistribution(data_1, name='Custom 1')
    spd2 = colour.SpectralDistribution(data_2, name='Custom 2')
    plot_multi_sds([spd1, spd2], **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Single_CMFS.png')
    plot_single_cmfs('CIE 1931 2 Degree Standard Observer', **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Multi_CMFS.png')
    cmfs = ('CIE 1931 2 Degree Standard Observer',
            'CIE 1964 10 Degree Standard Observer')
    plot_multi_cmfs(cmfs, **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Single_Illuminant_SD.png')
    plot_single_illuminant_sd('A', **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Multi_Illuminant_SDs.png')
    plot_multi_illuminant_sds(['A', 'B', 'C'], **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Visible_Spectrum.png')
    plot_visible_spectrum(**arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Single_Lightness_Function.png')
    plot_single_lightness_function('CIE 1976', **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Multi_Lightness_Functions.png')
    plot_multi_lightness_functions(['CIE 1976', 'Wyszecki 1963'], **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Single_Luminance_Function.png')
    plot_single_luminance_function('CIE 1976', **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Multi_Luminance_Functions.png')
    plot_multi_luminance_functions(['CIE 1976', 'Newhall 1943'], **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Blackbody_Spectral_Radiance.png')
    plot_blackbody_spectral_radiance(
        3500, blackbody='VY Canis Major', **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Blackbody_Colours.png')
    plot_blackbody_colours(colour.SpectralShape(150, 12500, 50), **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Single_Colour_Swatch.png')
    RGB = ColourSwatch(RGB=(0.32315746, 0.32983556, 0.33640183))
    plot_single_colour_swatch(RGB, **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Multi_Colour_Swatches.png')
    RGB_1 = ColourSwatch(RGB=(0.45293517, 0.31732158, 0.26414773))
    RGB_2 = ColourSwatch(RGB=(0.77875824, 0.57726450, 0.50453169))
    plot_multi_colour_swatches([RGB_1, RGB_2], **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Single_Function.png')
    plot_single_function(lambda x: x ** (1 / 2.2), **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Multi_Functions.png')
    functions = {
        'Gamma 2.2': lambda x: x ** (1 / 2.2),
        'Gamma 2.4': lambda x: x ** (1 / 2.4),
        'Gamma 2.6': lambda x: x ** (1 / 2.6),
    }
    plot_multi_functions(functions, **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Image.png')
    path = os.path.join(colour.__path__[0], '..', 'docs', '_static',
                        'Logo_Medium_001.png')
    plot_image(colour.read_image(str(path)), **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_Corresponding_Chromaticities_Prediction.png')
    plot_corresponding_chromaticities_prediction(1, 'Von Kries', 'CAT02',
                                                 **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Spectral_Locus.png')
    plot_spectral_locus(spectral_locus_colours='RGB', **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Chromaticity_Diagram_Colours.png')
    plot_chromaticity_diagram_colours(**arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Chromaticity_Diagram.png')
    plot_chromaticity_diagram(**arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Chromaticity_Diagram_CIE1931.png')
    plot_chromaticity_diagram_CIE1931(**arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Chromaticity_Diagram_CIE1960UCS.png')
    plot_chromaticity_diagram_CIE1960UCS(**arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Chromaticity_Diagram_CIE1976UCS.png')
    plot_chromaticity_diagram_CIE1976UCS(**arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_SDs_In_Chromaticity_Diagram.png')
    A = colour.ILLUMINANTS_SDS['A']
    D65 = colour.ILLUMINANTS_SDS['D65']
    plot_sds_in_chromaticity_diagram([A, D65], **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_SDs_In_Chromaticity_Diagram_CIE1931.png')
    plot_sds_in_chromaticity_diagram_CIE1931([A, D65], **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_SDs_In_Chromaticity_Diagram_CIE1960UCS.png')
    plot_sds_in_chromaticity_diagram_CIE1960UCS([A, D65], **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_SDs_In_Chromaticity_Diagram_CIE1976UCS.png')
    plot_sds_in_chromaticity_diagram_CIE1976UCS([A, D65], **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Pointer_Gamut.png')
    plot_pointer_gamut(**arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_RGB_Colourspaces_In_Chromaticity_Diagram.png')
    plot_RGB_colourspaces_in_chromaticity_diagram(
        ['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_RGB_Colourspaces_In_Chromaticity_Diagram_CIE1931.png')
    plot_RGB_colourspaces_in_chromaticity_diagram_CIE1931(
        ['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_RGB_Colourspaces_In_'
        'Chromaticity_Diagram_CIE1960UCS.png')
    plot_RGB_colourspaces_in_chromaticity_diagram_CIE1960UCS(
        ['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_RGB_Colourspaces_In_'
        'Chromaticity_Diagram_CIE1976UCS.png')
    plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS(
        ['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'
        'Chromaticity_Diagram_Plot.png')
    RGB = np.random.random((128, 128, 3))
    plot_RGB_chromaticities_in_chromaticity_diagram(RGB, 'ITU-R BT.709',
                                                    **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'
        'Chromaticity_Diagram_CIE1931.png')
    plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931(
        RGB, 'ITU-R BT.709', **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'
        'Chromaticity_Diagram_CIE1960UCS.png')
    plot_RGB_chromaticities_in_chromaticity_diagram_CIE1960UCS(
        RGB, 'ITU-R BT.709', **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'
        'Chromaticity_Diagram_CIE1976UCS.png')
    plot_RGB_chromaticities_in_chromaticity_diagram_CIE1976UCS(
        RGB, 'ITU-R BT.709', **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_Ellipses_MacAdam1942_In_Chromaticity_Diagram.png')
    plot_ellipses_MacAdam1942_in_chromaticity_diagram(**arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Ellipses_MacAdam1942_In_'
        'Chromaticity_Diagram_CIE1931.png')
    plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1931(**arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Ellipses_MacAdam1942_In_'
        'Chromaticity_Diagram_CIE1960UCS.png')
    plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1960UCS(**arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Ellipses_MacAdam1942_In_'
        'Chromaticity_Diagram_CIE1976UCS.png')
    plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1976UCS(**arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Single_CCTF.png')
    plot_single_cctf('ITU-R BT.709', **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Multi_CCTFs.png')
    plot_multi_cctfs(['ITU-R BT.709', 'sRGB'], **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Single_Munsell_Value_Function.png')
    plot_single_munsell_value_function('ASTM D1535-08', **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Multi_Munsell_Value_Functions.png')
    plot_multi_munsell_value_functions(['ASTM D1535-08', 'McCamy 1987'],
                                       **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Single_SD_Rayleigh_Scattering.png')
    plot_single_sd_rayleigh_scattering(**arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_The_Blue_Sky.png')
    plot_the_blue_sky(**arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_Colour_Quality_Bars.png')
    illuminant = colour.ILLUMINANTS_SDS['FL2']
    light_source = colour.LIGHT_SOURCES_SDS['Kinoton 75P']
    light_source = light_source.copy().align(colour.SpectralShape(360, 830, 1))
    cqs_i = colour.colour_quality_scale(illuminant, additional_data=True)
    cqs_l = colour.colour_quality_scale(light_source, additional_data=True)
    plot_colour_quality_bars([cqs_i, cqs_l], **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_Single_SD_Colour_Rendering_Index_Bars.png')
    illuminant = colour.ILLUMINANTS_SDS['FL2']
    plot_single_sd_colour_rendering_index_bars(illuminant, **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_Multi_SDs_Colour_Rendering_Indexes_Bars.png')
    light_source = colour.LIGHT_SOURCES_SDS['Kinoton 75P']
    plot_multi_sds_colour_rendering_indexes_bars([illuminant, light_source],
                                                 **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_Single_SD_Colour_Quality_Scale_Bars.png')
    illuminant = colour.ILLUMINANTS_SDS['FL2']
    plot_single_sd_colour_quality_scale_bars(illuminant, **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_Multi_SDs_Colour_Quality_Scales_Bars.png')
    light_source = colour.LIGHT_SOURCES_SDS['Kinoton 75P']
    plot_multi_sds_colour_quality_scales_bars([illuminant, light_source],
                                              **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_Planckian_Locus.png')
    plot_planckian_locus(**arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_Planckian_Locus_In_Chromaticity_Diagram.png')
    plot_planckian_locus_in_chromaticity_diagram(['A', 'B', 'C'], **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_Planckian_Locus_In_Chromaticity_Diagram_CIE1931.png')
    plot_planckian_locus_in_chromaticity_diagram_CIE1931(['A', 'B', 'C'],
                                                         **arguments)

    arguments['filename'] = os.path.join(
        output_directory,
        'Plotting_Plot_Planckian_Locus_In_Chromaticity_Diagram_CIE1960UCS.png')
    plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS(['A', 'B', 'C'],
                                                            **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Plotting_Plot_RGB_Colourspaces_Gamuts.png')
    plot_RGB_colourspaces_gamuts(['ITU-R BT.709', 'ACEScg', 'S-Gamut'],
                                 **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Plotting_Plot_RGB_Scatter.png')
    plot_RGB_scatter(RGB, 'ITU-R BT.709', **arguments)

    # *************************************************************************
    # "tutorial.rst"
    # *************************************************************************
    arguments['filename'] = os.path.join(output_directory,
                                         'Tutorial_Visible_Spectrum.png')
    plot_visible_spectrum(**arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Tutorial_Sample_SD.png')
    sample_sd_data = {
        380: 0.048,
        385: 0.051,
        390: 0.055,
        395: 0.060,
        400: 0.065,
        405: 0.068,
        410: 0.068,
        415: 0.067,
        420: 0.064,
        425: 0.062,
        430: 0.059,
        435: 0.057,
        440: 0.055,
        445: 0.054,
        450: 0.053,
        455: 0.053,
        460: 0.052,
        465: 0.052,
        470: 0.052,
        475: 0.053,
        480: 0.054,
        485: 0.055,
        490: 0.057,
        495: 0.059,
        500: 0.061,
        505: 0.062,
        510: 0.065,
        515: 0.067,
        520: 0.070,
        525: 0.072,
        530: 0.074,
        535: 0.075,
        540: 0.076,
        545: 0.078,
        550: 0.079,
        555: 0.082,
        560: 0.087,
        565: 0.092,
        570: 0.100,
        575: 0.107,
        580: 0.115,
        585: 0.122,
        590: 0.129,
        595: 0.134,
        600: 0.138,
        605: 0.142,
        610: 0.146,
        615: 0.150,
        620: 0.154,
        625: 0.158,
        630: 0.163,
        635: 0.167,
        640: 0.173,
        645: 0.180,
        650: 0.188,
        655: 0.196,
        660: 0.204,
        665: 0.213,
        670: 0.222,
        675: 0.231,
        680: 0.242,
        685: 0.251,
        690: 0.261,
        695: 0.271,
        700: 0.282,
        705: 0.294,
        710: 0.305,
        715: 0.318,
        720: 0.334,
        725: 0.354,
        730: 0.372,
        735: 0.392,
        740: 0.409,
        745: 0.420,
        750: 0.436,
        755: 0.450,
        760: 0.462,
        765: 0.465,
        770: 0.448,
        775: 0.432,
        780: 0.421
    }

    sd = colour.SpectralDistribution(sample_sd_data, name='Sample')
    plot_single_sd(sd, **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Tutorial_SD_Interpolation.png')
    sd_copy = sd.copy()
    sd_copy.interpolate(colour.SpectralShape(400, 770, 1))
    plot_multi_sds(
        [sd, sd_copy], bounding_box=[730, 780, 0.25, 0.5], **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Tutorial_Sample_Swatch.png')
    sd = colour.SpectralDistribution(sample_sd_data)
    cmfs = colour.STANDARD_OBSERVERS_CMFS[
        'CIE 1931 2 Degree Standard Observer']
    illuminant = colour.ILLUMINANTS_SDS['D65']
    with domain_range_scale('1'):
        XYZ = colour.sd_to_XYZ(sd, cmfs, illuminant)
        RGB = colour.XYZ_to_sRGB(XYZ)
    plot_single_colour_swatch(
        ColourSwatch('Sample', RGB),
        text_parameters={'size': 'x-large'},
        **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Tutorial_Neutral5.png')
    patch_name = 'neutral 5 (.70 D)'
    patch_sd = colour.COLOURCHECKERS_SDS['ColorChecker N Ohta'][patch_name]
    with domain_range_scale('1'):
        XYZ = colour.sd_to_XYZ(patch_sd, cmfs, illuminant)
        RGB = colour.XYZ_to_sRGB(XYZ)
    plot_single_colour_swatch(
        ColourSwatch(patch_name.title(), RGB),
        text_parameters={'size': 'x-large'},
        **arguments)

    arguments['filename'] = os.path.join(output_directory,
                                         'Tutorial_Colour_Checker.png')
    plot_single_colour_checker(
        colour_checker='ColorChecker 2005',
        text_parameters={'visible': False},
        **arguments)

    arguments['filename'] = os.path.join(
        output_directory, 'Tutorial_CIE_1931_Chromaticity_Diagram.png')
    xy = colour.XYZ_to_xy(XYZ)
    plot_chromaticity_diagram_CIE1931(standalone=False)
    x, y = xy
    plt.plot(x, y, 'o-', color='white')
    # Annotating the plot.
    plt.annotate(
        patch_sd.name.title(),
        xy=xy,
        xytext=(-50, 30),
        textcoords='offset points',
        arrowprops=dict(arrowstyle='->', connectionstyle='arc3, rad=-0.2'))
    render(
        standalone=True,
        limits=(-0.1, 0.9, -0.1, 0.9),
        x_tighten=True,
        y_tighten=True,
        **arguments)

    # *************************************************************************
    # "basics.rst"
    # *************************************************************************
    arguments['filename'] = os.path.join(output_directory,
                                         'Basics_Logo_Small_001_CIE_XYZ.png')
    RGB = colour.read_image(
        os.path.join(output_directory, 'Logo_Small_001.png'))[..., 0:3]
    XYZ = colour.sRGB_to_XYZ(RGB)
    colour.plotting.plot_image(
        XYZ, text_parameters={'text': 'sRGB to XYZ'}, **arguments)
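
# A hypothetical entry point (not part of the original snippet); the output
# directory is an assumption and must exist before the plots are written.
if __name__ == '__main__':
    generate_documentation_plots(os.path.join('docs', '_static'))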
Example #25
data = response.json()
image_id = data['id']
url2 = data['image']
response = requests.get(url2)

with open(r'%s.jpg' %image_id,'wb') as f:
    f.write(response.content)

# Open the image. 
img_bgr = cv2.imread('%s.jpg' %image_id)
img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

#Apply gamma correction.
gamma = 2.4
gamma_img = np.array(255*(img / 255) ** gamma, dtype = 'uint8')

avg_color_gamma = gamma_img.mean(axis=0).mean(axis=0)
dominant_color_gamma = dominant(gamma_img)

XYZ1 = colour.sRGB_to_XYZ(dominant_color_gamma)


xy =  colour.XYZ_to_xy(XYZ1)

saveCIE(xy, image_id)

url = 'https://www.gyanvihar.org/arastu/uploads/'
files = {'image': open('%s.png' %image_id, 'rb')}
requests.post(url, files=files)

Example #26
            sd_dark_skin, illuminant=colour.SDS_ILLUMINANTS["D65"]
        )
        / 100
    )
)

print("\n")

RGB = np.array([0.45675795, 0.30986982, 0.24861924])
message_box(
    f'Converting to the "CAM16-UCS" colourspace from given "Output-Referred" '
    f'"sRGB" colourspace values:\n\n\t{RGB}'
)
print(colour.convert(RGB, "Output-Referred RGB", "CAM16UCS"))
specification = colour.XYZ_to_CAM16(
    colour.sRGB_to_XYZ(RGB) * 100,
    XYZ_w=colour.xy_to_XYZ(
        colour.CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["D65"]
    )
    * 100,
    L_A=64 / np.pi * 0.2,
    Y_b=20,
)
print(
    colour.JMh_CAM16_to_CAM16UCS(
        colour.utilities.tstack(
            [
                specification.J,
                specification.M,
                specification.h,
            ]
Example #27
    def on_click4(self):  # photo
        stream = io.BytesIO()
        with picamera.PiCamera() as camera:
            camera.start_preview()
            time.sleep(2)
            camera.capture(stream, format='jpeg')
        # Construct a numpy array from the stream
        data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
        # "Decode" the image from the array, preserving colour
        image = cv2.imdecode(data, 1)
        # OpenCV returns an array with data in BGR order. If you want RGB instead
        # use the following...
        image = image[:, :, ::-1]
        cv2.imwrite('original.png', image)

        pixmap = QtGui.QPixmap('./original.png').scaled(
            200, 180, QtCore.Qt.KeepAspectRatio)
        self.label_3.setPixmap(pixmap)

        image = image[150:190, 300:360]
        cv2.imwrite('escala.png', image)

        pixmap = QtGui.QPixmap('./escala.png').scaled(
            200, 180, QtCore.Qt.KeepAspectRatio)
        self.label_4.setPixmap(pixmap)

        ###########################################################
        #Average of the pixels#####################################
        ###########################################################
        r = image[:, :, 0]
        g = image[:, :, 1]
        b = image[:, :, 2]

        rp = r.sum() / (r.shape[0] * r.shape[1])
        gp = g.sum() / (g.shape[0] * g.shape[1])
        bp = b.sum() / (b.shape[0] * b.shape[1])
        ###########################################################
        #use the colour-science library to find the kelvin value###
        ############################################################
        RGB = np.array([rp, gp, bp])

        # Conversion to tristimulus values.
        XYZ = colour.sRGB_to_XYZ(RGB / 255)

        # Conversion to chromaticity coordinates.
        xy = colour.XYZ_to_xy(XYZ)

        # Conversion to correlated colour temperature in K.
        CCT = colour.temperature.xy_to_CCT_Hernandez1999(xy)
        #rp,gp,bp
        self.CCT = CCT
        if self.CCT < 0:
            self.label_5.setText(
                _translate("MainWindow",
                           "KELVIN OUT: " + str("intenta de nuevo"), None))
        self.label_5.setText(
            _translate("MainWindow", "KELVIN OUT: " + str(round(self.CCT)),
                       None))

        offRed()
        offBlue()
        self.pushButton_4.setStyleSheet(
            _fromUtf8("background-color: rgb(29, 29, 29);"))
        print(self.CCT)
Example #28
def detect_breaking_events(time,
                           crx_dist,
                           rgb,
                           crx_start=None,
                           crx_end=None,
                           px_mtrc="lightness",
                           colours=None,
                           resample_rule="100L",
                           algorithm="peaks",
                           peak_detection="local_maxima",
                           posterize=False,
                           ncolours=0,
                           threshold=0.1,
                           tswindow=11,
                           denoise=True,
                           pxwindow=3,
                           mask_drysand=False,
                           fix_constrast=False):
    """
    Detect wave breaking events.

    Two main methods are implemented:

    1 - Peak detection: detect wave breaking as lightness peaks in the
                        timestack

        Two peak detection methods are implemented:

        1-a Local maxima. Uses the peaklocalextremas() function from
                          pywavelearn to detect local maxima corresponding
                          to wave breaking.

        1-b Differential. Uses the first temporal derivative of the pixel
                          intensity to detect sharp transitions in the
                          timestack that should correspond to wave breaking.

        In both cases, the user can tell the script to classify the
        identified pixel peaks based on known colours. For example, water is
        usually blue, sand is brownish and breaking waves are whitish.
        Only peaks corresponding to wave breaking are appended to the output
        structure. This step is done using classifiy_colour()
        from pywavelearn.

    2 - Edge detection: detect wave breaking as sharp edges in the timestack

        Two options are available:

        2-a Edges only. Wave breaking events are obtained by applying a Sobel
                        filter to the timestack. Edge locations (time,space)
                        are obtained as:

                        - argument of the maxima (argmax) of a cross-shore
                          pixel intensity series obtained at every timestamp.

                        - local maxima of a cross-shore pixel intensity series
                          obtained at every timestamp.

        2-b Edges and colours. Wave breaking events are obtained by applying a
                               Sobel filter to the timestack and the detected
                               edges are classified using the colour
                               information as in 1-a. Edge locations
                               (time,space) are obtained as:

                               - argument of the maxima (argmax) of a
                                 cross-shore pixel intensity series obtained
                                 at every timestamp.

                               - local maxima of a cross-shore pixel intensity
                                 series obtained at every timestamp.
    ----------
    Args:
        time (Mandatory [np.array]): Array of datetimes.

        crx_dist (Mandatory [np.array]): Array of cross-shore locations.

        rgb (Mandatory [np.array]): timestack array.
                                    Shape is [time,crx_dist,3].

        crx_start (Optional [float]): where in the cross-shore orientation to
                                       start the analysis.
                                       Default is crx_dist.min().

        crx_start (Optional [float]): where in the cross-shore orientation to
                                       finish the analysis.
                                       Default is crx_dist.max().

        px_mtrc (Optional [str]): Which pixel intensity metric to use.
                                  Default is "lightness".

        resample_rule (Optional [str]): To which frequency to interpolate the
                                        timeseries. Default is "100L".

        algorithm (Optional [str]): Wave breaking detection algorithm.
                                    Default is "peaks".

        peak_detection (Optional [str]): Peak detection algorithm.
                                         Default is  "local_maxima".

        threshold (Optional [float]): Threshold for peak detection algorithm.
                                      Default is 0.1

        tswindow (Optional [int]): Window for peak detection algorithm.
                                   Default is 11.

        denoise (Optional [bool]): Denoise timestack using denoise_bilateral.
                                   Default is True.

        pxwindow (Optional [int]): Window for denoise_bilateral. Default is 3.

        posterize (Optional [bool]): If true will reduce the number of colours
                                     in the timestack. Default is False.

        ncolours (Optional [int]): Number of colours to posterize.
                                   Default is 0.

        colours (Optional [dict]): A dictionary for the colour learning step.
                                    Something like:
                                    train_colours = {'labels':[0,1,2],
                                                     'aliases':
                                                     ["sand","water","foam"],
                                                     'rgb':[[195,185,155],
                                                            [30,75,75],
                                                             [255,255,255]],
                                                      'target':2}
                                    Default is None.

        mask_drysand (Experimental [bool]): Mask dry sand using a
                                            colour-temperature (CCT)
                                            relationship. Default is False.

        fix_constrast (Optional [bool]): If true, apply histogram equalization
                                         to the timestack. Default is False.
    ----------
    Return:
         time (Mandatory [np.array]): time of occurrence of wave breaking
                                      events.

         breakers (Mandatory [np.array]): cross-shore location of wave breaking
                                          events.
    """
    # default to the full cross-shore extent
    if not crx_start:
        crx_start = crx_dist.min()
    if not crx_end:
        crx_end = crx_dist.max()

    if posterize:
        print("  + >> posterizing")
        rgb = colour_quantization(rgb, ncolours=ncolours)

    # get colour data
    if algorithm == "colour" or algorithm == "edges_and_colour":
        target = colours["target"]
        labels = colours["labels"]
        dom_colours = colours["rgb"]

    # denoise a little before computing edges
    if denoise:
        rgb = denoise_bilateral(rgb, pxwindow, multichannel=True)
        # scale back to 0-255
        rgb = (rgb - rgb.min()) / (rgb.max() - rgb.min()) * 255

    # mask sand - Not fully tested
    if mask_drysand:
        print("  + >> masking dry sand [Experimental]")
        # calculate colour temperature
        cct = colour.xy_to_CCT_Hernandez1999(
            colour.XYZ_to_xy(colour.sRGB_to_XYZ(rgb / 255)))
        # scale back to 0-1
        cct = (cct - cct.min()) / (cct.max() - cct.min()) * 255
        # mask
        i, j = np.where(cct == 0)
        rgb[i, j, :] = 0

    if fix_constrast:
        print("  + >> fixing contrast")
        rgb = exposure.equalize_hist(rgb)
        # rgb = (rgb-rgb.min())/(rgb.max()-rgb.min())*255

    # detect edges
    if algorithm == "edges" or algorithm == "edges_and_colour":
        print("  + >> calculating edges")
        edges = sobel_h(rgb2grey(rgb))

    # get pixel lines and RGB values at selected locations only
    if algorithm == "peaks" or algorithm == "colour":
        print("  + >> extracting cross-shore pixels")
        # rescale
        rgb = (rgb - rgb.min()) / (rgb.max() - rgb.min()) * 255
        Y, crx_idx = get_analysis_locations(crx_dist, crx_start, crx_end)
        Time, PxInts, RGB = get_pixel_lines(time,
                                            rgb,
                                            crx_idx,
                                            resample_rule=resample_rule,
                                            pxmtc=px_mtrc)

    # get analysis frequency and a one-second time window
    if not tswindow:
        fs = (time[1] - time[0]).total_seconds()
        win = int(1 / fs)
    else:
        win = tswindow

    print("  + >> detecting breaking events")
    PeakTimes = []
    print_check = False
    if algorithm == "peaks" or algorithm == "colour":
        if peak_detection == "argmax":
            peak_detection = "local_maxima"
            print("  - >> setting peak detection to local maxima")
        # loop over data rows
        for pxint, rgb in zip(PxInts, RGB):
            # calculate baseline
            bline = baseline(pxint, 2)
            # calculate pixel peaks
            if peak_detection == "local_maxima":
                _, max_idx = peaklocalextremas(pxint - bline,
                                               lookahead=win,
                                               delta=threshold *
                                               (pxint - bline).max())
            elif peak_detection == "differential":
                # calculate first derivative
                pxintdt = np.diff(pxint - bline)
                # remove values below zero
                pxintdt[pxintdt <= 0] = 0
                # scale from 0 to 1
                pxintdt = pxintdt / pxintdt.max()
                # get indexes
                max_idx = indexes(pxintdt, thres=threshold, min_dist=win)
            else:
                raise ValueError("unknown peak detection method: "
                                 "{}".format(peak_detection))
            # colour learning step
            if algorithm == "colour":
                if not print_check:
                    print("  + >> colour learning")
                    print_check = True
                # classifiy pixels
                breaker_idxs = []
                for idx in max_idx:
                    y_pred = classify_colour(rgb[idx], dom_colours, labels)
                    if y_pred[0] == target:
                        breaker_idxs.append(idx)
            # peaks only
            else:
                breaker_idxs = max_idx
            PeakTimes.append(Time[breaker_idxs])
        # organize peaks and times
        Xpeaks = []
        Ypeaks = []
        for i, pxtimes in enumerate(PeakTimes):
            for v in pxtimes:
                Xpeaks.append(v)
            for v in np.ones(len(pxtimes)) * Y[i]:
                Ypeaks.append(v)
    # edges case
    if algorithm == "edges":
        Xpeaks = []
        Ypeaks = []
        # loop in time
        for i, t in enumerate(time):
            # cross-shore line
            crx_line = edges[i, :]
            # peaks with robust peak detection
            if peak_detection == "differential" or \
               peak_detection == "local_maxima":
                crx_line = (crx_line - crx_line.min()) / (crx_line.max() -
                                                          crx_line.min())
                if not np.all(crx_line == 0):
                    idx_peak = indexes(crx_line,
                                       thres=1 - threshold,
                                       min_dist=win)
                    # append peaks
                    for peak in idx_peak:
                        if crx_start < crx_dist[peak] < crx_end:
                            Xpeaks.append(t)
                            Ypeaks.append(crx_dist[peak])
            # peaks with simple argmax - works better without colour learning
            else:
                peak = np.argmax(crx_line)
                if crx_dist[peak] > crx_start and crx_dist[peak] < crx_end:
                    Xpeaks.append(t)
                    Ypeaks.append(crx_dist[peak])
    # edges + colour learning case
    if algorithm == "edges_and_colour":
        Ipeaks = []
        Jpeaks = []
        # loop in time
        for i, t in enumerate(time):
            # cross-shore line
            crx_line = edges[i, :]
            if peak_detection == "differential" or \
               peak_detection == "local_maxima":
                crx_line = (crx_line - crx_line.min()) / (crx_line.max() -
                                                          crx_line.min())
                # peaks
                if not np.all(crx_line == 0):
                    idx_peak = indexes(crx_line,
                                       thres=1 - threshold,
                                       min_dist=win)
                    # append peaks
                    for peak in idx_peak:
                        if crx_start < crx_dist[peak] < crx_end:
                            Ipeaks.append(i)
                            Jpeaks.append(peak)
            else:
                peak = np.argmax(crx_line)
                if crx_dist[peak] > crx_start and crx_dist[peak] < crx_end:
                    Ipeaks.append(i)
                    Jpeaks.append(peak)
        # colour learning step
        Xpeaks = []
        Ypeaks = []
        for i, j in zip(Ipeaks, Jpeaks):
            if not print_check:
                print("  + >> colour learning")
                print_check = True
            # classify colour
            y_pred = classify_colour(rgb[i, j, :], dom_colours, labels)
            if y_pred[0] == target:
                Xpeaks.append(time[i])
                Ypeaks.append(crx_dist[j])

    # sort values in time and output
    order = np.argsort(date2num(Xpeaks))
    x = np.array(Xpeaks)[order]
    y = np.array(Ypeaks)[order]

    return ellapsedseconds(x), y
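
# A minimal usage sketch, not part of the original snippet: it assumes the
# function above is exposed as detect_breaking_events() and that a
# training-colour dictionary shaped like the docstring example is provided.
# All inputs here are synthetic stand-ins.
import datetime

import numpy as np

time = np.array([datetime.datetime(2020, 1, 1) + datetime.timedelta(seconds=i)
                 for i in range(600)])
crx_dist = np.linspace(0, 120, 400)                # cross-shore grid [m]
rgb = np.random.randint(0, 255, (600, 400, 3))     # stand-in timestack
train_colours = {"labels": [0, 1, 2],
                 "aliases": ["sand", "water", "foam"],
                 "rgb": [[195, 185, 155], [30, 75, 75], [255, 255, 255]],
                 "target": 2}
t_break, x_break = detect_breaking_events(time, crx_dist, rgb,
                                          algorithm="colour",
                                          colours=train_colours)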
Example #29
0
colorspace = colour.models.BT709_COLOURSPACE

colorspace.use_derived_transformation_matrices(True)
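
# descriptive note (added): the two attributes read below hold the forward and
# inverse matrices between linear BT.709 RGB and CIE XYZ; because derived
# transformation matrices are enabled above, they are computed from the
# colourspace primaries and whitepoint rather than taken from the rounded
# values in the specification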


RGB_to_XYZ_m = colorspace.RGB_to_XYZ_matrix
XYZ_to_RGB_m = colorspace.XYZ_to_RGB_matrix

# for performance use a larger interval.  Harder to solve, must raise tol

interval = 40
shape = colour.SpectralShape(380.0, 730.0, interval)

# spd via Meng-ish Burns-ish recovery
target_XYZ = colour.sRGB_to_XYZ([1, 0, 0])
spd = XYZ_to_spectral(target_XYZ, cmfs=CMFS.align(shape),
                      illuminant=illuminant_SPD, interval=interval,
                      tolerance=1e-8, max_refl=1.00)
print("red SPD is", spd.values)

target_XYZ = colour.sRGB_to_XYZ([0, 1, 0])
spd = XYZ_to_spectral(target_XYZ, cmfs=CMFS.align(shape),
                      illuminant=illuminant_SPD, interval=interval,
                      tolerance=1e-8, max_refl=1.00)
print("green SPD is", spd.values)


target_XYZ = colour.sRGB_to_XYZ([0, 0, 1])
spd = XYZ_to_spectral(target_XYZ, cmfs=CMFS.align(shape),
                      illuminant=illuminant_SPD, interval=interval,
                      tolerance=1e-8, max_refl=1.00)
print("blue SPD is", spd.values)

CMFS_ = CMFS.align(spd.shape).values.transpose() # align and transpose the CMFS
illuminant_SPD_ = illuminant_SPD.align(spd.shape).values # align illuminant vector
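
# A hedged verification sketch, not part of the original snippet: with the
# CMFS and illuminant now aligned to the recovered SPD, the spectrum can be
# integrated back to XYZ and compared with the target. spectral_to_XYZ_check
# is a hypothetical helper; it reuses the CMFS_ and illuminant_SPD_ arrays
# prepared above.
import numpy as np


def spectral_to_XYZ_check(spd_values, cmfs_values, illuminant_values):
    """Riemann-sum integration of reflectance * illuminant * CMFS."""
    k = 1.0 / np.sum(cmfs_values[1, :] * illuminant_values)
    return k * np.dot(cmfs_values, spd_values * illuminant_values)


# round-trip the last recovered SPD (blue) against its target
print("recovered XYZ:", spectral_to_XYZ_check(spd.values, CMFS_,
                                              illuminant_SPD_))
print("target XYZ   :", target_XYZ)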
Example #30
0
    def _pick_color_mode(self, tdw, x, y, mode):
        # init shared variables between normal and CIECAM modes
        doc = self.doc
        tdw = self.doc.tdw
        app = self.doc.app
        p = self.app.preferences
        elapsed = None
        t, x, y, pressure = doc.get_last_event_info(tdw)
        # TODO configure static pressure as a slider?
        # This would allow non-pressure devices to use
        # pressures besides 50% for brushes too
        if pressure is None:
            pressure = 0.5
        if p['color.pick_blend_use_pressure'] is False:
            pressure = 0.0
        if t <= doc.last_colorpick_time:
            t = (time.time() * 1000)

        # limit rate for performance
        min_wait = p['color.adjuster_min_wait']
        if doc.last_colorpick_time:
            elapsed = t - doc.last_colorpick_time
            if elapsed < min_wait:
                return

        cm = app.brush_color_manager
        prefs = cm.get_prefs()
        lightsource = prefs['color.dimension_lightsource']

        if lightsource == "custom_XYZ":
            lightsource = prefs['color.dimension_lightsource_XYZ']
        else:
            lightsource = colour.ILLUMINANTS['cie_2_1931'][lightsource]

        doc.last_colorpick_time = t
        pickcolor = tdw.pick_color(x, y, size=int(3/tdw.renderer.scale))
        brushcolor = self._get_app_brush_color()
        brushcolor_rgb = brushcolor.get_rgb()
        pickcolor_rgb = pickcolor.get_rgb()

        # grab the color with sRGB context
        pickcolor_cie = lib.color.CIECAMColor(color=pickcolor,
                                              cieaxes=brushcolor.cieaxes)
        # if brush and pick colors are the same, nothing to do
        if brushcolor_rgb != pickcolor_rgb:
            pickcolor_hsv = pickcolor.get_hsv()
            brushcolor_hsv = brushcolor.get_hsv()
            cm = self.doc.app.brush_color_manager

            # normal pick mode
            if mode == "PickAll":
                cm.set_color(pickcolor)
            elif mode == "PickIlluminant":
                ill = colour.sRGB_to_XYZ(np.array(pickcolor_rgb))*100
                if ill[1] <= 0:
                    return
                fac = 1/ill[1]*100

                p['color.dimension_lightsource'] = "custom_XYZ"
                p['color.dimension_lightsource_XYZ'] = (
                    ill[0]*fac,
                    ill[1]*fac,
                    ill[2]*fac
                )
                # update pref ui
                app.preferences_window.update_ui()

                # reset the brush color with the same color
                # under the new illuminant
                brushcolor.lightsource = ill * fac

                app.brush.set_color_hsv(brushcolor.get_hsv())
                app.brush.set_ciecam_color(brushcolor)
            elif mode == "PickandBlend":
                alloc = self.doc.tdw.get_allocation()
                size = max(int(p['color.preview_size'] * .01 * alloc.height),
                           self.MIN_PREVIEW_SIZE)
                if self.starting_color is None:
                    self.starting_color = pickcolor
                dist = np.linalg.norm(
                    np.array(self.starting_position) - np.array((x, y)))
                dist = np.clip(dist / size + pressure, 0, 1)
                if p['color.pick_blend_reverse'] is True:
                    dist = 1 - dist
                self.blending_ratio = dist
                brushcolor_pig = lib.color.PigmentColor(color=brushcolor)
                pickcolor_pig = lib.color.PigmentColor(
                    color=self.starting_color)
                self.blending_color = brushcolor_pig.mix(pickcolor_pig, dist)
            elif mode == "PickTarget":
                doc.last_color_target = pickcolor
            else:
                # pick V, S, H independently
                # using CIECAM
                brushcolornew = brushcolor
                if mode == "PickHue":
                    brushcolornew.h = pickcolor_cie.h
                elif mode == "PickLuma":
                    brushcolornew.v = pickcolor_cie.v
                elif mode == "PickChroma":
                    brushcolornew.s = pickcolor_cie.s

                app.brush.set_color_hsv(brushcolornew.get_hsv())
                app.brush.set_ciecam_color(brushcolornew)

        return None
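
# A small hedged sketch, not part of the original class: the "PickIlluminant"
# branch above scales the picked colour's XYZ so that its Y component equals
# 100 before storing it as a custom white point. The standalone function
# below, normalise_illuminant_xyz (a hypothetical name), reproduces that
# normalisation with numpy and colour only.
import numpy as np
import colour


def normalise_illuminant_xyz(rgb):
    """Return the picked colour's XYZ scaled so Y == 100, or None if Y <= 0."""
    ill = colour.sRGB_to_XYZ(np.asarray(rgb)) * 100
    if ill[1] <= 0:
        return None
    return ill * (100.0 / ill[1])


# e.g. a warm greyish pick becomes a custom illuminant with Y == 100
print(normalise_illuminant_xyz([0.8, 0.7, 0.6]))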
Example #31
0
noise=np.random.normal(0,meanlc*0.01,np.shape(dRGB))
dRGB+=noise

g=np.array(g)

print("start optimization...")
I_init=np.ones((Nx,Ny))
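# descriptive note (added, hedged): mfista_grouplasso presumably reconstructs
# the image from the noisy observations dRGB using a group-lasso regularised
# MFISTA solver; lambda_gl sets the regularisation strength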
y = mfista_grouplasso(I_init, dRGB, g, lambda_gl = 1.e-1)

plt.imshow(y)
plt.savefig("carGroupLasso.png")

ti = timg.reshape(np.shape(timg)[0]*np.shape(timg)[1], 3)
# convert from XYZ to xy
XYZ = colour.sRGB_to_XYZ(ti)
xy = colour.XYZ_to_xy(XYZ)

#CIE_1931_chromaticity_diagram_colours_plot(bounding_box=(-0.1, 0.9, -0.1, 0.9), standalone=False)
#plot_chromaticity_diagram_CIE1931(bounding_box=(0.15, 0.65, 0.15, 0.65), standalone=False)
# plot onto the sRGB region
#plt.plot(xy[:,0], xy[:,1], 'o', markersize=2, label="sRGB",color="gray",alpha=0.2)
fig=plt.figure(figsize=(10,5))
ax=fig.add_subplot(121,aspect=1.0)
ax.scatter(xy[:,0], xy[:,1],facecolors=ti,alpha=1,s=2)
ax.set_xlim(0.15,0.62)
ax.set_ylim(0.2,0.62)
ax.set_title("Original")
#plt.legend() 

yi=y.reshape(np.shape(y)[0]*np.shape(y)[1],3)