def haishokuColor(img):
    """Print the Haishoku palette of *img* and display it in a window."""
    # dominant = Haishoku.getDominant(img)
    # Haishoku.showDominant(img)
    palette = Haishoku.getPalette(img)
    # split the (percentage, rgb) pairs into parallel lists
    x = [percentage for percentage, _rgb in palette]
    y = [rgb for _percentage, rgb in palette]
    print('调色盘色彩比例:\n', palette)
    Haishoku.showPalette(img)
def run_bot(reddit):
    """Poll the bot's Reddit mentions and reply with an Imgur-hosted palette image.

    Args:
        reddit: an authenticated praw.Reddit instance whose inbox is scanned.

    Side effects: uploads to Imgur, replies to comments, marks them read, and
    deletes the locally generated palette image.
    """
    print("Obtaining Submissions...")
    for comment in reddit.inbox.mentions(limit=None):
        footer = "\n\n ^I'm a bot. | Creator: [u/JoshuaScript](https://www.reddit.com/u/JoshuaScript). | [Source Code](https://github.com/Joshuascript/Palette_Bot)"
        # checks if the mention is unread, which is only the case if it hasn't been replied to
        if not comment.new:
            continue
        try:
            # calls the showPalette (which creates and saves the image) method on the image link
            Haishoku.showPalette(comment.submission.url)
            CLIENT_ID = config.imgur_id
            PATH = haishoku.haillow.image_name
            im = pyimgur.Imgur(CLIENT_ID)
            uploaded_image = im.upload_image(
                PATH,
                title=f"Color Palette for Reddit Post: {comment.submission.title} ({comment.submission.url})"
            )
            print(uploaded_image.title)
            print(uploaded_image.link)
            print(uploaded_image.size)
            print(uploaded_image.type)
            comment.reply(
                f"\n\n Here is the color palette of this image visualized: {uploaded_image.link}. The color sizes are proportionate to their dominance in the image. {footer}"
            )
            # Marks the comment as read so it won't be replied to again
            comment.mark_read()
            print(
                f"Replied to {comment.submission.title} ({comment.submission.url})"
            )
            # deletes generated palette image from local storage
            os.remove(PATH)
        except OSError as e:
            comment.reply(
                f"This is most likely a non-image post. Try mentioning me in a new comment and I may be able to get this post's palette if it is an image.{footer}"
            )
            print(
                f"Comment was most likely on a non-image post ({e}) \n\n Title: {comment.submission.title}, Link: {comment.submission.url}"
            )
            comment.mark_read()
        except TypeError as e:
            comment.reply(
                f"I was unable to get the palette of this image, most likely because it's a gif, which is currently unsupported.{footer}"
            )
            print(
                f"Image was likely a gif ({e}) \n\n Title: {comment.submission.title}, Link: {comment.submission.url}"
            )
            comment.mark_read()
        except Exception as e:
            # fix: the original `e.__class__.__name__ + e` concatenated str with an
            # Exception instance, raising a TypeError inside the catch-all handler
            print(f"{e.__class__.__name__}: {e}")
def update_palette_by_image(self, field_name='image'):
    """Fill ``self.color0`` .. ``self.color7`` with hex strings taken from the
    Haishoku palette of the image stored on ``field_name``.

    Returns None; silently does nothing when the field is empty, the file is
    missing, or the palette runs out of entries.
    """
    def _to_hex(rate, rgb):
        # rate (the coverage percentage) is unused; only the RGB triple matters
        return '#' + ''.join(format(channel, '02X') for channel in rgb)

    image = getattr(self, field_name, None)
    if not image:
        return
    from haishoku.haishoku import Haishoku
    try:
        palette = Haishoku.getPalette(image.path)
    except FileNotFoundError:
        return
    # palette entries arrive most-dominant first; assign up to eight of them
    for index, entry in enumerate(palette[:8]):
        setattr(self, 'color%d' % index, _to_hex(*entry))
def _get_haishoku_palette(cls, image_path):
    """Return the Haishoku palette of *image_path* as hex color strings."""
    from haishoku.haishoku import Haishoku  # pylint: disable=import-error
    raw_palette = Haishoku.getPalette(image_path)
    return [color_hex_from_list(rgb) for _pct, rgb in raw_palette]
def _get_haishoku_palette(cls, image_path) -> List[HexColor]:
    """Return the Haishoku palette of *image_path* as hex color strings."""
    from haishoku.haishoku import Haishoku  # pylint: disable=import-error,useless-suppression
    raw_palette = Haishoku.getPalette(image_path)
    return [color_hex_from_list(rgb) for _pct, rgb in raw_palette]
def generate(img_path, palette_size, freq_min=None, debug=False, show_palette=False):
    """returns a list of RGB tuples representing a palette of palette_size
    numbers of color, by maximum use

    Args:
        img_path: path (str or Path) to the source image.
        palette_size: maximum number of colors to keep.
        freq_min: optional frequency threshold (0..1); colors below it are dropped.
        debug: print diagnostic info.
        show_palette: display the palette via Haishoku.
    """
    try:
        full_palette = Haishoku.getPalette(str(img_path))
        map_palette = full_palette[:palette_size]
        if show_palette:
            Haishoku.showPalette(str(img_path))
    except FileNotFoundError:
        print(
            f"File {img_path} not found, be sure this includes the full or relative path - the folders containing the file, not just the file's name."
        )
        exit()
    if debug:
        print(f' ► Full Palette (Freq,RGB) = {full_palette}')
        print(f' ► Map Reduced Palette (Freq,RGB) = {map_palette}')
        print(f' ► Autopalette threshold = {freq_min}')
    if freq_min:
        output = []
        if map_palette[0][0] < freq_min:
            # return dominant color if no colors exceed threshold
            output = [map_palette[0][1]]
            print(' =Sample Warning: ')
            print(
                f' No color exceeds in {round(freq_min*100,1)}% sample tile. Color {output[0]} represents highest porportion of sample ({map_palette[0][0]*100}%) and will be used as result.'
            )
        else:
            # filter colors below freq_min
            for freq, rgb in map_palette:
                if freq >= freq_min:
                    output.append(rgb)
        if debug:
            print(f' ► Sample tile palette length: {len(output)}')
    else:
        # fix: `dict(map_palette).values()` keyed on frequency, silently dropping
        # any colors that happened to share a frequency value, and returned a
        # dict_values view rather than the list the docstring promises
        output = [rgb for _freq, rgb in map_palette]
    return output
def display_haishoku(art_image, art_id):
    """Persist the color palette of an artwork image to the database.

    Loads the image from static/images/, extracts its Haishoku palette, and
    stores one Palette row per color. Returns the last Palette row created.
    NOTE(review): committing inside the loop issues one transaction per color;
    a single commit after the loop may be intended -- confirm against callers.
    """
    file = f"static/images/{art_image}"
    hai = Haishoku.loadHaishoku(file)
    palette = Haishoku.getPalette(file)
    # palette has two pieces of data, percent used in the color and RGB code
    for pal in palette:
        load_color_palette(pal[1])
        color = Palette(c_percent=pal[0], c_palette=pal[1], artwork_id=art_id)
        db.session.add(color)
        db.session.commit()
    return color
def extract_user_palette(filename):
    """Display user image and color palette.

    Args:
        filename: name of the uploaded file inside static/user_images/.

    Returns the rendered user-palette template with the extracted RGB tuples.
    """
    def new_image(mode, size, color):
        return Image.new(mode, size, color)

    # fix: the f-string never interpolated `filename`, so every request
    # resolved to the same (nonexistent) path
    file = f"static/user_images/{filename}"
    hai = Haishoku.loadHaishoku(file)
    palette = Haishoku.getPalette(file)
    u_color_pal = []
    for item in palette:
        c_pal = item[1]  # (r, g, b) tuple; item[0] is the coverage percentage
        pal = new_image('RGB', (100, 100), c_pal)
        u_color_pal.append(c_pal)
    return render_template("user-palette.html", filename=filename, u_color_pal=u_color_pal)
def main():
    """Smoke-test the Haishoku API surface on a local image."""
    src = "/Users/wujianming/Desktop/WechatIMG18547.jpeg"
    # src = "http://wx2.sinaimg.cn/large/89243dfbly1ffoekfainzj20dw05k0u7.jpg"
    palette = Haishoku.getPalette(src)    # getPalette api
    dominant = Haishoku.getDominant(src)  # getDominant api
    Haishoku.showPalette(src)             # showPalette api
    # Haishoku.showDominant(src)
    hai = Haishoku.loadHaishoku(src)      # Haishoku object
    print(hai.palette)
    print(hai.dominant)
def get_domain_color(image):
    """Classify the image's dominant color as "red" or "green" by Euclidean
    distance to pure red / pure green; returns: (R, G, B) tuple's label."""
    r, g, b = Haishoku.getDominant(image)
    dist_to_red = math.sqrt((r - 255) ** 2 + g ** 2 + b ** 2)
    dist_to_green = math.sqrt(r ** 2 + (g - 255) ** 2 + b ** 2)
    return "green" if dist_to_green < dist_to_red else "red"
def saveDominant(image_path, path, idx):
    """Save an image of eight identical swatches of the dominant color."""
    dominant = Haishoku.getDominant(image_path)
    # eight references to the same 50x20 swatch, joined into one strip
    dominant_box = new_image('RGB', (50, 20), dominant)
    images = [dominant_box] * 8
    joint_image(images, path, idx, 'Dominant')
def savePalette(image_path, path, idx):
    """Save the image's palette as a strip of color boxes whose widths are
    proportional to each color's share of the image."""
    palette = Haishoku.getPalette(image_path)
    images = [
        new_image('RGB', (int(percentage * 400), 20), rgb)
        for percentage, rgb in palette
    ]
    joint_image(images, path, idx, 'Palette')
def get_rgb(path, file, idx):
    """Extract the color information of an image.

    Saves the dominant-color and palette images under ``path + 'results/colors'``
    and returns ``(dominant, df_palette)`` where ``dominant`` is the first
    palette entry ``(percentage, (R, G, B))`` and ``df_palette`` is a DataFrame
    with one row per palette color (levels A..H).
    """
    # create the output folder if missing; exist_ok avoids the check-then-create race
    os.makedirs(path + 'results/colors', exist_ok=True)
    # palette: [(percentage, (R, G, B)), ...], most dominant first
    palette = Haishoku.getPalette(file)
    dominant = palette[0]
    # save the dominant-color and palette strip images
    Haishoku_plus.saveDominant(file, path, idx)
    Haishoku_plus.savePalette(file, path, idx)
    # one record per color; Haishoku yields at most 8 entries (levels A..H)
    records = []
    for i, (percentage, rgb) in enumerate(palette):
        records.append({
            'pic': idx,
            'level': 'ABCDEFGH'[i],
            'percentage': percentage,
            'r': rgb[0],
            'g': rgb[1],
            'b': rgb[2],
            'hex': hexencode(rgb),
        })
    df_palette = pd.DataFrame(records)
    return dominant, df_palette
def read_gif(im):
    """Walk every frame of an open GIF, decode each frame's dominant color via
    get_code, and return the concatenated codes as a string."""
    response = ""
    path = "/tmp/test.jpg"
    try:
        while True:
            frame = im.convert("RGB")
            frame.save(path)
            hai = Haishoku.loadHaishoku(path)
            response += str(get_code(hai.dominant))
            # print(code); im.show(); time.sleep(1)
            im.seek(im.tell() + 1)
    except EOFError:
        # seek past the last frame raises EOFError -- normal termination
        pass
    return response
def getPaletteOfImg(imgPath, contrast_val=1.0, color_val=1.0):
    """Return the image's Haishoku palette flattened into a list of channel
    values [r0, g0, b0, r1, g1, b1, ...], printing diagnostics along the way."""
    img = Image.open(imgPath)
    pal = ImagePalette.ImagePalette()
    print(pal)
    contrast = contrast_img(img, contrast_val, color_val)
    print('contrast: {}'.format(contrast))
    pal = img.getpalette()
    print(pal)
    palette = Haishoku.getPalette(imgPath)
    # flatten each (percentage, (r, g, b)) entry into bare channel values
    newPalette = [channel for _pct, rgb in palette for channel in rgb]
    print(' Palette: {}'.format(newPalette))
    return newPalette
def palette_op(self, palette_size, sample_factor=4):
    """Generates an shmops.fill_operation obj

    Slices the map image into tiles, snaps each tile's dominant color to the
    nearest color of a working palette, and records one fill per tile.
    """
    print("||||| Initiating Palette Fill Operation |||||")
    fill_op = shmops.Fill_Operation(id='4321')
    tiles = self.slice_to_tiles(show_info="Image to Map")
    # get palette to be used in the process
    # fix: the original assigned the result to a local named `palette`, which
    # shadowed the `palette` helper module and broke the later
    # palette.nearest_color / palette.rgb_to_hex calls
    if sample_factor == 1:
        op_palette = palette.generate(self.path, palette_size, debug=self.debug)
    else:
        # get combined palette by slicing map into sample tiles
        sampling_map_size = self.get_map_size(sample_factor)
        op_palette = self.get_combined_palette(palette_size, sampling_map_size)
    temp_path = Path('temp_img.png')
    x, y = 0, 0
    for row in progress_bar.progress_bar(tiles, "Processing Map: ", " Row: ", 36):
        for tile in row:
            # if self.debug:
            #     temp_path = Path('./test_tiles/' + f'{x}x{y}y_temp_img.png')
            tile.save(temp_path, "PNG")
            dominant = Haishoku.getDominant(str(temp_path))
            tile_color = palette.nearest_color(op_palette, dominant)
            # if self.debug: print(f'Tile Address: {x}, {y} | Tile Color: {tile_color} | Saved to: {temp_path}')
            fill_op.add_fill(x, y, palette.rgb_to_hex(*tile_color))
            x += 1
        y += 1
        x = 0
    if not self.debug:
        temp_path.unlink()
    return fill_op
def main():
    """Exercise every public Haishoku API against a local demo image."""
    image = "demo_01.png"
    # getPalette api: [(percentage, (r, g, b)), ...]
    print(Haishoku.getPalette(image))
    # getDominant api: single (r, g, b) tuple
    print(Haishoku.getDominant(image))
    # visual checks
    Haishoku.showPalette(image)
    Haishoku.showDominant(image)
    # Haishoku object exposes the same data as attributes
    hai = Haishoku.loadHaishoku(image)
    print(hai.image)
    print(hai.palette)
    print(hai.dominant)
def main():
    """Exercise the Haishoku APIs against an image fetched over HTTP."""
    url = "https://img3.doubanio.com/lpic/s27028282.jpg"
    r = requests.get(url)
    # Haishoku accepts a file-like object, so wrap the bytes in BytesIO
    path = BytesIO(r.content)
    # getPalette api
    print(Haishoku.getPalette(path))
    # getDominant api
    print(Haishoku.getDominant(path))
    # visual checks
    Haishoku.showPalette(path)
    Haishoku.showDominant(path)
    # Haishoku object
    hai = Haishoku.loadHaishoku(path)
    print(hai.palette)
    print(hai.dominant)
def buildingDatabase(jpg_dir, wav_path):
    """Build per-frame feature lists for a video's extracted frames.

    Reads up to 480 frames from *jpg_dir* (files named like ``frameN.jpg``) and,
    per frame, computes: dominant-color hue (H) and value (V), a d-hash texture
    signature, and a KLT optical-flow motion magnitude. Also extracts a voice
    descriptor from *wav_path*.

    Returns:
        (Hlist, Vlist, MotionList, dhashList, voiceValue)
    """
    # hyper-parameters
    jpg_dir = jpg_dir
    image_files = os.listdir(jpg_dir)
    # sort numerically by the N in "frameN.jpg"
    image_files.sort(key=lambda x: int(x.split('.')[0][5:]))
    # texture detection
    dhashList = []
    # dominant color
    Hlist = []
    # Slist=[]
    Vlist = []
    # motion; optical flow
    # corner-detection parameters
    feature_params = dict(maxCorners=100, qualityLevel=0.1, minDistance=7,
                          blockSize=7)
    # KLT optical-flow parameters
    lk_params = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.02))
    tracks = []
    track_len = 15
    detect_interval = 5
    beforeGrey = None
    MotionList = []
    count = 0
    # for i in range(0,480):
    for jpg_path in image_files[0:480]:
        # jpg_path=jpg_dir+"frame"+str(i)+".jpg"
        jpg_path = jpg_dir + jpg_path
        print(jpg_path)
        # dominant color
        d_color = Haishoku.getDominant(jpg_path)
        #print(count, ':', d_color)
        r = d_color[0]
        g = d_color[1]
        b = d_color[2]
        # convert RGB to HSV space (manual conversion)
        cmax = max(max(r, g), b)
        cmin = min(min(r, g), b)
        delta = cmax - cmin
        V = cmax
        if cmax == 0:
            S = 0
        else:
            S = delta / cmax
        if delta == 0:
            H = 0
        elif cmax == r:
            H = ((g - b) / delta) * 60
        elif cmax == g:
            H = 120 + ((b - r) / delta) * 60
        else:
            H = 240 + ((r - g) / delta) * 60
        if H < 0:
            H = H + 360
        Hlist.append(H)
        # Slist.append(S)
        Vlist.append(V)
        img = cv2.imread(jpg_path)
        # texture feature
        dhash = d_hash(img)
        dhashList.append(dhash)
        # motion
        curGrey = next = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img0, img1 = beforeGrey, curGrey
        p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
        if len(tracks) > 0:
            img0, img1 = beforeGrey, curGrey
            p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
            # feed the previous frame's corners and the current frame to get the
            # corners' positions in the current frame
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None,
                                                   **lk_params)
            sum = 0.0
            for i in range(0, len(p0)):
                dist = math.sqrt((p0[i][0][0] - p1[i][0][0])**2 +
                                 (p0[i][0][1] - p1[i][0][1])**2)
                sum = sum + dist
            avg = sum / len(p0)
            MotionList.append(avg)
            # backward check: track the current-frame corners back onto the
            # previous frame to find where they came from
            p0r, _, _ = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None,
                                                 **lk_params)
            # positional deviation between back-tracked corners and the actual
            # previous-frame corners
            d = abs(p0 - p0r).reshape(-1, 2).max(-1)
            # deviation >= 1 pixel means the track is considered unreliable
            good = d < 1
            new_tracks = []
            for i, (tr, (x, y), flag) in enumerate(zip(tracks,
                                                       p1.reshape(-1, 2),
                                                       good)):
                # keep only reliable tracking points
                if not flag:
                    continue
                # extend the track with the new corner position
                tr.append((x, y))
                # cap each track at track_len points, dropping the oldest
                if len(tr) > track_len:
                    del tr[0]
                # keep in the new list
                new_tracks.append(tr)
            # replace the feature tracks
            tracks = new_tracks
        else:
            MotionList.append(0.0)
        # re-detect feature corners every detect_interval frames
        if count % detect_interval == 0:
            mask = np.zeros_like(curGrey)
            mask[:] = 255
            p = cv2.goodFeaturesToTrack(curGrey, mask=mask, **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    tracks.append([(x, y)])
        beforeGrey = curGrey
        count += 1
    # dominant color of the whole video
    print('H len:', len(Hlist))
    print('Hlist:', Hlist)
    # print('S len:',len(Slist))
    # print('Slist:',Slist)
    print('V len:', len(Vlist))
    print('Vlist:', Vlist)
    # texture
    print('hash len:', len(dhashList))
    print('hash list:', dhashList)
    # Motion
    print('Motion len:', len(MotionList))
    print('MotionList:', MotionList)
    #voice
    voiceValue = get_voice_descriptor(wav_path)
    print('voiceValue:', voiceValue)
    return Hlist, Vlist, MotionList, dhashList, voiceValue
from haishoku.haishoku import Haishoku
from PIL import Image, ImageDraw
import random
from random import randrange
import argparse

# CLI: draw random squares on an 800x800 canvas, tinted with colors from the
# Haishoku palette of a source image.
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', required=True, help="url to the image")
parser.add_argument('-a', '--amount', required=True, help="the amount of squares")
parser.add_argument('-r', '--radius', required=True, help="the max radius")
args = parser.parse_args()

# palette entries are (percentage, (r, g, b)) pairs
palette = Haishoku.getPalette(args.url)

w, h = 800, 800
# background: a random palette color
image = Image.new("RGB", (w, h), random.choice(palette)[1])
layer = ImageDraw.Draw(image)
for i in range(0, int(args.amount)):
    x = randrange(w)
    y = randrange(h)
    r, g, b = random.choice(palette)[1]
    divider = 800 - randrange(0, 600)
    if x >= divider:
        # brighten the red channel right of the divider
        # NOTE(review): this chunk appears truncated here -- the rest of the
        # loop body (drawing the square) is not visible in this view
        r = r + randrange(100, 200)
# Collect every author name from the per-date data `d` and remember the first
# avatar URL seen per author. NOTE(review): `d`, `face`, `color`, `db` and
# `field` are presumably defined earlier in this script -- not visible here.
authors = []
for each_date in d:
    for each_author in d[each_date]:
        authors.append(each_author[0])
        if each_author[0] not in face:
            face[each_author[0]] = each_author[2]
# persist the avatar map as an importable python literal
with open('./get_data/face.py', 'w', encoding="utf-8-sig") as f:
    f.writelines('face = ' + str(face))
# derive one representative color per author from their avatar
for each_author in face:
    if each_author in color:
        continue
    if face[each_author][-3:] == 'gif' or each_author == '开眼视频App':
        # Haishoku cannot handle gifs; fall back to black
        color[each_author] = '#000000'
    else:
        color_list = Haishoku.getPalette(face[each_author])
        # sort by brightness (r+g+b) and take the median color
        color_list = sorted(color_list,
                            key=lambda x: x[1][0] + x[1][1] + x[1][2])
        color[each_author] = 'rgb' + \
            str(color_list[int(len(color_list)/2)][1])
# persist the color map as an importable python literal
with open('./get_data/color.py', 'w', encoding="utf-8-sig") as f:
    f.writelines('color = ' + str(color))
# find the minimum follower count across all collected authors
min_fans = 99999999
for each_author in authors:
    c_fans = db['author'].find_one({'name': each_author}, {field: True})[field]
    if c_fans <= min_fans:
        min_fans = c_fans
print(min_fans)
def music():
    """Render a music share-card: a blurred cover banner with knocked-out text
    (title, artist, lyrics) over it, plus a square cover thumbnail.

    All layout metrics are design pixels multiplied by 2 for a @2x canvas.
    The card is shown on screen; nothing is returned.
    """
    w = 490*2
    h = 740*2
    banner_w = 490*2
    banner_h = 235*2
    cover_w = 140*2
    cover_h = 140*2
    cover_top = 120*2
    cover_left = 100*2
    block_w = 32*2
    block_h = 12*2
    block_left = 97*2
    block_top = 160*2 + banner_h
    max_content_w = 300*2
    max_title_w = 190*2
    title_left = cover_w + cover_left + 10 *2
    title = '成都'
    title_font = ImageFont.truetype('font/zh/YueSong.ttf',28*2)
    # width/height of one CJK glyph, used as the wrapping unit
    single_title_w,single_title_h= title_font.getsize("已")
    titles = list(title)
    # greedy character-by-character wrap of the title to max_title_w
    title_formated = ''
    temp = ''
    for word in titles:
        temp += word
        temp_w,temp_h = title_font.getsize(temp)
        title_formated += word
        if temp_w > max_title_w + single_title_w:
            title_formated += '\n'
            temp = ''
    tlines = len(title_formated.split('\n'))
    title_h = tlines * single_title_h + (tlines -1) * 28*2
    title_top = banner_h - title_h - 10 *2
    max_author_w = 190*2
    author_left = cover_w + cover_left+ 10 *2
    author_top = banner_h + 10 *2
    author = '赵雷'
    author_font = ImageFont.truetype('font/zh/YueSong.ttf',14*2)
    single_author_w,single_author_h = author_font.getsize("已")
    authors = list(author)
    # same greedy wrap for the artist name
    author_formated = ''
    temp = ''
    for word in authors:
        temp += word
        temp_w,temp_h = author_font.getsize(temp)
        author_formated += word
        if temp_w > max_author_w + single_author_w:
            author_formated += '\n'
            temp = ''
    alines = len(author_formated.split('\n'))
    author_h = alines * single_author_h + (alines -1) * 14*2
    content_left = 95*2
    content_top = banner_h + 150*2
    content = 'Just let time go on\n让我 掉下眼泪的\n不止昨夜的酒\n让我依依不舍的\n不止你的温柔\n余路还要走多久\n你攥着我的手\n让我感到为难的\n是挣扎的自由'
    content_formated = ''
    content_font = ImageFont.truetype('font/zh/YueSong.ttf',18*2)
    single_content_w,single_content_h = content_font.getsize("已")
    # wrap each lyric line independently, preserving the explicit newlines
    lines = content.split('\n')
    for line in lines:
        contents = list(line)
        line_formated = ''
        temp = ''
        for word in contents:
            temp += word
            temp_w,temp_h = content_font.getsize(temp)
            line_formated += word
            if temp_w > max_content_w + single_content_w:
                line_formated += '\n'
                temp = ''
        if temp != '':
            line_formated += '\n'
        content_formated += line_formated
    print(content_formated)
    clines = len(content_formated.split('\n'))
    content_h = clines * single_author_h + (clines -1) * 14*2
    # grow the canvas if the lyrics overflow the default height
    h = max(h,content_top + content_h + 100*2)
    base = Image.new('RGBA',(w,h),(255,255,255,255))
    url = "https://y.gtimg.cn/music/photo_new/T002R300x300M000001qHmKU29WX7K.jpg"
    file = BytesIO(requests.get(url).content)
    photo = Image.open(file).convert('RGBA')
    (pw, ph) = photo.size
    r,g,b = Haishoku.getDominant(file)
    # center-crop the photo to the canvas aspect ratio for the banner
    if pw/ph>w/h:
        bbox = ((pw-ph*w/h)/2,0,(pw+ph*w/h)/2,ph)
    else:
        bbox = (0,(ph-pw*h/w)/2,pw,(ph+pw*h/w)/2)
    # draw the text into an L-mode mask (0 = text) used as the knockout alpha
    txt = Image.new('L', (w,h), 255)
    draw = ImageDraw.Draw(txt)
    alpha = Image.new('L', (w,h), 0)
    draw.multiline_text((title_left,title_top), title_formated, font=title_font, fill=0, align='left',spacing=15*2)
    draw.multiline_text((author_left,author_top), author_formated, font=author_font, fill=0, align='left',spacing=15*2)
    draw.multiline_text((content_left,content_top), content_formated, font=content_font, fill=0, align='left',spacing=12*2)
    alpha.paste(txt, (0, 0))
    # blurred, white-washed cover as the background; text shows through
    banner_cover = photo.crop(bbox)
    banner_cover = banner_cover.resize((w,h),Image.ANTIALIAS)
    banner_blur = banner_cover.filter(ImageFilter.GaussianBlur(80))
    banner_wrap = Image.new('RGBA',(w,h),(255, 255, 255, 193))
    banner_mask = Image.alpha_composite(banner_blur,banner_wrap)
    banner_mask.putalpha(alpha)
    banner_blur.paste(banner_mask,box=(0,0),mask=banner_mask)
    base.paste(banner_blur,box=(0,0))
    # center-crop the photo again, this time to the thumbnail aspect ratio
    if pw/ph>cover_w/cover_h:
        box = ((pw-ph*cover_w/cover_h)/2,0,(pw+ph*cover_w/cover_h)/2,ph)
    else:
        box = (0,(ph-pw*cover_h/cover_w)/2,pw,(ph+pw*cover_h/cover_w)/2)
    cover = photo.crop(box)
    cover = cover.resize((cover_w,cover_h),Image.ANTIALIAS)
    base.paste(cover,box=(cover_left,cover_top))
    base.show()
def book(request):
    """Render a book share-card (cover on a dark banner, then title / author /
    description wrapped below) and return it as a JPEG HttpResponse.

    All layout metrics are design pixels multiplied by 2 for a @2x canvas.
    """
    w = 490*2
    h = 740*2
    banner_w = 490*2
    banner_h = 265*2
    cover_w = 135*2
    cover_h = 200*2
    cover_top = 120*2
    cover_left = int((w-cover_w)/2)
    block_w = 32*2
    block_h = 12*2
    block_left = 97*2
    block_top = 160*2 + banner_h
    max_content_w = 310*2
    title_left = 97*2
    title_top = block_top+block_h+20*2
    title = '高窗'
    title_font = ImageFont.truetype('font/zh/YueSong.ttf',28*2)
    # width/height of one CJK glyph, used as the wrapping unit
    single_title_w,single_title_h= title_font.getsize("已")
    # greedy character-by-character wrap of the title to max_content_w
    titles = wrap(title, 1)
    title_formated = ''
    temp = ''
    for word in titles:
        temp += word
        temp_w,temp_h = title_font.getsize(temp)
        title_formated += word
        if temp_w > max_content_w + single_title_w:
            title_formated += '\n'
            temp = ''
    tlines = len(title_formated.split('\n'))
    title_h = tlines * single_title_h + (tlines -1) * 28*2
    division_left = 97*2
    division_top = title_top+single_title_h+12*2
    division = '╱'
    division_font = ImageFont.truetype('font/zh/PingFang.ttf',20*2)
    single_division_w,single_division_h = division_font.getsize("已")
    author_left = 97*2
    author_top = division_top + title_h
    author = '作者:雷蒙德.钱德勒'
    author_font = ImageFont.truetype('font/zh/YueSong.ttf',14*2)
    single_author_w,single_author_h = author_font.getsize("已")
    # same greedy wrap for the author line
    authors = wrap(author, 1)
    author_formated = ''
    temp = ''
    for word in authors:
        temp += word
        temp_w,temp_h = author_font.getsize(temp)
        author_formated += word
        if temp_w > max_content_w + single_author_w:
            author_formated += '\n'
            temp = ''
    alines = len(author_formated.split('\n'))
    author_h = alines * single_author_h + (alines -1) * 14*2
    content_left = 97*2
    content_top = author_top + author_h + 12*2
    content = '故事原型:加州石油大亨爱德华.多赫尼之子被杀案,及蒂波特山油田丑闻'
    content_formated = ''
    content_font = ImageFont.truetype('font/zh/YueSong.ttf',14*2)
    single_content_w,single_content_h = content_font.getsize("已")
    # same greedy wrap for the description
    contents = wrap(content, 1)
    temp = ''
    for word in contents:
        temp += word
        temp_w,temp_h = content_font.getsize(temp)
        content_formated += word
        if temp_w > max_content_w + single_content_w:
            content_formated += '\n'
            temp = ''
    print(content_formated)
    clines = len(content_formated.split('\n'))
    content_h = clines * single_author_h + (clines -1) * 14*2
    # total card height depends on how far the wrapped description extends
    h = content_top + content_h + 150*2
    base = Image.new('RGBA',(w,h),(255,255,255,255))
    draw = ImageDraw.Draw(base)
    # dark banner behind the cover
    draw.rectangle([(0,0),(banner_w,banner_h)],(26, 26, 26, 255))
    url = "https://img3.doubanio.com/lpic/s27028282.jpg"
    file = BytesIO(requests.get(url).content)
    photo = Image.open(file).convert('RGBA')
    (pw, ph) = photo.size
    # center-crop the photo to the cover aspect ratio
    if pw/ph>cover_w/cover_h:
        box = ((pw-ph*cover_w/cover_h)/2,0,(pw+ph*cover_w/cover_h)/2,ph)
    else:
        # NOTE(review): `cover_w*cover_h/cover_w` looks wrong -- the symmetric
        # music-card code uses `pw*cover_h/cover_w` here; confirm intended crop
        box = (0,(ph-cover_w*cover_h/cover_w)/2,pw,(ph+pw*cover_h/cover_w)/2)
    photo = photo.crop(box)
    photo = photo.resize((cover_w,cover_h),Image.ANTIALIAS)
    base.paste(photo,box=(cover_left,cover_top))
    # small accent block tinted with the cover's dominant color
    dominant = Haishoku.getDominant(file)
    draw.rectangle([(block_left,block_top),(block_left+block_w,block_top+block_h)],dominant)
    draw.multiline_text((title_left,title_top), title_formated, font=title_font, fill=(70,70,70), align='left',spacing=15*2)
    draw.multiline_text((division_left,division_top), division, font=division_font, fill=(70,70,70), align='left',spacing=0)
    draw.multiline_text((author_left,author_top), author_formated, font=author_font, fill=(90,90,90), align='left',spacing=15*2)
    draw.multiline_text((content_left,content_top), content_formated, font=content_font, fill=(90,90,90), align='left',spacing=12*2)
    print(dominant)
    #base.show()
    # get BytesIO
    msstream = BytesIO()
    # save image data to output stream
    base.save(msstream,"jpeg")
    # release memory
    base.close()
    return HttpResponse(msstream.getvalue(),content_type="image/jpeg")
def _get_haishoku_palette(cls, image_path):
    """Return the Haishoku palette of *image_path* as hex color strings."""
    from haishoku.haishoku import Haishoku  # pylint: disable=import-error
    raw_palette = Haishoku.getPalette(image_path)
    return [color_hex_from_list(rgb) for _pct, rgb in raw_palette]
import requests
import sys
import json
import os  # want to make a new directory/folder for each set of color palettes

from haishoku.haishoku import Haishoku
from PIL import Image

img_path = "https://images.metmuseum.org/CRDImages/as/web-large/DP122117.jpg"

# returns a Haishoku instance, used to read the file
hai = Haishoku.loadHaishoku(img_path)
# palette entries: (percentage of color, (RGB values))
palette = Haishoku.getPalette(img_path)


def new_image(mode, size, color):
    """Create a solid-color PIL image (swatch) of the given mode and size."""
    return Image.new(mode, size, color)


def create_color_palette():
    """Build one 100x100 swatch per palette color.

    NOTE(review): the swatches are created but never saved or collected --
    the save-to-folder logic below is still commented out.
    """
    for item in palette:
        # idx 0 is the percentage of color on the image
        c_pal = item[1]  # need to keep this as a tuple, RGB color codes
        # create a new image in RGB mode, 100x100 px, filled with the RGB color
        pal = new_image('RGB', (100, 100), c_pal)
    # trying to save all color palette images in a folder named based on the artwork's title
    # folder_name = "static/color_palette/{art_title}"
    # os.makedirs(folder_name)
(screenheight - height) / 2 - 80) window.geometry(size) def myNewSize(a1): width = a1.size[0] height = a1.size[1] while (width > 500 or height > 500): width = 0.9 * width height = 0.9 * height return width, height # 窗口居中 palette = Haishoku.getPalette(fp) palette2 = Haishoku.getPalette2(fp) #导入 palettePic1 = Haishoku.showPalette(fp) palettePic2 = Haishoku.showPalette2(fp) # 配色可视化 colorF = toPalette(palette) colorF2 = toPalette(palette2) window = tk.Tk() window.title('调色盘') window.configure(background='#323232') pic = Image.open(fp)
def get_colorname(index, path):
    """Classify the top three palette colors of the image at *path* and append
    one styled row to the module-level openpyxl worksheet `my_sheet`.

    For each of the three most dominant colors the row carries: the nearest
    named color (via min_color_diff against the `colors` map), its hex value,
    and its coverage percentage. Matching cells are filled with the actual
    colors so the sheet doubles as a visual check. Any failure is caught and
    logged rather than raised.
    """
    single_data = OrderedDict()
    try:
        haishoku = Haishoku.loadHaishoku(path)
        # Haishoku.showDominant(path)
        # single_data['图片路径名称'] = path
        # palette: [(percentage, (R, G, B)), ...], most dominant first
        palette = haishoku.palette
        main_color = palette[0][1]
        main_color_pct = palette[0][0]
        mian_colorname = min_color_diff(main_color, colors)[1]
        tmp_main_color = rgb2hex(main_color)[1:]  # hex without the leading '#'
        # single_data['主要图片颜色'] = mian_colorname
        # single_data['主要图片颜色rgb'] = rgb2hex(main_color)
        # single_data['主要图片颜色占比'] = main_color_pct
        second_color = palette[1][1]
        second_color_pct = palette[1][0]
        second_colorname = min_color_diff(second_color, colors)[1]
        tmp_second_color = rgb2hex(second_color)[1:]
        # single_data['次要图片颜色'] = second_colorname
        # single_data['次要图片颜色rgb'] = rgb2hex(second_color)
        # single_data['次要图片颜色占比'] = second_color_pct
        thred_color = palette[2][1]
        thred_color_pct = palette[2][0]
        thred_colorname = min_color_diff(thred_color, colors)[1]
        tmp_thred_color = rgb2hex(thred_color)[1:]
        # single_data['次次要图片颜色'] = thred_colorname
        # single_data['次次要图片颜色rgb'] = rgb2hex(thred_color)
        # single_data['次次要图片颜色占比'] = thred_color_pct
        row_line = [
            path, mian_colorname, "#" + tmp_main_color, main_color_pct,
            second_colorname, "#" + tmp_second_color, second_color_pct,
            thred_colorname, "#" + tmp_thred_color, thred_color_pct
        ]
        my_sheet.append(row_line)
        # fill the name cells with the canonical color for the matched name,
        # and the hex cells with the actual extracted color
        my_sheet["B" + str(2 + int(index))].fill = PatternFill(
            fill_type=fills.FILL_SOLID,
            fgColor=rgb2hex(
                list(colors.keys())[list(
                    colors.values()).index(mian_colorname)])[1:],
            bgColor=tmp_main_color)
        my_sheet["C" + str(2 + int(index))].fill = PatternFill(
            fill_type=fills.FILL_SOLID,
            fgColor=tmp_main_color,
            bgColor=tmp_main_color)
        my_sheet["E" + str(2 + int(index))].fill = PatternFill(
            fill_type=fills.FILL_SOLID,
            fgColor=rgb2hex(
                list(colors.keys())[list(
                    colors.values()).index(second_colorname)])[1:],
            bgColor=tmp_main_color)
        my_sheet["F" + str(2 + int(index))].fill = PatternFill(
            fill_type=fills.FILL_SOLID,
            fgColor=tmp_second_color,
            bgColor=tmp_second_color)
        my_sheet["H" + str(2 + int(index))].fill = PatternFill(
            fill_type=fills.FILL_SOLID,
            fgColor=rgb2hex(
                list(colors.keys())[list(
                    colors.values()).index(thred_colorname)])[1:],
            bgColor=tmp_main_color)
        my_sheet["I" + str(2 + int(index))].fill = PatternFill(
            fill_type=fills.FILL_SOLID,
            fgColor=tmp_thred_color,
            bgColor=tmp_thred_color)
        print(index + 1)
        # print(path,'主要颜色是:'+mian_colorname,'主要颜色占比:'+str(main_color_pct*100)+'%',' 次要颜色是:'+second_colorname,'次要颜色占比:'+str(second_color_pct*100)+'%',' 次次要颜色是:'+thred_colorname,'次次要颜色占比:'+str(thred_color_pct*100)+'%')
    except Exception as e:
        print('错误-->', path, e)
def gen_colors(img):
    """Generate a colorscheme using Colorz.

    Returns the image's Haishoku palette converted to hex strings.
    """
    # fix: Haishoku's API is getPalette; `getPlette` raised AttributeError
    palette = Haishoku.getPalette(img)
    # NOTE(review): `utils.rbg_to_hex` looks misspelled (rgb_to_hex?) --
    # confirm against the utils module before renaming
    return [utils.rbg_to_hex(col[1]) for col in palette]
from haishoku.haishoku import Haishoku
from PIL import Image
import pprint

# Load a local image, display its Haishoku palette, and print the raw data.
img_path = 'images/kershisnik.jpg'

# Haishoku instance exposing the parsed image, palette and dominant color
haishoku = Haishoku.loadHaishoku(img_path)
Haishoku.showPalette(img_path)

# palette: list of (percentage, (r, g, b)) pairs
palette = Haishoku.getPalette(img_path)
print('palette: {}'.format(palette))
from haishoku.haishoku import Haishoku
import sys


def convert_to_hex(rgba):
    """Return '#rrggbbaa' for an (r, g, b, a) tuple of 0-255 ints."""
    return "#" + ('%02x%02x%02x%02x' % rgba)


# CLI: print the palette of the image given as argv[1], one hex color per line
haishoku = Haishoku.loadHaishoku(str(sys.argv[1]))
palette = []
for color in haishoku.palette:
    hex_color = convert_to_hex((color[1][0], color[1][1], color[1][2], 255))
    # fix: `palette += hex_color` extended the list with the string's single
    # characters; append keeps one entry per color
    palette.append(hex_color)
    print(hex_color)
def get_color_list(path, _retries=3):
    """Return the Haishoku palette for *path*, retrying transient failures.

    The previous version retried by unbounded recursion, which turned any
    persistent failure (e.g. a missing file) into a RecursionError. Retries
    are now bounded and the last error is re-raised so callers see the cause.

    Args:
        path: image path or URL accepted by Haishoku.getPalette.
        _retries: remaining retry attempts (internal; default 3).
    """
    try:
        return Haishoku.getPalette(path)
    except Exception:
        if _retries > 0:
            return get_color_list(path, _retries - 1)
        raise