def get_domain_color(image):
    """Label the dominant color of *image* as "red" or "green".

    The Haishoku dominant RGB value is compared by Euclidean distance
    against pure red (255, 0, 0) and pure green (0, 255, 0); the label
    of the nearer reference color is returned.
    """
    dom = Haishoku.getDominant(image)
    # Distance to pure red and pure green in RGB space.
    to_red = math.sqrt((dom[0] - 255) ** 2 + dom[1] ** 2 + dom[2] ** 2)
    to_green = math.sqrt(dom[0] ** 2 + (dom[1] - 255) ** 2 + dom[2] ** 2)
    return "green" if to_green < to_red else "red"
def saveDominant(image_path, path, idx):
    """Render the image's dominant color as a strip of boxes and save it."""
    # Dominant color of the source image.
    dominant = Haishoku.getDominant(image_path)
    # Eight identical 50x20 swatches of that color.
    swatch = new_image('RGB', (50, 20), dominant)
    images = [swatch] * 8
    # Persist the joined strip under the 'Dominant' label.
    joint_image(images, path, idx, 'Dominant')
def main():
    """Exercise the Haishoku APIs against a local sample image."""
    path = "/Users/wujianming/Desktop/WechatIMG18547.jpeg"
    # path = "http://wx2.sinaimg.cn/large/89243dfbly1ffoekfainzj20dw05k0u7.jpg"

    # Palette and dominant-color queries.
    palette = Haishoku.getPalette(path)
    dominant = Haishoku.getDominant(path)

    # Visual preview of the palette (showDominant left disabled).
    Haishoku.showPalette(path)
    # Haishoku.showDominant(path)

    # The Haishoku object exposes the same data as attributes.
    h = Haishoku.loadHaishoku(path)
    print(h.palette)
    print(h.dominant)
def main():
    """Demo every public Haishoku API against a local PNG."""
    path = "demo_01.png"

    # Palette / dominant-color queries.
    print(Haishoku.getPalette(path))
    print(Haishoku.getDominant(path))

    # Visual previews.
    Haishoku.showPalette(path)
    Haishoku.showDominant(path)

    # The Haishoku instance mirrors the same information.
    h = Haishoku.loadHaishoku(path)
    print(h.image)
    print(h.palette)
    print(h.dominant)
def main():
    """Run the Haishoku demo against an image fetched over HTTP."""
    url = "https://img3.doubanio.com/lpic/s27028282.jpg"
    # Wrap the downloaded bytes in a file-like object Haishoku can read.
    path = BytesIO(requests.get(url).content)

    # Palette / dominant-color queries.
    print(Haishoku.getPalette(path))
    print(Haishoku.getDominant(path))

    # Visual previews.
    Haishoku.showPalette(path)
    Haishoku.showDominant(path)

    # The Haishoku instance mirrors the same information.
    h = Haishoku.loadHaishoku(path)
    print(h.palette)
    print(h.dominant)
def palette_op(self, palette_size, sample_factor=4):
    """Build a shmops.Fill_Operation mapping each map tile to a palette color.

    Slices the source image into tiles, resolves each tile's Haishoku
    dominant color to the nearest palette entry, and records one fill
    per (x, y) tile coordinate.

    :param palette_size: number of colors in the working palette
    :param sample_factor: 1 = derive the palette from the whole image;
        otherwise sample a reduced map (sized via get_map_size) and use a
        combined palette — presumably per-tile palettes merged; TODO confirm
        against get_combined_palette
    :returns: shmops.Fill_Operation with one add_fill per tile
    """
    print("||||| Initiating Palette Fill Operation |||||")
    fill_op = shmops.Fill_Operation(id='4321')
    tiles = self.slice_to_tiles(show_info="Image to Map")

    # Palette used for nearest-color matching below.
    # NOTE(review): this rebinds the name `palette` — apparently a module,
    # given the palette.generate / palette.nearest_color(palette, ...) calls —
    # to the generated palette object. Confirm the returned object really
    # exposes nearest_color/rgb_to_hex; otherwise this shadowing is a bug.
    if sample_factor == 1:
        palette = palette.generate(self.path, palette_size, debug=self.debug)
    else:
        # Combined palette obtained by slicing the map into sample tiles.
        sampling_map_size = self.get_map_size(sample_factor)
        palette = self.get_combined_palette(palette_size, sampling_map_size)

    temp_path = Path('temp_img.png')  # scratch file for Haishoku (path-only API)
    x, y = 0, 0
    for row in progress_bar.progress_bar(tiles, "Processing Map: ", " Row: ", 36):
        for tile in row:
            #if self.debug:
            #    temp_path = f'{x}x{y}y_temp_img.png'
            #    temp_path = Path('./test_tiles/' + temp_path)
            # Haishoku only accepts a path, so round-trip the tile through disk.
            tile.save(temp_path, "PNG")
            dominant = Haishoku.getDominant(str(temp_path))
            tile_color = palette.nearest_color(palette, dominant)
            #if self.debug: print(f'Tile Address: {x}, {y} | Tile Color: {tile_color} | Saved to: {temp_path}')
            fill_op.add_fill(x, y, palette.rgb_to_hex(*tile_color))
            x += 1
        y += 1
        x = 0  # next row: reset the column counter
    if not self.debug:
        temp_path.unlink()  # debug mode keeps the scratch file for inspection
    return fill_op
def buildingDatabase(jpg_dir, wav_path):
    """Extract per-frame features from a directory of video frames plus a
    voice descriptor from the matching audio track.

    Per frame: dominant-color hue (H) and value (V), a texture d-hash, and
    a KLT optical-flow motion magnitude. Only the first 480 frames are used.

    :param jpg_dir: directory of frame images; the sort key strips the first
        5 characters of the stem — assumes names like "frameN.jpg", TODO
        confirm against the frame extractor
    :param wav_path: path of the audio file passed to get_voice_descriptor
    :returns: (Hlist, Vlist, MotionList, dhashList, voiceValue)
    """
    # hyperparameters
    jpg_dir = jpg_dir  # NOTE(review): self-assignment, no effect
    image_files = os.listdir(jpg_dir)
    # Sort numerically by the integer after the 5-char prefix ("frameN.ext").
    image_files.sort(key=lambda x: int(x.split('.')[0][5:]))
    # texture detection
    dhashList = []
    # dominant color
    Hlist = []
    # Slist=[]
    Vlist = []
    # motion: optical flow
    # corner-detection parameters (Shi-Tomasi)
    feature_params = dict(maxCorners=100, qualityLevel=0.1, minDistance=7, blockSize=7)
    # KLT optical-flow parameters
    lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.02))
    tracks = []          # active corner tracks, each a list of (x, y) points
    track_len = 15       # max points retained per track
    detect_interval = 5  # re-detect corners every N frames
    beforeGrey = None    # previous grayscale frame (None on first iteration)
    MotionList = []
    count = 0
    # for i in range(0,480):
    for jpg_path in image_files[0:480]:
        # jpg_path=jpg_dir+"frame"+str(i)+".jpg"
        jpg_path = jpg_dir + jpg_path
        print(jpg_path)
        # dominant color
        d_color = Haishoku.getDominant(jpg_path)
        #print(count, ':', d_color)
        r = d_color[0]
        g = d_color[1]
        b = d_color[2]
        # convert RGB -> HSV (only H and V are kept; S is computed but unused)
        cmax = max(max(r, g), b)
        cmin = min(min(r, g), b)
        delta = cmax - cmin
        V = cmax
        if cmax == 0:
            S = 0
        else:
            S = delta / cmax
        if delta == 0:
            H = 0
        elif cmax == r:
            H = ((g - b) / delta) * 60
        elif cmax == g:
            H = 120 + ((b - r) / delta) * 60
        else:
            H = 240 + ((r - g) / delta) * 60
        if H < 0:
            H = H + 360
        Hlist.append(H)
        # Slist.append(S)
        Vlist.append(V)
        img = cv2.imread(jpg_path)
        # texture feature
        dhash = d_hash(img)
        dhashList.append(dhash)
        # motion
        # NOTE(review): `next` shadows the builtin and is never read again
        curGrey = next = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # NOTE(review): the next two lines are dead — both are recomputed
        # inside the if-branch before first use
        img0, img1 = beforeGrey, curGrey
        p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
        if len(tracks) > 0:
            img0, img1 = beforeGrey, curGrey
            p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
            # Track the previous frame's corners into the current frame.
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
            # Mean corner displacement = motion magnitude for this frame.
            sum = 0.0  # NOTE(review): shadows the builtin `sum`
            for i in range(0, len(p0)):
                dist = math.sqrt((p0[i][0][0] - p1[i][0][0])**2 + (p0[i][0][1] - p1[i][0][1])**2)
                sum = sum + dist
            avg = sum / len(p0)
            MotionList.append(avg)
            # Back-track: feed the tracked corners and current frame back to
            # recover each corner's position in the previous frame.
            p0r, _, _ = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
            # Round-trip error between the original corners and the back-tracked ones.
            d = abs(p0 - p0r).reshape(-1, 2).max(-1)
            # An error >= 1 px marks the track as unreliable.
            good = d < 1
            new_tracks = []
            for i, (tr, (x, y), flag) in enumerate(zip(tracks, p1.reshape(-1, 2), good)):
                # Drop unreliable tracking points.
                if not flag:
                    continue
                # Extend the track with the corner's new position.
                tr.append((x, y))
                # Keep only the most recent track_len points of the trajectory.
                if len(tr) > track_len:
                    del tr[0]
                # Keep the surviving track.
                new_tracks.append(tr)
            # Replace the track set with the surviving tracks.
            tracks = new_tracks
        else:
            MotionList.append(0.0)  # no tracks yet: motion defaults to 0
        # Re-detect corners every detect_interval frames.
        if count % detect_interval == 0:
            mask = np.zeros_like(curGrey)
            mask[:] = 255
            p = cv2.goodFeaturesToTrack(curGrey, mask=mask, **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    tracks.append([(x, y)])
        beforeGrey = curGrey
        count += 1
    # dominant color across the whole video
    print('H len:', len(Hlist))
    print('Hlist:', Hlist)
    # print('S len:',len(Slist))
    # print('Slist:',Slist)
    print('V len:', len(Vlist))
    print('Vlist:', Vlist)
    # texture
    print('hash len:', len(dhashList))
    print('hash list:', dhashList)
    # motion
    print('Motion len:', len(MotionList))
    print('MotionList:', MotionList)
    # voice
    voiceValue = get_voice_descriptor(wav_path)
    print('voiceValue:', voiceValue)
    return Hlist, Vlist, MotionList, dhashList, voiceValue
def music():
    """Render a music share-card: blurred cover banner with knocked-out
    lyrics text, plus the square cover pasted on top; shows the result.

    All geometry constants are design pixels doubled for hi-DPI.
    """
    # --- card geometry ---
    w = 490*2
    h = 740*2
    banner_w = 490*2
    banner_h = 235*2
    cover_w = 140*2
    cover_h = 140*2
    cover_top = 120*2
    cover_left = 100*2
    block_w = 32*2
    block_h = 12*2
    block_left = 97*2
    block_top = 160*2 + banner_h
    max_content_w = 300*2
    max_title_w = 190*2

    # --- title: wrap character-by-character once the rendered width
    # exceeds the limit (one CJK glyph's size is the tolerance/line height) ---
    title_left = cover_w + cover_left + 10 *2
    title = '成都'
    title_font = ImageFont.truetype('font/zh/YueSong.ttf', 28*2)
    single_title_w, single_title_h = title_font.getsize("已")
    titles = list(title)
    title_formated = ''
    temp = ''
    for word in titles:
        temp += word
        temp_w, temp_h = title_font.getsize(temp)
        title_formated += word
        if temp_w > max_title_w + single_title_w:
            title_formated += '\n'
            temp = ''
    tlines = len(title_formated.split('\n'))
    title_h = tlines * single_title_h + (tlines - 1) * 28*2
    title_top = banner_h - title_h - 10 *2

    # --- author: same greedy wrap ---
    max_author_w = 190*2
    author_left = cover_w + cover_left + 10 *2
    author_top = banner_h + 10 *2
    author = '赵雷'
    author_font = ImageFont.truetype('font/zh/YueSong.ttf', 14*2)
    single_author_w, single_author_h = author_font.getsize("已")
    authors = list(author)
    author_formated = ''
    temp = ''
    for word in authors:
        temp += word
        temp_w, temp_h = author_font.getsize(temp)
        author_formated += word
        if temp_w > max_author_w + single_author_w:
            author_formated += '\n'
            temp = ''
    alines = len(author_formated.split('\n'))
    author_h = alines * single_author_h + (alines - 1) * 14*2

    # --- lyrics: wrap each original line, preserving its own line breaks ---
    content_left = 95*2
    content_top = banner_h + 150*2
    content = 'Just let time go on\n让我 掉下眼泪的\n不止昨夜的酒\n让我依依不舍的\n不止你的温柔\n余路还要走多久\n你攥着我的手\n让我感到为难的\n是挣扎的自由'
    content_formated = ''
    content_font = ImageFont.truetype('font/zh/YueSong.ttf', 18*2)
    single_content_w, single_content_h = content_font.getsize("已")
    lines = content.split('\n')
    for line in lines:
        contents = list(line)
        line_formated = ''
        temp = ''
        for word in contents:
            temp += word
            temp_w, temp_h = content_font.getsize(temp)
            line_formated += word
            if temp_w > max_content_w + single_content_w:
                line_formated += '\n'
                temp = ''
        # Re-add the newline the split removed (unless the wrap just did).
        if temp != '':
            line_formated += '\n'
        content_formated += line_formated
    print(content_formated)
    clines = len(content_formated.split('\n'))
    # NOTE(review): uses single_author_h and 14*2 here although the content
    # font is 18*2 — looks like a copy-paste from the author block; confirm.
    content_h = clines * single_author_h + (clines - 1) * 14*2
    # Grow the card if the lyrics need more room.
    h = max(h, content_top + content_h + 100*2)

    base = Image.new('RGBA', (w, h), (255, 255, 255, 255))
    # Download the cover once; the same BytesIO feeds PIL and Haishoku.
    url = "https://y.gtimg.cn/music/photo_new/T002R300x300M000001qHmKU29WX7K.jpg"
    file = BytesIO(requests.get(url).content)
    photo = Image.open(file).convert('RGBA')
    (pw, ph) = photo.size
    # NOTE(review): dominant color is fetched but never used afterwards.
    r, g, b = Haishoku.getDominant(file)
    # Center-crop box matching the full card's aspect ratio (for the banner).
    if pw/ph > w/h:
        bbox = ((pw - ph*w/h)/2, 0, (pw + ph*w/h)/2, ph)
    else:
        bbox = (0, (ph - pw*h/w)/2, pw, (ph + pw*h/w)/2)

    # Draw all text into a grayscale mask (black text on white) and use it
    # as an alpha channel so the text "knocks out" the white overlay below.
    txt = Image.new('L', (w, h), 255)
    draw = ImageDraw.Draw(txt)
    alpha = Image.new('L', (w, h), 0)
    draw.multiline_text((title_left, title_top), title_formated, font=title_font, fill=0, align='left', spacing=15*2)
    draw.multiline_text((author_left, author_top), author_formated, font=author_font, fill=0, align='left', spacing=15*2)
    draw.multiline_text((content_left, content_top), content_formated, font=content_font, fill=0, align='left', spacing=12*2)
    alpha.paste(txt, (0, 0))

    # Banner background: cropped cover, stretched, heavily blurred, then
    # lightened by compositing a translucent white layer on top.
    banner_cover = photo.crop(bbox)
    banner_cover = banner_cover.resize((w, h), Image.ANTIALIAS)
    banner_blur = banner_cover.filter(ImageFilter.GaussianBlur(80))
    banner_wrap = Image.new('RGBA', (w, h), (255, 255, 255, 193))
    banner_mask = Image.alpha_composite(banner_blur, banner_wrap)
    banner_mask.putalpha(alpha)
    banner_blur.paste(banner_mask, box=(0, 0), mask=banner_mask)
    base.paste(banner_blur, box=(0, 0))

    # Square cover: center-crop to the cover's aspect ratio and paste on top.
    if pw/ph > cover_w/cover_h:
        box = ((pw - ph*cover_w/cover_h)/2, 0, (pw + ph*cover_w/cover_h)/2, ph)
    else:
        box = (0, (ph - pw*cover_h/cover_w)/2, pw, (ph + pw*cover_h/cover_w)/2)
    cover = photo.crop(box)
    cover = cover.resize((cover_w, cover_h), Image.ANTIALIAS)
    base.paste(cover, box=(cover_left, cover_top))
    base.show()
def _wrap_by_render_width(chunks, font, max_w, single_w):
    """Greedily insert '\n' once the rendered width of the accumulated run
    exceeds max_w + single_w.

    *chunks* is an iterable of characters (as produced by
    textwrap.wrap(text, 1)); returns the wrapped string.
    """
    formatted = ''
    run = ''
    for ch in chunks:
        run += ch
        run_w, _ = font.getsize(run)
        formatted += ch
        if run_w > max_w + single_w:
            formatted += '\n'
            run = ''
    return formatted


def book(request):
    """Render a book share-card image and return it as a JPEG HTTP response.

    Downloads the cover from Douban, lays out cover + accent color block +
    title/divider/author/summary text, and streams the card as image/jpeg.

    :param request: HTTP request object (unused; kept for the view signature)
    :returns: HttpResponse carrying the JPEG bytes
    """
    # --- card geometry (design px doubled for hi-DPI) ---
    w = 490*2
    h = 740*2
    banner_w = 490*2
    banner_h = 265*2
    cover_w = 135*2
    cover_h = 200*2
    cover_top = 120*2
    cover_left = int((w - cover_w)/2)  # center the cover horizontally
    block_w = 32*2                     # accent color block under the banner
    block_h = 12*2
    block_left = 97*2
    block_top = 160*2 + banner_h
    max_content_w = 310*2              # wrap limit shared by all text blocks

    # --- title ---
    title_left = 97*2
    title_top = block_top + block_h + 20*2
    title = '高窗'
    title_font = ImageFont.truetype('font/zh/YueSong.ttf', 28*2)
    # One CJK glyph's size: wrap tolerance and line height.
    single_title_w, single_title_h = title_font.getsize("已")
    title_formated = _wrap_by_render_width(wrap(title, 1), title_font,
                                           max_content_w, single_title_w)
    tlines = len(title_formated.split('\n'))
    title_h = tlines * single_title_h + (tlines - 1) * 28*2

    # --- decorative divider ---
    division_left = 97*2
    division_top = title_top + single_title_h + 12*2
    division = '╱'
    division_font = ImageFont.truetype('font/zh/PingFang.ttf', 20*2)
    single_division_w, single_division_h = division_font.getsize("已")

    # --- author ---
    author_left = 97*2
    author_top = division_top + title_h
    author = '作者:雷蒙德.钱德勒'
    author_font = ImageFont.truetype('font/zh/YueSong.ttf', 14*2)
    single_author_w, single_author_h = author_font.getsize("已")
    author_formated = _wrap_by_render_width(wrap(author, 1), author_font,
                                            max_content_w, single_author_w)
    alines = len(author_formated.split('\n'))
    author_h = alines * single_author_h + (alines - 1) * 14*2

    # --- summary ---
    content_left = 97*2
    content_top = author_top + author_h + 12*2
    content = '故事原型:加州石油大亨爱德华.多赫尼之子被杀案,及蒂波特山油田丑闻'
    content_font = ImageFont.truetype('font/zh/YueSong.ttf', 14*2)
    single_content_w, single_content_h = content_font.getsize("已")
    content_formated = _wrap_by_render_width(wrap(content, 1), content_font,
                                             max_content_w, single_content_w)
    print(content_formated)
    clines = len(content_formated.split('\n'))
    # single_author_h is used here as in the original; both fonts are 14*2,
    # so the value is identical to single_content_h.
    content_h = clines * single_author_h + (clines - 1) * 14*2

    # Grow the card to fit the text, then draw the dark banner.
    h = content_top + content_h + 150*2
    base = Image.new('RGBA', (w, h), (255, 255, 255, 255))
    draw = ImageDraw.Draw(base)
    draw.rectangle([(0, 0), (banner_w, banner_h)], (26, 26, 26, 255))

    # --- cover image ---
    url = "https://img3.doubanio.com/lpic/s27028282.jpg"
    file = BytesIO(requests.get(url).content)
    photo = Image.open(file).convert('RGBA')
    (pw, ph) = photo.size
    # Center-crop to the cover's aspect ratio.
    if pw/ph > cover_w/cover_h:
        box = ((pw - ph*cover_w/cover_h)/2, 0, (pw + ph*cover_w/cover_h)/2, ph)
    else:
        # BUGFIX: the first term previously used cover_w*cover_h/cover_w
        # (which is just cover_h); the symmetric center-crop needs
        # pw*cover_h/cover_w, mirroring the branch above.
        box = (0, (ph - pw*cover_h/cover_w)/2, pw, (ph + pw*cover_h/cover_w)/2)
    photo = photo.crop(box)
    photo = photo.resize((cover_w, cover_h), Image.ANTIALIAS)
    base.paste(photo, box=(cover_left, cover_top))

    # Accent block tinted with the cover's dominant color.
    dominant = Haishoku.getDominant(file)
    draw.rectangle([(block_left, block_top), (block_left + block_w, block_top + block_h)], dominant)

    # --- text ---
    draw.multiline_text((title_left, title_top), title_formated, font=title_font,
                        fill=(70, 70, 70), align='left', spacing=15*2)
    draw.multiline_text((division_left, division_top), division, font=division_font,
                        fill=(70, 70, 70), align='left', spacing=0)
    draw.multiline_text((author_left, author_top), author_formated, font=author_font,
                        fill=(90, 90, 90), align='left', spacing=15*2)
    draw.multiline_text((content_left, content_top), content_formated, font=content_font,
                        fill=(90, 90, 90), align='left', spacing=12*2)
    print(dominant)
    #base.show()

    # Serialize to JPEG. JPEG has no alpha channel, so convert from RGBA
    # first (BUGFIX: Pillow raises "cannot write mode RGBA as JPEG").
    msstream = BytesIO()
    rgb = base.convert('RGB')
    rgb.save(msstream, "jpeg")
    # release memory
    rgb.close()
    base.close()
    return HttpResponse(msstream.getvalue(), content_type="image/jpeg")
def get_dominant_color(image):
    """Return the dominant (R, G, B) color of *image* via Haishoku."""
    return Haishoku.getDominant(image)
def haishokuColor(img):
    """Print the Haishoku dominant color and full palette of *img*."""
    print(Haishoku.getDominant(img))
    print(Haishoku.getPalette(img))
"""Smoke-test script for the Haishoku palette/dominant-color APIs."""
import os
import sys
import csv

# BUGFIX: `sys` was used below without being imported (NameError).
sys.path.insert(0, "..")  # make the sibling haishoku package importable

from haishoku.haishoku import Haishoku

folder = "/Users/talamram/Downloads/thike/"

# Count files in the parent directory.
# NOTE(review): the comment said "selected repository" but '..' is listed,
# not `folder` — confirm which directory was intended.
counter = len(os.listdir('..'))
# BUGFIX: the original message printed "Nfiles counted!" (missing space).
print(str(counter) + ' files counted!')

# Iterate through images in folder and extract dominant colour and palette
# and write them to a csv
path = "test.JPG"

# getPalette api
palette = Haishoku.getPalette(path)

# getDominant api
dominant = Haishoku.getDominant(path)

# Haishoku object exposes the same data as attributes.
h = Haishoku.loadHaishoku(path)
print('= showpalette =')
print(h.palette)
print('= dominant =')
print(h.dominant)
# from colorthief import ColorThief
from haishoku.haishoku import Haishoku

# Sample image under analysis.
image = './1341543305665_.pic_hd.jpg'
# image = './1331543305664_.pic_hd.jpg'

# Dominant color of the image.
print(Haishoku.getDominant(image))
# Haishoku.showDominant( image )

# Full color palette of the image.
print(Haishoku.getPalette(image))
# Haishoku.showPalette( image )
def getColor(self, imagePath):
    """Return the dominant color of the image at *imagePath* as a
    comma-separated "R, G, B" string (tuple parentheses stripped).

    Also logs the path/color pair for tracing.
    """
    dominant = Haishoku.getDominant(imagePath)
    # str() instead of the direct dunder call dominant.__str__();
    # stripping '(' and ')' turns "(r, g, b)" into "r, g, b".
    color = str(dominant).replace('(', '').replace(')', '')
    print('--------------------> getColor(%s) = %s' % (imagePath, color))
    return color