def get_hashtag_set(self, photo):
    """Return the set of color hashtags derived from an image file.

    photo: String with file location.
    Returns: python set of hashtags.
    """
    image = Img(photo)
    # Name the dominant colors, attach them, then build the hashtag set.
    image.set_color_names(self.ntc._get_color_names(image))
    return image._create_hashtags()
def image_prc(bin_str):
    """Validate binary data as a sufficiently large image.

    Returns the Img instance, or None when the data is not an image
    or is smaller than the configured minimum dimensions.
    """
    image = Img(bin_str)
    # Ignore data that is not an image.
    if not image.isImage:
        return None
    # Ignore images smaller than the configured minimum size.
    if not image.is_larger((int(min_width), int(min_height))):
        return None
    return image
def add_images():
    """Load two images, add them together, and save the result."""
    first = load()
    second = load()
    save(Img.add(first, second))
def join_channels():
    """Load three single-channel images, join them, convert HSI to RGB, save."""
    print('Será solicitado três imagens (uma para cada canal):')
    # One image per channel, loaded in order.
    ch_1, ch_2, ch_3 = load(), load(), load()
    combined = Img.join_channels(ch_1, ch_2, ch_3)
    save(combined.hsi_to_rgb())
def main():
    """Crop a cover PDF per configuration and emit images at each configured width."""
    cli = argparse.ArgumentParser()
    cli.add_argument('cover_type', help='Define the type of cover')
    args = cli.parse_args()

    # Configuration selects the geometry for the requested cover type.
    config = Config(path.abspath('config.json'), cover_type=args.cover_type)

    pdf = Pdf(path.abspath('cover.pdf'))
    pdf.set_cropbox(config.get_cover_geometry())

    # Render the cropped cover at every configured output width.
    img = Img(pdf.cover.name, output_folder=path.abspath('out'))
    for width in config.get_output_width():
        img.convert(width)
def load():
    """Prompt the user for an image path and load it.

    Returns the loaded image object on success (prints a confirmation
    when the loader returned a non-None value); terminates the program
    when the image cannot be opened.
    """
    try:
        file = input('Insira o caminho para a imagem: ')
        img = Img.load_image(file)
        # `is not None` instead of `!= None` (PEP 8 identity check).
        if img is not None:
            print("Imagem carregada com sucesso!")
        return img
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate; also dropped the useless `f` prefix on a
        # placeholder-free string (output is unchanged).
        print('Erro ao abrir imagem')
        exit()
def set_hashtags(self, photo):
    """Write color-name hashtags into the photo's EXIF data.

    photo: String with file location.
    """
    image = Img(photo)
    names = self.ntc._get_color_names(image)
    image.set_color_names(names)
    image._set_hashtags()
def load_from_folder(self, fpath, num=None, rand=False):
    """Load labelled images from a directory tree into this collection.

    fpath: root directory to walk.
    num: optional cap on the collection size (checked against self.len_).
    rand: when True, pick file names at random (with replacement)
          instead of iterating in directory order.
    NOTE(review): Python 2 code (`xrange`, `unicode`). File names appear
    to be '<label>.<ext>' with the stem used as the ground-truth symbol
    — confirm against the data set layout.
    """
    for path, dirs, files in os.walk(fpath):
        nums_all = np.arange(len(files))
        for i in xrange(len(files)):
            if rand:
                # Random index may repeat files; directory order is ignored.
                fname = files[random.choice(nums_all)]
            else:
                fname = files[i]
            # Skip hidden files such as .DS_Store.
            if fname.startswith('.'):
                continue
            # NOTE(review): rebinding the `fpath` parameter shadows the
            # walk root; os.walk already holds its own reference, so the
            # walk itself is unaffected.
            fpath = os.path.join(path, fname)
            # Label is the file-name stem, e.g. '7.png' -> u'7'.
            value = unicode(fname.split('.')[0].decode('utf-8'))
            Im = Img(fpath=fpath, smb_real=value, fsmb2vec=self.fsmb2vec)
            self.append(Im)
            # Stop once enough samples are loaded (breaks only the inner
            # loop; the outer os.walk loop continues to the next directory).
            if num and self.len_ >= num:
                break
def __save_image( cls, img_name, w, h, data, num_samples ): if not data: print "No data to write" return False img = Img( w, h ) img.copyPixels( data ) image_file = open( img_name, 'wb') img.get_formatted(image_file, num_samples) image_file.close()
def load_from_mnist_zip(self, fpath, num=None, rand=False):
    """Load MNIST samples from a gzipped cPickle archive into the collection.

    fpath: path to the MNIST .pkl.gz file (train/valid/test tuples).
    num: optional cap on the collection size (checked against self.len_).
    rand: when True, sample indices at random (with replacement).
    NOTE(review): Python 2 code (`cPickle`, `xrange`, `unicode`).
    """
    f = gzip.open(fpath, 'rb')
    trn_d, vld_d, tst_d = cPickle.load(f)
    f.close()
    # Merge the three MNIST splits into one pool of samples/labels.
    x_d = np.vstack((trn_d[0], vld_d[0], tst_d[0]))
    y_d = np.hstack((trn_d[1], vld_d[1], tst_d[1]))
    nums_all = np.arange(len(x_d))
    for i in xrange(len(x_d)):
        if rand:
            # Random index may repeat samples.
            j = random.choice(nums_all)
        else:
            j = i
        # Flat 784-vector back to 28x28; pixels rescaled by 256
        # (MNIST pickle stores floats in [0, 1)).
        x = x_d[j].reshape((28, 28)) * 256
        y = unicode(y_d[j])
        Im = Img(arr=x, smb_real=y, fsmb2vec=self.fsmb2vec)
        self.append(Im)
        # Stop once the requested number of samples is loaded.
        if num and self.len_ >= num:
            break
class TestColorExtractor(unittest.TestCase):
    """Tests for color extractor module.

    Uses `assertEqual` throughout: `assertEquals` is a deprecated alias
    that was removed in Python 3.12.
    """

    # Shared fixture, built once at class-definition time: an image with
    # its color names already attached.
    ntc = NameTheColors()
    test_colors_1 = Img("4.jpg")
    test_colors_1.set_color_names(ntc._get_color_names(test_colors_1))

    def test_file_not_exists(self):
        # Opening a missing file must raise NameError.
        # (Dropped the no-op `assertTrue(True)` the original started with.)
        with self.assertRaises(NameError):
            Img('10.jpg')

    def test_colors(self):
        self.assertEqual(
            set([
                'Brown', 'Gold Drop', 'Neon Carrot', 'Gold', 'Dark Orange',
                'Orange', 'Yellow', 'Chocolate'
            ]), self.test_colors_1.color_names)

    def test_hashtags_initcap_hash(self):
        self.assertEqual(
            "#Brown;#Gold Drop;#Neon Carrot;#Gold;#Dark Orange;#Orange;#Yellow;#Chocolate",
            self.test_colors_1._create_hashtags(True))

    def test_hashtags_initcap_nohash(self):
        self.assertEqual(
            "Brown;Gold Drop;Neon Carrot;Gold;Dark Orange;Orange;Yellow;Chocolate",
            self.test_colors_1._create_hashtags(False))

    def test_hashtags_lower_hash(self):
        self.assertEqual(
            "#brown;#gold drop;#neon carrot;#gold;#dark orange;#orange;#yellow;#chocolate",
            self.test_colors_1._create_hashtags(True, 'L'))

    def test_hashtags_upper_hash(self):
        self.assertEqual(
            "#BROWN;#GOLD DROP;#NEON CARROT;#GOLD;#DARK ORANGE;#ORANGE;#YELLOW;#CHOCOLATE",
            self.test_colors_1._create_hashtags(True, 'U'))
def save(new_img):
    """Prompt for a destination file name and save the image under img/."""
    prompt = 'Insira o nome do arquivo para salvar a nova imagem: '
    file_name = 'img/' + input(prompt)
    Img.save_image(new_img, file_name)
    print(f'Imagem salva com sucesso em {file_name}\n\n\n')
def test_close(self, mock_Image):
    """Closing an Img must delegate to the underlying Image.close."""
    instance = Img()
    instance.close()
    # The patched module-level Image mock records the delegated call.
    mock_Image.Image.close.assert_called_once_with('123')
def __init__(self, fname):
    """Initialize empty parse state, then run the base Img constructor."""
    # Nothing parsed yet: no header, no logical SD, empty block list.
    self.hdr = None
    self.blocks = []
    self.logic_sd = None
    # Base-class initialization performs the actual load from `fname`.
    Img.__init__(self, fname)
def test_file_not_exists(self):
    """Opening a non-existent image must raise NameError.

    Dropped the original's `self.assertTrue(True)` — a no-op assertion
    that can never fail and adds nothing to the test.
    """
    with self.assertRaises(NameError):
        Img('10.jpg')
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
from img import Img
import copy, time
from multiprocessing import Pool
from functools import partial
from sklearn.tree import DecisionTreeClassifier

# Module-level Img instance shared by the feature code below.
iimg = Img()


class Feature(object):
    """A rectangle feature over a fixed-size detection frame.

    Holds corner points of a 'dark' and a 'light' rectangle plus a
    depth-1 decision tree (stump) used as this feature's weak classifier.
    """

    # Class-wide accumulators for generated feature definitions.
    feature_list = []
    cur_fl = []
    # Window geometry: 64x64 frame; rectangle sizes sweep from 0 up to
    # half the frame, in steps of 4 px, with a 4 px positional stride.
    frame_size = 64
    start_width_height = 0
    stop_width_height = frame_size // 2
    stride_width_height = 4
    stride_x_y = 4

    def __init__(self, x1_d, y1_d, x2_d, y2_d, x1_l, y1_l, x2_l, y2_l):
        # Corner points of the dark (d) and light (l) rectangles.
        self.d1 = x1_d, y1_d
        self.d2 = x2_d, y2_d
        self.l1 = x1_l, y1_l
        self.l2 = x2_l, y2_l
        # Weak learner: a decision stump; fixed seed for reproducibility.
        self.clf = DecisionTreeClassifier(max_depth=1, random_state=1)
        # the viola jones paper uses only rectangles of the same shape
        # that are side by side. so i'm just going to do that.
tag="h2", class_name="", id="heading_section_2", content="This is a section" ), Component( tag="p", class_name="p_styles", id="text_section_2", content="It happens to be super cool" ), Img( tag="img", class_name="img_styles", id="img", src="https://images.theconversation.com/files/337593/original/file-20200526-106811-ql6d51.jpg?ixlib=rb-1.1.0&q=45&auto=format&w=1356&h=668&fit=crop", alt="Llama Face", width="600", height="300" ), Component( tag="p", class_name="p_styles", id="text_section_2", content="Here's a llama ^ for no particular reason" ), ] ) ] )
#coding:utf8
from img import Img

# Exercise the Img wrapper end-to-end: thumbnail, resize, and the two
# watermarking modes (composite and convert), printing each result.
i = Img()
i.open()

result = i.convert_thumbnail(input_file="/home/insion/Pictures/l.jpg",
                             output_file="/home/insion/Pictures/toutput.jpg")
print(result)

result = i.convert_resize(input_file="/home/insion/Pictures/l.jpg",
                          output_file="/home/insion/Pictures/loutput2.jpg",
                          output_size="500x")
print(result)

result = i.composite_watermark(watermark_file="/home/insion/Pictures/lhs_logo.png",
                               input_file="/home/insion/Pictures/loutput2.jpg",
                               output_file="/home/insion/Pictures/loutput.jpg")
print(result)

result = i.convert_watermark(watermark_file="/home/insion/Pictures/lhs_logo.png",
                             input_file="/home/insion/Pictures/m.jpg",
                             output_file="/home/insion/Pictures/moutput.jpg")
print(result)
print('dumped partially trained model to file') count += 1 if success_rate_overall >= 0.99: print('at least 99% of images classified correctly.') else: print('process failed to converge. success rate:', success_rate_overall) return cascade RESTORE_CASCADE = False Feature.gen_feature_list() img = Img() if not RESTORE_CASCADE: images, labels = img.load_data('faces', 'background', N=2000) int_imgs = img.compute_integral_images(images) if RESTORE_PARTIAL_CASCADE: with open('partial_save_data.bin', 'rb') as f: cascade = pickle.load(f) with open('partial_save_images.bin', 'rb') as f: train_images = pickle.load(f) with open('partial_save_labels.bin', 'rb') as f: train_labels = pickle.load(f) else: train_images, train_labels = copy.deepcopy(int_imgs), copy.deepcopy( labels) cascade = None