def drawpoint(raw_txt,o_dir,relative_img_path):
	with open(raw_txt) as fh:
		lines = fh.readlines()
	for line in lines:
		if line.isspace(): continue  # skip empty lines
		img_name = line.split()[0] 
		full_img_path = relative_img_path + img_name
		img = cv2.imread(full_img_path)
		draw_img = img.copy()

		w = img.shape[1]		# width is x axis
		h = img.shape[0]		# height is y axis
		w1 = (w-1)/2			# for  [-1,1]
		h1 = (h-1)/2 	

		raw_land = list(line.split())[1:2*n_p+1]
		for i in range(n_p): 			# draw key points
			x_ = tools.convert_point(raw_land[2*i+0],w1)
			y_ = tools.convert_point(raw_land[2*i+1],h1)
			cv2.circle(draw_img,(x_,y_),2,(0,255,0))
		# output img
		sub_flod = o_dir + raw_txt.split('_')[-2] + '/'
		tools.makedir(sub_flod)			
		draw_img_path = sub_flod + img_name
		print('draw img path', draw_img_path)
		cv2.imwrite(draw_img_path,draw_img)
	print(raw_txt, 'done!')
def gendata(target_txt,raw_txt):
	with open(target_txt,"w") as f:
		with open(raw_txt) as fh:
			lines = fh.readlines()
		for line in lines:
			# label text
			if line.isspace(): continue
			img_name = line.split()[0] 
			full_img_path = relative_path + img_name
			print(full_img_path)
			img = cv2.imread(full_img_path)

			w = img.shape[1]		# width is x axis
			h = img.shape[0]		# height is y axis
			w1 = (w-1)/2			# for  [-1,1]
			h1 = (h-1)/2 

			raw_land = list(line.split())[1:2*n_p+1] 
			new_line = img_name
			for i in range(n_p):
				x_ = round( (float(raw_land[2*i+0])-w1)/w1 , 4)  # value is [-1,1] 
				y_ = round( (float(raw_land[2*i+1])-h1)/h1 , 4)	
				new_line = new_line + ' ' + str(x_)			# note: The point order has changed: x1,y1,x2...
				new_line = new_line + ' ' + str(y_) 
			print('new_line: ', new_line)
			f.write(new_line + '\n') 

			# image 
			scale_img = cv2.resize(img,(net_1_w,net_1_h))
			sub_flod = l1_data_dir + raw_txt.split('_')[2] + '/'	
			tools.makedir(sub_flod)
			scale_img_path =  sub_flod + img_name
			print('output path', scale_img_path)
			cv2.imwrite(scale_img_path,scale_img)
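The two functions above are complementary: gendata maps raw pixel landmarks into [-1, 1] via (p - half) / half, while drawpoint relies on tools.convert_point to map stored coordinates back to pixel positions for visualization. The tools module itself is not shown in these snippets; a minimal sketch of what convert_point might look like, assuming it simply inverts that normalization and rounds to the nearest pixel:

# Hypothetical sketch of tools.convert_point; the real helper in the project's
# util/tools module is not included in these examples.
def convert_point(value, half):
	# value: coordinate stored as a string, normalized to [-1, 1]
	# half:  (size - 1) / 2 for the corresponding image axis
	# Inverts gendata's (p - half) / half and rounds to an integer pixel index.
	return int(round(float(value) * half + half))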
Example #3
def drawpoint(raw_txt, o_dir, relative_img_path):
    with open(raw_txt) as fh:
        lines = fh.readlines()
    for line in lines:
        if line.isspace(): continue  # skip empty lines
        img_name = line.split()[0]
        full_img_path = relative_img_path + img_name
        img = cv2.imread(full_img_path)
        draw_img = img.copy()

        w = img.shape[1]  # width is x axis
        h = img.shape[0]  # height is y axis
        w1 = (w - 1) / 2  # for  [-1,1]
        h1 = (h - 1) / 2

        raw_land = list(line.split())[1:2 * n_p + 1]
        for i in range(n_p):  # draw key points
            x_ = tools.convert_point(raw_land[2 * i + 0], w1)
            y_ = tools.convert_point(raw_land[2 * i + 1], h1)
            cv2.circle(draw_img, (x_, y_), 2, (0, 255, 0))
        # output img
        sub_flod = o_dir + raw_txt.split('_')[-2] + '/'
        tools.makedir(sub_flod)
        draw_img_path = sub_flod + img_name
        print('draw img path', draw_img_path)
        cv2.imwrite(draw_img_path, draw_img)
    print(raw_txt, 'done!')
Example #4
def prepare4hmmsearch(hmms_file, proteome_files, path_to_hmmsearch_results):
	args2concurrent = []
	outFiles4hmmsearch = []
	query = os.path.basename(hmms_file)
	for proteome_file in proteome_files:
		faaFileName, org, update = proteome_file
		if not os.path.isfile(faaFileName) or os.stat(faaFileName).st_size == 0:
			print('No such file or empty file:', faaFileName)
			continue
		fileName = '.'.join([query, os.path.basename(faaFileName)])
		output_file = os.path.join(path_to_hmmsearch_results, org, fileName)
		callhmmer = False
		if update:
			callhmmer = True
		elif os.path.isfile(output_file) and os.stat(output_file).st_size > 0:
			# hmmer-3.1b2 ends a complete output file with a '# [ok]' line;
			# an incomplete file is missing it, so hmmsearch must be rerun.
			with open(output_file, 'rb') as fp:
				fp.seek(-min(len(b'# [ok]\n'), os.stat(output_file).st_size), os.SEEK_END)
				callhmmer = b'# [ok]\n' not in fp.read()
		else:
			callhmmer = True
		if callhmmer:
			args2concurrent.append((hmms_file, faaFileName, output_file))
			tools.makedir(os.path.dirname(output_file))
		else:
			print('Skip hmmsearch {} against {}'.format(hmms_file, faaFileName))

		outFiles4hmmsearch.append(output_file)
	return (args2concurrent, outFiles4hmmsearch)
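Each snippet on this page calls tools.makedir to create an output directory before writing into it, but the helper itself never appears. A minimal sketch of what it presumably does, assuming it is just a safe wrapper around os.makedirs that tolerates an existing directory:

import os

# Hypothetical sketch of tools.makedir; the real helper is not shown in these examples.
def makedir(path):
	os.makedirs(path, exist_ok=True)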
Example #5
def drawpoint(raw_txt,o_dir):
	with open(raw_txt) as fh:
		lines = fh.readlines()
	for line in lines:
		if line.isspace(): continue  # skip empty lines
		raw_land = list(line.split())[1:2*n_p+1]

		img_name = line.split()[0] 
		full_img_path = relative_path + img_name		
		img = cv2.imread(full_img_path)
		draw_img = img.copy()
		draw_img = tools.drawpoints_0(draw_img,raw_land)

		# output img
		sub_flod = o_dir + raw_txt.split('_')[-2]		
		tools.makedir(sub_flod)
		draw_img_path = sub_flod + '/' + img_name
		print(draw_img_path)
		cv2.imwrite(draw_img_path,draw_img)
	print(raw_txt, 'done!')
Example #6
def train(self, periods, checkpoint_frequency):
    tools.makedir('checkpoints/convnet')
    writer = tf.summary.FileWriter('./graphs/convnet',
                                   tf.get_default_graph())
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        checkpoint = tf.train.get_checkpoint_state(
            os.path.dirname('checkpoints/convnet/checkpoint'))
        if checkpoint and checkpoint.model_checkpoint_path:
            saver.restore(sess, checkpoint.model_checkpoint_path)
        step = self.globalstep.eval()
        for epoch in range(periods):
            step = self.train_epoch(sess, saver, self.train_init, writer,
                                    epoch, step)
            self.eval_once(sess, self.test_init, writer, epoch, step)
            if (epoch + 1) % checkpoint_frequency == 0:
                saver.save(sess, 'checkpoints/convnet/checkpoint', epoch)
    writer.close()
Example #7
def translateGenomeByFGS_v2(dnaFiles, dir2proteome):
    #seq_type = '1'
    #train_model = 'complete'
    seq_type = '0'
    #train_model = 'sanger_5'
    #train_model = 'sanger_10'
    #train_model = '454_5'
    #train_model = '454_10'
    #train_model = '454_30'
    train_model = 'illumina_5'
    #train_model = 'illumina_10'

    proteome_files = []
    args2concurrent = []
    for item in dnaFiles:
        dna_file, org = item

        outputFile = os.path.basename(dna_file)
        output_file = os.path.join(dir2proteome, org, outputFile)

        faaFile = output_file + '.faa'
        # Prepare to translate the genome into a proteome if the proteome file is not yet available.
        update = False
        if not os.path.isfile(faaFile):
            tools.makedir(os.path.dirname(faaFile))
            args2concurrent.append(
                (dna_file, output_file, seq_type, train_model))
            update = True
        elif os.stat(faaFile).st_size > 0:
            print('Skip translating {} into {}'.format(dna_file, faaFile))
        else:
            print('No gene was found for', dna_file)
            continue

        proteome_files.append((faaFile, org, update))

    # Translate genome into proteome.
    if len(args2concurrent) > 0:
        genome2proteome(args2concurrent)
    else:
        print('Skip translating genome into proteome.')
    return proteome_files
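translateGenomeByFGS_v2 only collects (dna_file, output_file, seq_type, train_model) tuples; the actual translation is delegated to genome2proteome, which is not shown here. Since the commented-out train_model values above (complete, sanger_5, 454_10, illumina_5, ...) match FragGeneScan's training models, a plausible sketch of that driver, assuming the standard run_FragGeneScan.pl wrapper is on the PATH:

import subprocess
from concurrent.futures import ProcessPoolExecutor

def _translate_one(args):
    # One FragGeneScan run per genome; args mirrors the tuples built above.
    dna_file, output_file, seq_type, train_model = args
    cmd = ['run_FragGeneScan.pl',
           '-genome={}'.format(dna_file),
           '-out={}'.format(output_file),    # FGS writes <out>.faa, matching faaFile above
           '-complete={}'.format(seq_type),
           '-train={}'.format(train_model)]
    subprocess.run(cmd, check=True)

def genome2proteome(args2concurrent, workers=4):
    # Hypothetical sketch; the real genome2proteome used by these examples is not shown.
    with ProcessPoolExecutor(max_workers=workers) as pool:
        list(pool.map(_translate_one, args2concurrent))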
Example #8

def gendata(target_txt, raw_txt):
    with open(target_txt, "w") as f:
        with open(raw_txt) as fh:
            lines = fh.readlines()
        for line in lines:
            # label text
            if line.isspace(): continue
            img_name = line.split()[0]
            full_img_path = relative_path + img_name
            print(full_img_path)
            img = cv2.imread(full_img_path)

            w = img.shape[1]  # width is x axis
            h = img.shape[0]  # height is y axis
            w1 = (w - 1) / 2  # for  [-1,1]
            h1 = (h - 1) / 2

            raw_land = list(line.split())[1:2 * n_p + 1]
            new_line = img_name
            for i in range(n_p):
                x_ = round((float(raw_land[2 * i + 0]) - w1) / w1,
                           4)  # value is [-1,1]
                y_ = round((float(raw_land[2 * i + 1]) - h1) / h1, 4)
                new_line = new_line + ' ' + str(
                    x_)  # note: The point order has changed: x1,y1,x2...
                new_line = new_line + ' ' + str(y_)
            print('new_line: ', new_line)
            f.write(new_line + '\n')

            # image
            scale_img = cv2.resize(img, (net_1_w, net_1_h))
            sub_flod = l1_data_dir + raw_txt.split('_')[2] + '/'
            tools.makedir(sub_flod)
            scale_img_path = sub_flod + img_name
            print('output path', scale_img_path)
            cv2.imwrite(scale_img_path, scale_img)
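This gendata variant writes both the rescaled network-input images and the normalized label file for level 1. Judging by the module-level names defined in the level_1 setup shown further down (l1_train_txt, l1_test_txt, train_txt, test_txt), the intended driver is presumably something like:

# Presumed usage, based on the variable names defined in the level_1 setup below;
# the actual call site is not included in these snippets.
gendata(l1_train_txt, train_txt)
gendata(l1_test_txt, test_txt)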
Example #9
# generate img and txt for level_1
# The point order is x1,x2,x3...
import sys
sys.path.append('../../util')
import tools
import os
import numpy as np
import cv2

train_txt = '../Result/raw_train_label.txt'  #  raw_txt
test_txt = '../Result/raw_test_label.txt'   
relative_path = '../Data/img_celeba/'
draw_dir = '../Result/draw_img/'  # output dir for images with drawn points
tools.makedir(draw_dir)

n_p = 5 # num of points

def myint(numb):
	return int(round(float(numb)))
def drawpoint(raw_txt,o_dir):
	for line in open(raw_txt):
		if line.isspace(): continue  # skip empty lines
		raw_land = list(line.split())[1:2*n_p+1]

		img_name = line.split()[0] 
		full_img_path = relative_path + img_name		
		img = cv2.imread(full_img_path)
		draw_img = img.copy()
		draw_img = tools.drawpoints_0(draw_img,raw_land)

		# output img
        print('found an error, idx:', idx)
        continue
    full_img_path = relative_path + r_name
    img = cv2.imread(full_img_path)
    h, w, c = img.shape

    err_1, err_5 = tools.cal_error_nor_diag(
        img, r_, o_)  # r_ includes the img name; points are in [-1,1]; err_1 is the mean error
    err_mat.append(err_5)
    out_land = np.array(list(map(float, o_.split()[1:2 * n_p + 1])))

    if err_1 >= threshold:
        count_drop = count_drop + 1
        draw_img = img.copy()
        draw_img = tools.drawpoints(draw_img, out_land)
        tools.makedir(drop_img_flod)
        draw_img_name = str(err_1) + '_' + r_name
        draw_img_path = drop_img_flod + draw_img_name
        cv2.imwrite(draw_img_path, draw_img)
    else:
        draw_img = img.copy()
        draw_img = tools.drawpoints(draw_img, out_land)
        tools.makedir(draw_img_flod)
        draw_img_name = str(err_1) + '_' + r_name
        draw_img_path = draw_img_flod + draw_img_name
        cv2.imwrite(draw_img_path, draw_img)
# -------------------------------------------------------------- print result
err_mat = np.array(err_mat)
err_mat = np.reshape(err_mat, (-1, 5))
MNE_5 = []
for i in range(n_p):
Example #11
# generate img and txt for level_1
# The point order has changed: x1,y1,x2...
import sys
sys.path.append('../../../util')
import tools
import os
import numpy as np
import cv2
train_txt = '../../../raw_data/Result/raw_train_label.txt'  #  raw_txt
test_txt = '../../../raw_data/Result/raw_test_label.txt'
l1_data_dir = '../../Data/'  # target dir
l1_train_txt = l1_data_dir + 'l1_train_label.txt'  # target txt
l1_test_txt = l1_data_dir + 'l1_test_label.txt'
relative_path = '../../../raw_data/Data/img_celeba/'  # used to locate the images
tools.makedir(l1_data_dir)

net_1_w = 48
net_1_h = 48
n_p = 5  # num of points


def gendata(target_txt, raw_txt):
    with open(target_txt, "w") as f:
        for line in open(raw_txt):
            # label text
            if line.isspace(): continue
            img_name = line.split()[0]
            full_img_path = relative_path + img_name
            print(full_img_path)
            img = cv2.imread(full_img_path)
	o_name = o_.split()[0]
	if r_name != o_name:
		print('found an error, idx:', idx)
		continue
	full_img_path = relative_path + r_name
	img = cv2.imread(full_img_path)
	h,w,c = img.shape	

	err_1, err_5 = tools.cal_error_nor_diag(img, r_, o_)	# r_ includes the img name; points are in [-1,1]; err_1 is the mean error
	err_mat.append(err_5)
	out_land = np.array(list(map(float, o_.split()[1:2*n_p+1])))
	if err_1 >= threshold :		
		count_drop = count_drop + 1
		draw_img = img.copy()
		draw_img = tools.drawpoints(draw_img,out_land)
		tools.makedir(drop_img_flod)
		draw_img_name = str(err_1) + '_' + r_name
		draw_img_path = drop_img_flod + draw_img_name
		cv2.imwrite(draw_img_path, draw_img)
	else:
		draw_img = img.copy()
		draw_img = tools.drawpoints(draw_img,out_land)
		tools.makedir(draw_img_flod)
		draw_img_name = str(err_1) + '_' + r_name
		draw_img_path = draw_img_flod + draw_img_name
		cv2.imwrite(draw_img_path, draw_img)
# -------------------------------------------------------------- print result
err_mat = np.array(err_mat)
err_mat = np.reshape(err_mat,(-1,5))
MNE_5 = []
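Both evaluation fragments above depend on tools.cal_error_nor_diag, which is not shown on this page. From the call sites it appears to take the image plus a reference and a predicted label line (each of the form 'img_name x1 y1 ... x5 y5' with coordinates in [-1, 1]) and to return the mean error together with the five per-point errors, normalized by the image diagonal. A minimal sketch under those assumptions:

import numpy as np

# Hypothetical sketch of tools.cal_error_nor_diag; the real helper is not shown
# in these examples.
def cal_error_nor_diag(img, r_, o_, n_p=5):
    h, w = img.shape[:2]
    diag = np.sqrt(w ** 2 + h ** 2)
    # Map [-1, 1] coordinates back to pixels: p = v * half + half
    half = np.array([(w - 1) / 2.0, (h - 1) / 2.0])
    ref = np.array(r_.split()[1:2 * n_p + 1], dtype=float).reshape(n_p, 2) * half + half
    out = np.array(o_.split()[1:2 * n_p + 1], dtype=float).reshape(n_p, 2) * half + half
    per_point = np.linalg.norm(ref - out, axis=1) / diag  # err_5: one value per landmark
    return per_point.mean(), per_point                    # err_1: mean over the landmarks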
sys.path.append('../../../util')
import tools
import numpy as np
import os
import cv2

l1_out_label = '../../Result/l1_out_test_label.txt'
l1_raw_label = '../../Data/l1_test_label.txt'
relative_path = '../../../raw_data/Data/img_celeba/'  # find the image from txt

crop_img_flod = '../../../level_2/Data/l1_crop/test/'

crop_label_flod = '../../../level_2/Data/l1_crop/'
crop_label_txt = crop_label_flod + 'l1_crop_test_label.txt'
crop_draw_img_flod = '../../../level_2/Data/l1_crop_draw/test/'
tools.makedir(crop_img_flod)

n_p = 5
# ----------------------------------------------------------------------- load label
l1_raw_fid = open(l1_raw_label)
l1_raw_lines = l1_raw_fid.readlines()
l1_raw_fid.close()
l1_out_fid = open(l1_out_label)
l1_out_lines = l1_out_fid.readlines()
l1_out_fid.close()
err_mat = []

threshold = 0.1
count_threshold = 0
fid = open(crop_label_txt, 'w')
for idx in range(len(l1_out_lines)):
Example #15
# divide celebA dataset
import sys
sys.path.append('../../util')
import tools
import os
import random
import shutil

raw_txt = '../Data/celeba_label.txt'
relative_path = '../Data/img_celeba/'  # used to locate the images
train_txt = '../Result/raw_train_label.txt'  # target txt
test_txt = '../Result/raw_test_label.txt'
train_img_fold = '../Result/train/'
test_img_fold = '../Result/test/'
tools.makedir(train_img_fold)
tools.makedir(test_img_fold)

per = 0.8  # percentage of train set
line_num = 0
train_num = 0
test_num = 0
train_f = open(train_txt, "w")
test_f = open(test_txt, "w")
for line in open(raw_txt):
    if line.isspace(): continue  # skip empty line
    line_num += 1
    img_name = line.split()[0]
    full_img_path = relative_path + img_name
    a_rand = random.uniform(0, 1)
    # train set
    if a_rand <= per:
# generate img and txt for level_1
# The point order has changed: x1,y1,x2...
import sys
sys.path.append('../../../util')
import tools
import os
import numpy as np
import cv2
train_txt = '../../../raw_data/Result/raw_train_label.txt' 		#  raw_txt
test_txt = '../../../raw_data/Result/raw_test_label.txt'     
l1_data_dir = '../../Data/' 									# target dir
l1_train_txt = l1_data_dir + 'l1_train_label.txt'				# target txt
l1_test_txt = l1_data_dir + 'l1_test_label.txt'
relative_path = '../../../raw_data/Data/img_celeba/'  			# used to locate the images
tools.makedir(l1_data_dir)

net_1_w = 48
net_1_h = 48
n_p = 5 # num of points
def gendata(target_txt,raw_txt):
	with open(target_txt,"w") as f:
		for line in open(raw_txt):
			# label text
			if line.isspace() : continue
			img_name = line.split()[0] 
			full_img_path = relative_path + img_name
			print(full_img_path)
			img = cv2.imread(full_img_path)

			w = img.shape[1]		# width is x axis
			h = img.shape[0]		# height is y axis
import numpy as np
from captcha.image import ImageCaptcha  # provides the ImageCaptcha class used below
from PIL import Image  # PIL can also generate captcha images
import random, string, os, tools

LOG_DIR = tools.makedir_logs(os.path.basename(__file__)[:-3])


###################### Generate captcha images #########################

# Get a random string of digits
def get_random_string(length=4):
    return ''.join(
            [random.choice(string.digits) for _ in range(length)])


created, IMG_PATH = tools.makedir(os.path.join(LOG_DIR, 'images'))


# Generate one captcha image and write it to IMG_PATH
def gen_captcha_image():
    image = ImageCaptcha()
    captcha_txt = get_random_string()
    image.generate(captcha_txt)
    image.write(captcha_txt, os.path.join(IMG_PATH, '{}.jpg'.format(captcha_txt)))


# Generate 10000 captcha images
num = 10000
if created:
    for i in range(num):
        gen_captcha_image()
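Unlike the earlier snippets, this last example expects tools.makedir to return a (created, path) tuple. A sketch of such a variant, again assuming it merely wraps os.makedirs:

import os

# Hypothetical tuple-returning variant of tools.makedir, matching how this
# snippet uses it; the real helper is not shown in these examples.
def makedir(path):
    created = not os.path.isdir(path)
    os.makedirs(path, exist_ok=True)
    return created, path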