def evaluate_taglist(self, tag_list):
    """Evaluate each tag in *tag_list* and return the expanded texts.

    Tags containing a '.' are treated as "prefix.tag" pairs:
      * an integer prefix pins the tag's expansion — the evaluated text
        is cached in ``self.static_tags`` so repeated uses of the same
        "N.tag" yield the same text;
      * a prefix naming an entry of ``self.text_functions`` applies that
        function to the evaluated tag.
    Plain tags are expanded with a random entry from ``self.tags``.
    Unknown tags are silently skipped.

    :param tag_list: iterable of tag strings to expand.
    :return: list of evaluated strings.
    """
    tags_evaluated = []
    for t in tag_list:
        if '.' in t:
            # Split at the FIRST dot. str.partition also handles a
            # trailing dot: the original index loop ran only up to
            # len(t) - 2, so a '.' in the last position left
            # prefix_tag unbound and raised NameError.
            prefix_tag, _, real_tag = t.partition('.')
            if is_int(prefix_tag):
                # Numeric prefix: evaluate once, then reuse the cached
                # expansion for every later occurrence of this tag.
                if t not in self.static_tags:
                    if real_tag in self.tags:
                        self.static_tags[t] = self.evaluate(
                            get_random(self.tags[real_tag]))
                if t in self.static_tags:
                    tags_evaluated.append(self.static_tags[t])
            elif prefix_tag in self.text_functions:
                # Function prefix: expand the tag, then post-process it
                # with the named text function.
                real_tag = self.evaluate("#" + real_tag + "#")
                tags_evaluated.append(
                    self.text_functions[prefix_tag](real_tag))
        elif t in self.tags:
            tagged_text = get_random(self.tags[t])
            tags_evaluated.append(self.evaluate(tagged_text))
    return tags_evaluated
def get_random_canvas_color(self):
    """Pick a random RGBA canvas color.

    Distribution (driven by one uniform draw ``n``):
      * n < 0.10 — fully transparent black;
      * n < 0.30 — black with a weighted-random alpha;
      * n < 0.65 — white with a weighted-random alpha;
      * n < 1.00 — random RGB with a weighted-random alpha.
    The final ``else`` branch is unreachable (``random()`` < 1 always
    holds) but is kept as a safety net.

    :return: ``(r, g, b, a)`` tuple of ints in [0, 255].
    """
    n = random.random()

    def rc():
        # Random 8-bit channel. randint's upper bound is INCLUSIVE, so
        # the bound must be 255 — the original used 256, which could
        # produce an out-of-range channel value.
        return random.randint(0, 255)

    if n < 0.1:
        color = (0, 0, 0, 0)
    elif n < 0.3:
        color = (0, 0, 0, int(
            utils.get_random([[0, 80], [80, 170], [170, 255]], [6, 2, 1])))
    elif n < 0.65:
        color = (255, 255, 255, int(
            utils.get_random([[0, 50], [50, 170], [170, 255]], [7, 2, 1])))
    elif n < 1:
        color = (rc(), rc(), rc(), int(
            utils.get_random([[0, 50], [50, 170], [170, 255]], [7, 2, 1])))
    else:
        # Unreachable: random.random() is always < 1.
        color = (random.randint(0, 255), random.randint(0, 255),
                 random.randint(0, 255), random.randint(150, 200))
    return color
def main():
    """Demo entry point: bubble-sort ten random numbers and print them."""
    values = utils.get_random(0, 100, 10)
    print("Raw input:")
    utils.print_nums(values)
    # Sort in place, then show the result.
    bubble_sort(values)
    print("Result output:")
    utils.print_nums(values)
def __init__(self, file, save_dir="uploads", http_dir="uploads", prefix="processed", separator="-"):
    """Wrap *file* in an AppFile and record storage settings.

    Also draws a random secure hash for this upload via get_random().
    """
    self.file = AppFile(file)
    self.secure_hash = get_random()
    # Storage configuration, kept verbatim from the arguments.
    self.save_dir = save_dir
    self.http_dir = http_dir
    self.prefix = prefix
    self.separator = separator
def main():
    """Demo entry point: insert-sort ten random numbers and print them.

    A debugging leftover ``nums = [2, 0, 2, 1, 1, 0]`` that silently
    discarded the random input (making the get_random call dead code)
    has been removed.
    """
    nums = utils.get_random(0, 100, 10)
    print("Raw input:")
    utils.print_nums(nums)
    insert_sort(nums)
    print("Result output:")
    utils.print_nums(nums)
def generate_resources(self):
    """Scatter random resources over empty cells of the island map.

    Tries between 1 and sizey*sizex random positions; each tried cell
    whose event is still 'nothing' receives a random resource from
    ``self.all_resources``. Positions may repeat, so fewer resources
    than attempts can be placed.
    """
    new_resources_attempts = randint(1, self.sizey * self.sizex)
    # range() instead of the Python-2-only xrange(); behavior is
    # identical and the code now also runs on Python 3.
    for _ in range(new_resources_attempts):
        randposx = randint(0, self.sizex - 1)
        randposy = randint(0, self.sizey - 1)
        cell = self.island_matrix[randposy][randposx]
        if cell.event_type == 'nothing':
            cell.event_type = get_random(self.all_resources)
def evaluate_taglist(self, tag_list):
    """Expand every known tag in *tag_list*.

    For each tag present in ``self.tags``, a random variant is picked
    and evaluated; unknown tags are skipped.

    :param tag_list: iterable of tag names.
    :return: list of evaluated tag texts.
    """
    return [
        self.evaluate(get_random(self.tags[tag]))
        for tag in tag_list
        if tag in self.tags
    ]
def apply_darker(self, img):
    """Darken *img* by a randomly chosen brightness factor.

    The factor is drawn from (0.1, 1]; the weights favor mild
    darkening (the 0.8-1.0 band is ten times likelier than 0.1-0.2).
    """
    ranges = [[0.1, 0.2], [0.2, 0.4], [0.4, 0.6], [0.6, 0.8], [0.8, 1]]
    weights = [0.5, 1, 2, 3, 5]
    scale = utils.get_random(ranges, weights)
    # Scale in numpy space, then convert back to a PIL image.
    darkened = self.pil2np(img) * scale
    return self.np2pil(darkened)
def save(self, name=None, save_dir="uploads", prefix="original", separator="-"):
    """Persist the wrapped file under *save_dir*.

    When *name* is omitted, one is generated as
    ``<prefix><separator><random><ext>``. Records the absolute path in
    ``self.save_path`` and flags the instance as saved.
    """
    if name is None:
        # e.g. "original-<random-token>.<ext>"
        name = separator.join([prefix, get_random()]) + self.ext
    self.save_path = os.path.abspath(os.path.join(save_dir, name))
    self.__file.save(self.save_path)
    self.saved = True
def generate_initial_map(self):
    """Populate the map with 3d6 random elements, nearest first.

    Each element is a deep copy of a random template from
    ``self.map_elements_types`` with a freshly rolled distance.
    """
    self.map_elements = []
    for _ in range(Dice.parse('3d6')):
        element = copy.deepcopy(utils.get_random(self.map_elements_types))
        element['distance'] = Dice.parse('10*10d10')
        self.map_elements.append(element)
    # Ascending by distance: closest elements come first.
    self.map_elements.sort(key=lambda e: e['distance'])
def apply_rot_on_canvas(self, img, angel=20, max_angel=80):
    """Rotate *img* on the canvas by a random angle.

    The rotation angle is drawn from a clipped normal over
    ``[-angel, angel]``. Note that the ``max_angel`` argument is
    immediately redrawn from +/-[75, 90] regardless of the value
    passed in; the parameter is kept for interface compatibility.

    A debugging leftover ``angel = 5`` that clobbered the freshly
    drawn random angle has been removed.
    """
    low = 75
    high = 90
    max_angel = utils.get_random([[-high, -low], [low, high]], [1, 1])
    angel = utils.get_randn_clipped([-angel, angel])
    img = self.rot(img, angel=angel, shape=self.out_size, max_angel=max_angel)
    return img
def gen_one_img(self, char):
    """Render one augmented training image for *char*.

    Pipeline: pick a weighted-random output scale, draw the glyph on a
    randomly colored canvas, rotate it twice, paste it on a random
    background, crop, then run several augmentation/noise passes.

    A live ``canvas.show()`` debug call mid-pipeline (which popped a
    preview window for every generated image) has been removed; the
    sibling implementation keeps all such calls commented out.

    :param char: character to render.
    :return: the final PIL image.
    """
    # Output size = font_size scaled by a weighted random factor;
    # mid-range scales (~1.4x) are the likeliest.
    scale = utils.get_random(
        [[1.0, 1.1], [1.1, 1.2], [1.2, 1.3], [1.3, 1.4], [1.4, 1.5],
         [1.5, 1.6], [1.6, 1.7]],
        [1, 1.5, 2, 2.5, 3, 2.5, 1.5])
    self.out_size = (int(self.font_size * scale), int(self.font_size * scale))
    bg = self.get_random_bg()
    bg = bg.resize(self.bg_size)
    canvas_color = self.get_random_canvas_color()
    canvas = self.get_canvas(canvas_color)
    canvas = self.draw_char_on_canvas(canvas, char)
    canvas = self.apply_rotate_on_canvas(canvas, fillcolor=canvas_color)
    canvas = self.apply_rot_on_canvas(canvas, angel=20)
    canvas = self.paste_canvas_on_bg(canvas, bg)
    canvas = self.aug_img_using_cv2(canvas, 0.8)
    canvas = self.crop_from_bg(canvas)
    canvas = self.aug_img_using_cv2(canvas, 0.5)
    canvas = self.add_noise(canvas)
    canvas = self.random_aug(canvas, 0)
    # Round-trip through numpy, matching the original flow.
    canvas = self.np(canvas)
    canvas = self.np2pil(canvas)
    return canvas
    # NOTE(review): the two lines below are the tail of a function whose
    # `def` line falls outside this chunk (it masks padded positions with
    # a large negative value before a max over axis 1) — confirm the
    # enclosing definition against the full file.
    seq -= (1 - mask) * 1e10
    return K.max(seq, 1)


# --- script-level setup: load KB, training data and char vocabulary ---
id2kb, kb2id = get_kb()
print(type(id2kb))
train_data = get_train_data()
print("len(train_data): %d" % len(train_data))
id2char, char2id = get_char_dict(id2kb, train_data)
print("char number is %d", len(id2char))
# Shuffle the sample indices, then hold out every 9th sample as dev.
random_order = get_random(train_data)
dev_data = [train_data[j] for i, j in enumerate(random_order) if i % 9 == 0]
train_data = [train_data[j] for i, j in enumerate(random_order) if i % 9 != 0]
sentence_in = Input(shape=(None, ))  # sentence to run recognition on
mention_in = Input(shape=(None, ))  # entity mention representation
left_in = Input(shape=(None, ))  # left boundary of the recognized span
right_in = Input(shape=(None, ))  # right boundary of the recognized span
y_in = Input(shape=(None, ))  # entity tag sequence
t_in = Input(shape=(None, ))  # whether the mention links to the entity
# 3-D [batch, sentence, word]: 1.0 where the token id is > 0, else 0.0
sentence_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), "float32"))(sentence_in)
print("sentence_mask:", sentence_mask)
def gen_one_img(self, char):
    """Render one augmented training image for *char*.

    Pipeline: weighted-random output scale, glyph drawn on a random
    canvas color, random affine shear then an independent random
    rotation, paste onto a random background, crop, then several
    augmentation/noise passes.

    Two debugging leftovers were removed: a ``scale = 1.3`` override
    that discarded the freshly drawn random scale, and a live
    ``self.show(canvas)`` call that popped a preview window for every
    generated image.

    :param char: character to render.
    :return: the final PIL image.
    """
    # Output size = font_size scaled by a weighted random factor;
    # mid-range scales (~1.4x) are the likeliest.
    scale = utils.get_random(
        [[1.0, 1.1], [1.1, 1.2], [1.2, 1.3], [1.3, 1.4], [1.4, 1.5],
         [1.5, 1.6], [1.6, 1.7]],
        [1, 1.5, 2, 2.5, 3, 2.5, 1.5])
    self.out_size = (int(self.font_size * scale), int(self.font_size * scale))
    bg = self.get_random_bg()
    bg = bg.resize(self.bg_size)
    canvas_color = self.get_random_canvas_color()
    canvas = self.get_canvas(canvas_color)
    canvas = self.draw_char_on_canvas(canvas, char)
    # Random shear/affine, then an independent random rotation.
    angel = utils.get_randn_clipped([-30, 30])
    canvas = self.apply_affine(canvas, angle=angel, max_angle=80)
    angle = utils.get_randn_clipped([-90, 90])
    canvas = self.apply_rotate(canvas, angle=angle)
    canvas = self.paste_canvas_on_bg(canvas, bg)
    canvas = self.aug_img_using_cv2(canvas, 0.8)
    canvas = self.crop_from_bg(canvas)
    canvas = self.aug_img_using_cv2(canvas, 0.5)
    canvas = self.add_noise(canvas)
    canvas = self.random_aug(canvas, 0)
    # Round-trip through numpy, matching the original flow.
    canvas = self.np(canvas)
    canvas = self.np2pil(canvas)
    return canvas
def train(options):
    """Build and train an unpaired image-translation GAN (WGAN-GP style).

    Constructs generators G_A/G_B and discriminators D_A/D_B on a fresh
    graph, then alternates discriminator and (every ``n_critic`` steps)
    generator updates, saving a checkpoint and a sample translation
    after every epoch.

    :param options: namespace with batch_size, thread_num, lr, gpus,
        epochs, n_critic and test_dir attributes.
    """
    lam = 10                       # gradient-penalty weight
    alpha = 1000                   # identity-loss weight; cycle loss uses 10*alpha
    log_nums_per_epoch = 20
    net_gradient_clip_value = 1e8  # huge bound -> clipping is effectively disabled
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        dataset = Dataset(options.batch_size, options.thread_num, "../data")
        steps_per_epoch = dataset.record_number // options.batch_size
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        # Decay the learning rate by 0.9 every 5 epochs.
        learning_rate = tf.train.exponential_decay(options.lr, global_step, steps_per_epoch * 5, 0.9, staircase=True)
        d_opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
        g_opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
        real_A = tf.placeholder(tf.float32, (None, 512, 512, 3))
        real_B = tf.placeholder(tf.float32, (None, 512, 512, 3))
        gp_weight_1 = tf.placeholder(tf.float32)
        gp_weight_2 = tf.placeholder(tf.float32)
        # d_tower_grads = []
        # g_tower_grads = []
        # placeholders=[]
        # NOTE(review): the loop variable ``i`` is unused and every
        # iteration pins "/gpu:0" — multi-GPU towers were probably
        # intended ("/gpu:%d" % i). Confirm before running with
        # options.gpus > 1.
        for i in range(options.gpus):
            with tf.device("/gpu:0"):
                with tf.variable_scope("G_B", reuse=tf.AUTO_REUSE):
                    fake_A = G(real_B, True)  # G_B(B)
                with tf.variable_scope("G_A", reuse=tf.AUTO_REUSE):
                    fake_B = G(real_A, True)  # G_A(A)
                    test_B = G(real_A, False)  # test
                    rec_B = G(fake_A, True, "bn2")  # G_A(G_B(B))
                with tf.variable_scope("G_B", reuse=tf.AUTO_REUSE):
                    rec_A = G(fake_B, True, "bn2")  # G_B(G_A(A))
                with tf.variable_scope("D_A", reuse=tf.AUTO_REUSE):
                    d_fake_A = D(fake_A, True)
                    d_real_A = D(real_A, True)
                    gradient_penalty_1 = wgan_gp(fake_A, real_A) * gp_weight_1
                with tf.variable_scope("D_B", reuse=tf.AUTO_REUSE):
                    d_fake_B = D(fake_B, True)
                    d_real_B = D(real_B, True)
                    gradient_penalty_2 = wgan_gp(fake_B, real_B) * gp_weight_2
                """ keep in mind that whether the score of groundtruth is high or low doesn't matter """
                # Wasserstein distance estimates for the two critics.
                wd_B = -tf.reduce_mean(d_fake_B) + tf.reduce_mean(d_real_B)
                wd_A = -tf.reduce_mean(d_real_A) + tf.reduce_mean(d_fake_A)
                netD_train_loss = wd_A + wd_B
                d_loss = -netD_train_loss + gradient_penalty_1 + gradient_penalty_2
                _g_loss = tf.reduce_mean(d_fake_B) - tf.reduce_mean(d_fake_A)
                # Cycle-consistency and identity terms for the generators.
                cycle_loss = tf.reduce_mean(tf.stack([L2_loss(real_A, rec_A), L2_loss(real_B, rec_B)]))
                I_loss = tf.reduce_mean(tf.stack([L2_loss(real_A, fake_B), L2_loss(real_B, fake_A)]))
                g_loss = -_g_loss + alpha * I_loss + 10 * alpha * cycle_loss
                """ show these values in train loop"""
                # true and fake data discriminator score
                dd1 = tf.reduce_mean(d_fake_B)
                dd2 = tf.reduce_mean(d_real_B)
                dd3 = tf.reduce_mean(d_real_A)
                dd4 = tf.reduce_mean(d_fake_A)
                # generator discriminator score
                gg1 = tf.reduce_mean(d_fake_B)
                gg2 = tf.reduce_mean(d_fake_A)
        d_var = utils.trainable_variables('discriminator')
        g_var = utils.trainable_variables('generator')
        # Clip every gradient element into +/- net_gradient_clip_value
        # before applying (a no-op in practice given the 1e8 bound).
        d_grads = d_opt.compute_gradients(d_loss, d_var)
        d_capped = [(tf.clip_by_value(grad, -net_gradient_clip_value, net_gradient_clip_value), var) for grad, var in d_grads]
        netD_opt = d_opt.apply_gradients(d_capped)
        g_grads = g_opt.compute_gradients(g_loss, g_var)
        g_capped = [(tf.clip_by_value(grad, -net_gradient_clip_value, net_gradient_clip_value), var) for grad, var in g_grads]
        netG_opt = g_opt.apply_gradients(g_capped)
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        log_interval_per_epoch = steps_per_epoch // log_nums_per_epoch
        # save_interval_per_epoch = steps_per_epoch//save_nums_per_epoch
        netD_gp_weight_1 = lam
        netD_gp_weight_2 = lam
        for epoch in range(options.epochs):
            print("epoch {} out of {}".format(epoch + 1, options.epochs))
            for step in range(steps_per_epoch):
                print("step {} out of {}".format(step + 1, steps_per_epoch))
                imagesA, imagesB = dataset.batch()
                feed_dict = {real_A: imagesA, real_B: imagesB, gp_weight_1: netD_gp_weight_1, gp_weight_2: netD_gp_weight_2}
                # train D
                _, d, d1, d2, d3, d4 = sess.run([netD_opt, netD_train_loss, dd1, dd2, dd3, dd4], feed_dict=feed_dict)
                # train G — only every n_critic-th step, per WGAN practice
                if step % options.n_critic == 0:
                    feed_dict = {real_A: imagesA, real_B: imagesB}
                    _, g, g1, g2 = sess.run([netG_opt, _g_loss, gg1, gg2], feed_dict=feed_dict)
                # NOTE(review): g/g1/g2 may be unbound on the first log
                # if n_critic never divided an earlier step — benign
                # when n_critic >= 1 since step 0 trains G, but confirm.
                if step % log_interval_per_epoch == 0:
                    print("d1:{} d2:{} d3:{} d4:{} d:{}".format(d1, d2, d3, d4, d))
                    print("g1:{} g2:{} g:{}".format(g1, g2, g))
            # each epoch save model and test
            checkpoint_path = os.path.join("../checkpoints", "model_{}.ckpt".format(epoch))
            saver.save(sess, checkpoint_path)
            random_img = utils.get_random(options.test_dir)
            show_img = sess.run(test_B, feed_dict={real_A: random_img})
            # imsave("../{}_real.jpg".format(epoch), random_img[0])
            imsave("../{}_fake.jpg".format(epoch), show_img[0])
            imsave("../{}_fake_clip.jpg".format(epoch), np.clip(show_img[0], 0, 1))