def __init__(self, **kwargs):
    """Initialize the game widget: boot every gameworld system in order,
    then construct the BackgroundGenerator helper bound to the gameworld.
    `init_game` runs as the callback once initialization completes."""
    super(YACSGame, self).__init__(**kwargs)
    systems_to_init = [
        'back_stars', 'mid_stars', 'position', 'sun1', 'sun2',
        'camera_stars1', 'camera_stars2', 'map', 'planet1', 'planet2',
        'camera_sun1', 'camera_sun2', 'camera_planet1', 'camera_planet2',
        'scale', 'rotate', 'color', 'particles', 'emitters',
        'particle_renderer', 'cymunk_physics', 'steering', 'ship_system',
        'projectiles', 'projectile_weapons', 'lifespan', 'combat_stats',
        'asteroids', 'steering_ai', 'weapon_ai', 'shields',
        'shield_renderer', 'map_grid', 'grid_camera', 'radar_renderer',
        'radar_color', 'world_grid', 'global_map', 'global_camera',
        'world_map', 'global_map_renderer', 'global_map_renderer2',
        'global_map_planet_renderer',
    ]
    self.gameworld.init_gameworld(systems_to_init, callback=self.init_game)
    self.background_generator = BackgroundGenerator(self.gameworld)
def do_eval(sess, eval_loss, images_placeholder, labels_placeholder,
            training_time_placeholder, images, labels, batch_size):
    '''
    Function for running the evaluations every X iterations on the training and validation sets.
    :param sess: The current tf session
    :param eval_loss: The placeholder containing the eval loss
    :param images_placeholder: Placeholder for the images
    :param labels_placeholder: Placeholder for the masks
    :param training_time_placeholder: Placeholder toggling the training/testing mode.
    :param images: A numpy array or h5py dataset containing the images
    :param labels: A numpy array or h5py dataset containing the corresponding labels
    :param batch_size: The batch_size to use.
    :return: The average loss (as defined in the experiment), and the average dice over all `images`.
             Returns (nan, nan) if no complete batch could be evaluated.
    '''
    loss_ii = 0
    dice_ii = 0
    num_batches = 0

    # Wrapping iterate_minibatches in BackgroundGenerator pre-fetches batches
    # in a background thread for speed, at the risk of exceptions inside the
    # generator not being surfaced.
    for batch in BackgroundGenerator(
            iterate_minibatches(images,
                                labels,
                                batch_size=batch_size,
                                augment_batch=False)):  # No aug in evaluation

        x, y = batch

        # Skip incomplete trailing batches: the graph expects fixed-size input.
        if y.shape[0] < batch_size:
            continue

        feed_dict = {
            images_placeholder: x,
            labels_placeholder: y,
            training_time_placeholder: False  # evaluation mode
        }

        closs, cdice = sess.run(eval_loss, feed_dict=feed_dict)
        loss_ii += closs
        dice_ii += cdice
        num_batches += 1

    # BUGFIX: guard against ZeroDivisionError when every batch was skipped
    # (e.g. fewer examples than batch_size); the original code crashed here.
    if num_batches == 0:
        logging.warning('do_eval: no complete batches evaluated; returning NaN')
        return float('nan'), float('nan')

    avg_loss = loss_ii / num_batches
    avg_dice = dice_ii / num_batches

    logging.info(' Average loss: %0.04f, average dice: %0.04f' %
                 (avg_loss, avg_dice))

    return avg_loss, avg_dice
def generateBackgroundImage(cls, distorted_img, background_type):
    """Build a background 10px larger than `distorted_img` and paste the
    text onto it at (5, 5).

    background_type selects the generator: 0 gaussian noise, 1 plain
    white, 2 quasicrystal, anything else a picture. Pixels that are pure
    white (255) or pure black (0) in the text image are masked out so the
    background shows through. Returns the composited background image.
    """
    text_width, text_height = distorted_img.size
    bg_height = text_height + 10
    bg_width = text_width + 10

    # Dispatch table instead of an if/elif chain; unknown types fall back
    # to a picture background, matching the original `else` branch.
    builders = {
        0: BackgroundGenerator.gaussian_noise,
        1: BackgroundGenerator.plain_white,
        2: BackgroundGenerator.quasicrystal,
    }
    builder = builders.get(background_type, BackgroundGenerator.picture)
    background = builder(bg_height, bg_width)

    # Mask: drop pure-white and pure-black pixels, keep everything else.
    mask = distorted_img.point(lambda px: 0 if px == 255 or px == 0 else 255,
                               '1')
    background.paste(distorted_img, (5, 5), mask=mask)

    return background
def generate(cls, index, text, font, out_dir, height, extension,
             skewing_angle, random_skew, blur, random_blur, background_type,
             distorsion_type, distorsion_orientation, is_handwritten,
             name_format, width, alignment, text_color=-1):
    """Render one synthetic text image and save it to `out_dir`.

    Pipeline: draw text, skew, distort, resize to `height`, paste onto a
    generated background with `alignment`, Gaussian-blur, then save as a
    file named according to `name_format` with the given `extension`.
    """
    image = None

    ##########################
    # Create picture of text #
    ##########################
    if is_handwritten:
        image = HandwrittenTextGenerator.generate(text)
    else:
        image = ComputerTextGenerator.generate(text, font, text_color, height)

    # NOTE(review): random_angle is always drawn even when random_skew is
    # False and only skewing_angle is used below.
    random_angle = random.randint(0 - skewing_angle, skewing_angle)

    rotated_img = image.rotate(
        skewing_angle if not random_skew else random_angle, expand=1)

    #############################
    # Apply distorsion to image #
    #############################
    if distorsion_type == 0:
        distorted_img = rotated_img  # no distorsion
    elif distorsion_type == 1:
        distorted_img = DistorsionGenerator.sin(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))
    elif distorsion_type == 2:
        distorted_img = DistorsionGenerator.cos(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))
    else:
        distorted_img = DistorsionGenerator.random(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))

    ##################################
    # Resize image to desired format #
    ##################################
    # Width preserving aspect ratio at the target height, with a 10px
    # margin folded into both dimensions of the ratio.
    new_width = int(
        float(distorted_img.size[0] + 10) *
        (float(height) / float(distorted_img.size[1] + 10)))
    # NOTE(review): resize target is `height - 10` while new_width was
    # computed for `height` -- slight aspect distortion; confirm intended.
    resized_img = distorted_img.resize((new_width, height - 10),
                                       Image.ANTIALIAS)

    # Fixed-width output when width > 0, otherwise snug fit plus margin.
    background_width = width if width > 0 else new_width + 10

    #############################
    # Generate background image #
    #############################
    if background_type == 0:
        background = BackgroundGenerator.gaussian_noise(
            height, background_width)
    elif background_type == 1:
        background = BackgroundGenerator.plain_white(
            height, background_width)
    elif background_type == 2:
        background = BackgroundGenerator.quasicrystal(
            height, background_width)
    else:
        background = BackgroundGenerator.picture(height, background_width)

    #############################
    # Place text with alignment #
    #############################
    new_text_width, _ = resized_img.size

    if alignment == 0:    # left, 5px margin
        background.paste(resized_img, (5, 5), resized_img)
    elif alignment == 1:  # centered
        background.paste(
            resized_img,
            (int(background_width / 2 - new_text_width / 2), 5),
            resized_img)
    else:                 # right, 5px margin
        background.paste(resized_img,
                         (background_width - new_text_width - 5, 5),
                         resized_img)

    ##################################
    #      Apply gaussian blur       #
    ##################################
    final_image = background.filter(
        ImageFilter.GaussianBlur(
            radius=(blur if not random_blur else random.randint(0, blur))))

    #####################################
    # Generate name for resulting image #
    #####################################
    if name_format == 0:
        image_name = '{}_{}.{}'.format(text, str(index), extension)
    elif name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), text, extension)
    elif name_format == 2:
        image_name = '{}.{}'.format(str(index), extension)
    else:
        print('{} is not a valid name format. Using default.'.format(
            name_format))
        image_name = '{}_{}.{}'.format(text, str(index), extension)

    # Save the image
    final_image.convert('RGB').save(os.path.join(out_dir, image_name))
def generate(cls, index, text, fonts, out_dir, height, extension,
             skewing_angle, random_skew, blur, random_blur, background_type,
             distorsion_type, distorsion_orientation, is_handwritten,
             name_format, width, alignment, text_color):
    """Render `text` as several per-font images pasted side by side onto a
    white canvas, apply a background over the result, blur, and save.
    """
    ##########################
    # Create picture of text #
    ##########################
    images = ComputerTextGenerator.generate(text, fonts, text_color, height,
                                            width)

    #############################
    # Generate background image #
    #############################
    # NOTE(review): this sums im.size[1] (heights) to obtain a *width*;
    # images are pasted horizontally below, so size[0] may have been
    # intended -- confirm against ComputerTextGenerator's output shape.
    background_width = sum([ im.size[1] for im in images ])
    # Plain white RGB canvas; textured background is applied after pasting.
    background = Image.fromarray(
        np.ones((height, background_width, 3), dtype='uint8') * 255, "RGB")
    print('# of images: {}'.format(len(images)))

    acc_width = np.random.randint(2, 13) # random left offset for first image
    for idx, image in enumerate(images):
        random_angle = random.randint(0-skewing_angle, skewing_angle)

        rotated_img = image.rotate(
            skewing_angle if not random_skew else random_angle, expand=1)

        #############################
        # Apply distorsion to image #
        #############################
        if distorsion_type == 0:
            distorted_img = rotated_img  # no distorsion
        elif distorsion_type == 1:
            distorted_img = DistorsionGenerator.sin(
                rotated_img,
                vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
                horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
            )
        elif distorsion_type == 2:
            distorted_img = DistorsionGenerator.cos(
                rotated_img,
                vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
                horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
            )
        else:
            distorted_img = DistorsionGenerator.random(
                rotated_img,
                vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
                horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
            )

        ##################################
        # Resize image to desired format #
        ##################################
        # Width preserving aspect ratio at the target height (10px margin
        # folded into the ratio); see the single-font variant for the same
        # height vs height-10 quirk.
        new_width = int(float(distorted_img.size[0] + 10) *
                        (float(height) / float(distorted_img.size[1] + 10)))
        resized_img = distorted_img.resize((new_width, height - 10),
                                           Image.ANTIALIAS)

        #############################
        # Place text with alignment #
        #############################
        new_text_width, _ = resized_img.size
        # Paste at the running x offset with small random vertical jitter.
        background.paste(resized_img,
                         (int(acc_width), np.random.randint(2, 10)))
        acc_width += new_text_width

    # Swap the plain white canvas for a textured background while keeping
    # the pasted text (project helper).
    background = BackgroundGenerator.applyMyBackground(
        height, background_width, np.array(background))

    ##################################
    #      Apply gaussian blur       #
    ##################################
    final_image = background.filter(
        ImageFilter.GaussianBlur(
            radius=(blur if not random_blur else random.randint(0, blur))
        )
    )

    #####################################
    # Generate name for resulting image #
    #####################################
    if name_format == 0:
        image_name = '{}_{}.{}'.format(text, str(index), extension)
    elif name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), text, extension)
    elif name_format == 2:
        image_name = '{}.{}'.format(str(index),extension)
    else:
        print('{} is not a valid name format. Using default.'.format(name_format))
        image_name = '{}_{}.{}'.format(text, str(index), extension)

    # Save the image
    final_image.convert('RGB').save(os.path.join(out_dir, image_name))
def generate(cls, index, text, font, out_dir, height, extension,
             skewing_angle, random_skew, blur, random_blur, background_type,
             distorsion_type, distorsion_orientation, is_handwritten,
             name_format, text_color=-1, prefix = ""):
    """Render one heavily augmented synthetic text image and save it.

    On top of the basic skew/distort/background pipeline this variant adds
    random erosion/closing, random rescaling with mixed resampling filters,
    occasional binarization, and randomized background/distorsion choices.
    The caller-supplied `background_type` and `distorsion_type` arguments
    are overridden by random draws below.
    """
    image = None

    ##########################
    # Create picture of text #
    ##########################
    if is_handwritten:
        print("--------- text: ", text)
        image = HandwrittenTextGenerator.generate(text)
        print("---> ", image)
    else:
        image = ComputerTextGenerator.generate(text, font, text_color, height)

    random_angle = random.uniform(0-skewing_angle, skewing_angle)

    rotated_img = image.rotate(
        skewing_angle if not random_skew else random_angle, expand=1)

    # ~30%: thin the strokes with a random 1-4 px erosion (best effort).
    if (random.randint(0,10) < 3):
        try:
            x = random.randint(1,4)
            kernel = np.ones((x, x), np.uint8)
            rotated_img = Image.fromarray(
                cv2.erode(np.array(rotated_img), kernel, iterations=1))
        except Exception as e:
            pass  # keep the unmodified image on any failure
    else:
        # Rarely, and only for taller images: close small gaps in strokes.
        if (random.randint(0,10) < 1 and height > 45):
            x = random.randint(1, 4)
            kernel = np.ones((x, x), np.uint8)
            rotated_img = Image.fromarray(
                cv2.morphologyEx(np.array(rotated_img), cv2.MORPH_CLOSE,
                                 kernel))

    # Random +/-10% rescale using a randomly chosen resampling filter.
    f = random.uniform(0.9, 1.1)
    if (random.randint(0, 1) == 0):
        rotated_img = rotated_img.resize(
            (int(rotated_img.size[0] * f), int(rotated_img.size[1] * f)),
            Image.ANTIALIAS)
    else:
        if (random.randint(0, 1) == 0):
            rotated_img = rotated_img.resize(
                (int(rotated_img.size[0] * f), int(rotated_img.size[1] * f)),
                Image.BILINEAR)
        else:
            rotated_img = rotated_img.resize(
                (int(rotated_img.size[0] * f), int(rotated_img.size[1] * f)),
                Image.LANCZOS)

    # Rare (about 1 in 31) Nick binarization, only for taller images.
    if (random.randint(0,30) < 1 and height > 60):
        rotated_img = Image.fromarray(
            nick_binarize([np.array(rotated_img)])[0])

    #############################
    # Apply distorsion to image #
    #############################
    # Caller-supplied distorsion_type is overridden by a random draw.
    distorsion_type = random.choice([0,1,2])
    if distorsion_type == 0:
        distorted_img = rotated_img  # no distorsion
    elif distorsion_type == 1:
        # NOTE(review): if DistorsionGenerator.sin raises, distorted_img is
        # left unbound and the code below fails with NameError -- confirm.
        try:
            distorted_img = DistorsionGenerator.sin(
                rotated_img,
                vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
                horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
                max_offset = 2
            )
        except Exception as e:
            pass
    elif distorsion_type == 2:
        # NOTE(review): same unbound-variable risk as the sin branch.
        try:
            distorted_img = DistorsionGenerator.cos(
                rotated_img,
                vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
                horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
                max_offset = 2
            )
        except Exception as e:
            pass
    else:
        # Unreachable: random.choice([0,1,2]) never yields another value.
        try:
            distorted_img = DistorsionGenerator.random(
                rotated_img,
                vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
                horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
            )
        except Exception as e:
            distorted_img = rotated_img

    new_text_width, new_text_height = distorted_img.size
    # Random margins added to the background size.
    x = random.randint(1, 10)
    y = random.randint(1, 10)

    #############################
    # Generate background image #
    #############################
    # NOTE(review): both branches draw the same range, so the
    # caller-supplied background_type is always overridden.
    if (distorsion_type == 0):
        background_type = random.randint(0, 3)
    else:
        background_type = random.randint(0, 3)
    if background_type == 0:
        background = BackgroundGenerator.gaussian_noise(
            new_text_height + x, new_text_width + y)
    elif background_type == 1:
        background = BackgroundGenerator.plain_white(
            new_text_height + x, new_text_width + y)
    elif background_type == 2:
        background = BackgroundGenerator.quasicrystal(
            new_text_height + x, new_text_width + y)
    else:
        background = BackgroundGenerator.picture(
            new_text_height + 10, new_text_width + 10)

    # Mask out pure-white and pure-black pixels before pasting.
    mask = distorted_img.point(
        lambda x: 0 if x == 255 or x == 0 else 255, '1')

    apply_background = False
    if (random.randint(0,10) < 1):
        # Occasionally skip the background and use the raw text image.
        background = distorted_img
    else:
        apply_background = True
        background.paste(distorted_img, (5, 5), mask=mask)

    ##################################
    # Resize image to desired format #
    ##################################
    # Blur only when not quasicrystal-backed and the text is tall enough.
    # (distorsion_type != 3 is always true after random.choice above.)
    if distorsion_type != 3 and background_type != 2 and new_text_height > 45:
        final_image = background.filter(
            ImageFilter.GaussianBlur(
                radius=(blur if not random_blur else random.randint(0, blur))
            )
        )
    else:
        final_image = background

    # Final random rescale (0.8x - 1.5x) with a random resampling filter.
    f = random.uniform(0.8, 1.5)
    if (random.randint(0,1) == 0):
        final_image = final_image.resize(
            (int(final_image.size[0] * f), int(final_image.size[1] * f)),
            Image.ANTIALIAS)
    else:
        if (random.randint(0, 1) == 0):
            final_image = final_image.resize(
                (int(final_image.size[0] * f), int(final_image.size[1] * f)),
                Image.BILINEAR)
        else:
            final_image = final_image.resize(
                (int(final_image.size[0] * f), int(final_image.size[1] * f)),
                Image.LANCZOS)

    #####################################
    # Generate name for resulting image #
    #####################################
    if name_format == 0:
        image_name = '{}_{}.{}'.format(text, str(index), extension)
    elif name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), text, extension)
    elif name_format == 2:
        image_name = '{}.{}'.format(str(index),extension)
    elif name_format == 3:
        image_name = '{}_{}.{}'.format(prefix, str(index), extension)
    else:
        print('{} is not a valid name format. Using default.'.format(name_format))
        image_name = '{}_{}.{}'.format(text, str(index), extension)

    print("---------------{}---------------------------".format(index))
    print("saver: ", os.path.join(out_dir, image_name))
    print("image name: ", image_name)
    print("--------------------------------------------")
    saver = os.path.join(out_dir, image_name)
    componet_path = saver.split('/')
    # Save only when the path is exactly '<dir>/<file>'; otherwise append
    # the rejected path to a log file for later inspection.
    if len(componet_path) == 2 and len(componet_path[0]) != 0:
        final_image.convert('RGB').save(saver)
    else:
        with open('logs/log_imagePath.txt', 'a') as f:
            f.write(str(saver))
            f.write('\n')
def data_generator(index=0, text=None, language=None, out_dir=None, width=32,
                   height=32, extension='jpg', skewing_angle=0, blur=0,
                   background_type=0, distorsion_type=0, name_format=0):
    '''
    Draw `text` (Chinese or not) onto a generated background with a ttf
    font, optionally skew, affine-transform and blur it, then write the
    result to `out_dir`.

    :param index: running number used in the output file name
    :param text: the context; for chinese it should be unicode type
    :param language: 'cn' triggers random cropping instead of resizing
    :param out_dir: directory the image is written to
    :param width: target image width in pixels
    :param height: target image height in pixels
    :param extension: output file extension
    :param skewing_angle: rotation angle in degrees (0 disables skewing)
    :param blur: truthy enables a random Gaussian blur
    :param background_type: 0 noise, 1 plain white, otherwise picture
    :param distorsion_type: 0 none, 1 right affine, 2 left affine
    :param name_format: naming scheme for the output file
    :return: None (the image is written to disk as a side effect)
    '''
    # Background canvas slightly larger than the target size.
    if background_type == 0:
        background = BackgroundGenerator.gaussian_noise(height + 5, width + 5)
    elif background_type == 1:
        background = BackgroundGenerator.plain_white(height + 5, width + 5)
    else:
        background = BackgroundGenerator.picture()
    # print ('background', index)

    pos = (3, 3)
    text_size = 32
    text_color = [0, 0, 0]
    image, font = ComputerTextGenerator().draw_text(background, language, pos,
                                                    text, text_size,
                                                    text_color)
    if image is None or font is None:
        return

    # Skew (rotate) the image around its centre, replicating border pixels.
    if skewing_angle > 0:
        if len(image.shape) == 3:
            row, col, ch = image.shape
        else:
            row, col = image.shape
        M = cv2.getRotationMatrix2D((col / 2, row / 2), skewing_angle, 1)
        skewing_img = cv2.warpAffine(image, M, (col, row),
                                     borderMode=cv2.BORDER_REPLICATE,
                                     flags=cv2.INTER_LINEAR)
    else:
        skewing_img = image

    # Affine transform.
    # BUGFIX: the original used independent `if`s with no fallback, so any
    # distorsion_type >= 3 left transform_img unbound -> NameError below.
    if distorsion_type == 1:
        transform_img = AffineTransformGenerator().right(skewing_img)
    elif distorsion_type == 2:
        transform_img = AffineTransformGenerator().left(skewing_img)
    else:
        transform_img = skewing_img

    # Gaussian Blur with random kernel size (3 or 5) and random sigma.
    if blur:
        kernels_size = [3, 5]
        kernel_size = kernels_size[random.randint(0, 1)]
        sigma = random.uniform(0, 3)
        blur_img = cv2.GaussianBlur(transform_img,
                                    (kernel_size, kernel_size), sigma)
    else:
        blur_img = transform_img

    # normalize the image size
    if language == 'cn':
        final_res_img = RandomCropGenerator().crop(blur_img, height, width)
    else:
        # NOTE(review): cv2.resize expects dsize as (width, height); passing
        # (height, width) is only harmless while both default to 32 -- confirm.
        final_res_img = cv2.resize(blur_img, (height, width),
                                   interpolation=cv2.INTER_LINEAR)

    fontname = os.path.basename(font).split('.')[0]
    if text == '/':
        text = 'slash'

    if name_format == 0:
        image_name = '{}_{}_{}.{}'.format(text, fontname, str(index),
                                          extension)
    elif name_format == 1:
        image_name = '{}_{}_{}.{}'.format(str(index), fontname, text,
                                          extension)
    elif name_format == 2:
        image_name = '{}.{}'.format(str(index), extension)
    else:
        print('{} is not a valid name format. Using default.'.format(
            name_format))
        image_name = '{}_{}_{}.{}'.format(text, fontname, str(index),
                                          extension)

    # BUGFIX: joining a bytes out_dir with a str file name raises TypeError
    # on Python 3; keep the path as str throughout.
    path = os.path.join(out_dir, image_name)
    cv2.imwrite(path, final_res_img)
def generate(cls, index, text, fonts, out_dir, height, random_height,
             extension, skewing_angle, random_skew, blur, random_blur,
             background_type, random_bg, distorsion_type,
             distorsion_orientation, is_handwritten, name_format, width,
             random_width, alignment, bounding_box, view_bounding_box,
             random_alignment, text_color=-1):
    """Render one synthetic text image while tracking per-character
    bounding boxes (rois) through the geometric steps, then save it.

    Returns (rois, index) so the caller can associate boxes with the file.
    """
    image = None

    #########################################################################
    # Randomly determine height between height and random_height variables #
    #########################################################################
    if random_height > height:
        height = random.randint(height, random_height)

    ##########################
    # Create picture of text #
    ##########################
    if is_handwritten:
        # NOTE(review): this path does not produce rois; the return
        # statement below would then fail with NameError -- confirm.
        image = HandwrittenTextGenerator.generate(text)
    else:
        image, rois = ComputerTextGenerator.generate(text, fonts, text_color,
                                                     height, bounding_box)

    random_angle = random.randint(0-skewing_angle, skewing_angle)

    rotated_img = image.rotate(
        skewing_angle if not random_skew else random_angle, expand=1)

    if bounding_box:
        # Rotate the rois along with the image.
        # NOTE(review): rois are rotated by random_angle even when
        # random_skew is False and the image used skewing_angle -- confirm.
        rois = RoiRotator.compute(rois, random_angle, image.size,
                                  rotated_img.size)

    #############################
    # Apply distorsion to image #
    #############################
    if distorsion_type == 0:
        distorted_img = rotated_img  # no distorsion
    elif distorsion_type == 1:
        distorted_img = DistorsionGenerator.sin(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
        )
    elif distorsion_type == 2:
        distorted_img = DistorsionGenerator.cos(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
        )
    else:
        distorted_img = DistorsionGenerator.random(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
        )

    ##################################
    # Resize image to desired format #
    ##################################
    old_width = distorted_img.size[0]
    old_height = distorted_img.size[1]
    new_width = int(float(distorted_img.size[0]) *
                    (float(height) / float(distorted_img.size[1])))
    resized_img = distorted_img.resize((new_width, height - 10),
                                       Image.ANTIALIAS)

    # Scale factors mapping rois into the resized image.
    x_factor = new_width / old_width
    y_factor = (height - 10) / old_height
    if bounding_box:
        i = 0
        for roi in rois:
            rois[i] = (np.array(roi) *
                       np.array([x_factor, y_factor, x_factor, y_factor])
                       ).astype(int)
            i += 1

    # Background width: random extra padding, fixed width, or snug fit.
    if width > 0 and random_width > width:
        background_width = new_width + random.randint(width,random_width)
    elif width > 0:
        background_width = width
    else:
        background_width = new_width + 10

    #############################
    # Generate background image #
    #############################
    if random_bg:
        background_type = random.randint(0,2)
    if background_type == 0:
        background = BackgroundGenerator.gaussian_noise(height,
                                                        background_width)
    elif background_type == 1:
        background = BackgroundGenerator.plain_white(height,
                                                     background_width)
    elif background_type == 2:
        background = BackgroundGenerator.quasicrystal(height,
                                                      background_width)
    else:
        background = BackgroundGenerator.picture(height, background_width)

    #############################
    # Place text with alignment #
    #############################
    new_text_width, _ = resized_img.size
    if random_alignment:
        alignment = random.randint(0,2)
    if alignment == 0:    # left
        x_offset = 5
        background.paste(resized_img, (5, 5), resized_img)
    elif alignment == 1:  # centered
        x_offset = int(background_width / 2 - new_text_width / 2)
        background.paste(resized_img, (x_offset, 5), resized_img)
    else:                 # right
        x_offset = background_width - new_text_width - 5
        background.paste(resized_img, (x_offset, 5), resized_img)

    if bounding_box:
        # Translate rois by the paste offset.
        i = 0
        for roi in rois:
            rois[i] = (np.array(roi) +
                       np.array([x_offset, 5, x_offset, 5])).tolist()
            i += 1

    ##################################
    #      Apply gaussian blur       #
    ##################################
    blur_image = background.filter(
        ImageFilter.GaussianBlur(
            radius=(blur if not random_blur else random.randint(0, blur))
        )
    )

    ##################################
    #     Apply elastic transform    #
    ##################################
    # NOTE(review): rois are NOT adjusted for the elastic transform below,
    # so boxes may no longer align exactly with the warped glyphs.
    final_image = ElasticTransform.generate(blur_image,
                                            random.randint(0, 20) / 100 ,
                                            random.randint(1, 100) / 100)

    ##################################
    #     Draw ROIs as a test        #
    ##################################
    if bounding_box and view_bounding_box:
        FakeTextDataGenerator.draw_bounding_boxes(final_image, rois)

    #####################################
    # Generate name for resulting image #
    #####################################
    if name_format == 0:
        image_name = '{}_{}.{}'.format(text, str(index), extension)
    elif name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), text, extension)
    elif name_format == 2:
        image_name = '{}.{}'.format(str(index),extension)
    else:
        print('{} is not a valid name format. Using default.'.format(name_format))
        image_name = '{}_{}.{}'.format(text, str(index), extension)

    # Save the image
    final_image.convert('RGB').save(os.path.join(out_dir, image_name))

    return rois, index
def run_training(continue_run): logging.info('EXPERIMENT NAME: %s' % exp_config.experiment_name) init_step = 0 # Load data base_data, recursion_data, recursion = acdc_data.load_and_maybe_process_scribbles( scribble_file=sys_config.project_root + exp_config.scribble_data, target_folder=log_dir, percent_full_sup=exp_config.percent_full_sup, scr_ratio=exp_config.length_ratio) #wrap everything from this point onwards in a try-except to catch keyboard interrupt so #can control h5py closing data try: loaded_previous_recursion = False start_epoch = 0 if continue_run: logging.info( '!!!!!!!!!!!!!!!!!!!!!!!!!!!! Continuing previous run !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' ) try: try: init_checkpoint_path = utils.get_latest_model_checkpoint_path( log_dir, 'recursion_{}_model.ckpt'.format(recursion)) except: init_checkpoint_path = utils.get_latest_model_checkpoint_path( log_dir, 'recursion_{}_model.ckpt'.format(recursion - 1)) loaded_previous_recursion = True logging.info('Checkpoint path: %s' % init_checkpoint_path) init_step = int( init_checkpoint_path.split('/')[-1].split('-') [-1]) + 1 # plus 1 b/c otherwise starts with eval start_epoch = int( init_step / (len(base_data['images_train']) / exp_config.batch_size)) logging.info('Latest step was: %d' % init_step) logging.info('Continuing with epoch: %d' % start_epoch) except: logging.warning( '!!! Did not find init checkpoint. Maybe first run failed. Disabling continue mode...' ) continue_run = False init_step = 0 start_epoch = 0 logging.info( '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' 
) if loaded_previous_recursion: logging.info( "Data file exists for recursion {} " "but checkpoints only present up to recursion {}".format( recursion, recursion - 1)) logging.info("Likely means postprocessing was terminated") recursion_data = acdc_data.load_different_recursion( recursion_data, -1) recursion -= 1 # load images and validation data images_train = np.array(base_data['images_train']) scribbles_train = np.array(base_data['scribbles_train']) images_val = np.array(base_data['images_test']) labels_val = np.array(base_data['masks_test']) # if exp_config.use_data_fraction: # num_images = images_train.shape[0] # new_last_index = int(float(num_images)*exp_config.use_data_fraction) # # logging.warning('USING ONLY FRACTION OF DATA!') # logging.warning(' - Number of imgs orig: %d, Number of imgs new: %d' % (num_images, new_last_index)) # images_train = images_train[0:new_last_index,...] # labels_train = labels_train[0:new_last_index,...] logging.info('Data summary:') logging.info(' - Images:') logging.info(images_train.shape) logging.info(images_train.dtype) #logging.info(' - Labels:') #logging.info(labels_train.shape) #logging.info(labels_train.dtype) # Tell TensorFlow that the model will be built into the default Graph. with tf.Graph().as_default(): # Generate placeholders for the images and labels. image_tensor_shape = [exp_config.batch_size] + list( exp_config.image_size) + [1] mask_tensor_shape = [exp_config.batch_size] + list( exp_config.image_size) images_placeholder = tf.placeholder(tf.float32, shape=image_tensor_shape, name='images') labels_placeholder = tf.placeholder(tf.uint8, shape=mask_tensor_shape, name='labels') learning_rate_placeholder = tf.placeholder(tf.float32, shape=[]) training_time_placeholder = tf.placeholder(tf.bool, shape=[]) tf.summary.scalar('learning_rate', learning_rate_placeholder) # Build a Graph that computes predictions from the inference model. 
logits = model.inference(images_placeholder, exp_config.model_handle, training=training_time_placeholder, nlabels=exp_config.nlabels) # Add to the Graph the Ops for loss calculation. [loss, _, weights_norm ] = model.loss(logits, labels_placeholder, nlabels=exp_config.nlabels, loss_type=exp_config.loss_type, weight_decay=exp_config.weight_decay ) # second output is unregularised loss tf.summary.scalar('loss', loss) tf.summary.scalar('weights_norm_term', weights_norm) # Add to the Graph the Ops that calculate and apply gradients. if exp_config.momentum is not None: train_op = model.training_step(loss, exp_config.optimizer_handle, learning_rate_placeholder, momentum=exp_config.momentum) else: train_op = model.training_step(loss, exp_config.optimizer_handle, learning_rate_placeholder) # Add the Op to compare the logits to the labels during evaluation. # eval_loss = model.evaluation(logits, # labels_placeholder, # images_placeholder, # nlabels=exp_config.nlabels, # loss_type=exp_config.loss_type, # weak_supervision=True, # cnn_threshold=exp_config.cnn_threshold, # include_bg=True) eval_val_loss = model.evaluation( logits, labels_placeholder, images_placeholder, nlabels=exp_config.nlabels, loss_type=exp_config.loss_type, weak_supervision=True, cnn_threshold=exp_config.cnn_threshold, include_bg=False) # Build the summary Tensor based on the TF collection of Summaries. summary = tf.summary.merge_all() # Add the variable initializer Op. init = tf.global_variables_initializer() # Create a saver for writing training checkpoints. # Only keep two checkpoints, as checkpoints are kept for every recursion # and they can be 300MB + saver = tf.train.Saver(max_to_keep=2) saver_best_dice = tf.train.Saver(max_to_keep=2) saver_best_xent = tf.train.Saver(max_to_keep=2) # Create a session for running Ops on the Graph. sess = tf.Session() # Instantiate a SummaryWriter to output summaries and the Graph. 
summary_writer = tf.summary.FileWriter(log_dir, sess.graph) # with tf.name_scope('monitoring'): val_error_ = tf.placeholder(tf.float32, shape=[], name='val_error') val_error_summary = tf.summary.scalar('validation_loss', val_error_) val_dice_ = tf.placeholder(tf.float32, shape=[], name='val_dice') val_dice_summary = tf.summary.scalar('validation_dice', val_dice_) val_summary = tf.summary.merge( [val_error_summary, val_dice_summary]) train_error_ = tf.placeholder(tf.float32, shape=[], name='train_error') train_error_summary = tf.summary.scalar('training_loss', train_error_) train_dice_ = tf.placeholder(tf.float32, shape=[], name='train_dice') train_dice_summary = tf.summary.scalar('training_dice', train_dice_) train_summary = tf.summary.merge( [train_error_summary, train_dice_summary]) # Run the Op to initialize the variables. sess.run(init) # Restore session # crf_weights = [] # for v in tf.all_variables(): # # if v.name[0:4]=='bila': # print(str(v)) # crf_weights.append(v.name) # elif v.name[0:4] =='spat': # print(str(v)) # crf_weights.append(v.name) # elif v.name[0:4] =='comp': # print(str(v)) # crf_weights.append(v.name) # restore_var = [v for v in tf.all_variables() if v.name not in crf_weights] # # load_saver = tf.train.Saver(var_list=restore_var) # load_saver.restore(sess, '/scratch_net/biwirender02/cany/basil/logdir/unet2D_ws_spot_blur/recursion_0_model.ckpt-5699') if continue_run: # Restore session saver.restore(sess, init_checkpoint_path) step = init_step curr_lr = exp_config.learning_rate no_improvement_counter = 0 best_val = np.inf last_train = np.inf loss_history = [] loss_gradient = np.inf best_dice = 0 logging.info('RECURSION {0}'.format(recursion)) # random walk - if it already has been random walked it won't redo recursion_data = acdc_data.random_walk_epoch( recursion_data, exp_config.rw_beta, exp_config.rw_threshold, exp_config.random_walk) #get ground truths labels_train = np.array(recursion_data['random_walked']) for epoch in range(start_epoch, 
exp_config.max_epochs): if (epoch % exp_config.epochs_per_recursion == 0 and epoch != 0) \ or loaded_previous_recursion: loaded_previous_recursion = False #Have reached end of recursion recursion_data = predict_next_gt( data=recursion_data, images_train=images_train, images_placeholder=images_placeholder, training_time_placeholder=training_time_placeholder, logits=logits, sess=sess) recursion_data = postprocess_gt( data=recursion_data, images_train=images_train, scribbles_train=scribbles_train) recursion += 1 # random walk - if it already has been random walked it won't redo recursion_data = acdc_data.random_walk_epoch( recursion_data, exp_config.rw_beta, exp_config.rw_threshold, exp_config.random_walk) #get ground truths labels_train = np.array(recursion_data['random_walked']) #reinitialise savers - otherwise, no checkpoints will be saved for each recursion saver = tf.train.Saver(max_to_keep=2) saver_best_dice = tf.train.Saver(max_to_keep=2) saver_best_xent = tf.train.Saver(max_to_keep=2) logging.info( 'Epoch {0} ({1} of {2} epochs for recursion {3})'.format( epoch, 1 + epoch % exp_config.epochs_per_recursion, exp_config.epochs_per_recursion, recursion)) # for batch in iterate_minibatches(images_train, # labels_train, # batch_size=exp_config.batch_size, # augment_batch=exp_config.augment_batch): # You can run this loop with the BACKGROUND GENERATOR, which will lead to some improvements in the # training speed. However, be aware that currently an exception inside this loop may not be caught. # The batch generator may just continue running silently without warning even though the code has # crashed. 
for batch in BackgroundGenerator( iterate_minibatches( images_train, labels_train, batch_size=exp_config.batch_size, augment_batch=exp_config.augment_batch)): if exp_config.warmup_training: if step < 50: curr_lr = exp_config.learning_rate / 10.0 elif step == 50: curr_lr = exp_config.learning_rate start_time = time.time() # batch = bgn_train.retrieve() x, y = batch # TEMPORARY HACK (to avoid incomplete batches if y.shape[0] < exp_config.batch_size: step += 1 continue feed_dict = { images_placeholder: x, labels_placeholder: y, learning_rate_placeholder: curr_lr, training_time_placeholder: True } _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict) duration = time.time() - start_time # Write the summaries and print an overview fairly often. if step % 10 == 0: # Print status to stdout. logging.info('Step %d: loss = %.6f (%.3f sec)' % (step, loss_value, duration)) # Update the events file. summary_str = sess.run(summary, feed_dict=feed_dict) summary_writer.add_summary(summary_str, step) summary_writer.flush() # if (step + 1) % exp_config.train_eval_frequency == 0: # # logging.info('Training Data Eval:') # [train_loss, train_dice] = do_eval(sess, # eval_loss, # images_placeholder, # labels_placeholder, # training_time_placeholder, # images_train, # labels_train, # exp_config.batch_size) # # train_summary_msg = sess.run(train_summary, feed_dict={train_error_: train_loss, # train_dice_: train_dice} # ) # summary_writer.add_summary(train_summary_msg, step) # # loss_history.append(train_loss) # if len(loss_history) > 5: # loss_history.pop(0) # loss_gradient = (loss_history[-5] - loss_history[-1]) / 2 # # logging.info('loss gradient is currently %f' % loss_gradient) # # if exp_config.schedule_lr and loss_gradient < exp_config.schedule_gradient_threshold: # logging.warning('Reducing learning rate!') # curr_lr /= 10.0 # logging.info('Learning rate changed to: %f' % curr_lr) # # # reset loss history to give the optimisation some time to start decreasing again # 
loss_gradient = np.inf # loss_history = [] # # if train_loss <= last_train: # best_train: # logging.info('Decrease in training error!') # else: # logging.info('No improvement in training error for %d steps' % no_improvement_counter) # # last_train = train_loss # Save a checkpoint and evaluate the model periodically. if (step + 1) % exp_config.val_eval_frequency == 0: checkpoint_file = os.path.join( log_dir, 'recursion_{}_model.ckpt'.format(recursion)) saver.save(sess, checkpoint_file, global_step=step) # Evaluate against the training set. # Evaluate against the validation set. logging.info('Validation Data Eval:') [val_loss, val_dice ] = do_eval(sess, eval_val_loss, images_placeholder, labels_placeholder, training_time_placeholder, images_val, labels_val, exp_config.batch_size) val_summary_msg = sess.run(val_summary, feed_dict={ val_error_: val_loss, val_dice_: val_dice }) summary_writer.add_summary(val_summary_msg, step) if val_dice > best_dice: best_dice = val_dice best_file = os.path.join( log_dir, 'recursion_{}_model_best_dice.ckpt'.format( recursion)) saver_best_dice.save(sess, best_file, global_step=step) logging.info( 'Found new best dice on validation set! - {} - ' 'Saving recursion_{}_model_best_dice.ckpt'. format(val_dice, recursion)) if val_loss < best_val: best_val = val_loss best_file = os.path.join( log_dir, 'recursion_{}_model_best_xent.ckpt'.format( recursion)) saver_best_xent.save(sess, best_file, global_step=step) logging.info( 'Found new best crossentropy on validation set! - {} - ' 'Saving recursion_{}_model_best_xent.ckpt'. format(val_loss, recursion)) step += 1 except Exception: raise
def generate(cls, index, text, font, out_dir, size, extension, skewing_angle,
             random_skew, blur, random_blur, background_type, distorsion_type,
             distorsion_orientation, is_handwritten, name_format, width,
             alignment, text_color, orientation, space_width, lang_dict):
    """
    Render one synthetic text image, save it, and append its per-character
    dictionary-index label to ``out/data.txt``.

    :param index: Sequence number of the sample (used in the file name).
    :param text: Text to render.
    :param font: Font file used by the computer text generator.
    :param out_dir: Directory the final image is written to.
    :param size: Target height (horizontal text) / width (vertical text), px.
    :param extension: Image file extension, e.g. ``'jpg'``.
    :param skewing_angle: Maximum skew angle in degrees.
    :param random_skew: If True, use a random angle in [-skewing_angle, skewing_angle].
    :param blur: Gaussian blur radius (upper bound when ``random_blur``).
    :param random_blur: If True, use a random radius in [0, blur].
    :param background_type: 0 gaussian noise, 1 plain white, 2 quasicrystal,
        anything else a picture background.
    :param distorsion_type: 0 none, 1 sine, 2 cosine, anything else random.
    :param distorsion_orientation: 0 vertical, 1 horizontal, 2 both.
    :param is_handwritten: Use the handwritten generator instead of a font.
    :param name_format: 0 text_index, 1 index_text, 2 index, 3 index_timestamp.
    :param width: Fixed background width, or <= 0 to fit the rendered text.
    :param alignment: 0 left, 1 center, anything else right.
    :param text_color: Colour passed to the computer text generator.
    :param orientation: 0 horizontal, 1 vertical.
    :param space_width: Width multiplier for the space character.
    :param lang_dict: NOTE(review): this argument is ignored — it is
        unconditionally overwritten below from ``dicts/char_std_5991.txt``.
    """
    ##########################
    # Create picture of text #
    ##########################
    if is_handwritten:
        if orientation == 1:
            raise ValueError("Vertical handwritten text is unavailable")
        image = HandwrittenTextGenerator.generate(text)
    else:
        image = ComputerTextGenerator.generate(text, font, text_color, size,
                                               orientation, space_width)

    # random.randint is always evaluated so the RNG stream is identical
    # whether or not random_skew is set.
    random_angle = random.randint(0 - skewing_angle, skewing_angle)
    rotated_img = image.rotate(
        skewing_angle if not random_skew else random_angle, expand=1)

    #############################
    # Apply distorsion to image #
    #############################
    if distorsion_type == 0:
        distorted_img = rotated_img  # Mind = blown
    elif distorsion_type == 1:
        distorted_img = DistorsionGenerator.sin(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))
    elif distorsion_type == 2:
        distorted_img = DistorsionGenerator.cos(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))
    else:
        distorted_img = DistorsionGenerator.random(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))

    ##################################
    # Resize image to desired format #
    ##################################
    if orientation == 0:  # Horizontal text
        # Scale width to preserve aspect ratio at the target height
        # (10 px of the height is reserved for the vertical margins).
        new_width = int(float(distorted_img.size[0] + 10) *
                        (float(size) / float(distorted_img.size[1] + 10)))
        resized_img = distorted_img.resize((new_width, size - 10),
                                           Image.ANTIALIAS)
        background_width = width if width > 0 else new_width + 10
        background_height = size
    elif orientation == 1:  # Vertical text
        new_height = int(float(distorted_img.size[1] + 10) *
                         (float(size) / float(distorted_img.size[0] + 10)))
        resized_img = distorted_img.resize((size - 10, new_height),
                                           Image.ANTIALIAS)
        background_width = size
        background_height = new_height + 10
    else:
        raise ValueError("Invalid orientation")

    #############################
    # Generate background image #
    #############################
    if background_type == 0:
        background = BackgroundGenerator.gaussian_noise(background_height,
                                                        background_width)
    elif background_type == 1:
        background = BackgroundGenerator.plain_white(background_height,
                                                     background_width)
    elif background_type == 2:
        background = BackgroundGenerator.quasicrystal(background_height,
                                                      background_width)
    else:
        background = BackgroundGenerator.picture(background_height,
                                                 background_width)

    #############################
    # Place text with alignment #
    #############################
    new_text_width, _ = resized_img.size
    if alignment == 0:
        background.paste(resized_img, (5, 5), resized_img)
    elif alignment == 1:
        background.paste(resized_img,
                         (int(background_width / 2 - new_text_width / 2), 5),
                         resized_img)
    else:
        background.paste(resized_img,
                         (background_width - new_text_width - 5, 5),
                         resized_img)

    #######################
    # Apply gaussian blur #
    #######################
    final_image = background.filter(
        ImageFilter.GaussianBlur(
            radius=(blur if not random_blur else random.randint(0, blur))))

    # Map every character to its position in the fixed character dictionary.
    # list.index raises ValueError for characters missing from the dictionary
    # (same behaviour as the original code).
    lang = 'char_std_5991'
    with open(os.path.join('dicts', lang + '.txt'), 'r', encoding="utf8",
              errors='ignore') as d:
        lang_dict = [ch.strip('\n') for ch in d.readlines()]
    char_index = ' '.join(str(lang_dict.index(character))
                          for character in text)

    #####################################
    # Generate name for resulting image #
    #####################################
    if name_format == 0:
        image_name = '{}_{}.{}'.format(text, str(index), extension)
    elif name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), text, extension)
    elif name_format == 2:
        image_name = '{}.{}'.format(str(index), extension)
    elif name_format == 3:
        image_name = '{}_{}.{}'.format(str(index),
                                       str(int(round(time.time() * 1000))),
                                       extension)
    else:
        print('{} is not a valid name format. Using default.'.format(name_format))
        image_name = '{}_{}.{}'.format(text, str(index), extension)

    # Save the image
    final_image.convert('RGB').save(os.path.join(out_dir, image_name))

    # Fix: build the label-file path portably. The original literal
    # r'out\data.txt' only names the intended file on Windows; on POSIX it
    # creates a file whose name contains a backslash.
    file = os.path.join('out', 'data.txt')
    with open(file, 'a+') as f:
        f.write(image_name + ' ' + char_index + '\n')  # one label line per sample
def generate(cls, index, text, font, out_dir, size, extension, skewing_angle,
             random_skew, blur, random_blur, background_type, distorsion_type,
             distorsion_orientation, is_handwritten, name_format, width,
             alignment, text_color, orientation, space_width, margins, fit):
    """
    Render one synthetic text image (with configurable margins) and save it.

    The pipeline is: text rendering -> skew rotation -> optional distorsion ->
    aspect-preserving resize -> background composition -> alignment paste ->
    gaussian blur -> save as ``<out_dir>/<image_name>``.
    """
    margin_top, margin_left, margin_bottom, margin_right = margins
    horizontal_margin = margin_left + margin_right
    vertical_margin = margin_top + margin_bottom

    # --- Render the raw text picture -------------------------------------
    if is_handwritten:
        if orientation == 1:
            raise ValueError("Vertical handwritten text is unavailable")
        rendered = HandwrittenTextGenerator.generate(text, text_color, fit)
    else:
        rendered = ComputerTextGenerator.generate(text, font, text_color,
                                                  size, orientation,
                                                  space_width, fit)

    # The RNG is consumed unconditionally so the random stream matches the
    # non-random-skew path as well.
    picked_angle = random.randint(0 - skewing_angle, skewing_angle)
    skewed = rendered.rotate(
        skewing_angle if not random_skew else picked_angle, expand=1)

    # --- Optional distorsion ---------------------------------------------
    apply_vertical = distorsion_orientation in (0, 2)
    apply_horizontal = distorsion_orientation in (1, 2)
    if distorsion_type == 0:
        warped = skewed  # Mind = blown
    else:
        distort = {
            1: DistorsionGenerator.sin,
            2: DistorsionGenerator.cos,
        }.get(distorsion_type, DistorsionGenerator.random)
        warped = distort(skewed,
                         vertical=apply_vertical,
                         horizontal=apply_horizontal)

    # --- Aspect-preserving resize inside the margins ----------------------
    if orientation == 0:
        # Horizontal text: fix the height, scale the width.
        scaled_width = int(warped.size[0] *
                           (float(size - vertical_margin) /
                            float(warped.size[1])))
        fitted = warped.resize((scaled_width, size - vertical_margin),
                               Image.ANTIALIAS)
        bg_width = width if width > 0 else scaled_width + horizontal_margin
        bg_height = size
    elif orientation == 1:
        # Vertical text: fix the width, scale the height.
        scaled_height = int(float(warped.size[1]) *
                            (float(size - horizontal_margin) /
                             float(warped.size[0])))
        fitted = warped.resize((size - horizontal_margin, scaled_height),
                               Image.ANTIALIAS)
        bg_width = size
        bg_height = scaled_height + vertical_margin
    else:
        raise ValueError("Invalid orientation")

    # --- Background ------------------------------------------------------
    make_background = {
        0: BackgroundGenerator.gaussian_noise,
        1: BackgroundGenerator.plain_white,
        2: BackgroundGenerator.quasicrystal,
    }.get(background_type, BackgroundGenerator.picture)
    canvas = make_background(bg_height, bg_width)

    # --- Paste the text with the requested alignment ----------------------
    text_width = fitted.size[0]
    if alignment == 0 or width == -1:
        paste_x = margin_left
    elif alignment == 1:
        paste_x = int(bg_width / 2 - text_width / 2)
    else:
        paste_x = bg_width - text_width - margin_right
    canvas.paste(fitted, (paste_x, margin_top), fitted)

    # --- Gaussian blur ----------------------------------------------------
    blur_radius = blur if not random_blur else random.randint(0, blur)
    result = canvas.filter(ImageFilter.GaussianBlur(radius=blur_radius))

    # --- File name --------------------------------------------------------
    if name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), text, extension)
    elif name_format == 2:
        image_name = '{}.{}'.format(str(index), extension)
    else:
        if name_format != 0:
            print('{} is not a valid name format. Using default.'.format(
                name_format))
        image_name = '{}_{}.{}'.format(text, str(index), extension)

    # Save the image
    result.convert('RGB').save(os.path.join(out_dir, image_name))
def generate_adversarial_examples(input_folder, output_path, model_path,
                                  attack, attack_args, exp_config,
                                  add_gaussian=False):
    """
    Run an adversarial attack against a trained segmentation model and dump
    metrics (JSON) plus qualitative figures (PDF) into ``eval_results/``.

    :param input_folder: Unused here; data comes from ``sys_config.data_root``.
    :param output_path: Unused here; outputs go to the hard-coded
        ``eval_results/`` directory.
    :param model_path: Directory containing the ``model_best_dice.ckpt``
        checkpoint to restore.
    :param attack: One of 'fgsm', 'pgd', 'spgd'.
    :param attack_args: Dict of attack parameters; 'eps', 'step_alpha' and
        'epochs' are recorded in the result JSON.
    :param exp_config: Experiment configuration module/object.
    :param add_gaussian: If True, additionally smooth the perturbation with
        gaussian noise via ``adv_attack.add_gaussian_noise``.
    """
    nx, ny = exp_config.image_size[:2]
    batch_size = 1
    num_channels = exp_config.nlabels

    image_tensor_shape = [batch_size] + list(exp_config.image_size) + [1]
    mask_tensor_shape = [batch_size] + list(exp_config.image_size)
    images_pl = tf.placeholder(tf.float32, shape=image_tensor_shape,
                               name='images')
    labels_pl = tf.placeholder(tf.uint8, shape=mask_tensor_shape,
                               name='labels')

    logits_pl = model.inference(images_pl, exp_config=exp_config,
                                training=tf.constant(False, dtype=tf.bool))
    eval_loss = model.evaluation(logits_pl, labels_pl, images_pl,
                                 nlabels=exp_config.nlabels,
                                 loss_type=exp_config.loss_type)

    data = acdc_data.load_and_maybe_process_data(
        input_folder=sys_config.data_root,
        preprocessing_folder=sys_config.preproc_folder,
        mode=exp_config.data_mode,
        size=exp_config.image_size,
        target_resolution=exp_config.target_resolution,
        force_overwrite=False,
        split_test_train=True)

    # Only the first 20 test volumes are evaluated.
    images = data['images_test'][:20]
    labels = data['masks_test'][:20]
    print("Num images train {} test {}".format(len(data['images_train']),
                                               len(images)))

    saver = tf.train.Saver()
    init = tf.global_variables_initializer()

    baseline_closs = 0.0
    baseline_cdice = 0.0
    attack_closs = 0.0
    attack_cdice = 0.0
    l2_diff_sum = 0.0
    ln_diff_sum = 0.0
    ln_diff = 0.0
    l2_diff = 0.0
    batches = 0
    result_dict = []

    with tf.Session() as sess:
        results = []
        sess.run(init)
        checkpoint_path = utils.get_latest_model_checkpoint_path(
            model_path, 'model_best_dice.ckpt')
        saver.restore(sess, checkpoint_path)

        for batch in BackgroundGenerator(
                train.iterate_minibatches(images, labels, batch_size)):
            x, y = batch
            batches += 1

            # NOTE(review): debug leftover — only batch number 9 is actually
            # evaluated; all averages below are therefore over one batch.
            if batches != 9:
                continue

            non_adv_mask_out = sess.run(
                [tf.arg_max(tf.nn.softmax(logits_pl), dimension=-1)],
                feed_dict={images_pl: x})

            if attack == 'fgsm':
                adv_x = adv_attack.fgsm_run(x, y, images_pl, labels_pl,
                                            logits_pl, exp_config, sess,
                                            attack_args)
            elif attack == 'pgd':
                adv_x = adv_attack.pgd(x, y, images_pl, labels_pl, logits_pl,
                                       exp_config, sess, attack_args)
            elif attack == 'spgd':
                adv_x = adv_attack.pgd_conv(x, y, images_pl, labels_pl,
                                            logits_pl, exp_config, sess,
                                            **attack_args)
            else:
                raise NotImplementedError

            adv_x = [adv_x]
            if add_gaussian:
                print('adding gaussian noise')
                adv_x = adv_attack.add_gaussian_noise(
                    x, adv_x[0], sess,
                    eps=attack_args['eps'],
                    sizes=attack_args['sizes'],
                    weights=attack_args['weights'])

            for i in range(len(adv_x)):
                # Perturbation magnitude: average L2 and L-inf norms over the
                # spatial axes.
                l2_diff = np.average(
                    np.squeeze(np.linalg.norm(adv_x[i] - x, axis=(1, 2))))
                ln_diff = np.average(
                    np.squeeze(np.linalg.norm(adv_x[i] - x, axis=(1, 2),
                                              ord=np.inf)))
                l2_diff_sum += l2_diff
                ln_diff_sum += ln_diff
                # Fix: the original printed l2_diff twice; the second value
                # was clearly meant to be the L-inf diff.
                print(l2_diff, ln_diff)

                adv_mask_out = sess.run(
                    [tf.arg_max(tf.nn.softmax(logits_pl), dimension=-1)],
                    feed_dict={images_pl: adv_x[i]})

                closs, cdice = sess.run(eval_loss, feed_dict={images_pl: x,
                                                              labels_pl: y})
                baseline_closs = closs + baseline_closs
                baseline_cdice = cdice + baseline_cdice

                adv_closs, adv_cdice = sess.run(eval_loss,
                                                feed_dict={images_pl: adv_x[i],
                                                           labels_pl: y})
                attack_closs = adv_closs + attack_closs
                attack_cdice = adv_cdice + attack_cdice

                partial_result = dict({
                    'attack': attack,
                    'attack_args': {
                        k: attack_args[k]
                        for k in ['eps', 'step_alpha', 'epochs']
                    },
                    'baseline_closs': closs,
                    'baseline_cdice': cdice,
                    'attack_closs': adv_closs,
                    'attack_cdice': adv_cdice,
                    'attack_l2_diff': l2_diff,
                    'attack_ln_diff': ln_diff
                })

                jsonString = json.dumps(str(partial_result))
                with open(
                        "eval_results/{}-{}-{}-{}-metrics.json".format(
                            attack, add_gaussian, batches, i),
                        "w") as jsonFile:
                    jsonFile.write(jsonString)

                # --- Qualitative figures ------------------------------------
                image_gt = "eval_results/ground-truth-{}-{}-{}-{}.pdf".format(
                    attack, add_gaussian, batches, i)
                plt.imshow(np.squeeze(x), cmap='gray')
                plt.imshow(np.squeeze(y), cmap='viridis', alpha=0.7)
                plt.axis('off')
                plt.tight_layout()
                plt.savefig(image_gt, format='pdf')
                plt.clf()

                image_benign = "eval_results/benign-{}-{}-{}-{}.pdf".format(
                    attack, add_gaussian, batches, i)
                plt.imshow(np.squeeze(x), cmap='gray')
                plt.imshow(np.squeeze(non_adv_mask_out), cmap='viridis',
                           alpha=0.7)
                plt.axis('off')
                plt.tight_layout()
                plt.savefig(image_benign, format='pdf')
                plt.clf()

                image_adv = "eval_results/adversarial-{}-{}-{}-{}.pdf".format(
                    attack, add_gaussian, batches, i)
                plt.imshow(np.squeeze(adv_x[i]), cmap='gray')
                plt.imshow(np.squeeze(adv_mask_out), cmap='viridis',
                           alpha=0.7)
                plt.axis('off')
                plt.tight_layout()
                plt.savefig(image_adv, format='pdf')
                plt.clf()

                plt.imshow(np.squeeze(adv_x[i]), cmap='gray')
                image_adv_input = "eval_results/adv-input-{}-{}-{}-{}.pdf".format(
                    attack, add_gaussian, batches, i)
                plt.tight_layout()
                plt.axis('off')
                plt.savefig(image_adv_input, format='pdf')
                plt.clf()

                plt.imshow(np.squeeze(x), cmap='gray')
                image_adv_input = "eval_results/benign-input-{}-{}-{}-{}.pdf".format(
                    attack, add_gaussian, batches, i)
                plt.axis('off')
                plt.tight_layout()
                plt.savefig(image_adv_input, format='pdf')
                plt.clf()

                print(attack_closs, attack_cdice, l2_diff, ln_diff)

        print("Evaluation results")
        print("{} Attack Params {}".format(attack, attack_args))
        print("Baseline metrics: Avg loss {}, Avg DICE Score {} ".format(
            baseline_closs / (batches * len(adv_x)),
            baseline_cdice / (batches * len(adv_x))))
        print(
            "{} Attack effectiveness: Avg loss {}, Avg DICE Score {} ".format(
                attack, attack_closs / (batches * len(adv_x)),
                attack_cdice / (batches * len(adv_x))))
        print(
            "{} Attack visibility: Avg l2-norm diff {} Avg l-inf-norm diff {}".
            format(attack, l2_diff_sum / (batches * len(adv_x)),
                   ln_diff_sum / (batches * len(adv_x))))

        result_dict = dict({
            'attack': attack,
            'attack_args':
            {k: attack_args[k]
             for k in ['eps', 'step_alpha', 'epochs']},
            'baseline_closs_avg': baseline_closs / batches,
            'baseline_cdice_avg': baseline_cdice / batches,
            'attack_closs_avg': attack_closs / batches,
            'attack_cdice_avg': attack_cdice / batches,
            'attack_l2_diff': l2_diff_sum / batches,
            'attack_ln_diff': ln_diff_sum / batches
        })
        results.append(copy.deepcopy(result_dict))
        print(results)

        jsonString = json.dumps(results)
        with open("eval_results/{}-results.json".format(attack),
                  "w") as jsonFile:
            jsonFile.write(jsonString)
def generate(cls, index, text, font, out_dir, size, extension, skewing_angle,
             random_skew, blur, random_blur, background_appoint,
             background_type, distorsion_type, distorsion_orientation,
             is_handwritten, name_format, width, alignment, text_color,
             orientation, space_width):
    """
    Render one (Farsi-aware) synthetic text image and save it.

    :param background_appoint: NOTE(review): currently unused — kept for
        interface compatibility with the caller.
    :param background_type: 0 means "pick a random background kind";
        anything else uses a picture background.

    NOTE(review): when ``is_handwritten`` is truthy, ``image`` stays None and
    ``image.rotate`` below raises AttributeError — the handwritten path looks
    unimplemented in this variant; confirm with callers.
    """
    image = None
    ##########################
    # Create picture of text #
    ##########################
    if not is_handwritten:
        # Randomly enlarge the font size for extra variety.
        size = random.randint(size, size + 100)
        image, text_color = ComputerTextGenerator.generate(
            text, font, text_color, size, orientation, space_width)

    random_angle = random.uniform(0 - skewing_angle, skewing_angle)
    rotated_img = image.rotate(
        skewing_angle if not random_skew else random_angle, expand=1)

    #############################
    # Apply distorsion to image #
    #############################
    if distorsion_type == 0:
        distorted_img = rotated_img  # Mind = blown
    elif distorsion_type == 1:
        distorted_img = DistorsionGenerator.sin(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))
    elif distorsion_type == 2:
        distorted_img = DistorsionGenerator.cos(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))
    else:
        distorted_img = DistorsionGenerator.random(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))

    ##################################
    # Resize image to desired format #
    ##################################
    if orientation == 0:  # Horizontal text
        new_width = int(
            float(distorted_img.size[0] + 10) *
            (float(size) / float(distorted_img.size[1] + 10)))
        resized_img = distorted_img.resize((new_width, size - 10),
                                           Image.ANTIALIAS)
        background_width = width if width > 0 else new_width + 10
        background_height = size
    elif orientation == 1:  # Vertical text
        new_height = int(
            float(distorted_img.size[1] + 10) *
            (float(size) / float(distorted_img.size[0] + 10)))
        resized_img = distorted_img.resize((size - 10, new_height),
                                           Image.ANTIALIAS)
        background_width = size
        background_height = new_height + 10
    else:
        raise ValueError("Invalid orientation")

    #############################
    # Generate background image #
    #############################
    try:
        if background_type == 0:
            # Pick a random background kind (quasicrystal disabled upstream).
            bg_choice = random.randint(0, 2)  # renamed: was shadowing builtin `type`
            if bg_choice == 0:
                background = BackgroundGenerator.gaussian_noise(
                    background_height, background_width)
            elif bg_choice == 1:
                background = BackgroundGenerator.plain_white(
                    background_height, background_width)
            elif bg_choice == 2:
                background = BackgroundGenerator.picture(
                    background_height, background_width)
        else:
            background = BackgroundGenerator.picture(background_height,
                                                     background_width)
    # Fix: was a bare `except:` that also caught SystemExit/KeyboardInterrupt
    # and discarded the original traceback; narrow it and chain the cause.
    except Exception as err:
        raise ValueError("Picture Error, Continue!") from err

    # The picture generator signals failure via the sentinel string 'ERROR';
    # fall back to a plain white background in that case.
    if background == 'ERROR':
        background = BackgroundGenerator.plain_white(background_height,
                                                     background_width)

    #############################
    # Place text with alignment #
    #############################
    new_text_width, _ = resized_img.size
    if alignment == 0:
        background.paste(resized_img, (5, 5), resized_img)
    elif alignment == 1:
        background.paste(
            resized_img,
            (int(background_width / 2 - new_text_width / 2), 5), resized_img)
    else:
        background.paste(resized_img,
                         (background_width - new_text_width - 5, 5),
                         resized_img)

    #######################
    # Apply gaussian blur #
    #######################
    final_image = background.filter(
        ImageFilter.GaussianBlur(
            radius=(blur if not random_blur else random.randint(0, blur))))

    # Reshape the text for correct Farsi rendering of the file name.
    text = make_farsi_text(text)

    #####################################
    # Generate name for resulting image #
    #####################################
    if name_format == 0:
        image_name = '{}_{}.{}'.format(text, str(index), extension)
    elif name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), text, extension)
    elif name_format == 2:
        image_name = '{}.{}'.format(str(index), extension)
    else:
        print('{} is not a valid name format. Using default.'.format(
            name_format))
        image_name = '{}_{}.{}'.format(text, str(index), extension)

    # Save the image
    final_image.convert('RGB').save(os.path.join(out_dir, image_name))
def generate(cls, index, text, font, out_dir, height, extension,
             skewing_angle, random_skew, blur, random_blur, background_type,
             is_handwritten, name_format, text_color=-1):
    """
    Render one synthetic text image (simple variant, no distorsion) and
    save it to ``out_dir``.

    :param height: Target height of the final image in pixels.
    :param background_type: 0 gaussian noise, 1 plain white, 2 quasicrystal,
        anything else a picture background.
    :param name_format: 0 text_index, 1 index_text, anything else default.
    :param text_color: Colour forwarded to the computer text generator.
    """
    image = None
    if is_handwritten:
        image = HandwrittenTextGenerator.generate(text)
    else:
        image = ComputerTextGenerator.generate(text, font, text_color)

    random_angle = random.randint(0 - skewing_angle, skewing_angle)
    rotated_img = image.rotate(
        skewing_angle if not random_skew else random_angle, expand=1)
    new_text_width, new_text_height = rotated_img.size

    # Background is padded by 5 px on each side of the rotated text.
    if background_type == 0:
        background = BackgroundGenerator.gaussian_noise(
            new_text_height + 10, new_text_width + 10)
    elif background_type == 1:
        background = BackgroundGenerator.plain_white(new_text_height + 10,
                                                     new_text_width + 10)
    elif background_type == 2:
        background = BackgroundGenerator.quasicrystal(new_text_height + 10,
                                                      new_text_width + 10)
    else:
        background = BackgroundGenerator.picture(new_text_height + 10,
                                                 new_text_width + 10)

    # Treat pure white and pure black pixels as transparent when pasting.
    mask = rotated_img.point(lambda x: 0 if x == 255 or x == 0 else 255, '1')
    background.paste(rotated_img, (5, 5), mask=mask)

    # Create the name for our image
    if name_format == 0:
        image_name = '{}_{}.{}'.format(text, str(index), extension)
    elif name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), text, extension)
    else:
        print('{} is not a valid name format. Using default.'.format(
            name_format))
        image_name = '{}_{}.{}'.format(text, str(index), extension)

    # Resizing the image to desired format.
    # Fix: the resize previously used int(new_text_width) — the unscaled
    # text width — so the aspect-corrected new_width was computed and then
    # ignored, squashing the image. Use the scaled width.
    new_width = float(new_text_width + 10) * (float(height) /
                                              float(new_text_height + 10))
    image_on_background = background.resize((int(new_width), height),
                                            Image.ANTIALIAS)

    final_image = image_on_background.filter(
        ImageFilter.GaussianBlur(
            radius=(blur if not random_blur else random.randint(0, blur))))

    # Save the image
    final_image.convert('RGB').save(os.path.join(out_dir, image_name))
def generate(cls, index, text, font, out_dir, height, extension, skewing_angle,
             random_skew, blur, random_blur, background_type, distorsion_type,
             distorsion_orientation, is_handwritten, name_format, width,
             alignment, text_color):
    """Render `text` on a contrast-checked background and save it.

    Pipeline: draw the text, apply a randomly-signed skew, (optionally)
    distort it, choose a background whose mean brightness contrasts with the
    text (retrying picture backgrounds until it does), paste the text centred
    at the top, derive a file name from `name_format`, randomly crop, resize
    to (width, height) and save as RGB with quality=100.

    NOTE(review): several stages are computed and then discarded — the
    distorted image is replaced by the plain rotated one, and the Gaussian
    blur / enhancement pipeline lives inside a dead string literal, so the
    `distorsion_type`, `blur`, `random_blur` and `alignment` parameters have
    no effect on the output. Presumably deliberate experimentation; confirm
    before relying on them.
    """
    image = None
    ##########################
    # Create picture of text #
    ##########################
    if is_handwritten:
        image = HandwrittenTextGenerator.generate(text)
    else:
        image = ComputerTextGenerator.generate(text, font, text_color, height)
    #image.show()
    # random_angle = random.randint(0-skewing_angle, skewing_angle)
    #random_angle = random.uniform(0-skewing_angle, skewing_angle)
    # Skew sign distribution: 50% of images get no skew, 20% a negative
    # angle, 30% a positive angle.
    #angle: 50%==0,30%>0,20%<0
    flag = random.uniform(0, 1)
    if (flag < 0.5):
        random_angle = 0
    elif (flag < 0.7):
        random_angle = random.uniform(0 - skewing_angle, 0)
    else:
        random_angle = random.uniform(0, skewing_angle)
    rotated_img = image.rotate(
        skewing_angle if not random_skew else random_angle,
        expand=1, resample=Image.BICUBIC)
    # rotated_img = image.transform(image.size, Image.AFFINE, (1, 0.3, 0,
    #                                                          0, 1, 0))
    # image_name = '{}__{}.{}'.format(text, str(index), extension)
    # rotated_img.convert('RGB').save(os.path.join(out_dir, image_name))
    #############################
    # Apply distorsion to image #
    #############################
    if distorsion_type == 0:  #0: None (Default), 1: Sine wave, 2: Cosine wave, 3: 0+1+2 , 4: Random
        distorted_img = rotated_img  # Mind = blown
    elif distorsion_type == 1:
        distorted_img = DistorsionGenerator.sin(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
        )
    elif distorsion_type == 2:
        distorted_img = DistorsionGenerator.cos(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
        )
    elif distorsion_type == 3:
        # Type 3: none / sin / cos, each with probability ~1/3.
        ra = np.random.rand(1)
        if ra > 1/3*2:
            distorted_img = rotated_img  # Mind = blown
        elif ra < 1/3:
            distorted_img = DistorsionGenerator.sin(
                rotated_img,
                vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
                horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
            )
        else:
            distorted_img = DistorsionGenerator.cos(
                rotated_img,
                vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
                horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
            )
    else:
        distorted_img = DistorsionGenerator.random(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
        )
    ##################################
    # Resize image to desired format #
    ##################################
    # if(distorted_img.size[0]==0 | distorted_img.size[1]==0):
    #     height = 3
    # scale = max(distorted_img.size[0] / 280, distorted_img.size[1] / 32)
    # Uniform scale that fits the text into (width, height); the +0.0001
    # guards against division by zero for degenerate (zero-sized) images.
    scale = min(width / (0.0001 + distorted_img.size[0]),
                float(height) / (0.0001 + distorted_img.size[1]))
    # resized_img = cv2.resize(distorted_img, None, fx=scale, fy=scale)
    # new_width = int(float(distorted_img.size[0] + 1) * (float(height) / float(distorted_img.size[1] + 1)))
    # resized_img = distorted_img.resize((new_width, height - 1), Image.ANTIALIAS)
    new_width = int(scale * distorted_img.size[0])
    new_height = int(scale * distorted_img.size[1])
    #resized_img = distorted_img.resize((new_width, new_height - 1), Image.ANTIALIAS)
    # NOTE(review): the distorted/scaled image is discarded here — the image
    # actually pasted below is the rotated, undistorted, unscaled one.
    resized_img = rotated_img
    # #resized_img.show()
    # image_name = '{}__{}.{}'.format('out/', '1', '.jpg')
    # resized_img.convert('RGB').save(os.path.join(image_name))
    # Estimate the text's darkness: count pixels with grey value <= 160
    # ("ink") and weight the global mean by the ink fraction. `mean1` is used
    # below to demand contrast against the background.
    resized_img1 = resized_img.convert("L")
    arr1 = np.asarray(resized_img1)
    amount = 0
    for i in range(len(arr1)):
        for j in range(len(arr1[0])):
            if (arr1[i][j] > 160):
                pass
            else:
                amount = amount + 1
    mean1 = np.mean(arr1) * (amount) / (resized_img1.size[0] * resized_img1.size[1])
    background_width = width if width > 0 else new_width + 1
    #print(resized_img.size[0],resized_img.size[1])
    #############################
    # Generate background image #
    #############################
    if background_type == 0:  #0: Gaussian Noise, 1: Plain white, 2: Quasicrystal, 3: Pictures"
        background = BackgroundGenerator.gaussian_noise(height, background_width)
    elif background_type == 1:
        background = BackgroundGenerator.plain_white(height, background_width)
    elif background_type == 2:
        background = BackgroundGenerator.quasicrystal(height, background_width)
    else:
        #background = BackgroundGenerator.picture(height, background_width)
        # Picture background: keep at least a 280:32 width/height ratio, then
        # resample until it is much brighter than the text (mean difference
        # >= 80) so dark text stays legible.
        background = BackgroundGenerator.picture(
            resized_img.size[1],
            int(max(resized_img.size[0], resized_img.size[1] * 280 / 32)))
        background1 = background.convert("L")
        arr2 = np.asarray(background1)
        mean2 = np.mean(arr2)
        while (mean2 - mean1 < 80):
            background = BackgroundGenerator.picture(
                resized_img.size[1],
                int(max(resized_img.size[0], resized_img.size[1] * 280 / 32)))
            background1 = background.convert("L")
            arr2 = np.asarray(background1)
            mean2 = np.mean(arr2)
            #print(mean1,mean2)
        # Brighten darkish backgrounds by 30 grey levels. NOTE(review): this
        # is uint8 arithmetic, so pixels above 225 presumably wrap around —
        # confirm that is acceptable.
        if mean2 < 120:
            arr2 = np.asarray(background1)
            arr3 = arr2 + 30
            background = Image.fromarray(arr3)
    #print (mean1,mean2,text)
    # image_name = '{}__{}.{}'.format('out/', '1', '.jpg')
    # background.convert('RGB').save(os.path.join(image_name))
    #############################
    # Place text with alignment #
    #############################
    #print(len(text))
    # Text is always pasted horizontally centred at the top; the `alignment`
    # parameter is only honoured by the disabled block below.
    background.paste(
        resized_img,
        (int((background.size[0] - resized_img.size[0]) / 2), 0),
        resized_img)
    #background.show()
    #print(x_left,y_top,x_right,y_bottom)
    #background.show()
    #background = background.resize((width, height),Image.ANTIALIAS)
    new_text_width, _ = resized_img.size
    '''
    if alignment == 0:#0: left, 1: center, 2: right , 3:随机
        background.paste(resized_img, (2, 0), resized_img)
        # background.paste(resized_img, (5, 5), resized_img)
    elif alignment == 1:
        background.paste(resized_img, (int(background_width / 2 - new_text_width / 2), 0), resized_img)
    elif alignment == 2:
        background.paste(resized_img, (background_width - new_text_width+1, 0), resized_img)
    else:
        if np.random.rand(1) > 1/3*2:
            background.paste(resized_img, (2, 2), resized_img)
        elif np.random.rand(1) < 1/3:
            background.paste(resized_img, (int(background_width / 2 - new_text_width / 2), 2), resized_img)
        else:
            background.paste(resized_img, (background_width - new_text_width+1, 2), resized_img)
    '''
    # image_name = '{}_{}.{}'.format('out/', '2', '.jpg')
    # background.convert('RGB').save(os.path.join(image_name))
    #final_image.show()
    #####################################
    # Generate name for resulting image #
    #####################################
    '''text1=list(text) #字符串转列表再转字符串
    for i,c in enumerate(text1):
        if ((c==':')|(c==' ')):
            text1.remove(c)
    for i,c in enumerate(text1):
        if (c=='/'):
            text1[i]='!'
    text1="".join(text1)
    '''
    if name_format == 0:  #0: [TEXT]_[ID].[EXT], 1: [ID]_[TEXT].[EXT] 2: [ID].[EXT] + one file labels.txt containing id-to-label mappings
        # ------- modify the file name ------- #
        # Sanitise characters that are illegal/ambiguous in file names; the
        # CJK characters are mapped to placeholder letters A/B/C.
        # text_name = text.replace('/', 'A')
        text_name = text.replace(':', '')
        text_name = text_name.replace(' ', '')
        text_name = text_name.replace('/', '!')
        text_name = text_name.replace('O', '0')
        text_name = text_name.replace('有', 'A')
        text_name = text_name.replace('机', 'B')
        text_name = text_name.replace('码', 'C')
        image_name = '{}_{}.{}'.format(text_name, str(index), extension)
    elif name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), text, extension)
    elif name_format == 2:
        image_name = '{}.{}'.format(str(index), extension)
    elif name_format == 3:
        text_name = text.replace(':', '')
        text_name = text_name.replace(' ', '')
        text_name = text_name.replace('/', '!')
        text_name = text_name.replace('O', '0')
        image_name = '{}.{}'.format(text_name, extension)
    else:
        print('{} is not a valid name format. Using default.'.format(name_format))
        image_name = '{}_{}.{}'.format(text, str(index), extension)
    #background.convert('RGB').save(os.path.join(out_dir,image_name1))
    #resized_img.convert('RGB').save(os.path.join(out_dir,image_name2))
    ##################################
    #       Apply gaussian blur      #
    ##################################
    #print(image_name)
    # NOTE(review): the blur/enhancement pipeline below is disabled — it is a
    # bare string literal, so final_image is just the pasted background.
    final_image = background
    '''filter(
        ImageFilter.GaussianBlur(
            radius=(blur if not random_blur else random.randint(0, blur))
        )
    )
    #####################################
    #      添加:随机噪声+图像亮度微调      #
    #####################################
    rand = np.random.rand(1)
    if rand > 0.7:# 亮度增强
        brightness = np.random.rand(1)+0.6
        if brightness > 1.1:
            brightness = 1.1
        enh_bri = ImageeEnhance.Brightness(final_image)
        final_image = enh_bri.enhance(brightness)
    # elif np.random.rand(1) > 0.7: # 色度增强
    #     enh_col = ImageEnhance.Color(final_image)
    #     color = np.random.rand(1)*1.5
    #     final_image = enh_col.enhance(color)
    # elif np.random.rand(1) > 0.6: # 对比度增强
    #     enh_con = ImageEnhance.Contrast(final_image)
    #     contrast = np.random.rand(1)*1.2+0.5
    #     final_image = enh_con.enhance(contrast)
    elif (rand < 0.7) & (rand > 0.3): # 锐度增强
        enh_sha = ImageEnhance.Sharpness(final_image)
        sharpness = np.random.rand(1)*3
        final_image = enh_sha.enhance(sharpness)
    else:
        rand1 = np.random.rand(1)
        if rand1 > 0.7:
            percentrand1 = np.random.rand(1) * 0.002
            final_image = addGaussianNoise(final_image, percentrand1) # 添加高斯噪声
        elif rand1 < 0.3:
            percentrand2 = np.random.rand(1) * 0.004
            final_image = SaltAndPepper(final_image, percentrand2) # 添加椒盐噪声
    #final_image.show()
    '''
    # Save the image
    # resize the image
    # Random crop before the final resize: 20% no crop, 40% all sides,
    # 20% top/bottom only, 20% left/right only.
    # NOTE(review): `mean2` is only defined in the picture-background branch;
    # other background types will raise NameError here unless flag1 < 0.2.
    flag1 = random.uniform(0, 1)
    if (flag1 < 0.2):
        final_image1 = final_image
    elif (flag1 < 0.6):
        final_image1 = RGBAcrop(final_image, Lthreshold=mean2-30, direction='')
    elif (flag1 < 0.8):
        final_image1 = RGBAcrop(final_image, Lthreshold=mean2-30, direction='td')
    else:
        final_image1 = RGBAcrop(final_image, Lthreshold=mean2-30, direction='lr')
    final_image1 = final_image1.resize((width, height), Image.ANTIALIAS)
    # Reject crops that came out almost entirely black (mean grey < 10) and
    # fall back to the uncropped image.
    pic3 = final_image1.convert("L")
    arr3 = np.asarray(pic3)
    mean3 = np.mean(arr3)
    if mean3 < 10:
        final_image = final_image.resize((width, height), Image.ANTIALIAS)
    else:
        final_image = final_image1.resize((width, height), Image.ANTIALIAS)
    final_image.convert("RGB").save(os.path.join(out_dir, image_name), quality=100)
def run_training(continue_run):
    """Run the recursive weakly-supervised (scribble) training loop.

    Loads the scribble dataset, builds the TF1 graph (segmentation network
    plus a separately-optimised CRF head under `crf_scope`), then trains for
    `exp_config.max_epochs` epochs. Every `exp_config.epochs_per_recursion`
    epochs the current model predicts a new ground truth (`predict_next_gt`)
    and training continues on it (a "recursion"). Latest / best-dice /
    best-xent checkpoints are written per recursion under the module-level
    `log_dir`; validation results are appended to 'val_results.txt'.

    :param continue_run: If True, locate the latest checkpoint to resume
        from. NOTE(review): the actual `saver.restore` call is commented out
        below, so resuming currently only recomputes `init_step`/`start_epoch`
        and they are then reset to 0 after variable initialisation.
    """
    logging.info('EXPERIMENT NAME: %s' % exp_config.experiment_name)
    already_created_recursion = True
    print("ALready created recursion : " + str(already_created_recursion))
    init_step = 0
    # Load data
    base_data, recursion_data, recursion = acdc_data.load_and_maybe_process_scribbles(
        scribble_file=sys_config.project_root + exp_config.scribble_data,
        target_folder=log_dir,
        percent_full_sup=exp_config.percent_full_sup,
        scr_ratio=exp_config.length_ratio)
    #wrap everything from this point onwards in a try-except to catch keyboard interrupt so
    #can control h5py closing data
    try:
        loaded_previous_recursion = False
        start_epoch = 0
        if continue_run:
            logging.info(
                '!!!!!!!!!!!!!!!!!!!!!!!!!!!! Continuing previous run !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            )
            try:
                try:
                    init_checkpoint_path = utils.get_latest_model_checkpoint_path(
                        log_dir, 'recursion_{}_model.ckpt'.format(recursion))
                except:
                    # No checkpoint for the current recursion yet — fall back
                    # to the previous recursion's checkpoint.
                    print("EXCEPTE GİRDİ")
                    init_checkpoint_path = utils.get_latest_model_checkpoint_path(
                        log_dir, 'recursion_{}_model.ckpt'.format(recursion - 1))
                    loaded_previous_recursion = True
                logging.info('Checkpoint path: %s' % init_checkpoint_path)
                init_step = int(
                    init_checkpoint_path.split('/')[-1].split('-')
                    [-1]) + 1  # plus 1 b/c otherwise starts with eval
                start_epoch = int(init_step / (len(base_data['images_train']) / 4))
                logging.info('Latest step was: %d' % init_step)
                logging.info('Continuing with epoch: %d' % start_epoch)
            except:
                logging.warning(
                    '!!! Did not find init checkpoint. Maybe first run failed. Disabling continue mode...'
                )
                continue_run = False
                init_step = 0
                start_epoch = 0
            logging.info(
                '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            )
        if loaded_previous_recursion:
            logging.info("Data file exists for recursion {} "
                         "but checkpoints only present up to recursion {}".format(
                             recursion, recursion - 1))
            logging.info("Likely means postprocessing was terminated")
            # if not already_created_recursion:
            #     recursion_data = acdc_data.load_different_recursion(recursion_data, -1)
            #     recursion-=1
            # else:
            start_epoch = 0
            init_step = 0
        # load images and validation data
        images_train = np.array(base_data['images_train'])
        scribbles_train = np.array(base_data['scribbles_train'])
        images_val = np.array(base_data['images_test'])
        labels_val = np.array(base_data['masks_test'])
        # if exp_config.use_data_fraction:
        #     num_images = images_train.shape[0]
        #     new_last_index = int(float(num_images)*exp_config.use_data_fraction)
        #
        #     logging.warning('USING ONLY FRACTION OF DATA!')
        #     logging.warning(' - Number of imgs orig: %d, Number of imgs new: %d' % (num_images, new_last_index))
        #     images_train = images_train[0:new_last_index,...]
        #     labels_train = labels_train[0:new_last_index,...]
        logging.info('Data summary:')
        logging.info(' - Images:')
        logging.info(images_train.shape)
        logging.info(images_train.dtype)
        #logging.info(' - Labels:')
        #logging.info(labels_train.shape)
        #logging.info(labels_train.dtype)

        # Tell TensorFlow that the model will be built into the default Graph.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        # with tf.Graph().as_default():
        with tf.Session(config=config) as sess:
            # Generate placeholders for the images and labels.
            image_tensor_shape = [exp_config.batch_size] + list(
                exp_config.image_size) + [1]
            mask_tensor_shape = [exp_config.batch_size] + list(
                exp_config.image_size)
            images_placeholder = tf.placeholder(tf.float32,
                                                shape=image_tensor_shape,
                                                name='images')
            labels_placeholder = tf.placeholder(tf.uint8,
                                                shape=mask_tensor_shape,
                                                name='labels')
            learning_rate_placeholder = tf.placeholder(tf.float32, shape=[])
            training_time_placeholder = tf.placeholder(tf.bool, shape=[])
            keep_prob = tf.placeholder(tf.float32, shape=[])
            crf_learning_rate_placeholder = tf.placeholder(tf.float32, shape=[])
            tf.summary.scalar('learning_rate', learning_rate_placeholder)

            # Build a Graph that computes predictions from the inference model.
            logits = model.inference(images_placeholder,
                                     keep_prob,
                                     exp_config.model_handle,
                                     training=training_time_placeholder,
                                     nlabels=exp_config.nlabels)

            # Add to the Graph the Ops for loss calculation.
            [loss, _, weights_norm] = model.loss(
                logits,
                labels_placeholder,
                nlabels=exp_config.nlabels,
                loss_type=exp_config.loss_type,
                weight_decay=exp_config.weight_decay
            )  # second output is unregularised loss
            tf.summary.scalar('loss', loss)
            tf.summary.scalar('weights_norm_term', weights_norm)

            # Add to the Graph the Ops that calculate and apply gradients.
            global_step = tf.Variable(0, name='global_step', trainable=False)
            crf_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                              scope='crf_scope')
            # FIX: the original tested `v.name not in crf_variables`, comparing
            # a name string against Variable objects — always True — so the
            # network optimizer silently trained the CRF variables as well.
            # Compare names against names to really exclude the CRF head.
            crf_variable_names = set(v.name for v in crf_variables)
            restore_var = [
                v for v in tf.all_variables()
                if v.name not in crf_variable_names
            ]
            # FIX: a second, duplicate `global_step` Variable was created here
            # in the original; removed so both optimizers share one counter.
            network_train_op = tf.train.AdamOptimizer(
                learning_rate=learning_rate_placeholder).minimize(
                    loss,
                    var_list=restore_var,
                    colocate_gradients_with_ops=True,
                    global_step=global_step)
            crf_train_op = tf.train.AdamOptimizer(
                learning_rate=crf_learning_rate_placeholder).minimize(
                    loss,
                    var_list=crf_variables,
                    colocate_gradients_with_ops=True,
                    global_step=global_step)

            eval_val_loss = model.evaluation(
                logits,
                labels_placeholder,
                images_placeholder,
                nlabels=exp_config.nlabels,
                loss_type=exp_config.loss_type,
                weak_supervision=True,
                cnn_threshold=exp_config.cnn_threshold,
                include_bg=False)

            # Build the summary Tensor based on the TF collection of Summaries.
            summary = tf.summary.merge_all()

            # Add the variable initializer Op.
            init = tf.global_variables_initializer()

            # Create a saver for writing training checkpoints.
            # Only keep two checkpoints, as checkpoints are kept for every recursion
            # and they can be 300MB +
            saver = tf.train.Saver(max_to_keep=2)
            saver_best_dice = tf.train.Saver(max_to_keep=2)
            saver_best_xent = tf.train.Saver(max_to_keep=2)

            # FIX: the original created a fresh `sess = tf.Session()` here,
            # shadowing the configured session opened by the `with` block
            # above (and dropping the GPU options); the existing session is
            # used instead.

            # Instantiate a SummaryWriter to output summaries and the Graph.
            summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
            # with tf.name_scope('monitoring'):
            val_error_ = tf.placeholder(tf.float32, shape=[], name='val_error')
            val_error_summary = tf.summary.scalar('validation_loss', val_error_)
            val_dice_ = tf.placeholder(tf.float32, shape=[], name='val_dice')
            val_dice_summary = tf.summary.scalar('validation_dice', val_dice_)
            val_summary = tf.summary.merge(
                [val_error_summary, val_dice_summary])
            train_error_ = tf.placeholder(tf.float32, shape=[], name='train_error')
            train_error_summary = tf.summary.scalar('training_loss', train_error_)
            train_dice_ = tf.placeholder(tf.float32, shape=[], name='train_dice')
            train_dice_summary = tf.summary.scalar('training_dice', train_dice_)
            train_summary = tf.summary.merge(
                [train_error_summary, train_dice_summary])

            # Run the Op to initialize the variables.
            sess.run(init)
            # if continue_run:
            #     # Restore session
            #     saver.restore(sess, init_checkpoint_path)
            # saver.restore(sess,"/scratch_net/biwirender02/cany/scribble/logdir/heart_dropout_rnn_exp/recursion_1_model_best_dice.ckpt-12699")
            init_step = 0
            recursion = 0
            start_epoch = 0
            # FIX: `step` was never initialised in the original (this line was
            # commented out), raising a NameError on its first use inside the
            # batch loop below.
            step = init_step
            curr_lr = exp_config.learning_rate / 10
            crf_curr_lr = 1e-07 / 10
            no_improvement_counter = 0
            best_val = np.inf
            last_train = np.inf
            loss_history = []
            loss_gradient = np.inf
            best_dice = 0
            logging.info('RECURSION {0}'.format(recursion))
            # random walk - if it already has been random walked it won't redo
            if recursion == 0:
                recursion_data = acdc_data.random_walk_epoch(
                    recursion_data, exp_config.rw_beta,
                    exp_config.rw_threshold, exp_config.random_walk)
                print("Random walku geçti")
                #get ground truths
                labels_train = np.array(recursion_data['random_walked'])
            else:
                labels_train = np.array(recursion_data['predicted'])
            print("Start epoch : " + str(start_epoch) + " : max epochs : " +
                  str(exp_config.epochs_per_recursion))
            for epoch in range(start_epoch, exp_config.max_epochs):
                if (epoch % exp_config.epochs_per_recursion == 0
                        and epoch != 0):
                    #Have reached end of recursion
                    recursion_data = predict_next_gt(
                        data=recursion_data,
                        images_train=images_train,
                        images_placeholder=images_placeholder,
                        training_time_placeholder=training_time_placeholder,
                        keep_prob=keep_prob,
                        logits=logits,
                        sess=sess)
                    # recursion_data = postprocess_gt(data=recursion_data,
                    #                                 images_train=images_train,
                    #                                 scribbles_train=scribbles_train)
                    recursion += 1
                    # random walk - if it already has been random walked it won't redo
                    # recursion_data = acdc_data.random_walk_epoch(recursion_data,
                    #                                              exp_config.rw_beta,
                    #                                              exp_config.rw_threshold,
                    #                                              exp_config.random_walk)
                    #get ground truths
                    labels_train = np.array(recursion_data['predicted'])
                    #reinitialise savers - otherwise, no checkpoints will be saved for each recursion
                    saver = tf.train.Saver(max_to_keep=2)
                    saver_best_dice = tf.train.Saver(max_to_keep=2)
                    saver_best_xent = tf.train.Saver(max_to_keep=2)
                logging.info(
                    'Epoch {0} ({1} of {2} epochs for recursion {3})'.format(
                        epoch, 1 + epoch % exp_config.epochs_per_recursion,
                        exp_config.epochs_per_recursion, recursion))
                # for batch in iterate_minibatches(images_train,
                #                                  labels_train,
                #                                  batch_size=exp_config.batch_size,
                #                                  augment_batch=exp_config.augment_batch):
                # You can run this loop with the BACKGROUND GENERATOR, which will lead to some improvements in the
                # training speed. However, be aware that currently an exception inside this loop may not be caught.
                # The batch generator may just continue running silently without warning even though the code has
                # crashed.
                for batch in BackgroundGenerator(
                        iterate_minibatches(
                            images_train,
                            labels_train,
                            batch_size=exp_config.batch_size,
                            augment_batch=exp_config.augment_batch)):
                    # Warm-up: use lr/10 for the first 50 steps.
                    if exp_config.warmup_training:
                        if step < 50:
                            curr_lr = exp_config.learning_rate / 10.0
                        elif step == 50:
                            curr_lr = exp_config.learning_rate
                    # Decay both learning rates by 10% every 3000 steps.
                    if ((step % 3000 == 0) & (step > 0)):
                        curr_lr = curr_lr * 0.9
                        crf_curr_lr = crf_curr_lr * 0.9
                    start_time = time.time()
                    # batch = bgn_train.retrieve()
                    x, y = batch
                    # TEMPORARY HACK (to avoid incomplete batches
                    if y.shape[0] < exp_config.batch_size:
                        step += 1
                        continue
                    network_feed_dict = {
                        images_placeholder: x,
                        labels_placeholder: y,
                        learning_rate_placeholder: curr_lr,
                        keep_prob: 0.5,
                        training_time_placeholder: True
                    }
                    crf_feed_dict = {
                        images_placeholder: x,
                        labels_placeholder: y,
                        crf_learning_rate_placeholder: crf_curr_lr,
                        keep_prob: 1,
                        training_time_placeholder: True
                    }
                    # Every 10th step also takes a CRF optimiser step.
                    if (step % 10 == 0):
                        _, loss_value = sess.run([crf_train_op, loss],
                                                 feed_dict=crf_feed_dict)
                    _, loss_value = sess.run([network_train_op, loss],
                                             feed_dict=network_feed_dict)
                    duration = time.time() - start_time
                    # Write the summaries and print an overview fairly often.
                    if step % 10 == 0:
                        # Print status to stdout.
                        logging.info('Step %d: loss = %.6f (%.3f sec)' %
                                     (step, loss_value, duration))
                        # Update the events file.
                    # Save a checkpoint and evaluate the model periodically.
                    if (step + 1) % exp_config.val_eval_frequency == 0:
                        checkpoint_file = os.path.join(
                            log_dir, 'recursion_{}_model.ckpt'.format(recursion))
                        saver.save(sess, checkpoint_file, global_step=step)
                        # Evaluate against the training set.
                        # Evaluate against the validation set.
                        logging.info('Validation Data Eval:')
                        # NOTE(review): `keep_prob` is passed as an extra
                        # positional argument here; the do_eval defined earlier
                        # in this file does not accept it — confirm which
                        # do_eval variant is in scope.
                        [val_loss, val_dice] = do_eval(sess,
                                                       eval_val_loss,
                                                       images_placeholder,
                                                       labels_placeholder,
                                                       training_time_placeholder,
                                                       keep_prob,
                                                       images_val,
                                                       labels_val,
                                                       exp_config.batch_size)
                        val_summary_msg = sess.run(val_summary,
                                                   feed_dict={
                                                       val_error_: val_loss,
                                                       val_dice_: val_dice
                                                   })
                        summary_writer.add_summary(val_summary_msg, step)
                        if val_dice > best_dice:
                            best_dice = val_dice
                            best_file = os.path.join(
                                log_dir,
                                'recursion_{}_model_best_dice.ckpt'.format(recursion))
                            saver_best_dice.save(sess, best_file, global_step=step)
                            logging.info(
                                'Found new best dice on validation set! - {} - '
                                'Saving recursion_{}_model_best_dice.ckpt'.
                                format(val_dice, recursion))
                            text_file = open('val_results.txt', "a")
                            text_file.write("\nVal dice " + str(step) + " : " +
                                            str(val_dice))
                            text_file.close()
                        if val_loss < best_val:
                            best_val = val_loss
                            best_file = os.path.join(
                                log_dir,
                                'recursion_{}_model_best_xent.ckpt'.format(recursion))
                            saver_best_xent.save(sess, best_file, global_step=step)
                            logging.info(
                                'Found new best crossentropy on validation set! - {} - '
                                'Saving recursion_{}_model_best_xent.ckpt'.
                                format(val_loss, recursion))
                    step += 1
    except Exception:
        raise
def generate(cls, index, text, font, out_dir, height, extension, skewing_angle,
             random_skew, blur, random_blur, background_type, distorsion_type,
             distorsion_orientation, is_handwritten, name_format, text_color=-1):
    """Render `text` with skew + distorsion on a background and save it.

    Pipeline: draw the text, rotate it, apply the selected distorsion, paste
    it on the selected background, resize to `height` (keeping aspect ratio),
    blur, and save as RGB under `out_dir` with spaces stripped from the name.

    :param distorsion_type: 0 none, 1 sine, 2 cosine, else random.
    :param distorsion_orientation: 0 vertical, 1 horizontal, 2 both.
    (Remaining parameters as in the sibling `generate` variants.)
    """
    image = None
    ##########################
    # Create picture of text #
    ##########################
    if is_handwritten:
        image = HandwrittenTextGenerator.generate(text)
    else:
        image = ComputerTextGenerator.generate(text, font, text_color)

    random_angle = random.randint(0 - skewing_angle, skewing_angle)
    rotated_img = image.rotate(
        skewing_angle if not random_skew else random_angle, expand=1)

    #############################
    # Apply distorsion to image #
    #############################
    if distorsion_type == 0:
        distorted_img = rotated_img  # Mind = blown
    elif distorsion_type == 1:
        distorted_img = DistorsionGenerator.sin(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
        )
    elif distorsion_type == 2:
        distorted_img = DistorsionGenerator.cos(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
        )
    else:
        distorted_img = DistorsionGenerator.random(
            rotated_img,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
        )

    new_text_width, new_text_height = distorted_img.size

    #############################
    # Generate background image #
    #############################
    if background_type == 0:
        background = BackgroundGenerator.gaussian_noise(new_text_height + 10,
                                                        new_text_width + 10)
    elif background_type == 1:
        background = BackgroundGenerator.plain_white(new_text_height + 10,
                                                     new_text_width + 10)
    elif background_type == 2:
        background = BackgroundGenerator.quasicrystal(new_text_height + 10,
                                                      new_text_width + 10)
    else:
        background = BackgroundGenerator.picture(new_text_height + 10,
                                                 new_text_width + 10)

    # Mask out pure white/black pixels so only the text itself is pasted.
    mask = distorted_img.point(lambda x: 0 if x == 255 or x == 0 else 255, '1')
    background.paste(distorted_img, (5, 5), mask=mask)

    ##################################
    # Resize image to desired format #
    ##################################
    # FIX: the aspect-ratio-corrected width `new_width` was computed but the
    # resize previously used the raw `new_text_width`, distorting the image.
    new_width = float(new_text_width + 10) * (float(height) /
                                              float(new_text_height + 10))
    image_on_background = background.resize((int(new_width), height),
                                            Image.ANTIALIAS)
    final_image = image_on_background.filter(
        ImageFilter.GaussianBlur(
            radius=(blur if not random_blur else random.randint(0, blur))
        )
    )

    #####################################
    # Generate name for resulting image #
    #####################################
    if name_format == 0:
        image_name = '{}_{}.{}'.format(text, str(index), extension)
    elif name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), text, extension)
    else:
        print('{} is not a valid name format. Using default.'.format(name_format))
        image_name = '{}_{}.{}'.format(text, str(index), extension)

    # Save the image (spaces are stripped from the file name).
    image_name_save = image_name.replace(' ', '')
    final_image.convert('RGB').save(os.path.join(out_dir, image_name_save))
def generate(cls, index, text, font, out_dir, height, extension, skewing_angle,
             random_skew, blur, random_blur, background_type, distorsion_type,
             distorsion_orientation, is_handwritten, name_format, text_color=-1):
    """Render an obfuscated version of `text` and save it with its label.

    Lowercase letters and spaces are mapped through a fixed substitution
    table to build `output` (the label); ~10% of the characters are randomly
    replaced by spaces first. The text is drawn directly onto the background
    with a random font size and color, rotated, distorted, resized and
    blurred. Labels are accumulated in the module-level `dictionary` and
    dumped to 'label.json' when `index` reaches 99.

    NOTE(review): the `height` parameter is immediately overwritten with a
    random value in [32, 256] — presumably intentional randomisation; confirm.
    """
    decode = {
        'a': '1', 'b': '2', 'c': '3', 'd': '4', 'e': '5', 'f': '6',
        'g': '7', 'h': '8', 'i': '9', 'j': '0', 'k': '0', 'l': '1',
        'm': '1', 'n': '0', 'o': '0', 'p': '1', 'q': '2', 'r': '3',
        's': '4', 't': '5', 'u': '6', 'v': '7', 'w': '8', 'x': '9',
        'y': '8', 'z': '7', ' ': '-',
    }
    output = ''
    # Randomly knock ~10% of characters out to spaces, then trim the ends.
    text = ''.join([c if random.random() > 0.1 else ' ' for c in text])
    text = text.strip()
    height = random.randint(32, 256)
    # Build the label: lowercase/space go through the substitution table,
    # everything else passes through unchanged.
    for char in text:
        if char.islower() or char == ' ':
            output += decode[char]
        else:
            output += char

    ##########################
    # Create picture of text #
    ##########################
    # if is_handwritten:
    #     image = HandwrittenTextGenerator.generate(text)
    # else:
    #     image = ComputerTextGenerator.generate(text, font, text_color)
    random_angle = random.randint(0 - skewing_angle, skewing_angle)
    # rotated_img = image.rotate(skewing_angle if not random_skew else random_angle, expand=1)
    # new_text_width, new_text_height = distorted_img.size
    image_font = ImageFont.truetype(font=font, size=random.randint(32, 256))
    new_text_width, new_text_height = image_font.getsize(text)

    #############################
    # Generate background image #
    #############################
    if background_type == 0:
        background = BackgroundGenerator.gaussian_noise(
            new_text_height + 10, new_text_width + 10)
    elif background_type == 1:
        background = BackgroundGenerator.plain_white(
            new_text_height + 10, new_text_width + 10)
    elif background_type == 2:
        background = BackgroundGenerator.quasicrystal(
            new_text_height + 10, new_text_width + 10)
    else:
        background = BackgroundGenerator.picture(new_text_height + 10,
                                                 new_text_width + 10)

    # mask = distorted_img.point(lambda x: 0 if x == 255 or x == 0 else 255, '1')
    # background.paste(distorted_img, (5, 5), mask=mask)
    # Draw the text straight onto the background in a random RGB color.
    font_color = (random.randint(0, 255), random.randint(0, 255),
                  random.randint(0, 255))
    txt_draw = ImageDraw.Draw(background)
    txt_draw.text((0, 0), text, fill=font_color, font=image_font)
    background = background.rotate(
        skewing_angle if not random_skew else random_angle, expand=1)

    #############################
    # Apply distorsion to image #
    #############################
    if distorsion_type == 0:
        background = background  # Mind = blown
    elif distorsion_type == 1:
        background = DistorsionGenerator.sin(
            background,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))
    elif distorsion_type == 2:
        background = DistorsionGenerator.cos(
            background,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))
    else:
        background = DistorsionGenerator.random(
            background,
            vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
            horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2))

    ##################################
    # Resize image to desired format #
    ##################################
    # FIX: the aspect-ratio-corrected width `new_width` was computed but the
    # resize previously used the raw `new_text_width`, distorting the image.
    new_width = float(new_text_width + 10) * (float(height) /
                                              float(new_text_height + 10))
    image_on_background = background.resize((int(new_width), height),
                                            Image.ANTIALIAS)
    final_image = image_on_background.filter(
        ImageFilter.GaussianBlur(
            radius=(blur if not random_blur else random.randint(0, blur))))

    #####################################
    # Generate name for resulting image #
    #####################################
    dictionary[index] = output
    if (index == 99):
        with open('label.json', 'w') as outfile:
            json.dump(dictionary, outfile)
    if name_format == 0:
        image_name = '{}_{}.{}'.format(output, str(index), extension)
    elif name_format == 1:
        image_name = '{}_{}.{}'.format(str(index), output, extension)
    else:
        print('{} is not a valid name format. Using default.'.format(
            name_format))
        image_name = '{}_{}.{}'.format(output, str(index), extension)

    # Save the image
    final_image.convert('RGB').save(os.path.join(out_dir, image_name))
class YACSGame(Widget):
    """Root game widget: wires up the KivEnt gameworld, loads all assets
    (ships, weapons, asteroids, music), and manages the game states
    ('main', 'minimap', 'worldmap').
    """

    # Entity id of the player's ship (None when no player ship exists).
    player_entity = NumericProperty(None, allownone=True)
    # True while clear() is tearing entities down, so destruction
    # callbacks (e.g. spawn_explosion_for_blaster) can skip spawning.
    is_clearing = BooleanProperty(False)
    # Camera zoom factor for the main view.
    zoom_level = NumericProperty(.5)

    def __init__(self, **kwargs):
        super(YACSGame, self).__init__(**kwargs)
        # Initialize every gameworld system up front; init_game runs once
        # the gameworld is ready.
        self.gameworld.init_gameworld([
            'back_stars', 'mid_stars', 'position', 'sun1', 'sun2',
            'camera_stars1', 'camera_stars2', 'map', 'planet1', 'planet2',
            'camera_sun1', 'camera_sun2', 'camera_planet1', 'camera_planet2',
            'scale', 'rotate', 'color', 'particles', 'emitters',
            'particle_renderer', 'cymunk_physics', 'steering', 'ship_system',
            'projectiles', 'projectile_weapons', 'lifespan', 'combat_stats',
            'asteroids', 'steering_ai', 'weapon_ai', 'shields',
            'shield_renderer', 'map_grid', 'grid_camera', 'radar_renderer',
            'radar_color', 'world_grid', 'global_map', 'global_camera',
            'world_map', 'global_map_renderer', 'global_map_renderer2',
            'global_map_planet_renderer'
        ], callback=self.init_game)
        self.background_generator = BackgroundGenerator(self.gameworld)

    def init_game(self):
        """One-time setup after the gameworld is initialized: states,
        collisions, assets, grid, music, map — then enter 'main'."""
        self.world_seed = WorldSeed('kovak', (2500., 2500.))
        self.setup_states()
        self.ids.shields.register_collision()
        self.load_assets()
        self.background_generator.generate()
        self.setup_grid()
        self.load_music()
        self.gameworld.sound_manager.music_volume = 0.
        self.ids.global_map.setup(self.world_seed, self.background_generator)
        self.set_state('main')

    def load_level(self):
        """Generate and populate the zone the player currently occupies on
        the global map, then spawn the player and the minimap grid."""
        global_map = self.ids.global_map
        self.background_generator.generate_map(
            self.world_seed, global_map.world_x, global_map.world_y)
        global_map.add_zone_to_visited(
            (global_map.world_x, global_map.world_y))
        self.ids.player.load_player()
        self.create_minimap_grid()
        #self.load_enemy_ship()

    def setup_grid(self):
        """Build the minimap grid model and remember its offset for later
        entity creation in create_minimap_grid()."""
        outer_color = [150, 0, 100, 100]
        inner_color = [150, 0, 100, 255]
        grid_size = 17
        actual_size = (2500, 2500)
        actual_pos = (0., 0.)
        grid_offset, grid_data, cell_size = generate_grid(
            0., 10., 1., actual_size, actual_pos, grid_size, outer_color,
            inner_color)
        self.grid_model = load_grid(self.gameworld, grid_data,
                                    'mini_map_grid')
        self.grid_offset = grid_offset

    def create_minimap_grid(self):
        """Create the minimap grid entity from the model prepared in
        setup_grid(); returns the new entity id."""
        create_dict = {
            'position': self.grid_offset,
            'map_grid': {'model_key': self.grid_model},
        }
        ent = self.gameworld.init_entity(create_dict,
                                         ['position', 'map_grid'])
        return ent

    def load_music(self):
        """Load the ten soundtrack files and start a randomly chosen one
        on loop."""
        sound_manager = self.gameworld.sound_manager
        sound_manager.loop_music = True
        for x in range(1, 11):
            name = 'track' + str(x)
            sound_manager.load_music(
                name, get_asset_path('assets', 'music', name + '.ogg'))
        # dict.keys() is a non-indexable view on Python 3, which makes
        # random.choice raise TypeError; wrap in list() (also fine on
        # Python 2).
        sound_manager.play_track(
            choice(list(sound_manager.music_dict.keys())))

    def spawn_explosion_for_blaster(self, entity_id):
        """Destruction callback for blaster projectiles: spawn an orb
        explosion at the projectile's position (skipped during clear())."""
        if not self.is_clearing:
            entity = self.gameworld.entities[entity_id]
            position = entity.position
            self.ids.explosions.spawn_object_from_template(
                'orb_explosion', position.pos)

    def load_ships(self, ship_collision_type):
        """Register the 'ship1' (Bulldog) template: textures, radar and
        shield models, engine emitter, and all combat stats.

        :param ship_collision_type: physics collision type id for ships,
            allocated in load_assets().
        """
        ship_system = self.ids.ship_system
        emitter_system = self.ids.emitter
        model_manager = self.gameworld.model_manager
        sound_manager = self.gameworld.sound_manager
        player_system = self.ids.player
        # Loading registers the textures under their file-based names
        # ('ship1-radar', 'ship1'); the return values are not needed.
        texture_manager.load_image(
            get_asset_path('assets', 'ships', 'ship1-radar.png'))
        texture_manager.load_image(
            get_asset_path('assets', 'ships', 'ship1.png'))
        radar_model_name = model_manager.load_textured_rectangle(
            'vertex_format_4f', 104, 128, 'ship1-radar', '4f_ship1_radar')
        model_name = model_manager.load_textured_rectangle(
            'vertex_format_4f', 104, 128, 'ship1', '4f_ship1',
        )
        effect_name = emitter_system.load_effect(
            get_asset_path('assets', 'vfx', 'engine1.kep'))
        shield_model_data = generate_shield_model(85., 10.)
        shield_model = model_manager.load_model(
            'vertex_format_2f4ub', shield_model_data['vert_count'],
            shield_model_data['ind_count'], 'shield_model',
            indices=shield_model_data['indices'],
            vertices=shield_model_data['vertices'],
            do_copy=False)
        explosion_sound = sound_manager.load_sound(
            'explosion_sound',
            get_asset_path('assets', 'soundfx', 'explosion.wav'))
        ship_system.register_template(
            'ship1', 'Bulldog', model_name, 'ship1', ship_collision_type,
            health=100., mass=250., max_speed=200., max_turn_speed=200.,
            accel=15000., angular_accel=45., boost_force=25000.,
            boost_drain=25., max_boost_speed=300., armor=5.,
            boost_reserve=50., boost_regen=10., width=96., height=108.,
            weapons=['ship1_shotgun'], emitters=[effect_name],
            emitter_speed_base=90., scale_base=2.2,
            emitter_scaling_factor=150., explosion_sound=explosion_sound,
            has_shield=True, shield_model=shield_model, shield_health=100.,
            shield_radius=90., shield_timeout=1.25, shield_recharge=20.,
            radar_model_name=radar_model_name, radar_texture='ship1-radar')
        # Loaded for the (currently disabled) engine rumble hookup below.
        ship_engine_rumble = sound_manager.load_sound(
            'engine_rumble',
            get_asset_path('assets', 'soundfx', 'shipengine.wav'),
        )
        # player_system.engine_rumble = ship_engine_rumble

    def load_weapons(self):
        """Register the projectile and weapon templates for the blaster,
        rifle and shotgun, including all their sound effects."""
        emitter_system = self.ids.emitter
        projectile_system = self.ids.projectiles
        sound_manager = self.gameworld.sound_manager
        weapon_system = self.ids.weapons
        model_manager = self.gameworld.model_manager
        explosion_system = self.ids.explosions

        # --- Blaster ---
        emitter_system.load_effect(
            get_asset_path('assets', 'vfx', 'blaster_projectile.kep'))
        blaster_hit_sound = sound_manager.load_sound(
            'blaster_hit',
            get_asset_path('assets', 'soundfx', 'blaster', 'hit.wav'),
            track_count=2)
        blaster_bullet_type = projectile_system.register_projectile_template(
            'blaster_projectile', 10., 1., 1, None, None, 12, 12, 1., 550.,
            50., main_effect="blaster_projectile",
            hit_sound=blaster_hit_sound,
            destruction_callback=self.spawn_explosion_for_blaster)
        blaster_begin = sound_manager.load_sound(
            'blaster-reload-begin',
            get_asset_path('assets', 'soundfx', 'blaster',
                           'reload-laser.wav'),
            track_count=2)
        blaster_end = sound_manager.load_sound(
            'blaster-reload-end',
            get_asset_path('assets', 'soundfx', 'blaster', 'reload-end.wav'),
            track_count=2)
        blaster_fire_sound = sound_manager.load_sound(
            'blaster-shoot',
            get_asset_path('assets', 'soundfx', 'blaster', 'shoot.wav'),
            track_count=2)
        emitter_system.load_effect(
            get_asset_path('assets', 'vfx', 'orb_explosion.kep'))
        explosion_system.register_template('orb_explosion', 'orb_explosion',
                                           .3, .6)
        weapon_system.register_weapon_template(
            'ship1_blaster', 'Blaster',
            reload_time=3.5,
            projectile_type=1,
            ammo_count=100,
            rate_of_fire=.4,
            clip_size=14,
            barrel_offsets=[(46., 59.), (-46., 59.)],
            barrel_count=2,
            ammo_type=blaster_bullet_type,
            projectile_width=12.,
            projectile_height=12.,
            accel=500,
            reload_begin_sound=blaster_begin,
            reload_end_sound=blaster_end,
            fire_sound=blaster_fire_sound,
            spread=radians(0.),
            shot_count=1,
            time_between_shots=0.35,
        )

        # --- Rifle ---
        # NOTE(review): registered under the name 'blaster_hit', which
        # duplicates the blaster's hit sound key — presumably it should be
        # 'rifle_hit'; left unchanged to preserve behavior. TODO confirm.
        rifle_hit_sound = sound_manager.load_sound(
            'blaster_hit',
            get_asset_path('assets', 'soundfx', 'rifle', 'hit.wav'),
            track_count=2)
        texture_manager.load_image(
            get_asset_path('assets', 'projectiles', 'bullet-14px.png'))
        bullet_model = model_manager.load_textured_rectangle(
            'vertex_format_4f', 28., 14., 'bullet-14px', '4f_bullet-14px')
        rifle_bullet_type = projectile_system.register_projectile_template(
            'rifle_projectile', 12., 1., 1, 'bullet-14px', bullet_model,
            14., 14., 10, 750., 50., hit_sound=rifle_hit_sound)
        rifle_begin = sound_manager.load_sound(
            'rifle-reload-begin',
            get_asset_path('assets', 'soundfx', 'rifle', 'reload-begin.wav'),
            track_count=2)
        rifle_end = sound_manager.load_sound(
            'rifle-reload-end',
            get_asset_path('assets', 'soundfx', 'rifle', 'reload-end.wav'),
            track_count=2)
        rifle_fire_sound = sound_manager.load_sound(
            'rifle-shoot',
            get_asset_path('assets', 'soundfx', 'rifle', 'shoot.wav'),
            track_count=2)
        weapon_system.register_weapon_template(
            'ship1_rifle', 'Rifle',
            reload_time=5.0,
            projectile_type=3,
            ammo_count=100,
            rate_of_fire=.5,
            clip_size=8,
            barrel_offsets=[(46., 59.), (-46., 59.)],
            barrel_count=2,
            ammo_type=rifle_bullet_type,
            projectile_width=12.,
            projectile_height=12.,
            accel=10000.,
            reload_begin_sound=rifle_begin,
            reload_end_sound=rifle_end,
            fire_sound=rifle_fire_sound,
            spread=radians(0.),
            shot_count=2,
            time_between_shots=.1,
        )

        # --- Shotgun ---
        shotgun_begin = sound_manager.load_sound(
            'shotgun-reload-begin',
            get_asset_path('assets', 'soundfx', 'shotgun',
                           'reload-begin.wav'))
        shotgun_end = sound_manager.load_sound(
            'shotgun-reload-end',
            get_asset_path('assets', 'soundfx', 'shotgun', 'reload-end.wav'))
        shotgun_fire_sound = sound_manager.load_sound(
            'shotgun-shoot',
            get_asset_path('assets', 'soundfx', 'shotgun', 'shoot.wav'))
        texture_manager.load_image(
            get_asset_path('assets', 'projectiles', 'bullet-6px.png'))
        shotgun_bullet_model = model_manager.load_textured_rectangle(
            'vertex_format_4f', 6., 6., 'bullet-6px', '4f_bullet-6px')
        shotgun_bullet_type = projectile_system.register_projectile_template(
            'shotgun_projectile', 7., 1., 1, 'bullet-6px',
            shotgun_bullet_model, 4.5, 4.5, 2.5, 750., 50., lifespan=2.0,
            hit_sound=rifle_hit_sound)
        weapon_system.register_weapon_template(
            'ship1_shotgun', 'Shotgun',
            reload_time=2.5,
            projectile_type=1,
            ammo_count=100,
            rate_of_fire=.70,
            clip_size=8,
            barrel_offsets=[(46., 59.), (-46., 59.)],
            barrel_count=2,
            ammo_type=shotgun_bullet_type,
            projectile_width=4.,
            projectile_height=4.,
            accel=3000,
            reload_begin_sound=shotgun_begin,
            reload_end_sound=shotgun_end,
            fire_sound=shotgun_fire_sound,
            spread=radians(15.),
            shot_count=5,
            time_between_shots=0.,
        )

    def load_assets(self):
        """Load shared textures/models, allocate collision types, register
        asteroid and explosion templates, wire collision handlers, and
        delegate to load_weapons()/load_ships()."""
        model_manager = self.gameworld.model_manager
        emitter_system = self.ids.emitter
        projectile_system = self.ids.projectiles
        sound_manager = self.gameworld.sound_manager
        asteroid_system = self.ids.asteroids
        physics_system = self.ids.physics
        shield_system = self.ids.shields
        explosion_system = self.ids.explosions
        texture_manager.load_atlas(
            get_asset_path('assets', 'particles', 'particles.atlas'))
        texture_manager.load_image(
            get_asset_path('assets', 'objects', 'asteroid1.png'))
        texture_manager.load_image(
            get_asset_path('assets', 'objects', 'asteroid1-radar.png'))
        texture_manager.load_image(
            get_asset_path('assets', 'particles', 'particle3.png'))
        asteroid_model = model_manager.load_textured_rectangle(
            'vertex_format_4f', 64, 64, 'asteroid1', '4f_asteroid1',
        )
        asteroid_radar_model = model_manager.load_textured_rectangle(
            'vertex_format_4f', 64, 64, 'asteroid1-radar',
            '4f_asteroid1_radar',
        )
        asteroid_collision_type = physics_system.register_collision_type(
            'asteroids')
        ship_collision_type = physics_system.register_collision_type('ships')
        # Projectiles may originate from either asteroids or ships.
        projectile_system.add_origin_collision_type(asteroid_collision_type)
        projectile_system.add_origin_collision_type(ship_collision_type)
        self.load_weapons()
        ship_hit_asteroid = sound_manager.load_sound(
            'ship_hit_asteroid',
            get_asset_path('assets', 'soundfx', 'shiphit.wav'),
            track_count=2)
        asteroid_hit_asteroid = sound_manager.load_sound(
            'asteroid_hit_asteroid',
            get_asset_path('assets', 'soundfx', 'asteroidhitasteroid.wav'),
            track_count=2)
        emitter_system.load_effect(
            get_asset_path('assets', 'vfx', 'asteroidexplosion.kep'))
        emitter_system.load_effect(
            get_asset_path('assets', 'vfx', 'shipexplosion.kep'))
        explosion_system.register_template('ship_explosion', 'shipexplosion',
                                           3.0, 1.5)
        asteroid_system.register_template(
            'asteroid1', asteroid_collision_type, mass=125., radius=30.,
            texture='asteroid1', model_key=asteroid_model, health=15.,
            armor=4., ship_collision_sound=ship_hit_asteroid,
            asteroid_collision_sound=asteroid_hit_asteroid,
            radar_model=asteroid_radar_model,
            radar_texture='asteroid1-radar')
        explosion_system.register_template('asteroid_explosion',
                                           'asteroidexplosion', 1.5, 1.0)
        physics_system.add_collision_handler(
            asteroid_collision_type, ship_collision_type,
            begin_func=asteroid_system.on_collision_begin_asteroid_ship)
        physics_system.add_collision_handler(
            asteroid_collision_type, asteroid_collision_type,
            begin_func=asteroid_system.on_collision_begin_asteroid_asteroid)
        physics_system.add_collision_handler(
            asteroid_collision_type, shield_system.shield_collision_type,
            begin_func=shield_system.on_collision_begin_asteroid_shield)
        physics_system.add_collision_handler(
            shield_system.shield_collision_type,
            shield_system.shield_collision_type,
            begin_func=shield_system.on_collision_begin_shield_shield)
        physics_system.add_collision_handler(
            ship_collision_type, shield_system.shield_collision_type,
            begin_func=shield_system.on_collision_begin_ship_shield)
        projectile_system.add_custom_collision_type(
            shield_system.shield_collision_type,
            shield_system.on_collision_begin_bullet_shield)
        self.load_ships(ship_collision_type)

    def load_enemy_ship(self):
        """Spawn one non-player 'ship1' at a fixed position (debug helper,
        normally disabled in load_level())."""
        ship_system = self.gameworld.system_manager['ship_system']
        ship_system.spawn_ship('ship1', False, (1600, 1600.))

    def clear(self):
        """Remove every entity, flagging the relevant systems with
        is_clearing so their destruction callbacks no-op."""
        camera = self.ids.camera_top
        camera.focus_entity = False
        self.ids.player.current_entity = None
        self.ids.asteroids.is_clearing = True
        self.ids.ship_system.is_clearing = True
        self.is_clearing = True
        self.gameworld.clear_entities()
        self.ids.asteroids.is_clearing = False
        self.ids.ship_system.is_clearing = False
        self.is_clearing = False

    def setup_states(self):
        """Declare the three gameworld states and the systems each one
        adds/removes/pauses/unpauses."""
        self.gameworld.add_state(
            state_name='main',
            systems_added=[
                'player', 'back_stars', 'mid_stars', 'sun1', 'sun2',
                'planet1', 'rotate_renderer', 'shield_renderer', 'planet2',
                'particle_renderer'
            ],
            systems_removed=[
                'grid_camera', 'map_grid', 'radar_renderer', 'world_grid',
                'global_camera', 'global_map_renderer',
                'global_map_renderer2', 'global_map_planet_renderer',
            ],
            systems_paused=[
                'grid_camera', 'map_grid', 'radar_renderer',
                'global_map_renderer', 'global_map_renderer2',
                'global_map_planet_renderer',
            ],
            systems_unpaused=[
                'back_stars', 'mid_stars', 'sun1', 'sun2', 'planet1',
                'planet2', 'emitters', 'particles', 'particle_renderer',
                'steering', 'cymunk_physics', 'rotate_renderer',
                'projectiles', 'projectile_weapons', 'lifespan',
                'combat_stats', 'steering_ai', 'weapon_ai',
            ],
            screenmanager_screen='main',
            on_change_callback=self.switch_to_main)
        self.gameworld.add_state(
            state_name='minimap',
            systems_added=[
                'back_stars', 'mid_stars', 'sun1', 'sun2', 'planet1',
                'planet2', 'particle_renderer', 'rotate_renderer',
                'shield_renderer', 'grid_camera', 'map_grid',
                'radar_renderer'
            ],
            systems_removed=[
                'player', 'world_grid', 'global_camera',
                'global_map_renderer', 'global_map_renderer2',
                'global_map_planet_renderer',
            ],
            systems_paused=[
                'emitters', 'particles', 'steering', 'cymunk_physics',
                'lifespan', 'projectiles', 'projectile_weapons',
                'combat_stats', 'steering_ai', 'weapon_ai',
                'global_map_renderer', 'global_map_renderer2',
                'global_map_planet_renderer',
            ],
            systems_unpaused=[
                'back_stars', 'mid_stars', 'sun1', 'sun2', 'planet1',
                'planet2', 'grid_camera', 'map_grid', 'radar_renderer',
                'rotate_renderer',
            ],
            screenmanager_screen='map')
        self.gameworld.add_state(
            state_name='worldmap',
            systems_added=[
                'global_camera', 'global_map_renderer2',
                'global_map_renderer', 'global_map_planet_renderer',
                'world_grid'
            ],
            systems_removed=[
                'player', 'back_stars', 'mid_stars', 'sun1', 'sun2',
                'planet1', 'planet2', 'particle_renderer', 'rotate_renderer',
                'shield_renderer', 'grid_camera', 'map_grid',
                'radar_renderer',
            ],
            systems_paused=[
                'emitters', 'particles', 'particle_renderer', 'steering',
                'cymunk_physics', 'lifespan', 'projectiles',
                'projectile_weapons', 'combat_stats', 'steering_ai',
                'weapon_ai', 'back_stars', 'mid_stars', 'sun1', 'sun2',
                'planet1', 'planet2', 'grid_camera', 'map_grid',
                'radar_renderer', 'rotate_renderer'
            ],
            systems_unpaused=[
                'global_map_renderer', 'global_map_renderer2',
                'global_map_planet_renderer', 'global_camera'
            ],
            on_change_callback=self.load_global_map,
            screenmanager_screen='jump')

    def switch_to_main(self, current_state, previous_state):
        """State-change callback for 'main': reload the level unless we are
        just coming back from the minimap (same zone, nothing changed)."""
        if previous_state != 'minimap':
            self.clear()
            self.load_level()

    def load_global_map(self, current_state, previous_state):
        """State-change callback for 'worldmap': drop all entities and draw
        the global map."""
        self.clear()
        self.ids.global_map.draw_map()

    def set_state(self, state):
        """Switch the gameworld to the named state."""
        self.gameworld.state = state