def write_processed_image(processed_image, input_filename):
    """Save a processed tile, mirroring the input path into the output tree.

    The output path is derived by swapping INPUT_TILES_FOLDER for
    OUTPUT_TILES_FOLDER in *input_filename*; intermediate directories are
    created as needed.
    """
    output_filename = input_filename.replace(
        INPUT_TILES_FOLDER, OUTPUT_TILES_FOLDER)
    folder, _ = os.path.split(output_filename)
    mkdir_p(folder)
    # Fix: open in binary mode. The original 'w+' used text mode, which
    # corrupts PNG bytes on platforms that translate newlines, and the
    # '+' (read) flag was never used.
    with open(output_filename, 'wb') as f:
        misc.imsave(f, processed_image)
def generate_tiles(model_type, model_file): ModelClass = all_models[model_type] model = ModelClass(None, None, model_file=model_file) folder = os.path.join(GENERATED_TILES_FOLDER, '{}'.format(time.time())) mkdir_p(folder) i = 0 while True: try: print "Generating {}...".format(i) filename = os.path.join(folder, '{}.png'.format(i)) misc.imsave(filename, model.generate_image()) i += 1 except KeyboardInterrupt: exit(0)
def forwardprop(model_file, zoom): input_files = glob.glob( os.path.join(BACKPROPS_FOLDER, str(zoom), '*', '*.txt')) model = load_model(model_file) generator = model.layers[0] generator.compile(loss='mean_squared_error', optimizer=Adagrad(lr=1)) for filname in input_files: print "Generating..." output_filename = filname.replace(BACKPROPS_FOLDER, FORWARDPROPS_FOLDER) + '.png' folder, _ = os.path.split(output_filename) mkdir_p(folder) inp = np.loadtxt(filname) image = forwardprop_single_image(generator, inp) image = ((image + 1) * 128).astype('uint8') misc.imsave(output_filename, image)
def backprop(model_file, zoom):
    """Invert the frozen generator for every output tile at the given
    zoom level, saving the recovered latent vectors as text files."""
    tile_files = glob.glob(
        os.path.join(OUTPUT_TILES_FOLDER, str(zoom), '*', '*.png'))
    model = load_model(model_file)
    generator = model.layers[0]
    # Freeze the generator (and each layer) so only the input vector
    # is optimized during inversion.
    generator.trainable = False
    for layer in generator.layers:
        layer.trainable = False
    for tile_file in tile_files:
        out_path = tile_file.replace(
            OUTPUT_TILES_FOLDER, BACKPROPS_FOLDER) + '.txt'
        out_dir, _ = os.path.split(out_path)
        mkdir_p(out_dir)
        # Scale 8-bit pixels into the generator's [-1, 1] range.
        image = misc.imread(tile_file, mode='RGB') / 127.5 - 1
        np.savetxt(out_path, backprop_single_image(generator, image))
def random(model_file):
    """Latent-space arithmetic demo.

    Renders (api_key water) - (no_api_key water) + (no_api_key trees)
    for every pair of water vectors, saving each result as a PNG.
    """
    model = load_model(model_file)
    generator = model.layers[0]
    generator.trainable = False
    for layer in generator.layers:
        layer.trainable = False

    def load_vectors(*parts):
        # Load every saved latent vector matching the glob under
        # BACKPROPS_FOLDER.
        pattern = os.path.join(BACKPROPS_FOLDER, *parts)
        return [np.loadtxt(path) for path in glob.glob(pattern)]

    api_key_water = load_vectors('api_key', 'water', '*.txt')
    no_api_key_water = load_vectors('no_api_key', 'water', '*.txt')
    no_api_key_trees = np.loadtxt(
        os.path.join(BACKPROPS_FOLDER, 'no_api_key', 'trees', '3391.png.txt'))
    out_dir = os.path.join(RANDOM_FOLDER, '{}'.format(time.time()))
    mkdir_p(out_dir)
    for with_key in api_key_water:
        for without_key in no_api_key_water:
            analogy = with_key - without_key + no_api_key_trees
            image = forwardprop_single_image(generator, analogy)
            misc.imsave(
                os.path.join(out_dir, 'land-{}.png'.format(time.time())),
                ((image + 1) * 128).astype('uint8'))
def train(self):
    """Adversarial training loop: alternately train the discriminator on
    generated vs. real batches until its combined loss drops below a
    threshold, then train the generator against the frozen copy, saving a
    sample image per batch and model checkpoints periodically.
    """
    i = 0  # global batch counter across all epochs
    model_name = "best_dcgan-{}".format(time.time())
    folder = os.path.join(GENERATED_TILES_FOLDER, model_name)
    mkdir_p(folder)
    for epoch in range(self.EPOCHS):
        logging.info("=== Epoch {}".format(epoch))
        for batch_base in range(0, len(self.image_loader), self.MAX_BATCH_SIZE):
            i += 1
            # Last batch of the epoch may be smaller than MAX_BATCH_SIZE.
            batch_size = min(
                len(self.image_loader) - batch_base, self.MAX_BATCH_SIZE)
            logging.info("Training {} images".format(batch_size))
            g_loss = float('inf')  # discriminator loss on generated images
            r_loss = float('inf')  # discriminator loss on real images
            # Train the discriminator until its combined loss is low
            # enough, always attacking whichever side is currently worse.
            while g_loss + r_loss > 0.8:
                if g_loss >= r_loss:
                    # Generated images are labeled 0.0 (fake).
                    generated_images_batch_size = batch_size
                    generated_images_X = self._generate_batch(
                        generated_images_batch_size)
                    generated_images_Y = np.array(
                        [0.0] * generated_images_batch_size)
                    gen_loss = self.trainable_discriminator.train_on_batch(
                        generated_images_X, generated_images_Y)
                    logging.info(
                        "Discriminator gen. loss: {}".format(gen_loss))
                    g_loss = gen_loss[0]
                else:
                    # Real images are labeled 1.0.
                    real_images_batch_size = batch_size
                    real_images_X = self._load_batch(
                        real_images_batch_size)
                    real_images_Y = np.array([1.0] * real_images_batch_size)
                    real_loss = self.trainable_discriminator.train_on_batch(
                        real_images_X, real_images_Y)
                    logging.info(
                        "Discriminator real loss: {}".format(real_loss))
                    r_loss = real_loss[0]
            # Sync the freshly trained discriminator weights into the
            # frozen copy used inside the combined model.
            logging.info("Copying weights...")
            self._copy_weights()
            generator_loss = float('inf')
            # Train the generator (labels 1.0: it tries to pass as real).
            # The very first batch uses a looser threshold (15.0) since
            # the generator starts from random weights.
            while generator_loss > (15.0 if i == 1 else 4.0):
                generator_batch_size = batch_size
                generator_X = self._generate_noise(generator_batch_size)
                generator_Y = np.array([1.0] * generator_batch_size)
                g_loss = self.model.train_on_batch(generator_X, generator_Y)
                generator_loss = g_loss[0]
                logging.info("Generator loss: {}".format(g_loss))
            # Save a progress sample: one numbered frame plus a fixed
            # "current" file that always shows the latest output.
            logging.info("Generating image...")
            filename = os.path.join(folder, '{:06d}.png'.format(i))
            image = self.generate_image()
            misc.imsave(filename, image)
            misc.imsave(os.path.join(folder, '000000__current.png'), image)
            # Periodic checkpoint every 1000 batches.
            if i % 1000 == 0:
                logging.info("=== Writing model to disk")
                self.model.save("models/" + model_name + "-{}.h5".format(i))
    # Final save once all epochs complete.
    logging.info("=== Writing model to disk")
    self.model.save(model_name)