def __init__(self, fig, data):
    """Wire up an interactive encoding visualization on *fig*.

    Renders *data* (encodings) into the figure, connects the matplotlib
    pick handler, and tries to decode the encodings back to images with a
    DoomModel loaded from FLAGS.load_from_checkpoint.  If the model cannot
    be loaded, reconstructions fall back to a zeroed uint8 array shaped
    like the original images so the UI still works.

    Args:
        fig: matplotlib figure to draw into.
        data: array of encodings to visualize and decode.
    """
    self.data = data
    self.fig = fig
    # NOTE(review): `fast` is not a parameter of this method — presumably a
    # module-level flag; confirm it is defined before this class is used.
    vi.visualize_encodings(data, grid=(3, 5), skip_every=5, fast=fast,
                           fig=fig, interactive=True)
    # NOTE(review): ', '.join('hold on') joins the *characters* of the
    # string ("h, o, l, d, ...") — kept as-is in case it is intentional.
    plt.subplot(155).set_title(', '.join('hold on'))
    # fig.canvas.mpl_connect('button_press_event', self.on_click)
    fig.canvas.mpl_connect('pick_event', self.on_pick)
    # Load the originals exactly once; the previous code re-loaded them a
    # second time on the checkpoint-failure path.
    ut.print_info('INPUT: %s' % FLAGS.input_path.split('/')[-3])
    self.original_data, _ = inp.get_images(FLAGS.input_path)
    try:
        ut.print_info('Checkpoint: %s' % FLAGS.load_from_checkpoint)
        self.model = dm.DoomModel()
        self.reconstructions = self.model.decode(data)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        ut.print_info("Model could not load from checkpoint %s"
                      % str(sys.exc_info()), color=31)
        # Blank placeholders shaped like the originals keep the UI usable.
        self.reconstructions = np.zeros(self.original_data.shape,
                                        dtype=np.uint8)
def __init__(self, fig, data):
    """Wire up an interactive encoding visualization on *fig*.

    Renders *data* (encodings) into the figure, connects the matplotlib
    pick handler, and tries to decode the encodings back to images with a
    DoomModel loaded from FLAGS.load_from_checkpoint.  If the model cannot
    be loaded, reconstructions fall back to a zeroed uint8 array shaped
    like the original images so the UI still works.

    Args:
        fig: matplotlib figure to draw into.
        data: array of encodings to visualize and decode.
    """
    self.data = data
    self.fig = fig
    # NOTE(review): `fast` is not a parameter of this method — presumably a
    # module-level flag; confirm it is defined before this class is used.
    vi.visualize_encodings(data, grid=(3, 5), skip_every=5, fast=fast,
                           fig=fig, interactive=True)
    # NOTE(review): ', '.join('hold on') joins the *characters* of the
    # string ("h, o, l, d, ...") — kept as-is in case it is intentional.
    plt.subplot(155).set_title(', '.join('hold on'))
    # fig.canvas.mpl_connect('button_press_event', self.on_click)
    fig.canvas.mpl_connect('pick_event', self.on_pick)
    # Load the originals exactly once; the previous code re-loaded them a
    # second time on the checkpoint-failure path.
    ut.print_info('INPUT: %s' % FLAGS.input_path.split('/')[-3])
    self.original_data, _ = inp.get_images(FLAGS.input_path)
    try:
        ut.print_info('Checkpoint: %s' % FLAGS.load_from_checkpoint)
        self.model = dm.DoomModel()
        self.reconstructions = self.model.decode(data)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        ut.print_info("Model could not load from checkpoint %s"
                      % str(sys.exc_info()), color=31)
        # Blank placeholders shaped like the originals keep the UI usable.
        self.reconstructions = np.zeros(self.original_data.shape,
                                        dtype=np.uint8)
def print_reconstructions_along_with_originals():
    """Decode the newest saved encodings and print them beside the originals.

    Debug utility: points FLAGS at a fixed checkpoint, loads the most
    recently saved encodings file, decodes the first 20 rows, and shows
    the decoded images side by side with the matching source images.
    """
    FLAGS.load_from_checkpoint = './tmp/doom_bs__act|sigmoid__bs|20__h|500|5|500__init|na__inp|cbd4__lr|0.0004__opt|AO'
    model = model_class()
    encoding_files = ut.list_encodings(FLAGS.save_path)
    newest = encoding_files[-1]
    print(newest)
    sample_count = 20
    encodings = np.loadtxt(newest)[:sample_count]
    decoded = model.decode(encodings)
    # NOTE(review): `input` here is presumably a project module (it shadows
    # the builtin) — other code in this file uses `inp`; verify.
    originals, _ = input.get_images(FLAGS.input_path, at_most=sample_count)
    ut.print_side_by_side(originals, decoded)
def fetch_datasets(self, activation_func_bounds):
    """Load, filter, and rescale the training images.

    Images are read from FLAGS.input_path, run through the filter-batch
    hack, and rescaled into [activation_func_bounds.min,
    activation_func_bounds.max].  Also records self._image_shape and sets
    self.epoch_size / self.test_size (both are the per-epoch batch count).

    Returns:
        (data, filters) after filtering and rescaling.
    """
    images, image_filters = inp.get_images(FLAGS.input_path)
    assert len(image_filters) == len(images)
    images, image_filters = self.bloody_hack_filterbatches(images,
                                                           image_filters)
    ut.print_info('shapes. data, filters: %s'
                  % str((images.shape, image_filters.shape)))
    images = inp.rescale_ds(images,
                            activation_func_bounds.min,
                            activation_func_bounds.max)
    self._image_shape = inp.get_image_shape(FLAGS.input_path)
    if DEV:
        images = images[:300]
    # NOTE(review): epoch_size and test_size are computed identically —
    # presumably intentional, but worth confirming.
    batches_per_epoch = math.ceil(len(images) / FLAGS.batch_size)
    self.epoch_size = batches_per_epoch
    self.test_size = batches_per_epoch
    return images, image_filters
def fetch_datasets(self, activation_func_bounds):
    """Fetch the image dataset and prepare it for training.

    Reads images (and their filters) from FLAGS.input_path, applies the
    filter-batch hack, and rescales pixel values into the activation
    function's range.  Side effects: sets self._image_shape,
    self.epoch_size and self.test_size (both the same batch count).

    Returns:
        (data, filters) after filtering and rescaling.
    """
    data, data_filters = inp.get_images(FLAGS.input_path)
    assert len(data_filters) == len(data)
    data, data_filters = self.bloody_hack_filterbatches(data, data_filters)
    ut.print_info('shapes. data, filters: %s'
                  % str((data.shape, data_filters.shape)))
    lo, hi = activation_func_bounds.min, activation_func_bounds.max
    data = inp.rescale_ds(data, lo, hi)
    self._image_shape = inp.get_image_shape(FLAGS.input_path)
    if DEV:
        data = data[:300]
    # NOTE(review): epoch_size and test_size are computed identically —
    # presumably intentional, but worth confirming.
    self.epoch_size = math.ceil(len(data) / FLAGS.batch_size)
    self.test_size = math.ceil(len(data) / FLAGS.batch_size)
    return data, data_filters
def fetch_datasets():
    """Load images, rescale them to sigmoid bounds, and split train/test.

    With part = 1.0 the whole dataset becomes the training split and the
    test split is empty.

    Returns:
        (train_data, test_data) slices of the rescaled image array.
    """
    activation_func_bounds = act.sigmoid
    original_data, filters = inp.get_images(source)
    original_data = inp.rescale_ds(original_data,
                                   activation_func_bounds.min,
                                   activation_func_bounds.max)
    part = 1.
    # BUG FIX: len(original_data) * part is a float, and float slice
    # indices raise TypeError (lists and modern numpy alike); truncate to
    # an int before slicing.
    split = int(len(original_data) * part)
    return original_data[:split], original_data[split:]