def __init__(self, logdir, model, cache_timeout):
    self._reader = LogReader(logdir)
    if model:
        self._reader.model = model
        self.model_name = os.path.basename(model)

    # Use a memory cache to reduce the disk-reading frequency.
    cache = MemCache(timeout=cache_timeout)
    self._cache = lib.cache_get(cache)
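
# MemCache and lib.cache_get are internal to the surrounding project and are
# not shown here. As a rough sketch only (all names below are assumptions,
# not the real implementation), a timeout-based in-memory cache can be as
# simple as:
import time

class SimpleMemCache(object):
    def __init__(self, timeout):
        self._timeout = timeout  # seconds before an entry expires
        self._data = {}          # key -> (value, insertion timestamp)

    def set(self, key, value):
        self._data[key] = (value, time.time())

    def get(self, key):
        # Return None for a missing or expired entry so the caller falls
        # back to reading from disk.
        entry = self._data.get(key)
        if entry is None:
            return None
        value, stamp = entry
        if time.time() - stamp > self._timeout:
            del self._data[key]
            return None
        return value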

def setUp(self):
    dir = "./tmp/mock"
    writer = LogWriter(dir, sync_cycle=30)

    add_scalar(writer, "train", "layer/scalar0/min", 1000, 1)
    add_scalar(writer, "test", "layer/scalar0/min", 1000, 10)
    add_scalar(writer, "valid", "layer/scalar0/min", 1000, 10)

    add_scalar(writer, "train", "layer/scalar0/max", 1000, 1)
    add_scalar(writer, "test", "layer/scalar0/max", 1000, 10)
    add_scalar(writer, "valid", "layer/scalar0/max", 1000, 10)

    add_image(writer, "train", "layer/image0", 7, 10, 1)
    add_image(writer, "test", "layer/image0", 7, 10, 3)

    add_image(writer, "train", "layer/image1", 7, 10, 1, shape=[30, 30, 2])
    add_image(writer, "test", "layer/image1", 7, 10, 1, shape=[30, 30, 2])

    add_histogram(writer, "train", "layer/histogram0", 100)
    add_histogram(writer, "test", "layer/histogram0", 100)

    self.reader = LogReader(dir)
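
# The add_scalar / add_image / add_histogram helpers used in setUp() are not
# shown in this snippet. A plausible sketch of add_scalar, assuming the
# signature (writer, mode, tag, num_steps, skip) inferred from the calls
# above and the LogWriter API used elsewhere in this file:
import random

def add_scalar(writer, mode, tag, num_steps, skip):
    with writer.mode(mode) as my_writer:
        scalar = my_writer.scalar(tag)
        for step in range(num_steps):
            if step % skip == 0:
                scalar.add_record(step, random.random())  # dummy value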

class StorageTest(unittest.TestCase):
    def setUp(self):
        self.dir = "./tmp/storage_test"
        self.writer = LogWriter(self.dir, sync_cycle=1).as_mode("train")

    def test_scalar(self):
        print('test write')
        scalar = self.writer.scalar("model/scalar/min")
        # scalar.set_caption("model/scalar/min")
        for i in range(10):
            scalar.add_record(i, float(i))

        print('test read')
        self.reader = LogReader(self.dir)
        with self.reader.mode("train") as reader:
            scalar = reader.scalar("model/scalar/min")
            self.assertEqual(scalar.caption(), "train")
            records = scalar.records()
            ids = scalar.ids()
            self.assertTrue(
                np.equal(records, [float(i) for i in range(10 - 1)]).all())
            self.assertTrue(
                np.equal(ids, [float(i) for i in range(10)]).all())
            print('records', records)
            print('ids', ids)

    def test_image(self):
        tag = "layer1/layer2/image0"
        image_writer = self.writer.image(tag, 10, 1)
        num_passes = 10
        num_samples = 100
        shape = [10, 10, 3]

        for pass_ in range(num_passes):
            image_writer.start_sampling()
            for ins in range(num_samples):
                data = np.random.random(shape) * 256
                data = data.flatten()
                image_writer.add_sample(shape, list(data))
            image_writer.finish_sampling()

        self.reader = LogReader(self.dir)
        with self.reader.mode("train") as reader:
            image_reader = reader.image(tag)
            self.assertEqual(image_reader.caption(), tag)
            self.assertEqual(image_reader.num_records(), num_passes)

            image_record = image_reader.record(0, 1)
            self.assertTrue(np.equal(image_record.shape(), shape).all())
            data = image_record.data()
            self.assertEqual(len(data), np.prod(shape))

            image_tags = reader.tags("image")
            self.assertTrue(image_tags)
            self.assertEqual(len(image_tags), 1)

    def test_check_image(self):
        '''
        Check that the storage keeps image data consistent.
        '''
        print('check image')
        tag = "layer1/check/image1"
        image_writer = self.writer.image(tag, 10)

        image = Image.open("./dog.jpg")
        shape = [image.size[1], image.size[0], 3]
        origin_data = np.array(image.getdata()).flatten()

        self.reader = LogReader(self.dir)
        with self.reader.mode("train") as reader:
            image_writer.start_sampling()
            image_writer.add_sample(shape, list(origin_data))
            image_writer.finish_sampling()

            # Read the record back and check whether the original image is
            # reproduced.
            image_reader = reader.image(tag)
            image_record = image_reader.record(0, 0)
            data = image_record.data()
            shape = image_record.shape()

            PIL_image_shape = (shape[0] * shape[1], shape[2])
            data = np.array(data, dtype='uint8').reshape(PIL_image_shape)
            print('origin', origin_data.flatten())
            print('data', data.flatten())
            image = Image.fromarray(data.reshape(shape))
            # Manually checked the image; nothing is wrong with the image
            # storage.
            # image.show()

    def test_with_syntax(self):
        with self.writer.mode("train") as writer:
            scalar = writer.scalar("model/scalar/average")
            for i in range(10):
                scalar.add_record(i, float(i))

        self.reader = LogReader(self.dir)
        with self.reader.mode("train") as reader:
            scalar = reader.scalar("model/scalar/average")
            self.assertEqual(scalar.caption(), "train")

    def test_modes(self):
        store = LogWriter(self.dir, sync_cycle=1)

        scalars = []
        for i in range(10):
            with store.mode("mode-%d" % i) as writer:
                scalar = writer.scalar("add/scalar0")
                scalars.append(scalar)

        for scalar in scalars[:-1]:
            for i in range(10):
                scalar.add_record(i, float(i))
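
# Standard unittest entry point; running this file directly assumes the usual
# module-level imports (unittest, numpy as np, PIL.Image, and the
# LogReader/LogWriter classes from visualdl).
if __name__ == '__main__':
    unittest.main()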

def main(args):
    config = Config(args.config)
    cfg = config(vars(args), mode=['train', 'init'])
    mdname = cfg['train']['model']

    vdl_dir = os.path.join(args.save_dir, cfg['init']['vdl_dir'])
    vdl_dir = os.path.join(vdl_dir, mdname)
    vdl_name = 'vdlrecords.' + mdname + '.log'
    vdl_log_dir = os.path.join(vdl_dir, vdl_name)

    fil_list = os.path.join(cfg['train']['root_path'],
                            cfg['train']['train_list'])
    mean = cfg['train']['mean']
    std = cfg['train']['std']
    custom = cfg['train']['custom']['type']
    if custom:
        print('use custom data')
        mean = cfg['train']['custom']['mean']
        std = cfg['train']['custom']['std']

    # Image enhancement.
    trfm = imgehance(size=cfg['train']['sz'])

    # Load the dataset.
    ds = SDataSet(path=cfg['train']['root_path'], fl=fil_list,
                  sz=cfg['train']['sz'])
    train_ds = SubSet(ds, mode='train', mean=mean, std=std, transform=trfm)
    val_ds = SubSet(ds, mode='valid', mean=mean, std=std, transform=None)

    # Select the model.
    net = modelset(mode=mdname, num_classes=cfg['init']['num_classes'])

    # Load the model.
    input = InputSpec([None, 3, 64, 64], 'float32', 'image')
    label = InputSpec([None, 1, 64, 64], 'int64', 'label')
    model = paddle.Model(net, input, label)
    # print(model.summary((-1, 3, 64, 64)))

    iters = 0
    epochs = 0
    if args.pretrain:
        # Resume from a checkpoint and pick up the step/epoch counters from
        # the existing VisualDL log.
        model.load(path=os.path.join(args.save_dir, mdname) + '/' + str(mdname))
        vdl_reader = LogReader(file_path=vdl_log_dir)
        iters = vdl_reader.get_data('scalar', 'train%miou')[-1].id + 1
        epochs = vdl_reader.get_data('scalar', 'eval%miou')[-1].id + 1
    elif os.path.exists(vdl_dir):
        shutil.rmtree(vdl_dir)
    writer = LogWriter(logdir=vdl_dir, file_name=vdl_name)

    opt = paddle.optimizer.Momentum(learning_rate=cfg['train']['lr'],
                                    parameters=model.parameters())
    model.prepare(
        optimizer=opt,
        loss=Loss(),
        metrics=Miou(num_classes=cfg['init']['num_classes'], name='miou'),
    )
    model.fit(
        train_ds,
        val_ds,
        epochs=cfg['train']['epoch'],
        batch_size=cfg['train']['batchsz'],
        log_freq=1,
        save_freq=cfg['train']['save_freq'],
        save_dir=os.path.join(args.save_dir, mdname) + '/' + str(mdname),
        verbose=1,
        num_workers=cfg['train']['num_workers'],
        callbacks=VDL(write=writer, iters=iters, epochs=epochs),
        # callbacks=VDL(logdir=vdl_dir)
    )
    print('save model in {}'.format(os.path.join(args.save_dir, mdname)))
    model.save(path=os.path.join(args.save_dir, mdname) + '/' + str(mdname))
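
# A plausible command-line driver for main() above; the flag names are
# assumptions inferred from the attributes main() reads (args.config,
# args.save_dir, args.pretrain), not the script's actual interface.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train a segmentation model '
                                                 'and log metrics to VisualDL')
    parser.add_argument('--config', required=True,
                        help='path to the config file read by Config()')
    parser.add_argument('--save_dir', default='./output',
                        help='directory for checkpoints and VisualDL logs')
    parser.add_argument('--pretrain', action='store_true',
                        help='resume from a saved model and its VisualDL log')
    main(parser.parse_args())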

from visualdl import LogReader

log_reader = LogReader("./log")

print("Data associated with the train loss:\n")
with log_reader.mode("train") as logger:
    text_reader = logger.scalar("scalars/train_loss")
    print("Train loss =", text_reader.records())
    print("Ids =", text_reader.ids())
    print("Timestamps =", text_reader.timestamps())

print("\nData associated with the test loss:\n")
with log_reader.mode("test") as logger:
    text_reader = logger.scalar("scalars/test_loss")
    print("Test losses =", text_reader.records())
    print("Ids =", text_reader.ids())
    print("Timestamps =", text_reader.timestamps())

print("\nData associated with the test accuracy:\n")
with log_reader.mode("test") as logger:
    text_reader = logger.scalar("scalars/test_accuracy")
    print("Test accuracy =", text_reader.records())
    print("Ids =", text_reader.ids())
    print("Timestamps =", text_reader.timestamps())
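
# For the reads above to return data, a writer must first have logged the
# same modes and tags. A minimal write-side sketch using the LogWriter API
# shown earlier in this file (the loss values are dummies):
from visualdl import LogWriter

log_writer = LogWriter("./log", sync_cycle=10)
with log_writer.mode("train") as logger:
    train_loss = logger.scalar("scalars/train_loss")
    for step in range(100):
        train_loss.add_record(step, 0.99 ** step)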