import os

import aim


def init_exp_logger(repo=None, experiment_name=None, flush_frequency=1):
    # `log_path` and `logger` are expected to be provided by the surrounding module.
    if repo is None:
        repo = os.path.join(log_path(), "./.aim")
    if not os.path.exists(repo):
        logger.info('{} dir does not exist, creating {}', repo, repo)
        # Initialize an Aim repository one level above the .aim directory.
        os.system("cd " + os.path.join(repo, "../") + " && aim init")
    aim_logger = aim.Session(repo=repo, experiment=experiment_name, flush_frequency=flush_frequency)
    aim_logger.experiment_name = experiment_name
    return aim_logger
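A minimal usage sketch for `init_exp_logger`; the repo path, experiment name, and hyperparameter values below are assumptions for illustration, and only `aim.Session` methods already used in these examples (`set_params`, `track`) are exercised.

# Hypothetical usage of init_exp_logger (paths and names are assumptions).
sess = init_exp_logger(repo='/tmp/demo/.aim', experiment_name='demo_experiment', flush_frequency=5)
sess.set_params({'lr': 1e-3, 'batch_size': 32}, name='hparams')
for step in range(10):
    sess.track(step * 0.1, name='loss', subset='train')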
def __init__(
    self,
    name = 'default',
    results_dir = 'results',
    models_dir = 'models',
    base_dir = './',
    image_size = 128,
    network_capacity = 16,
    fmap_max = 512,
    transparent = False,
    batch_size = 4,
    mixed_prob = 0.9,
    gradient_accumulate_every = 1,
    lr = 2e-4,
    lr_mlp = 1.,
    ttur_mult = 2,
    rel_disc_loss = False,
    num_workers = None,
    save_every = 1000,
    evaluate_every = 1000,
    num_image_tiles = 8,
    trunc_psi = 0.6,
    fp16 = False,
    cl_reg = False,
    fq_layers = [],
    fq_dict_size = 256,
    attn_layers = [],
    no_const = False,
    aug_prob = 0.,
    aug_types = ['translation', 'cutout'],
    top_k_training = False,
    generator_top_k_gamma = 0.99,
    generator_top_k_frac = 0.5,
    dataset_aug_prob = 0.,
    calculate_fid_every = None,
    is_ddp = False,
    rank = 0,
    world_size = 1,
    log = False,
    *args,
    **kwargs
):
    self.GAN_params = [args, kwargs]
    self.GAN = None

    self.name = name

    base_dir = Path(base_dir)
    self.base_dir = base_dir
    self.results_dir = base_dir / results_dir
    self.models_dir = base_dir / models_dir
    print(self.results_dir, self.models_dir)
    self.config_path = self.models_dir / name / '.config.json'

    assert log2(image_size).is_integer(), 'image size must be a power of 2 (64, 128, 256, 512, 1024)'
    self.image_size = image_size
    self.network_capacity = network_capacity
    self.fmap_max = fmap_max
    self.transparent = transparent

    self.fq_layers = cast_list(fq_layers)
    self.fq_dict_size = fq_dict_size
    self.has_fq = len(self.fq_layers) > 0

    self.attn_layers = cast_list(attn_layers)
    self.no_const = no_const
    self.aug_prob = aug_prob
    self.aug_types = aug_types

    self.lr = lr
    self.lr_mlp = lr_mlp
    self.ttur_mult = ttur_mult
    self.rel_disc_loss = rel_disc_loss
    self.batch_size = batch_size
    self.num_workers = num_workers
    self.mixed_prob = mixed_prob

    self.num_image_tiles = num_image_tiles
    self.evaluate_every = evaluate_every
    self.save_every = save_every
    self.steps = 0

    self.av = None
    self.trunc_psi = trunc_psi

    self.pl_mean = None

    self.gradient_accumulate_every = gradient_accumulate_every

    assert not fp16 or fp16 and APEX_AVAILABLE, 'Apex is not available for you to use mixed precision training'
    self.fp16 = fp16

    self.cl_reg = cl_reg

    self.d_loss = 0
    self.g_loss = 0
    self.q_loss = None
    self.last_gp_loss = None
    self.last_cr_loss = None
    self.last_fid = None

    self.pl_length_ma = EMA(0.99)
    self.init_folders()

    self.loader = None
    self.dataset_aug_prob = dataset_aug_prob

    self.calculate_fid_every = calculate_fid_every

    self.top_k_training = top_k_training
    self.generator_top_k_gamma = generator_top_k_gamma
    self.generator_top_k_frac = generator_top_k_frac

    assert not (is_ddp and cl_reg), 'Contrastive loss regularization does not work well with multi GPUs yet'
    self.is_ddp = is_ddp
    self.is_main = rank == 0
    self.rank = rank
    self.world_size = world_size

    self.logger = aim.Session(experiment=name) if log else None
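The class this `__init__` belongs to is not shown here; the sketch below assumes a trainer class named `Trainer` purely for illustration and passes only arguments that appear in the signature above. With `log=True`, an `aim.Session(experiment=name)` is attached as `self.logger`.

# Hypothetical instantiation (the class name `Trainer` is an assumption).
trainer = Trainer(
    name='face_gan',              # also used as the Aim experiment name
    base_dir='./runs',
    image_size=128,               # must be a power of 2
    batch_size=4,
    gradient_accumulate_every=4,
    log=True,                     # attaches aim.Session(experiment='face_gan') as trainer.logger
)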
import aim
import random
import math

epochs = 4
steps = 30
c_step = 100

sess = aim.Session(experiment='test_epoch_alignment_x', flush_frequency=10)

sess.set_params({
    'name': 'Dataset name',
    'version': 'Dataset version',
}, name='dataset')

sess.set_params({
    'epochs': epochs,
    'steps': steps,
    'c_step': c_step,
}, name='hparams')

sess.set_params({
    'foo': random.random() * 100,
    'bar': random.random() * 100,
    'baz': random.random() * 100,
    'cluster': int(random.random() * 3),
    'nested': {
        'arr': ['aa', 'bb', 'cc'],
        'obj': {
            # The original snippet is truncated here; the structure is closed so the call stays valid.
        },
    },
})
import aim
import math

sess = aim.Session(experiment='test_params')

sess.set_params({
    'num_epochs': 10,
    'fc_units': 128,
}, name='hparams')

sess.set_params({
    'name': 'Dataset name',
    'version': 'Dataset version',
}, name='dataset')

sess.set_params({
    'foo': 'bar',
})

sess.set_params({
    'inf': float('inf'),
    'inf_in_nested_obj': (1, 2, 3, {
        'inf': math.inf,
    }),
})
import random
import os

import aim

exp1_run1 = aim.Session(experiment='test_metrics', flush_frequency=10)
exp1_run2 = aim.Session(experiment='test_metrics', flush_frequency=10)
exp2 = aim.Session(experiment='test_metrics_2', flush_frequency=10)

print(os.getpid(), exp1_run1.run_hash, exp1_run2.run_hash, exp2.run_hash)

exp1_run1.set_params({
    'foo': random.random() * 100,
    'bar': random.random() * 100,
    'baz': random.random() * 100,
})
exp1_run2.set_params({
    'foo': random.random() * 100,
    'bar': random.random() * 100,
    'baz': random.random() * 100,
})
exp2.set_params({
    'foo': random.random() * 100,
    'bar': random.random() * 100,
    'baz': random.random() * 100,
})

for i in range(100, 200):
    # Experiment 1, run 1
    exp1_run1.track(i, name='metric_2')
import aim
import random
import os

exp1_run1 = aim.Session(experiment='test_const', flush_frequency=10)

exp1_run1.set_params({
    'foo': random.random() * 100,
    'bar': random.random() * 100,
    'baz': random.random() * 100,
})

for i in range(100, 200):
    exp1_run1.track(3.45, name='const3')
import random
import time
import os

import aim

sess = aim.Session(experiment='TEST_METRICS')
sess.set_params({'key': random.random()})

print(os.getpid())

for i in range(100, 4000):
    print(i, i * 2, i * 3)
    sess.track(i, name='metric')
    sess.track(i * 2, name='metric', subset='train', sub=12)
    sess.track(i * 3, name='metric', subset='test', aab=22)
    time.sleep(0.1)
import random
import time
import os

import aim

sess = aim.Session(experiment='TEST_METRICS', flush_frequency=10)
sess2 = aim.Session(experiment='TEST_METRICS_2', flush_frequency=30)

sess.set_params({'key': random.random()})

print(os.getpid())

for i in range(100, 200):
    sess.track(i, name='metric')
    sess.track(i * 2, name='metric', subset='train', foo='baz')
    sess.track(i * 3, name='metric', subset='test', bar='baz')
    sess2.track(i, name='metric', subset='train', bar='baz')
    time.sleep(1)
import aim
import math

sess = aim.Session(experiment='test_floats')

sess.set_params({
    'num_epochs': 10,
    'fc_units': 128,
}, name='hparams')

sess.set_params({
    'name': 'Dataset name',
    'version': 'Dataset version',
}, name='dataset')

sess.set_params({
    'foo': 'bar',
})

sess.set_params({
    'nan_x': 'NaN',
    'inf_x': 'Infinity',
    'nan': float('nan'),
    'inf': float('inf'),
    'inf_in_nested_obj': (1, 2, 3, {
        'inf': math.inf,
    }),
    'nan_in_nested_obj': (1, 2, 3, {
        'nan': math.nan,
    }),
})
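NaN and Infinity are not valid JSON numbers, so parameter dicts like the one above may need sanitizing before being serialized elsewhere. Below is a small hypothetical helper (not part of Aim) that replaces non-finite floats with string placeholders; it is a sketch, assuming such a substitution is acceptable for whatever consumes the params.

import math

def sanitize_floats(value):
    # Hypothetical helper: recursively replace non-finite floats with string
    # placeholders so the params structure stays JSON-serializable.
    if isinstance(value, float) and not math.isfinite(value):
        if math.isnan(value):
            return 'NaN'
        return 'Infinity' if value > 0 else '-Infinity'
    if isinstance(value, dict):
        return {k: sanitize_floats(v) for k, v in value.items()}
    if isinstance(value, (list, tuple)):
        return type(value)(sanitize_floats(v) for v in value)
    return value

# Example: sanitize before logging.
# sess.set_params(sanitize_floats({'nan': float('nan'), 'inf': math.inf}), name='safe_params')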
import random

import aim

sess = aim.Session(experiment='test_2_metrics')
# sess.set_params({'key': random.random()})

for _ in range(100):
    sess.track(random.random(), name='metric')
import aim

sess = aim.Session(experiment='test_metrics_context', flush_frequency=10)

sess.set_params({
    'num_epochs': 5,
    'lr': 10,
}, name='hparams')

for e in range(5):
    for i in range(50):
        sess.track(i, name='loss', epoch=e, subset='train', subtask='lm')
        sess.track(i, name='acc', epoch=e, subset='train', subtask='lm')
        if i % 10 == 0:
            sess.track(i, name='loss', epoch=e, subset='val', subtask='lm')
            sess.track(i, name='acc', epoch=e, subset='val', subtask='lm')

for e in range(5):
    for i in range(50):
        sess.track(i, name='loss', epoch=e, subset='train', subtask='nmt')
        sess.track(i, name='acc', epoch=e, subset='train', subtask='nmt')
        if i % 10 == 0:
            sess.track(i, name='loss', epoch=e, subset='val', subtask='nmt')
            sess.track(i, name='acc', epoch=e, subset='val', subtask='nmt')
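The two loops above differ only in the `subtask` context value passed to `track`. A small hypothetical wrapper (not part of Aim) can remove the repetition while using only the `sess.track` call shown above.

# Hypothetical helper to avoid repeating context kwargs on every track call.
def track_subtask(sess, subtask, num_epochs=5, num_steps=50):
    for e in range(num_epochs):
        for i in range(num_steps):
            sess.track(i, name='loss', epoch=e, subset='train', subtask=subtask)
            sess.track(i, name='acc', epoch=e, subset='train', subtask=subtask)
            if i % 10 == 0:
                sess.track(i, name='loss', epoch=e, subset='val', subtask=subtask)
                sess.track(i, name='acc', epoch=e, subset='val', subtask=subtask)

# track_subtask(sess, 'lm')
# track_subtask(sess, 'nmt')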
import aim
import random
import math
import time

foo = 1
bar = 1
seed = 1003
epochs = 4
steps = 30
k = 2.3

sess = aim.Session(experiment='test_system', system_tracking_interval=2)

sess.set_params({
    'epochs': epochs,
    'steps': steps,
    'k': k,
    'foo': foo,
    'bar': bar,
    'seed': seed,
}, name='hparams')

# for e in range(epochs):
#     for i in range(steps):
#         sess.track(k, name='agg_metric', epoch=e, subset='train')
#         sess.track(k, name='agg_metric', epoch=e, subset='val')