def main_flags():
    # Data & model config
    flags.DEFINE_string('data_config', 'datasets/multid_config.py',
                        'Path to a data config file.')
    flags.DEFINE_string('model_config', 'models/genesis_config.py',
                        'Path to a model config file.')
    # Logging config
    flags.DEFINE_string('results_dir', 'checkpoints',
                        'Top directory for all experimental results.')
    flags.DEFINE_string('run_name', 'test',
                        'Name of this job and name of results folder.')
    flags.DEFINE_integer('report_loss_every', 1000,
                         'Number of iterations between reporting minibatch loss.')
    flags.DEFINE_integer('run_validation_every', 10000,
                         'How many equally spaced validation runs to do.')
    flags.DEFINE_integer('num_checkpoints', 40,
                         'How many equally spaced model checkpoints to save.')
    flags.DEFINE_boolean('resume', False, 'Tries to resume a job if True.')
    flags.DEFINE_boolean('log_grads_and_weights', False,
                         'Log gradient and weight histograms - storage intensive!')
    flags.DEFINE_boolean('log_distributions', False,
                         'Log mu and sigma of posterior and prior distributions.')
    # Optimisation config
    flags.DEFINE_integer('train_iter', 2000000, 'Number of training iterations.')
    flags.DEFINE_integer('batch_size', 32, 'Mini-batch size.')
    flags.DEFINE_string('optimiser', 'adam', 'Optimiser for updating weights.')
    flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.')
    flags.DEFINE_integer('N_eval', 10000, 'Number of samples to run evaluation on.')
    # Loss config
    flags.DEFINE_float('beta', 0.5, 'KL weighting.')
    flags.DEFINE_boolean('beta_warmup', True, 'Warm up beta.')
    flags.DEFINE_boolean('geco', True, 'Use GECO objective.')
    flags.DEFINE_float('g_goal', 0.5655, 'GECO reconstruction goal.')
    flags.DEFINE_float('g_lr', 1e-5, 'GECO learning rate.')
    flags.DEFINE_float('g_alpha', 0.99, 'GECO momentum for error.')
    flags.DEFINE_float('g_init', 1.0, 'GECO initial Lagrange factor.')
    flags.DEFINE_float('g_min', 1e-10, 'GECO min Lagrange factor.')
    flags.DEFINE_float('g_speedup', 10., 'Scale GECO lr if delta positive.')
    # Other
    flags.DEFINE_boolean('gpu', True, 'Use GPU if available.')
    flags.DEFINE_boolean('multi_gpu', False, 'Use multiple GPUs if available.')
    flags.DEFINE_boolean('debug', False, 'Debug flag.')
    flags.DEFINE_integer('seed', 0, 'Seed for random number generators.')
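# The GECO flags above (g_goal, g_lr, g_alpha, g_init, g_min, g_speedup)
# parameterise a constrained-optimisation update of the KL weight. Below is a
# minimal sketch of a GECO-style update (Rezende & Viola, 2018) consistent
# with those flag semantics -- an illustration, not this repository's
# verbatim implementation:
import torch


def geco_update(beta, err_ema, recon_err, goal, lr,
                alpha=0.99, speedup=10., beta_min=1e-10):
    # Exponential moving average of the reconstruction error (g_alpha)
    err_ema = alpha * err_ema + (1. - alpha) * recon_err
    # delta > 0 means the error is already below the goal (g_goal)
    delta = goal - err_ema
    # Take larger multiplicative steps while the constraint is satisfied
    step = speedup * lr if delta > 0 else lr
    # Multiplicative update keeps the Lagrange factor positive (floor: g_min)
    beta = torch.clamp(beta * torch.exp(step * delta), min=beta_min)
    return beta, err_ema

# Hypothetical use in a training step, with beta initialised to g_init and
# err_ema to the first minibatch reconstruction error:
#   loss = recon_err + beta * kl
#   beta, err_ema = geco_update(beta, err_ema, recon_err.detach(),
#                               cfg.g_goal, cfg.g_lr, cfg.g_alpha,
#                               cfg.g_speedup, cfg.g_min)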
def main_flags():
    # Data & model config
    flags.DEFINE_string('data_config', 'datasets/gqn_config.py',
                        'Path to a data config file.')
    flags.DEFINE_string('model_config', 'models/genesis_config.py',
                        'Path to a model config file.')
    # Trained model
    flags.DEFINE_string('model_dir', 'checkpoints/test/1',
                        'Path to model directory.')
    flags.DEFINE_string('model_file', 'model.ckpt-FINAL', 'Name of model file.')
    # FID
    flags.DEFINE_integer('feat_dim', 2048, 'Number of Inception features.')
    flags.DEFINE_integer('num_fid_images', 10000,
                         'Number of images to compute the FID on.')
    # Other
    flags.DEFINE_string('img_dir', '/tmp', 'Directory for saving PNGs.')
    flags.DEFINE_integer('batch_size', 10, 'Mini-batch size.')
    flags.DEFINE_boolean('gpu', True, 'Use GPU if available.')
    flags.DEFINE_integer('seed', 0, 'Seed for random number generators.')
import torch

from lie_conv.dynamicsTrainer import FC, HFC
from lie_conv.graphnets import OGN, HOGN
from eqv_transformer.dynamics_predictor import DynamicsPredictor
from forge import flags

flags.DEFINE_integer("channel_width", 256, "Channel width for the network.")
flags.DEFINE_integer("num_layers", 4, "Number of layers.")
flags.DEFINE_integer("model_seed", 0, "Model rng seed")
flags.DEFINE_string("network_type", "FC", "One of FC, HFC, OGN, HOGN.")


def load(config, **unused_kwargs):
    print(f"Using network: {config.network_type}.")
    torch.manual_seed(config.model_seed)
    network = eval(config.network_type)(
        sys_dim=config.sys_dim,
        d=config.space_dim,
        k=config.channel_width,
        num_layers=config.num_layers,
    )
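# load() above dispatches on the flag value with eval(). A hypothetical,
# slightly safer equivalent with the same constructor arguments would use an
# explicit name-to-class mapping (a sketch, not part of the original file):
NETWORKS = {"FC": FC, "HFC": HFC, "OGN": OGN, "HOGN": HOGN}


def build_network(config):
    try:
        net_cls = NETWORKS[config.network_type]
    except KeyError:
        raise ValueError(f"Unknown network_type: {config.network_type!r}")
    return net_cls(
        sys_dim=config.sys_dim,
        d=config.space_dim,
        k=config.channel_width,
        num_layers=config.num_layers,
    )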
from tqdm import tqdm
import random

import torch
import numpy as np

import forge
from forge import flags
import forge.experiment_tools as fet
from forge.experiment_tools import fprint

from utils.misc import average_ari, average_segcover

# Config
flags.DEFINE_string('data_config', 'datasets/shapestacks_config.py',
                    'Path to a data config file.')
flags.DEFINE_string('model_config', 'models/genesis_config.py',
                    'Path to a model config file.')
# Trained model
flags.DEFINE_string('model_dir', 'checkpoints/test/1',
                    'Path to model directory.')
flags.DEFINE_string('model_file', 'model.ckpt-FINAL', 'Name of model file.')
# Other
flags.DEFINE_integer('seed', 0, 'Seed for random number generators.')
flags.DEFINE_integer('num_images', 300, 'Number of images to run on.')
flags.DEFINE_string('split', 'test', '{train, val, test}')

# Set manual seed
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
from torch import nn
from torch.distributions.kl import kl_divergence

import numpy as np

from forge import flags

from modules.unet import UNet
import modules.seq_att as seq_att
from modules.component_vae import ComponentVAE
from models.genesis_config import Genesis
from utils import misc

# Attention network
flags.DEFINE_integer('filter_start', 32, 'Starting number of channels in UNet.')
flags.DEFINE_string('prior_mode', 'softmax', '{scope, softmax}')


def load(cfg):
    return MONet(cfg)


class MONet(nn.Module):

    def __init__(self, cfg):
        super(MONet, self).__init__()
        # Configuration
        self.K_steps = cfg.K_steps
        self.prior_mode = cfg.prior_mode
        self.mckl = cfg.montecarlo_kl
        self.debug = cfg.debug
        self.pixel_bound = cfg.pixel_bound
from forge import flags

import modules.decoders as decoders
from modules.component_vae import ComponentVAE
from third_party.sylvester.VAE import VAE
import utils.misc as misc

# Model type
flags.DEFINE_boolean('two_stage', True, 'Use two stages if True, else only one.')
# Priors
flags.DEFINE_boolean('autoreg_prior', True, 'Autoregressive prior.')
flags.DEFINE_boolean('comp_prior', True, 'Component prior.')
# Attention VAE
flags.DEFINE_integer('attention_latents', 64, 'Latent dimension.')
flags.DEFINE_string('enc_norm', 'bn', '{bn, in} - norm type in encoder.')
flags.DEFINE_string('dec_norm', 'bn', '{bn, in} - norm type in decoder.')
# Component VAE
flags.DEFINE_integer('comp_enc_channels', 32, 'Starting number of channels.')
flags.DEFINE_integer('comp_ldim', 16, 'Latent dimension of the VAE.')
flags.DEFINE_integer('comp_dec_channels', 32, 'Num channels in Broadcast Decoder.')
flags.DEFINE_integer('comp_dec_layers', 4, 'Num layers in Broadcast Decoder.')
# Losses
flags.DEFINE_boolean('pixel_bound', True, 'Bound pixel values to [0, 1].')
flags.DEFINE_float('pixel_std1', 0.7, 'StdDev of reconstructed pixels.')
flags.DEFINE_float('pixel_std2', 0.7, 'StdDev of reconstructed pixels.')
flags.DEFINE_boolean('montecarlo_kl', True, 'Evaluate KL via MC samples.')


def load(cfg):
    return Genesis(cfg)
# ###############################################################################

from __future__ import print_function

from os import path as osp

import torch
import torch.nn.functional as F
import torch.optim as optim

import forge
from forge import flags
import forge.experiment_tools as fet

# Job config
flags.DEFINE_string('data_config', 'configs/mnist_data.py',
                    'Path to a data config file.')
flags.DEFINE_string('model_config', 'configs/mnist_mlp.py',
                    'Path to a model config file.')
flags.DEFINE_string('results_dir', 'checkpoints',
                    'Top directory for all experimental results.')
flags.DEFINE_string('run_name', 'mnist',
                    'Name of this job and name of results folder.')
flags.DEFINE_boolean('resume', False, 'Tries to resume a job if True.')
# Logging config
flags.DEFINE_integer('report_loss_every', 100,
                     'Number of iterations between reporting minibatch loss.')
flags.DEFINE_integer('train_epochs', 20, 'Maximum number of training epochs.')
# Experiment config
flags.DEFINE_integer('batch_size', 32, 'Mini-batch size.')
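# A forge experiment script then typically wires these flags together as
# sketched below. This usage pattern (fet.init_checkpoint resolving the log
# directory and fet.load executing a config file's load() function) is an
# assumption inferred from forge's conventions, not code from this file:
#
#   config = forge.config()  # parse the flags defined above
#   logdir = osp.join(config.results_dir, config.run_name)
#   logdir, resume_checkpoint = fet.init_checkpoint(
#       logdir, config.data_config, config.model_config, config.resume)
#   train_loader = fet.load(config.data_config, config)
#   model = fet.load(config.model_config, config)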
import torch
from torch.utils.data import DataLoader

from oil.utils.utils import FixedNumpySeed, islice
from oil.datasetup.datasets import split_dataset
from lie_conv.datasets import SpringDynamics

from forge import flags

flags.DEFINE_integer("n_train", 3000, "Number of training datapoints.")
flags.DEFINE_integer("n_test", 2000, "Number of testing datapoints.")
flags.DEFINE_integer("n_val", 2000, "Number of validation datapoints.")
flags.DEFINE_integer("n_systems", 10000, "Size of total dataset generated.")
flags.DEFINE_string(
    "data_path",
    "./datasets/ODEDynamics/SpringDynamics/",
    "Dataset is loaded from and/or downloaded to this path.",
)
flags.DEFINE_integer("sys_dim", 2, "[add description].")
flags.DEFINE_integer("space_dim", 2, "Dimension of particle system.")
flags.DEFINE_integer("data_seed", 0, "Data splits random seed.")
flags.DEFINE_integer("num_particles", 6, "Number of particles in system.")
flags.DEFINE_integer("chunk_len", 5, "Length of trajectories.")
flags.DEFINE_boolean(
    "load_preprocessed",
    False,
    "Load data that is already preprocessed to avoid a RAM spike. Ensure the "
    "data exists first for the required chunk_len.",
)


def load(config):
# (research or commercial). No warranty, explicit or implicit, provided.
#
# =========================== A2I Copyright Header ===========================

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import NoNorm

import torch

import forge
from forge import flags
import forge.experiment_tools as fet

# Config
flags.DEFINE_string('data_config', 'datasets/multid_config.py',
                    'Path to a data config file.')
flags.DEFINE_integer('batch_size', 8, 'Mini-batch size.')
flags.DEFINE_integer('seed', 0, 'Seed for random number generators.')


def main():
    # Parse flags
    cfg = forge.config()
    cfg.num_workers = 0

    # Set manual seed
    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)
    # Make CUDA operations deterministic
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
import tensorflow as tf
import numpy as np

from forge import flags
from forge.experiment_tools import fprint

from utils.misc import loader_throughput, len_tfrecords, np_img_centre_crop

import third_party.multi_object_datasets.multi_dsprites as multi_dsprites
import third_party.multi_object_datasets.objects_room as objects_room
import third_party.multi_object_datasets.clevr_with_masks as clevr_with_masks
import third_party.multi_object_datasets.tetrominoes as tetrominoes

flags.DEFINE_string('data_folder', 'data/multi-object-datasets',
                    'Path to data folder.')
flags.DEFINE_string('dataset', 'objects_room',
                    '{multi_dsprites, objects_room, clevr, tetrominoes}')
flags.DEFINE_integer('img_size', -1, 'Dimension of images. Images are square.')
flags.DEFINE_integer('dataset_size', -1, 'Number of images to use.')
flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_integer('buffer_size', 128, 'Buffer size for the TFRecords dataset.')
flags.DEFINE_integer('K_steps', -1, 'Number of recurrent steps.')

MULTI_DSPRITES = '/multi_dsprites/multi_dsprites_colored_on_colored.tfrecords'
OBJECTS_ROOM = '/objects_room/objects_room_train.tfrecords'
CLEVR = '/clevr_with_masks/clevr_with_masks_train.tfrecords'
TETROMINOS = '/tetrominoes/tetrominoes_train.tfrecords'
CLEVR_CROP = 192  # Following pre-processing in the IODINE paper
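# CLEVR_CROP mirrors the IODINE pre-processing: a 192x192 centre crop before
# resizing. The repo imports np_img_centre_crop from utils.misc for this; the
# sketch below only illustrates the operation and is not that function's code.
def centre_crop(img, crop=192):
    # img: [H, W, C] array; keep the central (crop x crop) window
    h, w = img.shape[0], img.shape[1]
    top, left = (h - crop) // 2, (w - crop) // 2
    return img[top:top + crop, left:left + crop]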
from lie_conv.moleculeTrainer import MolecLieResNet

from eqv_transformer.molecule_predictor import MoleculePredictor
from lie_conv.lieGroups import SE3, SO3, T, Trivial

from forge import flags

flags.DEFINE_bool(
    "data_augmentation",
    False,
    "Apply data augmentation to the data before passing to the model",
)
flags.DEFINE_integer(
    "nbhd_size", 25, "The number of samples to use for Monte Carlo estimation"
)
flags.DEFINE_string(
    "activation_function", "swish", "Activation function to use in the network"
)
flags.DEFINE_boolean("batch_norm", True, "Use batch norm in the layers")
flags.DEFINE_bool(
    "mean_pooling",
    True,
    "Use mean pooling instead of sum pooling in the invariant layer",
)
flags.DEFINE_integer("num_layers", 6, "Number of ResNet layers to use")
flags.DEFINE_string("group", "SE3", "Group to be invariant to")
flags.DEFINE_integer("channels", 1536, "Number of channels in the conv layers")
flags.DEFINE_float(
    "fill",
    1.0,
    "Specifies the fraction of the input which is included in the local "
    "neighbourhood (can be an array to specify a different value for each layer).",
)
flags.DEFINE_integer(
from eqv_transformer.eqv_attention import EquivariantTransformer
from lie_conv.lieGroups import SE3, SE2, SO3, T, Trivial
# from lie_conv.datasets import SE3aug

from forge import flags

flags.DEFINE_boolean(
    "data_augmentation",
    False,
    "Apply data augmentation to the data before passing to the model",
)
flags.DEFINE_integer("dim_hidden", 512, "Dimension of features to use in each layer")
flags.DEFINE_string(
    "activation_function", "swish", "Activation function to use in the network"
)
# flags.DEFINE_boolean("layer_norm", True, "Use layer norm in the layers")
flags.DEFINE_boolean(
    "mean_pooling",
    True,
    "Use mean pooling instead of sum pooling in the invariant layer",
)
flags.DEFINE_integer("num_heads", 8, "Number of attention heads in each layer")
flags.DEFINE_integer("kernel_dim", 16, "Hidden layer size to use in kernel MLPs")
# flags.DEFINE_boolean("batch_norm", False, "Use batch norm in the kernel MLPs")
flags.DEFINE_integer("num_layers", 6, "Number of ResNet layers to use")
flags.DEFINE_string("group", "SE2", "Group to be invariant to")
flags.DEFINE_integer(
    "lift_samples", 1, "Number of coset lift samples to use for non-trivial stabilisers"
)
# =========================== A2I Copyright Header ===========================

import os

import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torch.nn.functional as F

import numpy as np

from forge import flags

from utils.misc import loader_throughput

flags.DEFINE_string('data_folder', 'data/multi_dsprites/processed',
                    'Path to data folder.')
flags.DEFINE_boolean('unique_colours', False, 'Dataset with unique colours.')
flags.DEFINE_boolean('load_instances', True, 'Load instances.')
flags.DEFINE_integer('img_size', 64, 'Dimension of images. Images are square.')
flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_boolean('mem_map', False, 'Use memory mapping.')
flags.DEFINE_integer('K_steps', 5, 'Number of recurrent steps.')


def load(cfg, **unused_kwargs):
    """
    Args:
        cfg (obj): Forge config

    Returns:
    load_checkpoint,
    save_checkpoint,
    ExponentialMovingAverage,
    nested_to,
    param_count,
    get_component,
    get_average_norm,
)

# %%
#####################################################################################################################
# Command line flags
#####################################################################################################################

# Directories
flags.DEFINE_string(
    "results_dir", "checkpoints/", "Top directory for all experimental results."
)

# Configuration files to load
flags.DEFINE_string(
    "data_config",
    "configs/dynamics/spring_dynamics_data.py",
    "Path to a data config file.",
)
flags.DEFINE_string(
    "model_config",
    "configs/dynamics/eqv_transformer_model.py",
    "Path to a model config file.",
)

# Job management
flags.DEFINE_string(
    "run_name",
import os

from oil.utils.utils import FixedNumpySeed
from oil.datasetup.datasets import split_dataset
from lie_conv.datasets import QM9datasets
from corm_data.collate import collate_fn

import forge
from forge import flags

flags.DEFINE_float(
    "subsample_trainset",
    1.0,
    "Proportion or number of samples of the full trainset to use",
)
flags.DEFINE_string(
    "task",
    "homo",
    "Which task in the QM9 dataset to train on. Pass as a comma separated string",
)
flags.DEFINE_boolean(
    "recenter", False, "Recenter the positions of atoms with charge > 0"
)
flags.DEFINE_integer("batch_fit", 0, "Number of samples to fit to")
flags.DEFINE_integer("data_seed", 0, "Seed to pick data with")


def load(config, **unused_kwargs):
    with FixedNumpySeed(config.data_seed):
        datasets, num_species, charge_scale = QM9datasets(
            os.path.join(config.data_dir, "qm9")
        )
    if config.subsample_trainset != 1.0:
        datasets.update(
import torch

from eqv_transformer.eqv_attention import EquivariantTransformer
from lie_conv.dynamicsTrainer import HNet
from lie_conv.hamiltonian import HamiltonianDynamics
from lie_conv.lieGroups import T, SE2, SE2_canonical, SO2
from eqv_transformer.dynamics_predictor import DynamicsPredictor

from forge import flags

flags.DEFINE_string("group", "T(2)", "Group to be invariant to.")
flags.DEFINE_integer("dim_hidden", 160, "Dimension of features to use in each layer")
flags.DEFINE_string(
    "activation_function", "swish", "Activation function to use in the network"
)
flags.DEFINE_boolean(
    "mean_pooling",
    True,
    "Use mean pooling instead of sum pooling in the invariant layer",
)
flags.DEFINE_integer("num_heads", 8, "Number of attention heads in each layer")
flags.DEFINE_integer("kernel_dim", 16, "Hidden layer size to use in kernel MLPs")
flags.DEFINE_integer("num_layers", 5, "Number of ResNet layers to use")
flags.DEFINE_integer(
    "lift_samples",
    1,
    "Number of coset lift samples to use for non-trivial stabilisers.",
)
flags.DEFINE_integer("model_seed", 0, "Model rng seed")
flags.DEFINE_string("attention_fn", "dot_product",
from eqv_transformer.eqv_attention import EquivariantTransformer
from eqv_transformer.molecule_predictor import MoleculePredictor
from lie_conv.lieGroups import SE3, SO3, T, Trivial
from lie_conv.datasets import SE3aug

from forge import flags

flags.DEFINE_boolean(
    "data_augmentation",
    True,
    "Apply data augmentation to the data before passing to the model",
)
flags.DEFINE_integer("dim_hidden", 512, "Dimension of features to use in each layer")
flags.DEFINE_string(
    "activation_function", "swish", "Activation function to use in the network"
)
flags.DEFINE_boolean(
    "mean_pooling",
    True,
    "Use mean pooling instead of sum pooling in the invariant layer",
)
flags.DEFINE_integer("num_heads", 8, "Number of attention heads in each layer")
flags.DEFINE_string(
    "block_norm",
    "layer_pre",
    "Type of norm to use in the attention block. none/[layer/batch]_[pre/post]",
)
flags.DEFINE_string(
    "output_norm",
    "none",
    "Type of norm to use in the final MLP layers block. none/layer/batch",
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ########################################################################################
"""Experiment training script."""
from os import path as osp

import tensorflow as tf

import forge
from forge import flags
import forge.experiment_tools as fet

# job config
flags.DEFINE_string('data_config', 'configs/mnist_data.py',
                    'Path to a data config file.')
flags.DEFINE_string('model_config', 'configs/mnist_mlp.py',
                    'Path to a model config file.')
flags.DEFINE_string('results_dir', '../checkpoints',
                    'Top directory for all experimental results.')
flags.DEFINE_string('run_name', 'test_run',
                    'Name of this job. Results will be stored in a corresponding folder.')
flags.DEFINE_boolean('resume', False, 'Tries to resume a job if True.')
# logging config
flags.DEFINE_integer('report_loss_every', int(1e3),
                     'Number of iterations between reporting minibatch loss - heartbeat.')
flags.DEFINE_integer('save_itr', int(1e4),
                     'Number of iterations between snapshotting the model.')
flags.DEFINE_integer('train_itr', int(2e6), 'Maximum number of training iterations.')
# experiment config
flags.DEFINE_integer('batch_size', 32, 'Mini-batch size.')
flags.DEFINE_float('learning_rate', 1e-5, 'Initial value of the learning rate.')
# gpu
import torch
import torch.nn.functional as F

import tensorflow as tf
import numpy as np

import third_party.tf_gqn.gqn_tfr_provider as gqn

from forge import flags
from forge.experiment_tools import fprint

from utils.misc import loader_throughput

flags.DEFINE_string('data_folder', 'data/gqn_datasets', 'Path to data folder.')
flags.DEFINE_integer('img_size', 64, 'Dimension of images. Images are square.')
flags.DEFINE_integer('val_frac', 60,
                     'Fraction of training images to use for validation.')
flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_integer('buffer_size', 128, 'Buffer size for the TFRecords dataset.')
flags.DEFINE_integer('K_steps', 7, 'Number of recurrent steps.')

SEED = 0


def load(cfg, **unused_kwargs):
    # Fix TensorFlow seed
    global SEED
import random
from os import path as osp

from attrdict import AttrDict

import torch
import numpy as np
import matplotlib.pyplot as plt

import forge
from forge import flags
import forge.experiment_tools as fet
from forge.experiment_tools import fprint

from utils.plotting import plot

# Data & model config
flags.DEFINE_string('data_config', 'datasets/gqn_config.py',
                    'Path to a data config file.')
flags.DEFINE_string('model_config', 'models/genesis_config.py',
                    'Path to a model config file.')
# Trained model
flags.DEFINE_string('model_dir', 'checkpoints/test/1',
                    'Path to model directory.')
flags.DEFINE_string('model_file', 'model.ckpt-FINAL', 'Name of model file.')


def main():
    # Parse flags
    config = forge.config()

    # Restore flags of pretrained model
    flag_path = osp.join(config.model_dir, 'flags.json')
    fprint(f"Restoring flags from {flag_path}")
    pretrained_flags = AttrDict(fet.json_load(flag_path))
from attrdict import AttrDict

import torch
from torch import nn
import torch.nn.functional as F

from eqv_transformer.classfier import Classifier
from eqv_transformer.eqv_attention_se2_finite import EqvTransformer

from forge import flags

# flags.DEFINE_integer('input_dim', 2, 'Dimensionality of the input.')
flags.DEFINE_integer('n_outputs', 4, 'Number of output vectors.')
# flags.DEFINE_integer('output_dim', 3, 'Dimensionality of the output.')
flags.DEFINE_string('content_type', 'pairwise_distances', 'How to initialize y')
flags.DEFINE_integer('n_enc_layers', 4, 'Number of encoder layers.')
flags.DEFINE_integer('n_dec_layers', 4, 'Number of decoder layers.')
flags.DEFINE_integer('n_heads', 4, 'Number of attention heads.')
flags.DEFINE_boolean('layer_norm', False, 'Uses layer-norm if True.')
flags.DEFINE_integer('cn', 5, 'Size of rotation group.')
flags.DEFINE_string('similarity_fn', 'softmax',
                    'Similarity function used to compute attention weights.')
flags.DEFINE_string('arch', 'set_transf', 'Architecture.')
flags.DEFINE_integer('num_moments', 5,
                     'When using pairwise distances as Y, number of moments.')


def load(config, **unused_kwargs):
    del unused_kwargs  # should not affect things

    # Number of moments: config.patterns_reps * 17 - 1
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

import numpy as np
from PIL import Image

from forge import flags
from forge.experiment_tools import fprint

from utils.misc import loader_throughput

from third_party.shapestacks.shapestacks_provider import _get_filenames_with_labels

flags.DEFINE_string('data_folder', 'data/shapestacks', 'Path to data folder.')
flags.DEFINE_string('split_name', 'default', '{default, blocks_all, css_all}')
flags.DEFINE_integer('img_size', 64, 'Dimension of images. Images are square.')
flags.DEFINE_boolean('shuffle_test', False, 'Shuffle test set.')
flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_boolean('copy_to_tmp', False, 'Copy files to /tmp.')
flags.DEFINE_integer('K_steps', 9, 'Number of recurrent steps.')

MAX_SHAPES = 6
CENTRE_CROP = 196


def load(cfg, **unused_kwargs):
from lie_conv.utils import Pass, Expression
from lie_conv.masked_batchnorm import MaskBatchNormNd
from oil.utils.utils import cosLr
from corm_data.collate import collate_fn

if torch.cuda.is_available():
    device = "cuda"
    # device = "cpu"
else:
    device = "cpu"

#####################################################################################################################
# Command line flags
#####################################################################################################################

# Directories
flags.DEFINE_string("data_dir", "data/", "Path to data directory")
flags.DEFINE_string(
    "results_dir", "checkpoints/", "Top directory for all experimental results."
)

# Configuration files to load
flags.DEFINE_string(
    "data_config", "configs/molecule/qm9_data.py", "Path to a data config file."
)
flags.DEFINE_string(
    "model_config",
    "configs/molecule/set_transformer.py",
    "Path to a model config file.",
)

# Job management
flags.DEFINE_string("run_name", "test", "Name of this job and name of results folder.")
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ########################################################################################
"""MNIST data config."""
import os

from attrdict import AttrDict
from tensorflow.examples.tutorials.mnist import input_data

from forge import flags
from forge.data import tensors_from_data

flags.DEFINE_string('data_folder', '../data/MNIST_data', 'Path to a data folder.')


# This function should return a dataset in a form that is accepted by the
# corresponding model file. In this case, it returns a dictionary of tensors.
def load(config, **unused_kwargs):
    del unused_kwargs

    if not os.path.exists(config.data_folder):
        os.makedirs(config.data_folder)

    dataset = input_data.read_data_sets(config.data_folder)

    train_data = {'imgs': dataset.train.images, 'labels': dataset.train.labels}
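# The excerpt ends mid-function. For illustration only -- an assumption based
# on the imports above, not the file's actual continuation -- forge's
# tensors_from_data can turn such a dict of numpy arrays into minibatch
# tensors for the model config to consume:
#
#   train_tensors = tensors_from_data(train_data, config.batch_size,
#                                     shuffle=True)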
    print_reports,
    load_checkpoint,
    save_checkpoint,
    ExponentialMovingAverage,
    param_count,
    get_component,
    get_average_norm,
)

# %%
#####################################################################################################################
# Command line flags
#####################################################################################################################

# Directories
flags.DEFINE_string("data_dir", "data/", "Path to data directory")
flags.DEFINE_string(
    "results_dir", "checkpoints/", "Top directory for all experimental results."
)

# Configuration files to load
flags.DEFINE_string(
    "data_config",
    "configs/constellation/constellation.py",
    "Path to a data config file.",
)
flags.DEFINE_string(
    "model_config",
    "configs/constellation/eqv_transformer_model.py",
    "Path to a model config file.",
)

# Job management