Code example #1
File: train.py Project: ogroth/genesis
def main_flags():
    # Data & model config
    flags.DEFINE_string('data_config', 'datasets/multid_config.py',
                        'Path to a data config file.')
    flags.DEFINE_string('model_config', 'models/genesis_config.py',
                        'Path to a model config file.')
    # Logging config
    flags.DEFINE_string('results_dir', 'checkpoints',
                        'Top directory for all experimental results.')
    flags.DEFINE_string('run_name', 'test',
                        'Name of this job and name of results folder.')
    flags.DEFINE_integer(
        'report_loss_every', 1000,
        'Number of iterations between reporting minibatch loss.')
    flags.DEFINE_integer('run_validation_every', 10000,
                         'Number of iterations between validation runs.')
    flags.DEFINE_integer('num_checkpoints', 40,
                         'How many equally spaced model checkpoints to save.')
    flags.DEFINE_boolean('resume', False, 'Tries to resume a job if True.')
    flags.DEFINE_boolean(
        'log_grads_and_weights', False,
        'Log gradient and weight histograms - storage intensive!')
    flags.DEFINE_boolean(
        'log_distributions', False,
        'Log mu and sigma of posterior and prior distributions.')
    # Optimisation config
    flags.DEFINE_integer('train_iter', 2000000,
                         'Number of training iterations.')
    flags.DEFINE_integer('batch_size', 32, 'Mini-batch size.')
    flags.DEFINE_string('optimiser', 'adam', 'Optimiser for updating weights.')
    flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.')
    flags.DEFINE_integer('N_eval', 10000,
                         'Number of samples to run evaluation on.')
    # Loss config
    flags.DEFINE_float('beta', 0.5, 'KL weighting.')
    flags.DEFINE_boolean('beta_warmup', True, 'Warm up beta.')
    flags.DEFINE_boolean('geco', True, 'Use GECO objective.')
    flags.DEFINE_float('g_goal', 0.5655, 'GECO recon goal.')
    flags.DEFINE_float('g_lr', 1e-5, 'GECO learning rate.')
    flags.DEFINE_float('g_alpha', 0.99, 'GECO momentum for error.')
    flags.DEFINE_float('g_init', 1.0, 'GECO initial Lagrange factor.')
    flags.DEFINE_float('g_min', 1e-10, 'GECO min Lagrange factor.')
    flags.DEFINE_float('g_speedup', 10., 'Scale GECO lr if delta positive.')
    # Other
    flags.DEFINE_boolean('gpu', True, 'Use GPU if available.')
    flags.DEFINE_boolean('multi_gpu', False, 'Use multiple GPUs if available.')
    flags.DEFINE_boolean('debug', False, 'Debug flag.')
    flags.DEFINE_integer('seed', 0, 'Seed for random number generators.')
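
A minimal consumption sketch (not from the genesis repo; the __main__ wiring is an assumption). It shows how the flags registered by main_flags() are parsed and read, following the forge.config() pattern that appears verbatim in code example #22:

import forge
from forge import flags

def main():
    main_flags()               # register all the flags defined above
    config = forge.config()    # parse command-line flags into a config object
    print(config.batch_size, config.learning_rate)

if __name__ == '__main__':
    main()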
Code example #2
File: compute_fid.py Project: mcm2020/genesis
def main_flags():
    # Data & model config
    flags.DEFINE_string('data_config', 'datasets/gqn_config.py',
                        'Path to a data config file.')
    flags.DEFINE_string('model_config', 'models/genesis_config.py',
                        'Path to a model config file.')
    # Trained model
    flags.DEFINE_string('model_dir', 'checkpoints/test/1',
                        'Path to model directory.')
    flags.DEFINE_string('model_file', 'model.ckpt-FINAL', 'Name of model file.')
    # FID
    flags.DEFINE_integer('feat_dim', 2048, 'Number of Inception features.')
    flags.DEFINE_integer('num_fid_images', 10000,
                         'Number of images to compute the FID on.')
    # Other
    flags.DEFINE_string('img_dir', '/tmp', 'Directory for saving pngs.')
    flags.DEFINE_integer('batch_size', 10, 'Mini-batch size.')
    flags.DEFINE_boolean('gpu', True, 'Use GPU if available.')
    flags.DEFINE_integer('seed', 0, 'Seed for random number generators.')
Code example #3
flags.DEFINE_string(
    "results_dir", "checkpoints/", "Top directory for all experimental results."
)

# Configuration files to load
flags.DEFINE_string(
    "data_config", "configs/molecule/qm9_data.py", "Path to a data config file."
)
flags.DEFINE_string(
    "model_config",
    "configs/molecule/set_transformer.py",
    "Path to a model config file.",
)
# Job management
flags.DEFINE_string("run_name", "test", "Name of this job and name of results folder.")
flags.DEFINE_boolean("resume", False, "Tries to resume a job if True.")

# Logging
flags.DEFINE_integer(
    "report_loss_every", 500, "Number of iterations between reporting minibatch loss."
)
flags.DEFINE_integer(
    "evaluate_every", 10000, "Number of iterations between reporting validation loss."
)
flags.DEFINE_integer(
    "save_check_points",
    10,
    "frequency with which to save checkpoints, in number of epochs.",
)
flags.DEFINE_boolean("log_train_values", True, "Logs train values if True.")
flags.DEFINE_float(
Code example #4
from torch import nn
import torch.nn.functional as F
from eqv_transformer.classfier import Classifier
from eqv_transformer.eqv_attention_se2_finite import EqvTransformer

from forge import flags

# flags.DEFINE_integer('input_dim', 2, 'Dimensionality of the input.')
flags.DEFINE_integer('n_outputs', 4, 'Number of output vectors.')
# flags.DEFINE_integer('output_dim', 3, 'Dimensionality of the output.')
flags.DEFINE_string('content_type', 'pairwise_distances',
                    'How to initialize y')
flags.DEFINE_integer('n_enc_layers', 4, 'Number of encoder layers.')
flags.DEFINE_integer('n_dec_layers', 4, 'Number of decoder layers.')
flags.DEFINE_integer('n_heads', 4, 'Number of attention heads.')
flags.DEFINE_boolean('layer_norm', False, 'Uses layer-norm if True.')
flags.DEFINE_integer('cn', 5, 'Size of rotation group.')
flags.DEFINE_string('similarity_fn', 'softmax',
                    'Similarity function used to compute attention weights.')
flags.DEFINE_string('arch', 'set_transf', 'Architecture.')
flags.DEFINE_integer('num_moments', 5,
                     'When using pairwise distances as Y, number of moments.')


def load(config, **unused_kwargs):
    del unused_kwargs

    # should not affect things #### number of moments # config.patterns_reps * 17 - 1
    input_dim = None
    output_dim = config.patterns_reps + 1
Code example #5
import torch.nn as nn
from torch.distributions.normal import Normal

from forge import flags

import modules.blocks as B
import modules.seq_att as seq_att
import modules.decoders as decoders
from modules.component_vae import ComponentVAE

import third_party.sylvester.VAE as sylvester

import utils.misc as misc

# Model type
flags.DEFINE_boolean('two_stage', True,
                     'Use two stages if True, else only one.')
# Priors
flags.DEFINE_boolean('autoreg_prior', True, 'Autoregressive prior.')
flags.DEFINE_boolean('comp_prior', True, 'Component prior.')
# Attention VAE
flags.DEFINE_integer('attention_latents', 64, 'Latent dimension.')
flags.DEFINE_string('enc_norm', 'bn', '{bn, in} - norm type in encoder.')
flags.DEFINE_string('dec_norm', 'bn', '{bn, in} - norm type in decoder.')
# Component VAE
flags.DEFINE_integer('comp_enc_channels', 32, 'Starting number of channels.')
flags.DEFINE_integer('comp_ldim', 16, 'Latent dimension of the VAE.')
flags.DEFINE_integer('comp_dec_channels', 32,
                     'Num channels in Broadcast Decoder.')
flags.DEFINE_integer('comp_dec_layers', 4, 'Num layers in Broadcast Decoder.')
flags.DEFINE_boolean('comp_symmetric', False,
                     'Use same encoder/decoder as in attention VAE.')
Code example #6
import numpy as np
import tensorflow as tf
import torch
from torchvision import transforms

# import ipdb
from forge import flags
import pickle
import os
import json

flags.DEFINE_integer("train_size", 10000, "Number of training examples per epoch.")
flags.DEFINE_integer("test_size", 1000, "Number of testing examples per epoch.")
flags.DEFINE_integer("naug", 2, "Number of augmentation.")
flags.DEFINE_float("corner_noise", 0.1, "See `create_constellations`.")
flags.DEFINE_boolean("shuffle_corners", True, "See `create_constellations`.")

flags.DEFINE_float("pattern_upscale", 0.0, "See `create_constellations`.")
flags.DEFINE_float("max_rotation", 0.33, "See `create_constellations`.")
flags.DEFINE_float("global_rotation_angle", 0.0, "See `create_constellations`.")
flags.DEFINE_float("global_translation", 0.0, "See `create_constellations`.")
flags.DEFINE_float("pattern_drop_prob", 0.5, "See `create_constellations`.")
flags.DEFINE_integer("patterns_reps", 2, "See `create_constellations`.")
flags.DEFINE_integer("data_seed", 0, "Seed for data generation.")


def roots_of_unity(n):
    x_coors = np.cos(2 * np.pi / n * np.arange(n)[..., np.newaxis])
    y_coors = np.sin(2 * np.pi / n * np.arange(n)[..., np.newaxis])

    coors = np.concatenate([x_coors, y_coors, np.tile([[1]], (n, 1))], axis=1)
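
roots_of_unity() places n evenly spaced points on the unit circle, with a homogeneous coordinate of 1 appended to each row. A standalone check (a sketch assuming the truncated function ends with "return coors"):

import numpy as np

def roots_of_unity(n):
    # Angles 2*pi*k/n for k = 0..n-1, as an (n, 1) column.
    x_coors = np.cos(2 * np.pi / n * np.arange(n)[..., np.newaxis])
    y_coors = np.sin(2 * np.pi / n * np.arange(n)[..., np.newaxis])
    # Stack into (n, 3): x, y, and a constant homogeneous coordinate.
    coors = np.concatenate([x_coors, y_coors, np.tile([[1]], (n, 1))], axis=1)
    return coors  # assumed return; the original snippet is truncated here

print(roots_of_unity(4).round(6))
# Four points (1, 0), (0, 1), (-1, 0), (0, -1), each with a trailing 1
# (some entries may print as -0. due to floating-point rounding).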
Code example #7
import os

import numpy as np
from PIL import Image

from forge import flags
from forge.experiment_tools import fprint

from utils.misc import loader_throughput

from third_party.shapestacks.shapestacks_provider import _get_filenames_with_labels


flags.DEFINE_string('data_folder', 'data/shapestacks', 'Path to data folder.')
flags.DEFINE_string('split_name', 'default', '{default, blocks_all, css_all}')
flags.DEFINE_integer('img_size', 64, 'Dimension of images. Images are square.')
flags.DEFINE_boolean('shuffle_test', False, 'Shuffle test set.')

flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_boolean('copy_to_tmp', False, 'Copy files to /tmp.')

flags.DEFINE_integer('K_steps', 9, 'Number of recurrent steps.')


MAX_SHAPES = 6
CENTRE_CROP = 196


def load(cfg, **unused_kwargs):
    del unused_kwargs
    if not os.path.exists(cfg.data_folder):
        raise Exception("Data folder does not exist.")
Code example #8
from attrdict import AttrDict

import torch
from torch import nn
import torch.nn.functional as F

from lie_conv.datasets import SE3aug

from eqv_transformer.attention import SetTransformer
from eqv_transformer.molecule_predictor import MoleculePredictor

from forge import flags

flags.DEFINE_boolean("data_augmentation", False,
                     "Apply data augmentation to the input data or not")
flags.DEFINE_integer("n_enc_layers", 4, "Number of encoder layers.")
flags.DEFINE_integer("n_dec_layers", 4, "Number of encoder layers.")
flags.DEFINE_integer("num_heads", 4, "Number of attention heads.")
flags.DEFINE_integer(
    "n_inducing_points",
    0,
    "Number of inducing points; does not use inducing points if 0.",
)
flags.DEFINE_boolean("layer_norm", False, "Uses layer-norm if True.")
flags.DEFINE_integer("hidden_dim", 128, "Hidden dimension between layers")


class MolecueSetTransformer(SetTransformer):
    def __init__(self, num_species, charge_scale, aug=False, **kwargs):
        super().__init__(dim_input=3 + 3 * num_species,
                         num_outputs=1,
Code example #9
import torch

from eqv_transformer.classfier import Classifier
from eqv_transformer.eqv_attention import EquivariantTransformer
from lie_conv.lieGroups import SE3, SE2, SO3, T, Trivial

# from lie_conv.datasets import SE3aug

from forge import flags


flags.DEFINE_boolean(
    "data_augmentation",
    False,
    "Apply data augmentation to the data before passing to the model",
)
flags.DEFINE_integer("dim_hidden", 512, "Dimension of features to use in each layer")
flags.DEFINE_string(
    "activation_function", "swish", "Activation function to use in the network"
)
# flags.DEFINE_boolean("layer_norm", True, "Use layer norm in the layers")
flags.DEFINE_boolean(
    "mean_pooling",
    True,
    "Use mean pooling insteave of sum pooling in the invariant layer",
)
flags.DEFINE_integer("num_heads", 8, "Number of attention heads in each layer")
flags.DEFINE_integer("kernel_dim", 16, "Hidden layer size to use in kernel MLPs")
# flags.DEFINE_boolean("batch_norm", False, "Use batch norm in the kernel MLPs")
flags.DEFINE_integer("num_layers", 6, "Number of ResNet layers to use")
flags.DEFINE_string("group", "SE2", "Group to be invariant to")
Code example #10
    "data_config",
    "configs/dynamics/spring_dynamics_data.py",
    "Path to a data config file.",
)
flags.DEFINE_string(
    "model_config",
    "configs/dynamics/eqv_transformer_model.py",
    "Path to a model config file.",
)
# Job management
flags.DEFINE_string(
    "run_name",
    "test",
    "Name of this job and name of results folder.",
)
flags.DEFINE_boolean("resume", False, "Tries to resume a job if True.")

# Logging
flags.DEFINE_integer("report_loss_every", 10,
                     "Number of iterations between reporting minibatch loss.")
flags.DEFINE_integer(
    "evaluate_every", 10000,
    "Number of iterations between reporting validation loss.")
flags.DEFINE_integer(
    "save_check_points",
    50,
    "frequency with which to save checkpoints, in number of epoches.",
)
flags.DEFINE_boolean("log_train_values", True, "Logs train values if True.")

# Optimization
Code example #11
import os

from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torch.nn.functional as F

import numpy as np

from forge import flags

from utils.misc import loader_throughput

flags.DEFINE_string('data_folder', 'data/multi_dsprites/processed',
                    'Path to data folder.')

flags.DEFINE_integer('img_size', 64, 'Dimension of images. Images are square.')
flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_boolean('mem_map', False, 'Use memory mapping.')

flags.DEFINE_integer('K_steps', 5, 'Number of recurrent steps.')


def load(cfg, **unused_kwargs):
    """
    Args:
        cfg (obj): Forge config
    Returns:
        (DataLoader, DataLoader, DataLoader):
            Tuple of data loaders for train, val, test
    """
    del unused_kwargs
    if not os.path.exists(cfg.data_folder):
        raise Exception("Data folder does not exist.")
Code example #12
File: multid_config.py Project: ogroth/genesis
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torch.nn.functional as F

import numpy as np

from forge import flags

from utils.misc import loader_throughput

flags.DEFINE_string('data_folder', 'data/multi_dsprites/processed',
                    'Path to data folder.')

flags.DEFINE_boolean('load_instances', False, 'Load instances.')

flags.DEFINE_integer('img_size', 64, 'Dimension of images. Images are square.')
flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_boolean('mem_map', False, 'Use memory mapping.')

flags.DEFINE_integer('K_steps', 5, 'Number of recurrent steps.')


def load(cfg, **unused_kwargs):
    """
    Args:
        cfg (obj): Forge config
    Returns:
        (DataLoader, DataLoader, DataLoader):
            Tuple of data loaders for train, val, test
Code example #13
import os

from corm_data.collate import collate_fn

import forge
from forge import flags

flags.DEFINE_float(
    "subsample_trainset",
    1.0,
    "Proportion or number of samples of the full trainset to use",
)
flags.DEFINE_string(
    "task",
    "h**o",
    "Which task in the QM9 dataset to train on. Pass as a comma separated string",
)
flags.DEFINE_boolean("recenter", False,
                     "Recenter the positions of atoms with charge > 0")
flags.DEFINE_integer("batch_fit", 0, "number of samples to fit to")
flags.DEFINE_integer("data_seed", 0, "seed to pick data with")


def load(config, **unused_kwargs):

    with FixedNumpySeed(config.data_seed):
        datasets, num_species, charge_scale = QM9datasets(
            os.path.join(config.data_dir, "qm9"))
        if config.subsample_trainset != 1.0:
            datasets.update(
                split_dataset(datasets["train"],
                              {"train": config.subsample_trainset}))
        if config.batch_fit != 0:
            datasets.update(
Code example #14
from eqv_transformer.eqv_attention import EquivariantTransformer
from lie_conv.dynamicsTrainer import HNet
from lie_conv.hamiltonian import HamiltonianDynamics
from lie_conv.lieGroups import T, SE2, SE2_canonical, SO2
from eqv_transformer.dynamics_predictor import DynamicsPredictor

from forge import flags

flags.DEFINE_string("group", "T(2)", "Group to be invariant to.")
flags.DEFINE_integer("dim_hidden", 160,
                     "Dimension of features to use in each layer")
flags.DEFINE_string("activation_function", "swish",
                    "Activation function to use in the network")
flags.DEFINE_boolean(
    "mean_pooling",
    True,
    "Use mean pooling insteave of sum pooling in the invariant layer",
)
flags.DEFINE_integer("num_heads", 8, "Number of attention heads in each layer")
flags.DEFINE_integer("kernel_dim", 16,
                     "Hidden layer size to use in kernel MLPs")
flags.DEFINE_integer("num_layers", 5, "Number of ResNet layers to use")
flags.DEFINE_integer(
    "lift_samples",
    1,
    "Number of coset lift samples to use for non-trivial stabilisers.",
)
flags.DEFINE_integer("model_seed", 0, "Model rng seed")
flags.DEFINE_string("attention_fn", "dot_product",
                    "How to form the attention weights from the 'logits'.")
Code example #15
flags.DEFINE_integer("n_test", 2000, "Number of testing datapoints.")
flags.DEFINE_integer("n_val", 2000, "Number of validation datapoints.")
flags.DEFINE_integer("n_systems", 10000, "Size of total dataset generated.")
flags.DEFINE_string(
    "data_path",
    "./datasets/ODEDynamics/SpringDynamics/",
    "Dataset is loaded from and/or downloaded to this path.",
)
flags.DEFINE_integer("sys_dim", 2, "[add description].")
flags.DEFINE_integer("space_dim", 2, "Dimension of particle system.")
flags.DEFINE_integer("data_seed", 0, "Data splits random seed.")
flags.DEFINE_integer("num_particles", 6, "Number of particles in system.")
flags.DEFINE_integer("chunk_len", 5, "Length of trajectories.")
flags.DEFINE_boolean(
    "load_preprocessed",
    False,
    "Load data already preprocessed to avoid RAM memory spike. Ensure data exists first for the chunk_lun required.",
)


def load(config):

    dataset = SpringDynamics(
        n_systems=config.n_systems,
        root_dir=config.data_path,
        space_dim=config.space_dim,
        num_particles=config.num_particles,
        chunk_len=config.chunk_len,
        load_preprocessed=config.load_preprocessed,
    )
Code example #16
from torch.distributions.normal import Normal

from forge import flags

import modules.blocks as B
import modules.seq_att as seq_att
import modules.decoders as decoders
from modules.component_vae import ComponentVAE

from third_party.sylvester.VAE import VAE

import utils.misc as misc


# Model type
flags.DEFINE_boolean('two_stage', True, 'Use two stages if True, else only one.')
# Priors
flags.DEFINE_boolean('autoreg_prior', True, 'Autoregressive prior.')
flags.DEFINE_boolean('comp_prior', True, 'Component prior.')
# Attention VAE
flags.DEFINE_integer('attention_latents', 64, 'Latent dimension.')
flags.DEFINE_string('enc_norm', 'bn', '{bn, in} - norm type in encoder.')
flags.DEFINE_string('dec_norm', 'bn', '{bn, in} - norm type in decoder.')
# Component VAE
flags.DEFINE_integer('comp_enc_channels', 32, 'Starting number of channels.')
flags.DEFINE_integer('comp_ldim', 16, 'Latent dimension of the VAE.')
flags.DEFINE_integer('comp_dec_channels', 32, 'Num channels in Broadcast Decoder.')
flags.DEFINE_integer('comp_dec_layers', 4, 'Num layers in Broadcast Decoder.')
# Losses
flags.DEFINE_boolean('pixel_bound', True, 'Bound pixel values to [0, 1].')
flags.DEFINE_float('pixel_std1', 0.7, 'StdDev of reconstructed pixels.')
Code example #17
from eqv_transformer.attention import SetTransformer

from forge import flags

flags.DEFINE_integer("input_dim", 2, "Dimensionality of the input.")
flags.DEFINE_integer("n_outputs", 4, "Number of output vectors.")
flags.DEFINE_integer("output_dim", 3, "Dimensionality of the output.")
flags.DEFINE_integer("n_enc_layers", 4, "Number of encoder layers.")
flags.DEFINE_integer("n_dec_layers", 4, "Number of encoder layers.")
flags.DEFINE_integer("num_heads", 4, "Number of attention heads.")
flags.DEFINE_integer(
    "n_inducing_points",
    0,
    "Number of inducing points; does not use inducing points if 0.",
)
flags.DEFINE_boolean("layer_norm", False, "Uses layer-norm if True.")


def load(config, **unused_kwargs):
    del unused_kwargs

    encoder = SetTransformer(
        config.input_dim,
        config.n_outputs,
        config.output_dim,
        n_enc_layers=config.n_enc_layers,
        n_dec_layers=config.n_dec_layers,
        num_heads=config.num_heads,
        num_inducing_points=config.n_inducing_points,
        ln=config.layer_norm,
    )
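
A hedged usage sketch for a model config like this one: forge parses the flags defined above into a config object (the forge.config() call appears verbatim in code example #22), which is then handed to load(). Assuming the truncated load() ends by returning the constructed encoder:

import forge

config = forge.config()   # parse the flags defined above from the command line
encoder = load(config)    # build the SetTransformer with the parsed settings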
Code example #18
import os

import numpy as np
from PIL import Image

from forge import flags
from forge.experiment_tools import fprint

from utils.misc import loader_throughput, np_img_centre_crop

from third_party.shapestacks.shapestacks_provider import _get_filenames_with_labels
from third_party.shapestacks.segmentation_utils import load_segmap_as_matrix

flags.DEFINE_string('data_folder', 'data/shapestacks', 'Path to data folder.')
flags.DEFINE_string('split_name', 'default', '{default, blocks_all, css_all}')
flags.DEFINE_integer('img_size', 64, 'Dimension of images. Images are square.')
flags.DEFINE_boolean('shuffle_test', False, 'Shuffle test set.')

flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_boolean('load_instances', False, 'Load instances.')
flags.DEFINE_boolean('copy_to_tmp', False, 'Copy files to /tmp.')

flags.DEFINE_integer('K_steps', 9, 'Number of recurrent steps.')

MAX_SHAPES = 6
CENTRE_CROP = 196


def load(cfg, **unused_kwargs):
    del unused_kwargs
    if not os.path.exists(cfg.data_folder):
        raise Exception("Data folder does not exist.")
Code example #19
import os

import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torch.nn.functional as F

import numpy as np

from forge import flags

from utils.misc import loader_throughput

flags.DEFINE_string('data_folder', 'data/multi_dsprites/processed',
                    'Path to data folder.')
flags.DEFINE_boolean('unique_colours', False, 'Dataset with unique colours.')
flags.DEFINE_boolean('load_instances', True, 'Load instances.')
flags.DEFINE_integer('img_size', 64, 'Dimension of images. Images are square.')

flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_boolean('mem_map', False, 'Use memory mapping.')

flags.DEFINE_integer('K_steps', 5, 'Number of recurrent steps.')


def load(cfg, **unused_kwargs):
    """
    Args:
        cfg (obj): Forge config
    Returns:
        (DataLoader, DataLoader, DataLoader):
Code example #20
from attrdict import AttrDict

import torch
import torch.nn as nn
from torch.distributions.normal import Normal

from forge import flags

from modules.blocks import Flatten
from modules.decoders import BroadcastDecoder
from third_party.sylvester.VAE import VAE

# GatedConvVAE
flags.DEFINE_integer('latent_dimension', 64, 'Latent channels.')
flags.DEFINE_boolean('broadcast_decoder', False,
                     'Use broadcast decoder instead of deconv.')
# Losses
flags.DEFINE_boolean('pixel_bound', True, 'Bound pixel values to [0, 1].')
flags.DEFINE_float('pixel_std', 0.7, 'StdDev of reconstructed pixels.')


def load(cfg):
    return BaselineVAE(cfg)


class BaselineVAE(nn.Module):
    def __init__(self, cfg):
        super(BaselineVAE, self).__init__()
        cfg.K_steps = None
        # Configuration
        self.ldim = cfg.latent_dimension
Code example #21
from eqv_transformer.molecule_predictor import MoleculePredictor
from lie_conv.lieGroups import SE3, SO3, T, Trivial

from forge import flags

flags.DEFINE_bool(
    "data_augmentation",
    False,
    "Apply data augmentation to the data before passing to the model",
)
flags.DEFINE_integer(
    "nbhd_size", 25, "The number of samples to use for Monte Carlo estimation")
flags.DEFINE_string("activation_function", "swish",
                    "Activation function to use in the network")
flags.DEFINE_boolean("batch_norm", True, "Use batch norm in the layers")
flags.DEFINE_bool(
    "mean_pooling",
    True,
    "Use mean pooling insteave of sum pooling in the invariant layer",
)
flags.DEFINE_integer("num_layers", 6, "Number of ResNet layers to use")
flags.DEFINE_string("group", "SE3", "Group to be invariant to")
flags.DEFINE_integer("channels", 1536, "Number of channels in the conv layers")
flags.DEFINE_float(
    "fill",
    1.0,
    "specifies the fraction of the input which is included in local neighborhood. (can be array to specify a different value for each layer",
)
flags.DEFINE_integer(
    "lift_samples", 4,
Code example #22
from os import path as osp

import torch.optim as optim

import forge
from forge import flags
import forge.experiment_tools as fet

# Job config
flags.DEFINE_string('data_config', 'configs/mnist_data.py',
                    'Path to a data config file.')
flags.DEFINE_string('model_config', 'configs/mnist_mlp.py',
                    'Path to a model config file.')
flags.DEFINE_string('results_dir', 'checkpoints',
                    'Top directory for all experimental results.')
flags.DEFINE_string('run_name', 'mnist',
                    'Name of this job and name of results folder.')
flags.DEFINE_boolean('resume', False, 'Tries to resume a job if True.')

# Logging config
flags.DEFINE_integer('report_loss_every', 100,
                     'Number of iterations between reporting minibatch loss.')
flags.DEFINE_integer('train_epochs', 20, 'Maximum number of training epochs.')

# Experiment config
flags.DEFINE_integer('batch_size', 32, 'Mini-batch size.')
flags.DEFINE_float('learning_rate', 1e-5, 'SGD learning rate.')

# Parse flags
config = forge.config()

# Prepare environment
logdir = osp.join(config.results_dir, config.run_name)
Code example #23
# Configuration files to load
flags.DEFINE_string(
    "data_config",
    "configs/constellation/constellation.py",
    "Path to a data config file.",
)
flags.DEFINE_string(
    "model_config",
    "configs/constellation/eqv_transformer_model.py",
    "Path to a model config file.",
)
# Job management
flags.DEFINE_string("run_name", "main",
                    "Name of this job and name of results folder.")
flags.DEFINE_boolean("resume", False, "Tries to resume a job if True.")

# Logging
flags.DEFINE_integer("report_loss_every", 500,
                     "Number of iterations between reporting minibatch loss.")
flags.DEFINE_integer(
    "evaluate_every", 10000,
    "Number of iterations between reporting validation loss.")
flags.DEFINE_integer(
    "save_check_points",
    50,
    "frequency with which to save checkpoints, in number of epoches.",
)
flags.DEFINE_boolean("log_train_values", True, "Logs train values if True.")
flags.DEFINE_float(
    "ema_alpha", 0.99,