Example 1
from forge import flags


def main_flags():
    # Data & model config
    flags.DEFINE_string('data_config', 'datasets/gqn_config.py',
                        'Path to a data config file.')
    flags.DEFINE_string('model_config', 'models/genesis_config.py',
                        'Path to a model config file.')
    # Trained model
    flags.DEFINE_string('model_dir', 'checkpoints/test/1',
                        'Path to model directory.')
    flags.DEFINE_string('model_file', 'model.ckpt-FINAL', 'Name of model file.')
    # FID
    flags.DEFINE_integer('feat_dim', 2048, 'Number of Inception features.')
    flags.DEFINE_integer('num_fid_images', 10000,
                         'Number of images to compute the FID on.')
    # Other
    flags.DEFINE_string('img_dir', '/tmp', 'Directory for saving pngs.')
    flags.DEFINE_integer('batch_size', 10, 'Mini-batch size.')
    flags.DEFINE_boolean('gpu', True, 'Use GPU if available.')
    flags.DEFINE_integer('seed', 0, 'Seed for random number generators.')
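
A plausible call site for main_flags(), assuming the surrounding script follows the forge pattern of the later examples (main() and the call order are illustrative, not taken from the source file):

import forge  # in addition to the flags import above

def main():
    main_flags()             # register the flags defined above
    config = forge.config()  # parse them, including command-line overrides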
Example 2
from forge import flags


def main_flags():
    # Data & model config
    flags.DEFINE_string('data_config', 'datasets/multid_config.py',
                        'Path to a data config file.')
    flags.DEFINE_string('model_config', 'models/genesis_config.py',
                        'Path to a model config file.')
    # Logging config
    flags.DEFINE_string('results_dir', 'checkpoints',
                        'Top directory for all experimental results.')
    flags.DEFINE_string('run_name', 'test',
                        'Name of this job and name of results folder.')
    flags.DEFINE_integer(
        'report_loss_every', 1000,
        'Number of iterations between reporting minibatch loss.')
    flags.DEFINE_integer('run_validation_every', 10000,
                         'Number of iterations between validation runs.')
    flags.DEFINE_integer('num_checkpoints', 40,
                         'How many equally spaced model checkpoints to save.')
    flags.DEFINE_boolean('resume', False, 'Tries to resume a job if True.')
    flags.DEFINE_boolean(
        'log_grads_and_weights', False,
        'Log gradient and weight histograms - storage intensive!')
    flags.DEFINE_boolean(
        'log_distributions', False,
        'Log mu and sigma of posterior and prior distributions.')
    # Optimisation config
    flags.DEFINE_integer('train_iter', 2000000,
                         'Number of training iterations.')
    flags.DEFINE_integer('batch_size', 32, 'Mini-batch size.')
    flags.DEFINE_string('optimiser', 'adam', 'Optimiser for updating weights.')
    flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.')
    flags.DEFINE_integer('N_eval', 10000,
                         'Number of samples to run evaluation on.')
    # Loss config
    flags.DEFINE_float('beta', 0.5, 'KL weighting.')
    flags.DEFINE_boolean('beta_warmup', True, 'Warm up beta.')
    flags.DEFINE_boolean('geco', True, 'Use GECO objective.')
    flags.DEFINE_float('g_goal', 0.5655, 'GECO recon goal.')
    flags.DEFINE_float('g_lr', 1e-5, 'GECO learning rate.')
    flags.DEFINE_float('g_alpha', 0.99, 'GECO momentum for error.')
    flags.DEFINE_float('g_init', 1.0, 'GECO initial Lagrange factor.')
    flags.DEFINE_float('g_min', 1e-10, 'GECO min Lagrange factor.')
    flags.DEFINE_float('g_speedup', 10., 'Scale GECO lr if delta positive.')
    # Other
    flags.DEFINE_boolean('gpu', True, 'Use GPU if available.')
    flags.DEFINE_boolean('multi_gpu', False, 'Use multiple GPUs if available.')
    flags.DEFINE_boolean('debug', False, 'Debug flag.')
    flags.DEFINE_integer('seed', 0, 'Seed for random number generators.')
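
The GECO flags above parameterise a constrained-optimisation update of the reconstruction/KL trade-off. Below is a minimal sketch of one update step, assuming the standard GECO moving-average rule; the function and variable names are hypothetical, not taken from the training code:

import math

def geco_update(beta, err_ema, recon_err, cfg):
    # Constraint C = reconstruction error minus the target (g_goal)
    constraint = recon_err - cfg.g_goal
    # Exponential moving average of the constraint (g_alpha)
    err_ema = cfg.g_alpha * err_ema + (1.0 - cfg.g_alpha) * constraint
    # Speed up the multiplier update while the constraint is violated (g_speedup)
    lr = cfg.g_lr * (cfg.g_speedup if err_ema > 0 else 1.0)
    # Multiplicative update keeps the factor positive; floor at g_min
    beta = max(beta * math.exp(lr * err_ema), cfg.g_min)
    return beta, err_ema

Here beta would start at g_init; exactly how it enters the loss depends on the training code.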
Example 3
import modules.seq_att as seq_att
import modules.decoders as decoders
from modules.component_vae import ComponentVAE

from third_party.sylvester.VAE import VAE

import utils.misc as misc

from forge import flags


# Model type
flags.DEFINE_boolean('two_stage', True, 'Use two stages if True, else only one.')
# Priors
flags.DEFINE_boolean('autoreg_prior', True, 'Autoregressive prior.')
flags.DEFINE_boolean('comp_prior', True, 'Component prior.')
# Attention VAE
flags.DEFINE_integer('attention_latents', 64, 'Latent dimension.')
flags.DEFINE_string('enc_norm', 'bn', '{bn, in} - norm type in encoder.')
flags.DEFINE_string('dec_norm', 'bn', '{bn, in} - norm type in decoder.')
# Component VAE
flags.DEFINE_integer('comp_enc_channels', 32, 'Starting number of channels.')
flags.DEFINE_integer('comp_ldim', 16, 'Latent dimension of the VAE.')
flags.DEFINE_integer('comp_dec_channels', 32, 'Num channels in Broadcast Decoder.')
flags.DEFINE_integer('comp_dec_layers', 4, 'Num layers in Broadcast Decoder.')
# Losses
flags.DEFINE_boolean('pixel_bound', True, 'Bound pixel values to [0, 1].')
flags.DEFINE_float('pixel_std1', 0.7, 'StdDev of reconstructed pixels.')
flags.DEFINE_float('pixel_std2', 0.7, 'StdDev of reconstructed pixels.')
flags.DEFINE_boolean('montecarlo_kl', True, 'Evaluate KL via MC samples.')


def load(cfg):
Example 4
import torch

from lie_conv.dynamicsTrainer import FC, HFC
from lie_conv.graphnets import OGN, HOGN
from eqv_transformer.dynamics_predictor import DynamicsPredictor

from forge import flags

flags.DEFINE_integer("channel_width", 256, "Channel width for the network.")
flags.DEFINE_integer("num_layers", 4, "Number of layers.")
flags.DEFINE_integer("model_seed", 0, "Model rng seed")

flags.DEFINE_string(
    "network_type",
    "FC",
    "One of FC, HFC, OGN, HOGN.",
)


def load(config, **unused_kwargs):

    print(f"Using network: {config.network_type}.")

    torch.manual_seed(config.model_seed)
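    # eval() maps the network_type flag string to one of the classes imported
    # above (FC, HFC, OGN, HOGN), which is then instantiated.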
    network = (eval(config.network_type))(
        sys_dim=config.sys_dim,
        d=config.space_dim,
        k=config.channel_width,
        num_layers=config.num_layers,
    )
Example 5
import os

import torch
from torch.utils.data import DataLoader
from oil.utils.utils import FixedNumpySeed, islice
from oil.datasetup.datasets import split_dataset
from lie_conv.datasets import SpringDynamics

from forge import flags

flags.DEFINE_integer("n_train", 3000, "Number of training datapoints.")
flags.DEFINE_integer("n_test", 2000, "Number of testing datapoints.")
flags.DEFINE_integer("n_val", 2000, "Number of validation datapoints.")
flags.DEFINE_integer("n_systems", 10000, "Size of total dataset generated.")
flags.DEFINE_string(
    "data_path",
    "./datasets/ODEDynamics/SpringDynamics/",
    "Dataset is loaded from and/or downloaded to this path.",
)
flags.DEFINE_integer("sys_dim", 2, "[add description].")
flags.DEFINE_integer("space_dim", 2, "Dimension of particle system.")
flags.DEFINE_integer("data_seed", 0, "Data splits random seed.")
flags.DEFINE_integer("num_particles", 6, "Number of particles in system.")
flags.DEFINE_integer("chunk_len", 5, "Length of trajectories.")
flags.DEFINE_boolean(
    "load_preprocessed",
    False,
    "Load data already preprocessed to avoid RAM memory spike. Ensure data exists first for the chunk_lun required.",
)
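
The load() function consuming these flags is cut off by this excerpt. A rough sketch of the split step only, reusing the FixedNumpySeed/split_dataset pattern from the QM9 config later in this collection; the SpringDynamics call is hypothetical and its real signature may differ, and batch_size would come from the training config's flags:

def load(config, **unused_kwargs):
    dataset = SpringDynamics(root_dir=config.data_path)  # hypothetical signature
    splits = {'train': config.n_train, 'val': config.n_val, 'test': config.n_test}
    with FixedNumpySeed(config.data_seed):
        datasets = split_dataset(dataset, splits)
    return {name: DataLoader(ds, batch_size=config.batch_size,
                             shuffle=(name == 'train'))
            for name, ds in datasets.items()}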

Example 6
import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
from torch.distributions.kl import kl_divergence

import numpy as np

from forge import flags

from modules.unet import UNet
import modules.seq_att as seq_att
from modules.component_vae import ComponentVAE
from models.genesis_config import Genesis
from utils import misc

# Attention network
flags.DEFINE_integer('filter_start', 32,
                     'Starting number of channels in UNet.')
flags.DEFINE_string('prior_mode', 'softmax', '{scope, softmax}')


def load(cfg):
    return MONet(cfg)


class MONet(nn.Module):
    def __init__(self, cfg):
        super(MONet, self).__init__()
        # Configuration
        self.K_steps = cfg.K_steps
        self.prior_mode = cfg.prior_mode
        self.mckl = cfg.montecarlo_kl
        self.debug = cfg.debug
Example 7
# =========================== A2I Copyright Header ===========================

from attrdict import AttrDict

import torch
import torch.nn as nn
from torch.distributions.normal import Normal

from forge import flags

from modules.blocks import Flatten
from modules.decoders import BroadcastDecoder
from third_party.sylvester.VAE import VAE

# GatedConvVAE
flags.DEFINE_integer('latent_dimension', 64, 'Latent channels.')
flags.DEFINE_boolean('broadcast_decoder', False,
                     'Use broadcast decoder instead of deconv.')
# Losses
flags.DEFINE_boolean('pixel_bound', True, 'Bound pixel values to [0, 1].')
flags.DEFINE_float('pixel_std', 0.7, 'StdDev of reconstructed pixels.')


def load(cfg):
    return BaselineVAE(cfg)


class BaselineVAE(nn.Module):
    def __init__(self, cfg):
        super(BaselineVAE, self).__init__()
        cfg.K_steps = None
Example 8
import os

from torchvision import transforms

import numpy as np
from PIL import Image

from forge import flags
from forge.experiment_tools import fprint

from utils.misc import loader_throughput

from third_party.shapestacks.shapestacks_provider import _get_filenames_with_labels


flags.DEFINE_string('data_folder', 'data/shapestacks', 'Path to data folder.')
flags.DEFINE_string('split_name', 'default', '{default, blocks_all, css_all}')
flags.DEFINE_integer('img_size', 64, 'Dimension of images. Images are square.')
flags.DEFINE_boolean('shuffle_test', False, 'Shuffle test set.')

flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_boolean('copy_to_tmp', False, 'Copy files to /tmp.')

flags.DEFINE_integer('K_steps', 9, 'Number of recurrent steps.')


MAX_SHAPES = 6
CENTRE_CROP = 196


def load(cfg, **unused_kwargs):
    del unused_kwargs
    if not os.path.exists(cfg.data_folder):
Example 9
from attrdict import AttrDict

import torch
from torch import nn
import torch.nn.functional as F

from lie_conv.datasets import SE3aug

from eqv_transformer.attention import SetTransformer
from eqv_transformer.molecule_predictor import MoleculePredictor

from forge import flags

flags.DEFINE_boolean("data_augmentation", False,
                     "Apply data augmentation to the input data or not")
flags.DEFINE_integer("n_enc_layers", 4, "Number of encoder layers.")
flags.DEFINE_integer("n_dec_layers", 4, "Number of encoder layers.")
flags.DEFINE_integer("num_heads", 4, "Number of attention heads.")
flags.DEFINE_integer(
    "n_inducing_points",
    0,
    "Number of inducing points; does not use inducing points if 0.",
)
flags.DEFINE_boolean("layer_norm", False, "Uses layer-norm if True.")
flags.DEFINE_integer("hidden_dim", 128, "Hidden dimension between layers")


class MolecueSetTransformer(SetTransformer):
    def __init__(self, num_species, charge_scale, aug=False, **kwargs):
        super().__init__(dim_input=3 + 3 * num_species,
                         num_outputs=1,
Example 10
# modification, is not permitted without an explicit licensing agreement
# (research or commercial). No warranty, explicit or implicit, provided.
#
# =========================== A2I Copyright Header ===========================

import os
from pathlib import Path

import torch
import torch.nn.functional as F
from forge import flags
from forge.experiment_tools import fprint
from torchvision.io import read_video
from utils.misc import loader_throughput

flags.DEFINE_integer("img_size", 64, "Dimension of images. Images are square.")
flags.DEFINE_integer(
    "val_frac", 60, "Fraction of training images to use for validation."
)

flags.DEFINE_integer("num_workers", 4, "TF records dataset.")
flags.DEFINE_integer("buffer_size", 128, "TF records dataset.")

flags.DEFINE_integer("K_steps", 7, "Number of recurrent steps.")


SEED = 0


def load(cfg, **unused_kwargs):
    # Fix TensorFlow seed
Example 11
import torch

from lie_conv.moleculeTrainer import MolecLieResNet

from eqv_transformer.molecule_predictor import MoleculePredictor
from lie_conv.lieGroups import SE3, SO3, T, Trivial

from forge import flags

flags.DEFINE_bool(
    "data_augmentation",
    False,
    "Apply data augmentation to the data before passing to the model",
)
flags.DEFINE_integer(
    "nbhd_size", 25, "The number of samples to use for Monte Carlo estimation")
flags.DEFINE_string("activation_function", "swish",
                    "Activation function to use in the network")
flags.DEFINE_boolean("batch_norm", True, "Use batch norm in the layers")
flags.DEFINE_bool(
    "mean_pooling",
    True,
    "Use mean pooling insteave of sum pooling in the invariant layer",
)
flags.DEFINE_integer("num_layers", 6, "Number of ResNet layers to use")
flags.DEFINE_string("group", "SE3", "Group to be invariant to")
flags.DEFINE_integer("channels", 1536, "Number of channels in the conv layers")
flags.DEFINE_float(
    "fill",
    1.0,
    "specifies the fraction of the input which is included in local neighborhood. (can be array to specify a different value for each layer",
Example 12
import os.path as osp

import numpy as np
import torch
from attrdict import AttrDict

import forge
from forge import flags
import forge.experiment_tools as fet
from forge.experiment_tools import fprint

from utils.plotting import plot

# Data & model config
flags.DEFINE_string('data_config', 'datasets/gqn_config.py',
                    'Path to a data config file.')
flags.DEFINE_string('model_config', 'models/genesis_config.py',
                    'Path to a model config file.')
# Trained model
flags.DEFINE_string('model_dir', 'checkpoints/test/1',
                    'Path to model directory.')
flags.DEFINE_string('model_file', 'model.ckpt-FINAL', 'Name of model file.')
# Other
flags.DEFINE_integer('num_images', 10, 'Number of images to visualize.')


def main():
    # Parse flags
    config = forge.config()
    fet.print_flags()
    # Restore flags of pretrained model
    flag_path = osp.join(config.model_dir, 'flags.json')
    fprint(f"Restoring flags from {flag_path}")
    pretrained_flags = AttrDict(fet.json_load(flag_path))
    pretrained_flags.debug = True

    # Fix seeds. Always first thing to be done after parsing the config!
    torch.manual_seed(0)
    np.random.seed(0)
Example 13
from attrdict import AttrDict

import torch
from torch import nn
import torch.nn.functional as F
from eqv_transformer.classfier import Classifier
from eqv_transformer.attention import SetTransformer

from forge import flags

flags.DEFINE_integer("input_dim", 2, "Dimensionality of the input.")
flags.DEFINE_integer("n_outputs", 4, "Number of output vectors.")
flags.DEFINE_integer("output_dim", 3, "Dimensionality of the output.")
flags.DEFINE_integer("n_enc_layers", 4, "Number of encoder layers.")
flags.DEFINE_integer("n_dec_layers", 4, "Number of encoder layers.")
flags.DEFINE_integer("num_heads", 4, "Number of attention heads.")
flags.DEFINE_integer(
    "n_inducing_points",
    0,
    "Number of inducing points; does not use inducing points if 0.",
)
flags.DEFINE_boolean("layer_norm", False, "Uses layer-norm if True.")


def load(config, **unused_kwargs):
    del unused_kwargs

    encoder = SetTransformer(
        config.input_dim,
        config.n_outputs,
        config.output_dim,
Example 14
)
flags.DEFINE_string(
    "model_config",
    "configs/dynamics/eqv_transformer_model.py",
    "Path to a model config file.",
)
# Job management
flags.DEFINE_string(
    "run_name",
    "test",
    "Name of this job and name of results folder.",
)
flags.DEFINE_boolean("resume", False, "Tries to resume a job if True.")

# Logging
flags.DEFINE_integer("report_loss_every", 10,
                     "Number of iterations between reporting minibatch loss.")
flags.DEFINE_integer(
    "evaluate_every", 10000,
    "Number of iterations between reporting validation loss.")
flags.DEFINE_integer(
    "save_check_points",
    50,
    "frequency with which to save checkpoints, in number of epoches.",
)
flags.DEFINE_boolean("log_train_values", True, "Logs train values if True.")

# Optimization
flags.DEFINE_integer("train_epochs", 200, "Maximum number of training epochs.")
flags.DEFINE_integer("batch_size", 100, "Mini-batch size.")
flags.DEFINE_float("learning_rate", 1e-3, "Adam learning rate.")
flags.DEFINE_float("beta1", 0.9, "Adam Beta 1 parameter")
Example 15
import os

import forge
from forge import flags
from oil.utils.utils import FixedNumpySeed
from oil.datasetup.datasets import split_dataset
from lie_conv.datasets import QM9datasets

flags.DEFINE_float(
    "subsample_trainset",
    1.0,
    "Proportion or number of samples of the full trainset to use",
)
flags.DEFINE_string(
    "task",
    "h**o",
    "Which task in the QM9 dataset to train on. Pass as a comma separated string",
)
flags.DEFINE_boolean("recenter", False,
                     "Recenter the positions of atoms with charge > 0")
flags.DEFINE_integer("batch_fit", 0, "number of samples to fit to")
flags.DEFINE_integer("data_seed", 0, "seed to pick data with")


def load(config, **unused_kwargs):

    with FixedNumpySeed(config.data_seed):
        datasets, num_species, charge_scale = QM9datasets(
            os.path.join(config.data_dir, "qm9"))
        if config.subsample_trainset != 1.0:
            datasets.update(
                split_dataset(datasets["train"],
                              {"train": config.subsample_trainset}))
        if config.batch_fit != 0:
            datasets.update(
                split_dataset(datasets["train"], {"train": config.batch_fit}))
Example 16
import random

import numpy as np
import torch

import forge
from forge import flags
import forge.experiment_tools as fet
from forge.experiment_tools import fprint

from utils.misc import average_ari, average_segcover

# Config
flags.DEFINE_string('data_config', 'datasets/shapestacks_config.py',
                    'Path to a data config file.')
flags.DEFINE_string('model_config', 'models/genesis_config.py',
                    'Path to a model config file.')
# Trained model
flags.DEFINE_string('model_dir', 'checkpoints/test/1',
                    'Path to model directory.')
flags.DEFINE_string('model_file', 'model.ckpt-FINAL', 'Name of model file.')
# Other
flags.DEFINE_integer('seed', 0, 'Seed for random number generators.')
flags.DEFINE_integer('num_images', 300, 'Number of images to run on.')
flags.DEFINE_string('split', 'test', '{train, val, test}')

# Set manual seed
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
# Make CUDA operations deterministic
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False


def main():
    # Parse flags
    config = forge.config()
Example 17
import tensorflow as tf

import forge
from forge import flags
import forge.experiment_tools as fet

# job config
flags.DEFINE_string('data_config', 'configs/mnist_data.py', 'Path to a data config file.')
flags.DEFINE_string('model_config', 'configs/mnist_mlp.py', 'Path to a model config file.')
flags.DEFINE_string('results_dir', '../checkpoints', 'Top directory for all experimental results.')
flags.DEFINE_string('run_name', 'test_run', 'Name of this job. Results will be stored in a corresponding folder.')
flags.DEFINE_boolean('resume', False, 'Tries to resume a job if True.')

# logging config
flags.DEFINE_integer('report_loss_every', int(1e3), 'Number of iterations between reporting minibatch loss - heartbeat.')
flags.DEFINE_integer('save_itr', int(1e4), 'Number of iterations between snapshotting the model.')
flags.DEFINE_integer('train_itr', int(2e6), 'Maximum number of training iterations.')

# experiment config
flags.DEFINE_integer('batch_size', 32, 'Mini-batch size.')
flags.DEFINE_float('learning_rate', 1e-5, 'Initial value of the learning rate.')

# gpu
flags.DEFINE_string('gpu', '0', 'ID of the GPU to use for this job.')

# Parse flags
config = forge.config()

# sets visible gpus to config.gpu
fet.set_gpu(config.gpu)
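
# Flag values can be overridden when launching the script, e.g. a
# hypothetical invocation: python train.py --batch_size=64 --gpu=1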
Example 18
from eqv_transformer.classfier import Classifier
from eqv_transformer.eqv_attention import EquivariantTransformer
from lie_conv.lieGroups import SE3, SE2, SO3, T, Trivial

# from lie_conv.datasets import SE3aug

from forge import flags


flags.DEFINE_boolean(
    "data_augmentation",
    False,
    "Apply data augmentation to the data before passing to the model",
)
flags.DEFINE_integer("dim_hidden", 512, "Dimension of features to use in each layer")
flags.DEFINE_string(
    "activation_function", "swish", "Activation function to use in the network"
)
# flags.DEFINE_boolean("layer_norm", True, "Use layer norm in the layers")
flags.DEFINE_boolean(
    "mean_pooling",
    True,
    "Use mean pooling insteave of sum pooling in the invariant layer",
)
flags.DEFINE_integer("num_heads", 8, "Number of attention heads in each layer")
flags.DEFINE_integer("kernel_dim", 16, "Hidden layer size to use in kernel MLPs")
# flags.DEFINE_boolean("batch_norm", False, "Use batch norm in the kernel MLPs")
flags.DEFINE_integer("num_layers", 6, "Number of ResNet layers to use")
flags.DEFINE_string("group", "SE2", "Group to be invariant to")
flags.DEFINE_integer(
Example 19
import torch

from eqv_transformer.eqv_attention import EquivariantTransformer
from eqv_transformer.molecule_predictor import MoleculePredictor
from lie_conv.lieGroups import SE3, SO3, T, Trivial
from lie_conv.datasets import SE3aug

from forge import flags

flags.DEFINE_boolean(
    "data_augmentation",
    True,
    "Apply data augmentation to the data before passing to the model",
)
flags.DEFINE_integer("dim_hidden", 512,
                     "Dimension of features to use in each layer")
flags.DEFINE_string("activation_function", "swish",
                    "Activation function to use in the network")
flags.DEFINE_boolean(
    "mean_pooling",
    True,
    "Use mean pooling insteave of sum pooling in the invariant layer",
)
flags.DEFINE_integer("num_heads", 8, "Number of attention heads in each layer")
flags.DEFINE_string(
    "block_norm",
    "layer_pre",
    "Type of norm to use in the attention block. none/[layer/batch]_[pre/post]",
)
flags.DEFINE_string(
    "output_norm",
Example 20
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################################
"""Simple MLP model config for MNIST classification."""
import sonnet as snt
import tensorflow as tf

from forge import flags

flags.DEFINE_integer('n_hidden', 128, 'Number of hidden units.')


def load(config, **inputs):

    imgs, labels = inputs['train_img'], inputs['train_label']

    imgs = snt.BatchFlatten()(imgs)
    mlp = snt.nets.MLP([config.n_hidden, 10])
    logits = mlp(imgs)
    labels = tf.cast(labels, tf.int32)

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                       labels=labels))
Example 21
import torch
import torch.nn.functional as F

import tensorflow as tf

import numpy as np

import third_party.tf_gqn.gqn_tfr_provider as gqn

from forge import flags
from forge.experiment_tools import fprint

from utils.misc import loader_throughput

flags.DEFINE_string('data_folder', 'data/gqn_datasets', 'Path to data folder.')
flags.DEFINE_integer('img_size', 64, 'Dimension of images. Images are square.')
flags.DEFINE_integer('val_frac', 60,
                     'Fraction of training images to use for validation.')

flags.DEFINE_integer('num_workers', 4, 'Number of workers for the TFRecords dataset.')
flags.DEFINE_integer('buffer_size', 128, 'Buffer size for the TFRecords dataset.')

flags.DEFINE_integer('K_steps', 7, 'Number of recurrent steps.')

SEED = 0


def load(cfg, **unused_kwargs):
    # Fix TensorFlow seed
    global SEED
    SEED = cfg.seed
Example 22
# =========================== A2I Copyright Header ===========================

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import NoNorm

import torch

import forge
from forge import flags
import forge.experiment_tools as fet

# Config
flags.DEFINE_string('data_config', 'datasets/multid_config.py',
                    'Path to a data config file.')
flags.DEFINE_integer('batch_size', 8, 'Mini-batch size.')
flags.DEFINE_integer('seed', 0, 'Seed for random number generators.')


def main():
    # Parse flags
    cfg = forge.config()
    cfg.num_workers = 0

    # Set manual seed
    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)
    # Make CUDA operations deterministic
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
Example 23
from attrdict import AttrDict

import torch
from torch import nn
import torch.nn.functional as F
from eqv_transformer.classfier import Classifier
from eqv_transformer.eqv_attention_se2_finite import EqvTransformer

from forge import flags

# flags.DEFINE_integer('input_dim', 2, 'Dimensionality of the input.')
flags.DEFINE_integer('n_outputs', 4, 'Number of output vectors.')
# flags.DEFINE_integer('output_dim', 3, 'Dimensionality of the output.')
flags.DEFINE_string('content_type', 'pairwise_distances',
                    'How to initialize y')
flags.DEFINE_integer('n_enc_layers', 4, 'Number of encoder layers.')
flags.DEFINE_integer('n_dec_layers', 4, 'Number of decoder layers.')
flags.DEFINE_integer('n_heads', 4, 'Number of attention heads.')
flags.DEFINE_boolean('layer_norm', False, 'Uses layer-norm if True.')
flags.DEFINE_integer('cn', 5, 'Size of rotation group.')
flags.DEFINE_string('similarity_fn', 'softmax',
                    'Similarity function used to compute attention weights.')
flags.DEFINE_string('arch', 'set_transf', 'Architecture.')
flags.DEFINE_integer('num_moments', 5,
                     'When using pairwise distances as Y, number of moments.')


def load(config, **unused_kwargs):
    del unused_kwargs

    # should not affect things #### number of moments # config.patterns_reps * 17 - 1
Example 24
from forge import flags
from forge.experiment_tools import fprint

from utils.misc import loader_throughput, len_tfrecords, np_img_centre_crop

import third_party.multi_object_datasets.multi_dsprites as multi_dsprites
import third_party.multi_object_datasets.objects_room as objects_room
import third_party.multi_object_datasets.clevr_with_masks as clevr_with_masks
import third_party.multi_object_datasets.tetrominoes as tetrominoes

flags.DEFINE_string('data_folder', 'data/multi-object-datasets',
                    'Path to data folder.')
flags.DEFINE_string('dataset', 'objects_room',
                    '{multi_dsprites, objects_room, clevr, tetrominoes}')
flags.DEFINE_integer('img_size', -1, 'Dimension of images. Images are square.')
flags.DEFINE_integer('dataset_size', -1, 'Number of images to use.')

flags.DEFINE_integer('num_workers', 4, 'Number of threads for loading data.')
flags.DEFINE_integer('buffer_size', 128, 'TF records dataset.')

flags.DEFINE_integer('K_steps', -1, 'Number of recurrent steps.')

MULTI_DSPRITES = '/multi_dsprites/multi_dsprites_colored_on_colored.tfrecords'
OBJECTS_ROOM = '/objects_room/objects_room_train.tfrecords'
CLEVR = '/clevr_with_masks/clevr_with_masks_train.tfrecords'
TETROMINOS = '/tetrominoes/tetrominoes_train.tfrecords'
CLEVR_CROP = 192  # Following pre-processing in the IODINE paper

SEED = 0
Example 25
# Configuration files to load
flags.DEFINE_string(
    "data_config", "configs/molecule/qm9_data.py", "Path to a data config file."
)
flags.DEFINE_string(
    "model_config",
    "configs/molecule/set_transformer.py",
    "Path to a model config file.",
)
# Job management
flags.DEFINE_string("run_name", "test", "Name of this job and name of results folder.")
flags.DEFINE_boolean("resume", False, "Tries to resume a job if True.")

# Logging
flags.DEFINE_integer(
    "report_loss_every", 500, "Number of iterations between reporting minibatch loss."
)
flags.DEFINE_integer(
    "evaluate_every", 10000, "Number of iterations between reporting validation loss."
)
flags.DEFINE_integer(
    "save_check_points",
    10,
    "frequency with which to save checkpoints, in number of epochs.",
)
flags.DEFINE_boolean("log_train_values", True, "Logs train values if True.")
flags.DEFINE_float(
    "ema_alpha", 0.99, "Alpha coefficient for exponential moving average of train logs."
)

# Optimization
Example 26
import os.path as osp

import forge
from forge import flags
import forge.experiment_tools as fet

# Job config
flags.DEFINE_string('data_config', 'configs/mnist_data.py',
                    'Path to a data config file.')
flags.DEFINE_string('model_config', 'configs/mnist_mlp.py',
                    'Path to a model config file.')
flags.DEFINE_string('results_dir', 'checkpoints',
                    'Top directory for all experimental results.')
flags.DEFINE_string('run_name', 'mnist',
                    'Name of this job and name of results folder.')
flags.DEFINE_boolean('resume', False, 'Tries to resume a job if True.')

# Logging config
flags.DEFINE_integer('report_loss_every', 100,
                     'Number of iterations between reporting minibatch loss.')
flags.DEFINE_integer('train_epochs', 20, 'Maximum number of training epochs.')

# Experiment config
flags.DEFINE_integer('batch_size', 32, 'Mini-batch size.')
flags.DEFINE_float('learning_rate', 1e-5, 'SGD learning rate.')

# Parse flags
config = forge.config()

# Prepare environment
logdir = osp.join(config.results_dir, config.run_name)
logdir, resume_checkpoint = fet.init_checkpoint(logdir, config.data_config,
                                                config.model_config,
                                                config.resume)
checkpoint_name = osp.join(logdir, 'model.ckpt')
Example 27
"""

import collections
import functools
import numpy as np
import tensorflow as tf
import torch
from torchvision import transforms

# import ipdb
from forge import flags
import pickle
import os
import json

flags.DEFINE_integer("train_size", 10000, "Number of training examples per epoch.")
flags.DEFINE_integer("test_size", 1000, "Number of testing examples per epoch.")
flags.DEFINE_integer("naug", 2, "Number of augmentation.")
flags.DEFINE_float("corner_noise", 0.1, "See `create_constellations`.")
flags.DEFINE_boolean("shuffle_corners", True, "See `create_constellations`.")

flags.DEFINE_float("pattern_upscale", 0.0, "See `create_constellations`.")
flags.DEFINE_float("max_rotation", 0.33, "See `create_constellations`.")
flags.DEFINE_float("global_rotation_angle", 0.0, "See `create_constellations`.")
flags.DEFINE_float("global_translation", 0.0, "See `create_constellations`.")
flags.DEFINE_float("pattern_drop_prob", 0.5, "See `create_constellations`.")
flags.DEFINE_integer("patterns_reps", 2, "See `create_constellations`.")
flags.DEFINE_integer("data_seed", 0, "Seed for data generation.")


def roots_of_unity(n):
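    # (Body truncated in this excerpt.) A standard implementation would return
    # the n-th complex roots of unity, e.g. np.exp(2j * np.pi * np.arange(n) / n).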