# ______
# IMPORT:
from utils import load_yaml_config, Config

# ______
# CONFIG:
config = Config()

# Coherence-test settings, applied in order via setattr (values unchanged).
for _name, _value in {
    # OCEAN personality traits on which to run the coherence test:
    # O:0, C:1, E:2, A:3, N:4.
    "ocean_traits": [0, 1, 2, 3, 4],
    # Distances at which to run the coherence test.
    "distances": [0, 4],
    # Cap on unknown neighbours returned when distance > 0;
    # None or 0 means "return every neighbour at the selected distance".
    "max_neigs": 12500,
    # Training batch size for the fnn models.
    "batch_size": 32,
    # [start, stop] range of epochs after which M1 training halts and a new
    # model M2 is trained: M1 stops after epochs[0] + n*epochs_interval for
    # every n > 0 with epochs[0] + n*epochs_interval <= epochs[1].
    "epochs": [50, 300],
    # M2's training lasts this many epochs.
    "epochs_train2": 300,
    "epochs_interval": 50,
    # M2's training pauses every this-many epochs to evaluate performance;
    # M1 only pauses for evaluation when test1 is True.
    "epochs_interval_evaluation": 1,
    # Number of K-fold cross-validation folds.
    "folds_number": 10,
    # Embedding to use; a directory with this name must exist in the data
    # folder.
    "embedding_name": "tuned_embedding",
    "test1": False,
}.items():
    setattr(config, _name, _value)
# Exemplo n.º 2 — scraping artifact: an example separator (and a stray "0")
# left over from the source this file was copied from. Commented out so the
# file remains valid Python; an unrelated second config script follows below.
# post sampling related flags
# NOTE(review): `flags` is not imported anywhere in this file — presumably
# the absl/TensorFlow flags module from the original script this snippet was
# taken from; confirm the import exists upstream before running.
flags.DEFINE_boolean("post", False, "True for post sampling [False]")

# create flag object
# FLAGS resolves the defined flags (values are read from the command line).
FLAGS = flags.FLAGS

# merge flags and fixed configs into config, which gets passed to the
# StratGAN object
config = Config()

# Settings are applied in order via setattr; values that come from the
# command line are read off FLAGS, the rest are fixed constants.
for _key, _val in {
    # training data sources
    "image_dir": os.path.join(os.pardir, 'data', FLAGS.image_dir),
    "image_ext": '*.png',
    "img_verbose": True,
    # model configuration
    "batch_size": FLAGS.batch_size,
    "z_dim": 100,                 # generator input (noise) size
    "c_dim": 1,
    "gf_dim": FLAGS.gf_dim,       # generator conv filter count
    "df_dim": FLAGS.df_dim,       # discriminator conv filter count
    "gfc_dim": 1024,              # generator fully-connected layer units
    "dfc_dim": 1024,              # discriminator fully-connected layer units
    "alpha": 0.1,                 # leaky ReLU slope
    "batch_norm": True,
    "minibatch_discrim": True,
    # training hyperparameters
    "epoch": FLAGS.epoch,
    "learning_rate": FLAGS.learning_rate,  # optimizer learning rate
    "beta1": FLAGS.beta1,                  # momentum term
    "repeat_data": True,
}.items():
    setattr(config, _key, _val)