def setUp(self):
    # Data, logits and placeholders are provided by the concrete subclass
    self.targets = self.get_targets()
    self.logits = self.get_logits()
    self.batch_size = self.targets.shape[0]
    self.num_classes = self.get_num_classes()
    self.logger = get_logger('data')
    self.pl_logits = self.logits_placeholder()
    self.pl_targets = self.targets_placeholder()
def setUp(self):
    self.targets = np.asarray([0, 1, 2, 1])
    self.batch_size = len(self.targets)
    self.num_classes = 3
    self.logits = np.random.random((self.batch_size, self.num_classes))
    self.logger = get_logger('data')
    # Define Tensorflow placeholders
    self.pl_logits = tf.placeholder(tf.float32,
                                    shape=[None, self.num_classes])
    self.pl_targets = tf.placeholder(tf.int32, shape=[None])
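
# A minimal sketch (not part of the original suite) of how these placeholders
# might be exercised; the sparse softmax cross-entropy op below is plain
# TensorFlow and is only an assumed stand-in for the project's loss wrappers.
def test_cross_entropy_sketch(self):
    loss_op = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=self.pl_targets, logits=self.pl_logits))
    with tf.Session() as sess:
        loss = sess.run(loss_op,
                        feed_dict={self.pl_logits: self.logits,
                                   self.pl_targets: self.targets})
    self.assertGreaterEqual(loss, 0.0)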
def setUp(self): """ Prepare data """ # Define data self.K = np.random.randint(2, 6) self.out_size = 1 self.batch_size = np.random.randint(20, 100) self.logits_data = np.random.random( (self.batch_size, self.K * (self.out_size + 2))) self.targets_data = np.asarray( np.random.random((self.batch_size, self.out_size))) # Define Tensorflow placeholders self.pl_logits = tf.placeholder(tf.float32, shape=(self.batch_size, self.K * (self.out_size + 2))) self.pl_targets = tf.placeholder(tf.float32, shape=(self.batch_size, self.out_size)) self.logger = get_logger('data')
""" Test to check that the defined architecture works for a very simple dataset such as Diabetes UCI """ from protodata.datasets.scikit_dataset import DiabetesSettings from protodata.datasets import Datasets from protodata.utils import get_data_location from protodata.data_ops import TrainMode, DataMode from widedeep.model.model_base import LinearModel, MLP from widedeep.model.joint_model import JointRegressor from widedeep.ops.losses import Optimizers, MeanSquared import widedeep.utils as ut import widedeep.ops.metrics as me import tensorflow as tf logger = ut.get_logger('data') flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string("data_location", get_data_location(Datasets.DIABETES), "Where data is stored") flags.DEFINE_integer("batch_size", 32, "Batch size to use.") flags.DEFINE_string("network", ut.NetworkModels.MLP, "Network to use for MLP, if used") flags.DEFINE_integer("summaries", 50, "Steps between summaries.") flags.DEFINE_integer("checkpoints", 100, "Steps between model checkpoints.")
import widedeep.model.model_utils as mu
from widedeep.model.model_base import CNNModel
from widedeep.ops.losses import LossFn, ClassificationLoss, RegressionLoss, \
    MixtureDensityLoss
import widedeep.utils as ut

from protodata.utils import create_dir
from protodata.data_ops import DataMode

import abc
import os
import numpy as np
import scipy.misc
import tensorflow as tf

logger = ut.get_logger('model')


class JointModel(object):

    """ Model that trains multiple models independently by using a joint
    training loss. The output of each model is summed and the loss is
    computed w.r.t. each model independently using separate optimizers.

    Args:
        model_dir: Directory to save model parameters, graph, etc. This can
            also be used to load checkpoints from the directory into an
            estimator to continue training a previously saved model.
        models: List of models.
        outputs: Number of targets to predict.
        loss_fn: Loss function to use for training.
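
# Standalone sketch (not part of JointModel) of the joint scheme described in
# the docstring above: per-model outputs are summed into a single joint
# prediction and each sub-model is updated by its own optimizer restricted to
# its own variables. The model functions, scope names and MSE loss below are
# illustrative assumptions only.
def _joint_training_sketch(x, targets):
    with tf.variable_scope('linear'):
        out_linear = tf.layers.dense(x, 1)
    with tf.variable_scope('mlp'):
        hidden = tf.layers.dense(x, 8, activation=tf.nn.relu)
        out_mlp = tf.layers.dense(hidden, 1)
    # Joint prediction is the sum of the per-model outputs
    joint_output = out_linear + out_mlp
    loss = tf.losses.mean_squared_error(targets, joint_output)
    # One optimizer per sub-model, each limited to that model's variables
    train_linear = tf.train.GradientDescentOptimizer(0.01).minimize(
        loss, var_list=tf.trainable_variables('linear'))
    train_mlp = tf.train.GradientDescentOptimizer(0.01).minimize(
        loss, var_list=tf.trainable_variables('mlp'))
    return joint_output, loss, tf.group(train_linear, train_mlp)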
""" Helper class for creating network layers """ from widedeep.ops.variables import variable, add_variable_summary from widedeep.utils import get_logger from tensorflow.contrib import layers import tensorflow as tf logger = get_logger('data') def conv_layer( inputs, is_training, output_size, kernel_size, stride=1, group=1, padding='SAME', activation_fn=tf.nn.relu, lrn=None, batch_norm=False, pool=None, # max_pool2d, avg_pool2d pool_size=2, pool_stride=3, pool_padding='SAME', summary=True, weights_initializer=layers.xavier_initializer(), biases_initializer=tf.constant_initializer(0.1),
from widedeep.model.model_base import LinearModel, MLP
from widedeep.model.joint_model import JointMDN
from widedeep.ops.distributions import GaussianMixture
from widedeep.ops.losses import Optimizers
import widedeep.utils as utils
from widedeep.ops import metrics

from protodata.datasets.scikit_dataset import DiabetesSettings
from protodata.datasets import Datasets
from protodata.data_ops import TrainMode, DataMode
from protodata.utils import get_data_location

import tensorflow as tf

logger = utils.get_logger('data')

flags = tf.app.flags
FLAGS = flags.FLAGS

flags.DEFINE_string(
    "data_location",
    get_data_location(Datasets.DIABETES),
    "Where data is stored"
)

flags.DEFINE_integer(
    "batch_size",
    64,
""" Helper functions for network training """

from widedeep.utils import get_logger, VariableNotFound, MultipleFound
from widedeep.ops.variables import savable_variables

import os
import re
import time
import numpy as np
from glob import glob
import tensorflow as tf

logger = get_logger('model')


def new_model(session):
    """ Initializes model from scratch and returns global step variable

    Args:
        session: Tensorflow session

    Returns:
        step: Global step variable
    """
    logger.info('Initializing model from scratch ...')
    session.run(tf.global_variables_initializer())
    session.run(tf.local_variables_initializer())
    session.run(tf.initialize_all_tables())
    return get_global_step()


def restore_model(session, saver, path):
    """ Initializes a model that has been previously trained and
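
# Hedged usage sketch (not from the original module) of how new_model might be
# called at the start of a training run; the surrounding session handling is
# an illustrative assumption.
def _new_model_usage_sketch():
    with tf.Session() as session:
        step = new_model(session)
        logger.info('Starting training at step %d', session.run(step))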