Code example #1
def separable_conv_with_pad(x, h_row, h_col, stride=1):
    """ Function to do spatial separable convolution.

    The filter weights must already be defined. It will use symmetric extension
    before convolution.

    Parameters
    ----------
    x : tf variable of shape [batch, height, width, c_in]
        The input variable.
    h_row : tf tensor of shape [1, l, c_in, c_out]
        The spatial row filter
    h_col : tf tensor of shape [l, 1, c_in, c_out]
        The column filter.
    stride : int
        What stride to use on the convolution.

    Returns
    -------
    y : tf variable
        Result of applying convolution to x
    """
    # Do the row filter first:
    if tf.is_numeric_tensor(h_row):
        h_size = h_row.get_shape().as_list()
    else:
        h_size = h_row.shape

    assert h_size[0] == 1
    pad = h_size[1] // 2
    if h_size[1] % 2 == 0:
        y = tf.pad(x, [[0, 0], [0, 0], [pad - 1, pad], [0, 0]], 'SYMMETRIC')
    else:
        y = tf.pad(x, [[0, 0], [0, 0], [pad, pad], [0, 0]], 'SYMMETRIC')
    y = tf.nn.conv2d(y, h_row, strides=[1, stride, stride, 1],
                     padding='VALID')

    # Now do the column filtering
    if tf.is_numeric_tensor(h_col):
        h_size = h_col.get_shape().as_list()
    else:
        h_size = h_col.shape

    assert h_size[1] == 1
    pad = h_size[0] // 2
    if h_size[0] % 2 == 0:
        y = tf.pad(y, [[0, 0], [pad - 1, pad], [0, 0], [0, 0]], 'SYMMETRIC')
    else:
        y = tf.pad(y, [[0, 0], [pad, pad], [0, 0], [0, 0]], 'SYMMETRIC')
    y = tf.nn.conv2d(y, h_col, strides=[1, stride, stride, 1],
                     padding='VALID')

    # Symmetric padding preserves the spatial size (this check assumes stride=1).
    assert x.get_shape().as_list()[1:3] == y.get_shape().as_list()[1:3]

    return y
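
A minimal usage sketch in TF 1.x graph mode; the 5-tap box filter here is illustrative only, and any row/column filter pair of matching shape would do:

import numpy as np
import tensorflow as tf

h = np.ones(5, dtype=np.float32) / 5.0      # hypothetical 5-tap box filter
h_row = tf.constant(h.reshape(1, 5, 1, 1))  # [1, l, c_in, c_out]
h_col = tf.constant(h.reshape(5, 1, 1, 1))  # [l, 1, c_in, c_out]

x = tf.placeholder(tf.float32, [None, 32, 32, 1])
y = separable_conv_with_pad(x, h_row, h_col)  # same spatial size as x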
Code example #2
    def symbolic_step(self, state_t):
        """Takes agent's previous step and observation, returns next state and whatever it needs to learn (tf tensors)"""
        logits, state_value = self.network(state_t)
        state_value = state_value[:, 0]

        assert tf.is_numeric_tensor(state_value) and state_value.shape.ndims == 1, \
            "please return 1D tf tensor of state values [you got %s]" % repr(state_value)
        assert tf.is_numeric_tensor(logits) and logits.shape.ndims == 2, \
            "please return 2d tf tensor of logits [you got %s]" % repr(logits)
        # hint: if you triggered state_values assert with your shape being [None, 1],
        # just select [:, 0]-th element of state values as new state values

        return (logits, state_value)
Code example #3
    def _process_metrics(graph, metrics, real_batch_size):

        outputs = [real_batch_size]
        val_methods = None
        if metrics is not None:
            idx = 1
            val_methods = []
            for metric_name in metrics:
                metric = metrics[metric_name]
                if tf.is_numeric_tensor(metric):
                    outputs.append(metric)
                    val_methods.append(StatelessMetric(metric_name, idx, 0))
                    idx += 1
                else:
                    outputs += metric.outputs
                    with graph.as_default():
                        val_labels = [tf.identity(v) for v in metric.labels]
                    outputs += val_labels
                    method = TFValidationMethod(metric.val_method,
                                                metric_name,
                                                list(range(idx, idx + len(metric.outputs))),
                                                list(range(idx + len(metric.outputs),
                                                           idx + len(metric.outputs)
                                                           + len(val_labels))))
                    val_methods.append(method)
                    idx += len(metric.outputs) + len(val_labels)

        outputs = [tf.to_float(output) for output in outputs]
        return outputs, val_methods
Code example #4
File: utils.py Project: sirCamp/tensorflow-kernels
def tensor_to_array(tensor=None, dtype=np.float32, session=None):
    """
    This method convert a tensor to a numpy array
    :param tensor: tensor
    :param dtype: numpy type to convert the tensor
    :param session: your session of tensorflow
    :return: numpy array of the specified type
    """

    sess = session
    if tensor is None:
        raise ValueError("tensor is None, please put a valid tensor")

    if sess is None:
        sess = tf.Session()

    if tf.is_numeric_tensor(tensor) and dtype == np.object:
        raise ValueError(
            "type mismatch: you have passed a numeric tensor and you're trying to convert it to a string array")

    result = None
    if not tf.executing_eagerly():
        result = np.array(tensor.eval(session=sess), dtype=dtype)
    else:
        result = np.array(tensor, dtype=dtype)

    return result
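
A quick usage sketch, assuming TF 1.x graph mode:

import numpy as np
import tensorflow as tf

t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
with tf.Session() as sess:
    arr = tensor_to_array(t, dtype=np.float32, session=sess)
print(arr.shape)  # (2, 2)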
Code example #5
    def _process_metrics(graph, metrics, loss, inputs):
        import tensorflow as tf
        outputs = []
        val_methods = None
        if metrics is not None:
            idx = 0
            val_methods = []
            for metric_name in metrics:
                metric = metrics[metric_name]
                if tf.is_numeric_tensor(metric):
                    outputs.append(metric)
                    val_methods.append(StatelessMetric(metric_name, idx))
                    idx += 1
                else:
                    outputs += metric.outputs
                    with graph.as_default():
                        val_labels = [tf.identity(v) for v in metric.labels]
                    outputs += val_labels
                    method = TFValidationMethod(
                        metric.val_method, metric_name,
                        list(range(idx, idx + len(metric.outputs))),
                        list(
                            range(idx + len(metric.outputs), idx +
                                  len(metric.outputs) + len(val_labels))))
                    val_methods.append(method)
                    idx += len(metric.outputs) + len(val_labels)
            with graph.as_default():
                real_batch_size = tf.shape(inputs[0])[0]
            outputs.append(real_batch_size)

        with graph.as_default():
            outputs = [tf.to_float(output) for output in outputs]

        outputs.append(loss)
        return outputs, val_methods
Code example #6
File: dqa.py Project: dogi4234/haxball-ai
    def get_symbolic_qvalues(self, state_t):
        """takes agent's observation, returns qvalues. Both are tf Tensors"""
        qvalues = self.model(state_t)

        assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
            "please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
        assert int(qvalues.shape[1]) == self.n_actions

        return qvalues
Code example #7
    def get_symbolic_q_values(self, state_t):
        """ Takes agent's observation, returns Q-values. Both are tf tensors. """
        q_values = self.network(state_t)

        assert tf.is_numeric_tensor(q_values) and q_values.shape.ndims == 2, \
            'Please return 2D tf tensor of Q-values, got %s' % repr(q_values)
        assert int(q_values.shape[1]) == self.n_actions

        return q_values
Code example #8
    def get_symbolic_qvalues(self, state_t):
        """takes agent's observation, returns qvalues. Both are tf Tensors"""
        qvalues = self.network(state_t)

        assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
            "please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
        assert int(qvalues.shape[1]) == n_actions

        return qvalues
Code example #9
File: tf_eval.py Project: lhd231/tcl_mri
def get_tensor(x, vars, sess, data_holder, batch=256):
    """Get tensor data .
    Args:
        x: input data [Ndim, Ndata]
        vars: tensors (list)
        sess: session
        data_holder: data holder
        batch: batch size
    Returns:
        y: value of tensors
    """

    Ndata = x.shape[1]
    if batch is None:
        Nbatch = Ndata
    else:
        Nbatch = batch
    Niter = int(np.ceil(Ndata / Nbatch))

    if not isinstance(vars, list):
        vars = [vars]

    # Convert names to tensors (if necessary) -----------------
    for i in range(len(vars)):
        if not tf.is_numeric_tensor(vars[i]) and isinstance(vars[i], str):
            vars[i] = tf.get_default_graph().get_tensor_by_name(vars[i])

    # Start batch-inputs --------------------------------------
    y = {}
    for iter in range(Niter):

        sys.stdout.write('\r>> Getting tensors... %d/%d' % (iter + 1, Niter))
        sys.stdout.flush()

        # Get batch -------------------------------------------
        batchidx = np.arange(Nbatch * iter,
                             np.minimum(Nbatch * (iter + 1), Ndata))
        xbatch = x[:, batchidx].T

        # Get tensor data -------------------------------------
        feed_dict = {data_holder: xbatch}
        ybatch = sess.run(vars, feed_dict=feed_dict)

        # Storage
        for tn in range(len(ybatch)):
            # Initialize
            if iter == 0:
                y[tn] = np.zeros([Ndata] + list(ybatch[tn].shape[1:]),
                                 dtype=np.float64)
            # Store
            y[tn][batchidx] = ybatch[tn]

    sys.stdout.write('\r\n')

    return y
Code example #10
def is_tf_object(x):
    '''Determine whether x is a Tensorflow object.

    Args:
        x: Any object

    Returns:
        A bool indicating whether x is any type of TF object (e.g.,
        tf.Variable, tf.Tensor, tf.placeholder, or any TF op)
    '''
    return tf.is_numeric_tensor(x) or isinstance(x, tf.Variable)
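
A behavior sketch: tf.is_numeric_tensor is True only for tf.Tensor objects with a numeric dtype, which is why the extra tf.Variable check is needed (note that a string tf.Tensor would still return False here):

import numpy as np
import tensorflow as tf

print(is_tf_object(tf.constant([1.0])))  # True (numeric Tensor)
print(is_tf_object(tf.Variable([1.0])))  # True (Variable branch)
print(is_tf_object(np.array([1.0])))     # False (plain NumPy array)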
Code example #11
File: misc.py Project: mandar5335/encodermap
def pairwise_dist(positions, squared=False, flat=False):
    # thanks to https://omoindrot.github.io/triplet-loss

    with tf.name_scope("pairwise_dist"):
        if not tf.is_numeric_tensor(positions):
            positions = tf.convert_to_tensor(positions)
        if len(positions.get_shape()) == 2:
            positions = tf.expand_dims(positions, 0)

        # Dot product between every pair of points
        # shape (batch, n_points, n_points)
        dot_product = tf.matmul(positions, tf.transpose(positions, [0, 2, 1]))

        # Squared L2 norm of each point: just the diagonal of `dot_product`.
        # This is also more numerically stable (the diagonal of the result will be exactly 0).
        # shape (batch, n_points)
        square_norm = tf.linalg.diag_part(dot_product)

        # Pairwise squared distances:
        # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
        # shape (batch, n_points, n_points)
        distances = tf.expand_dims(square_norm,
                                   1) - 2.0 * dot_product + tf.expand_dims(
                                       square_norm, 2)

        # Because of computation errors, some distances might be negative so we put everything >= 0.0
        distances = tf.maximum(distances, 0.0)

        if flat:
            n = int(positions.shape[1])
            mask = np.ones((n, n), dtype=bool)
            mask[np.tril_indices(n)] = False
            distances = tf.boolean_mask(distances, mask, axis=1)

        if not squared:
            # Because the gradient of sqrt is infinite when distances == 0.0 (ex: on the diagonal)
            # we need to add a small epsilon where distances == 0.0
            mask = tf.to_float(tf.equal(distances, 0.0))
            distances = distances + mask * 1e-16

            distances = tf.sqrt(distances)

            # Correct the epsilon added: set the distances on the mask to be exactly 0.0
            distances = distances * (1.0 - mask)

    return distances
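
A minimal sanity check against SciPy (TF 1.x graph mode assumed; scipy supplies only the reference values):

import numpy as np
import tensorflow as tf
from scipy.spatial.distance import cdist

pts = np.random.rand(10, 3).astype(np.float32)
dists = pairwise_dist(pts)      # converted to a tensor and batched internally
with tf.Session() as sess:
    d = sess.run(dists)[0]      # drop the singleton batch dimension
np.testing.assert_allclose(d, cdist(pts, pts), atol=1e-4)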
Code example #12
def engineered_features(img, halfsize):
  with tf.control_dependencies([
      tf.Assert(tf.is_numeric_tensor(img), [img])
    ]):
    qtrsize = halfsize // 2
    ref_smbox = img[:, qtrsize:(qtrsize+halfsize+1), qtrsize:(qtrsize+halfsize+1), 0:1]
    ltg_smbox = img[:, qtrsize:(qtrsize+halfsize+1), qtrsize:(qtrsize+halfsize+1), 1:2]
    ref_bigbox = img[:, :, :, 0:1]
    ltg_bigbox = img[:, :, :, 1:2]
    engfeat = tf.concat([
      tf.reduce_max(ref_bigbox, [1, 2]), # [?, 64, 64, 1] -> [?, 1]
      tf.reduce_max(ref_smbox, [1, 2]),
      tf.reduce_mean(ref_bigbox, [1, 2]),
      tf.reduce_mean(ref_smbox, [1, 2]),
      tf.reduce_mean(ltg_bigbox, [1, 2]),
      tf.reduce_mean(ltg_smbox, [1, 2])
    ], axis=1)
    return engfeat
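
A shape sketch, assuming the 64x64 two-channel input implied by the comment above:

import tensorflow as tf

img = tf.placeholder(tf.float32, [None, 64, 64, 2])
feats = engineered_features(img, halfsize=32)  # six pooled statistics, shape [?, 6]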
Code example #13
    def unet_features(self, perceptual_input, perceptual_gt):

        # averaged_mse = self.loss_instance.mean_squared_error(labels=self.label1,
        #                                                 logit=y[:, :, :, :, 0, np.newaxis])
        # cost = tf.reduce_mean(averaged_mse, name="cost")
        # restore the model

        if tf.is_numeric_tensor(perceptual_input):
            [unet_end] = self.sess.run(
                [self.level_us1],
                feed_dict={
                    self.img_row1:
                    np.zeros([
                        1, self.input_cube_size, self.input_cube_size,
                        self.input_cube_size, 1
                    ]),
                    self.label1:
                    np.zeros([
                        1, self.gt_cube_size, self.gt_cube_size,
                        self.gt_cube_size, 1
                    ]),
                    self.is_training:
                    False,
                    self.input_dim:
                    self.input_cube_size,
                    self.ave_huber:
                    -1,
                    self.trainable:
                    False
                })
        else:
            [unet_end] = self.sess.run(
                [self.level_us1],
                feed_dict={
                    self.img_row1: perceptual_input,
                    self.label1: perceptual_gt,
                    self.is_training: False,
                    self.input_dim: self.input_cube_size,
                    self.ave_huber: -1,
                    self.trainable: False
                })
        return unet_end
Code example #14
def split_batch(inputs=[], num=1):
    ## Inputs
    ##  inputs: list of tensor = [tensor0, tensor1, ...]
    ## Outputs
    ##  outputs: list of split tensors = [[tensor0_0, tensor1_0, ...], [tensor0_1, tensor1_1, ...], ...]

    assert num > 0
    if not isinstance(inputs, list) and not isinstance(inputs, tuple):
        assert tf.is_numeric_tensor(inputs)
        inputs = [inputs]
    if num == 1:
        if len(inputs) == 1:
            return inputs
        else:
            return [inputs]
    else:
        with tf.device('/cpu:0'):
            if len(inputs) == 1:
                return tf.split(inputs[0], num, axis=0)
            else:
                return np.array([tf.split(i, num, axis=0) for i in inputs],
                                np.object).transpose().tolist()
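
A usage sketch (TF 1.x era; shapes illustrative). With several tensors and num > 1 the result is one tensor group per split:

import tensorflow as tf

x = tf.placeholder(tf.float32, [8, 4])
y = tf.placeholder(tf.float32, [8, 2])
(x0, y0), (x1, y1) = split_batch([x, y], num=2)  # two half-batches, split on /cpu:0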
Code example #15
def dihedrals_to_cartesian_tf(dihedrals, cartesian):

    if not tf.is_numeric_tensor(dihedrals):
        dihedrals = tf.convert_to_tensor(dihedrals)

    if len(cartesian.get_shape()) == 2:
        cartesian = tf.tile(tf.expand_dims(cartesian, axis=0), [tf.shape(dihedrals)[0], 1, 1])

    split = int(int(cartesian.shape[1])/2)

    cartesian_right = cartesian[:, split-1:]
    dihedrals_right = dihedrals[:, split-1:]

    cartesian_left = cartesian[:, split+1::-1]
    dihedrals_left = dihedrals[:, split-2::-1]

    new_cartesian_right = dihedral_to_cartesian_tf_one_way(dihedrals_right, cartesian_right)
    new_cartesian_left = dihedral_to_cartesian_tf_one_way(dihedrals_left, cartesian_left)

    new_cartesian = tf.concat([new_cartesian_left[:, ::-1], new_cartesian_right[:, 3:]], axis=1)

    return new_cartesian
Code example #16
def dihedrals_to_cartesian_tf_old(dihedrals, cartesian=None, central_atom_indices=None, no_omega=False):

    if not tf.is_numeric_tensor(dihedrals):
        dihedrals = tf.convert_to_tensor(dihedrals)
    if len(dihedrals.get_shape()) == 1:
        one_d = True
        dihedrals = tf.expand_dims(dihedrals, 0)
    else:
        one_d = False

    n = int(dihedrals.shape[-1])
    dihedrals = -dihedrals

    if cartesian is None:
        cartesian = tf.constant(straight_tetrahedral_chain(n + 3))
    if len(cartesian.get_shape()) == 2:
        cartesian = tf.tile(tf.expand_dims(cartesian, axis=0), [tf.shape(dihedrals)[0], 1, 1])

    if central_atom_indices is None:
        cai = list(range(cartesian.shape[1]))
    else:
        cai = central_atom_indices

    for i in range(n):
        if not no_omega:
            j = i
        else:
            j = i + int((i+1)/2)
        axis = cartesian[:, cai[j+2]] - cartesian[:, cai[j+1]]
        axis /= tf.norm(axis, axis=1, keepdims=True)
        rotated = cartesian[:, cai[j+2]:cai[j+2]+1] + \
            tf.matmul(cartesian[:, cai[j+2]+1:] - cartesian[:, cai[j+2]:cai[j+2]+1],
                      rotation_matrix(axis, dihedrals[:, i]))
        cartesian = tf.concat([cartesian[:, :cai[j+2]+1], rotated], axis=1)

    return cartesian
Code example #17
            #< Define your network body here. Please make sure you don't use any layers created elsewhere >

            # prepare a graph for agent step
            self.state_t = tf.placeholder('float32', [None,] + list(state_shape))
            self.qvalues_t = self.get_symbolic_qvalues(self.state_t)

        self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
        self.epsilon = epsilon

    def get_symbolic_qvalues(self, state_t):
        """takes agent's observation, returns qvalues. Both are tf Tensors"""
        # Template blanks filled with a minimal sketch, mirroring example #8;
        # self.network is assumed to be the body defined above.
        qvalues = self.network(state_t)


        assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
            "please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
        assert int(qvalues.shape[1]) == n_actions

        return qvalues

    def get_qvalues(self, state_t):
        """Same as symbolic step except it operates on numpy arrays"""
        sess = tf.get_default_session()
        return sess.run(self.qvalues_t, {self.state_t: state_t})

    def sample_actions(self, qvalues):
        """pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
        epsilon = self.epsilon
        batch_size, n_actions = qvalues.shape
        random_actions = np.random.choice(n_actions, size=batch_size)
Code example #18
def _to_tensor(x):
    if not tf.is_numeric_tensor(x):
        x = tf.convert_to_tensor(x, dtype=tf.float64)
    return x
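
A quick check of both branches (TF 1.x graph mode assumed):

import tensorflow as tf

t = _to_tensor([1.0, 2.0])  # Python list -> float64 tensor
u = _to_tensor(t)           # already a numeric tensor -> returned unchanged
assert u is t and t.dtype == tf.float64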
Code example #19
tf.ordered_map_incomplete_size()
tf.ordered_map_peek()
tf.ordered_map_size()
tf.ordered_map_stage()
tf.ordered_map_unstage()
tf.ordered_map_unstage_no_key()

tf.matrix_diag()

tf.negative()
tf.norm()
tf.is_nan()
tf.is_finite()
tf.is_inf()
tf.is_non_decreasing()
tf.is_numeric_tensor()
tf.is_strictly_increasing()
tf.is_variable_initialized()

tf.global_variables_initializer()
tf.global_variables()
tf.global_norm()

tf.local_variables()
tf.local_variables_initializer()
tf.get_local_variable()
tf.initialize_local_variables()

tf.equal()
tf.einsum()
tf.extract_image_patches()
Code example #20
def convnet_ltg(features, labels, mode, params, tpu_estimator_spec):
    """Model function for a simple convnet.

  Args:
    features (dict): features
    labels (tensor): labels
    mode (int): TRAIN, EVAL or PREDICT
    params (dict): command-line parameters
    tpu_estimator_spec (bool): return TPUEstimatorSpec or EstimatorSpec

  Returns:
    TPUEstimatorSpec if tpu_estimator_spec is True, otherwise EstimatorSpec
  """
    # comes in directly during training, wrapped in dict during serving
    image = features['image'] if isinstance(features, dict) else features

    # do convolutional layers
    ylogits = _convnet(image, mode, params)
    #ylogits = tf.Print(ylogits, [ylogits], "ylogits= ")

    # output layer from logits
    ltgprob = tf.nn.sigmoid(ylogits)
    class_int = tf.round(ltgprob)

    # set up loss, train op and eval metrics
    loss = None
    train_op = None
    evalmetrics = None
    eval_metric_ops = None
    if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:

        with tf.control_dependencies([
                tf.Assert(tf.is_numeric_tensor(ylogits), [ylogits]),
                tf.assert_non_negative(labels, [labels]),
                tf.assert_less_equal(labels, 1, [labels])
        ]):
            loss = tf.losses.sigmoid_cross_entropy(labels,
                                                   tf.reshape(ylogits, [-1]))
            l2loss = tf.add_n(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables()])
            loss = loss + 0.001 * l2loss

        def metric_fn(labels, class_int, ltgprob):
            return {
                'accuracy':
                tf.metrics.accuracy(labels, class_int),
                'rmse':
                tf.metrics.root_mean_squared_error(
                    tf.cast(labels, dtype=tf.float32), ltgprob)
            }

        evalmetrics = (metric_fn, [labels, class_int, ltgprob])
        eval_metric_ops = metric_fn(labels, class_int, ltgprob)

        if mode == tf.estimator.ModeKeys.TRAIN:
            # this is needed for batch normalization, but has no effect otherwise
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            optimizer = tf.train.AdamOptimizer(
                learning_rate=params['learning_rate'], epsilon=1)
            optimizer = tf.contrib.estimator.clip_gradients_by_norm(
                optimizer, 5)
            if params['use_tpu']:
                optimizer = tf.contrib.tpu.CrossShardOptimizer(
                    optimizer)  # for TPU
            with tf.control_dependencies(update_ops):
                train_op = optimizer.minimize(loss, tf.train.get_global_step())

    predictions_dict = {'ltg_probability': ltgprob, 'has_ltg': class_int}
    export_outputs = {
        'ltgpred': tf.estimator.export.PredictOutput(predictions_dict)
    }
    if tpu_estimator_spec:
        return tf.contrib.tpu.TPUEstimatorSpec(  # works on TPU, GPU, CPU
            mode=mode,
            predictions=predictions_dict,
            loss=loss,
            train_op=train_op,
            eval_metrics=evalmetrics,
            export_outputs=export_outputs)
    else:
        return tf.estimator.EstimatorSpec(mode=mode,
                                          predictions=predictions_dict,
                                          loss=loss,
                                          train_op=train_op,
                                          eval_metric_ops=eval_metric_ops,
                                          export_outputs=export_outputs)
Code example #21
def nancheck(tensor, tx=""):
    # tf.is_numeric_tensor only inspects the dtype, so it cannot detect NaN
    # values at runtime; check the elements themselves instead.
    return tf.Print(tensor, [tx, "Nans?", tf.reduce_any(tf.is_nan(tensor))])
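
A wiring sketch (TF 1.x; tf.Print is an identity op that logs its data to stderr when evaluated):

import tensorflow as tf

x = tf.constant([1.0, float('nan')])
x = nancheck(x, tx="x")  # logs something like: [x][Nans?][1]
with tf.Session() as sess:
    sess.run(x)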
Code example #22
    def create(loss, sess, inputs, grads, variables, graph, tensors_with_value,
               session_config, metrics):

        import tensorflow as tf
        from zoo.util.tf import export_tf
        additional_inputs = []
        additional_values = []
        all_required_inputs = _find_placeholders([loss])
        all_required_inputs_names = [v.name for v in all_required_inputs]
        if tensors_with_value:
            for t, v in tensors_with_value.items():
                if t.name in all_required_inputs_names:
                    additional_inputs.append(t)
                    additional_values.append(v)

        if not isinstance(inputs, list):
            inputs = nest.flatten(inputs)

        inputs = inputs + additional_inputs

        if session_config is not None:
            assert isinstance(session_config, tf.ConfigProto),\
                "session_config should be a tf.ConfigProto"
            session_config.use_per_session_threads = True

        from zoo.util.tf import process_grad
        grads = [process_grad(grad) for grad in grads]

        outputs = []
        val_methods = None
        if metrics is not None:
            idx = 0
            val_methods = []
            for metric_name in metrics:
                metric = metrics[metric_name]
                if tf.is_numeric_tensor(metric):
                    outputs.append(metric)
                    val_methods.append(StatelessMetric(metric_name, idx))
                    idx += 1
                else:
                    outputs += metric.outputs
                    with graph.as_default():
                        val_labels = [tf.identity(v) for v in metric.labels]
                    outputs += val_labels
                    method = TFValidationMethod(
                        metric.val_method, metric_name,
                        list(range(idx, idx + len(metric.outputs))),
                        list(
                            range(idx + len(metric.outputs), idx +
                                  len(metric.outputs) + len(val_labels))))
                    val_methods.append(method)
                    idx += len(metric.outputs) + len(val_labels)
            with graph.as_default():
                real_batch_size = tf.shape(inputs[0])[0]
            outputs.append(real_batch_size)

        outputs.append(loss)

        export_dir = tempfile.mkdtemp()
        export_tf(sess, export_dir, inputs=inputs, outputs=grads + outputs)

        variable_names = [v.name for v in variables]
        grad_names = [g.name for g in grads]
        output_names = [o.name for o in outputs]

        def to_floats(vs):
            return [float(v) for v in vs]

        meta = {
            "input_names": [i.name for i in inputs],
            "output_names": output_names,
            "variables": variable_names,
            "grad_variables": grad_names,
            "default_tensor_values": [to_floats(v) for v in additional_values]
        }

        with open(os.path.join(export_dir, "training_meta.json"), "w") as f:
            f.write(json.dumps(meta))
        variable_placeholders = []
        with graph.as_default():
            assigns = []
            for v in variables:
                p = tf.placeholder(dtype=tf.float32, shape=v.shape)
                a = tf.assign(v, p)
                variable_placeholders.append(p)
                assigns.append(a)
            assign = tf.group(*assigns)
        training_helper_layer = TFTrainingHelper(export_dir, session_config,
                                                 assign, variable_placeholders)

        criterion = IdentityCriterion()

        return TFModel(training_helper_layer, criterion, val_methods)
Code example #23
def main():
    if tf.__version__.split('.')[0] != "1":
        raise Exception("Tensorflow version 1 required")

    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    examples = load_examples()

    model = create_model(examples.inputs, examples.targets)

    # encoding images for saving
    with tf.name_scope("encode_images"):
        display_fetches = {}
        for name, value in examples._asdict().items():
            if "path" in name:
                display_fetches[name] = value
            elif tf.is_numeric_tensor(value):
                display_fetches[name] = tf.map_fn(tf.image.encode_png,
                                                  deprocess(value),
                                                  dtype=tf.string,
                                                  name=name + "_pngs")
        for name, value in model._asdict().items():
            if tf.is_numeric_tensor(value) and "predict_" not in name:
                display_fetches[name] = tf.map_fn(tf.image.encode_png,
                                                  deprocess(value),
                                                  dtype=tf.string,
                                                  name=name + "_pngs")

    # progress report for all losses
    with tf.name_scope("progress_summary"):
        progress_fetches = {}
        for name, value in model._asdict().items():
            if (not tf.is_numeric_tensor(value)
                    and "grads_and_vars" not in name and name != "train"):
                progress_fetches[name] = value

    # summaries for model: images, scalars, histograms
    for name, value in examples._asdict().items():
        if tf.is_numeric_tensor(value):
            with tf.name_scope(name + "_summary"):
                tf.summary.image(name, deprocess(value))
    for name, value in model._asdict().items():
        if tf.is_numeric_tensor(value):
            with tf.name_scope(name + "_summary"):
                if "predict_" in name:  # discriminators produce values in [0, 1]
                    tf.summary.image(
                        name,
                        tf.image.convert_image_dtype(value, dtype=tf.uint8))
                else:  # generators produce values in [-1, 1]
                    tf.summary.image(name, deprocess(value))
        elif "grads_and_vars" in name:
            for grad, var in value:
                tf.summary.histogram(var.op.name + "/gradients", grad)
        elif not name == "train":
            tf.summary.scalar(name, value)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    saver = tf.train.Saver(max_to_keep=1)

    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)
    with sv.managed_session() as sess:
        print("parameter_count =", sess.run(parameter_count))

        if a.checkpoint is not None:
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            saver.restore(sess, checkpoint)

        max_steps = 2**32
        if a.max_epochs is not None:
            max_steps = examples.steps_per_epoch * a.max_epochs
        if a.max_steps is not None:
            max_steps = a.max_steps

        if a.mode == "test":
            # testing
            # at most, process the test data once
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                filesets = save_images(results)
                for fileset in filesets:
                    print("evaluated image", fileset["name"])
                index_path = append_index(filesets)

            print("wrote index at %s" % index_path)

        if a.mode == "predict":
            # predicting
            # at most, process the test data once
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                fileset = save_predicted_images(results)
                for filename in fileset:
                    print("predicted image", filename)
            print("wrote predicted labels at %s" % a.output_dir)

        if a.mode == "train":
            # training
            start = time.time()

            for step in range(max_steps):

                def should(freq):
                    return freq > 0 and ((step + 1) % freq == 0
                                         or step == max_steps - 1)

                options = None
                run_metadata = None
                if should(a.trace_freq):
                    options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                fetches = {
                    "train": model.train,
                    "global_step": sv.global_step,
                }

                if should(a.progress_freq):
                    fetches["progress"] = progress_fetches

                if should(a.summary_freq):
                    fetches["summary"] = sv.summary_op

                if should(a.display_freq):
                    fetches["display"] = display_fetches

                results = sess.run(fetches,
                                   options=options,
                                   run_metadata=run_metadata)

                if should(a.summary_freq):
                    print("recording summary")
                    sv.summary_writer.add_summary(results["summary"],
                                                  results["global_step"])

                if should(a.display_freq):
                    print("saving display images")
                    filesets = save_images(results["display"],
                                           step=results["global_step"])
                    append_index(filesets, step=True)

                if should(a.trace_freq):
                    print("recording trace")
                    sv.summary_writer.add_run_metadata(
                        run_metadata, "step_%d" % results["global_step"])

                if should(a.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] /
                                            examples.steps_per_epoch)
                    train_step = (results["global_step"] -
                                  1) % examples.steps_per_epoch + 1
                    rate = (step + 1) * a.batch_size / (time.time() - start)
                    remaining = (max_steps - step) * a.batch_size / rate
                    print(
                        "progress  epoch %d  step %d  image/sec %0.1f  remaining %dm"
                        % (train_epoch, train_step, rate, remaining / 60))
                    for name, value in results["progress"].items():
                        print(name, value)

                if should(a.save_freq):
                    print("saving model")
                    saver.save(sess,
                               os.path.join(a.output_dir, "model"),
                               global_step=sv.global_step)

                if sv.should_stop():
                    break
Code example #24
    def infer_output_shape(self, tensor):
        assert tf.is_numeric_tensor(tensor)
        self._shapes.append((tensor.name, tensor.get_shape().as_list()))