Example #1
  def AddFCLayer(self, prev_layer, index):
    """Parse expression and add Fully Connected Layer.

    Args:
      prev_layer: Input tensor.
      index:      Position in model_str at which to start parsing.

    Returns:
      Output tensor, end index in model_str.
    """
    pattern = re.compile(r'(F)(s|t|r|l|m)({\w+})?(\d+)')
    m = pattern.match(self.model_str, index)
    if m is None:
      return None, None
    fn = self._NonLinearity(m.group(2))
    name = self._GetLayerName(m.group(0), index, m.group(3))
    depth = int(m.group(4))
    input_depth = shapes.tensor_dim(prev_layer, 1) * shapes.tensor_dim(
        prev_layer, 2) * shapes.tensor_dim(prev_layer, 3)
    # The slim fully connected is actually a 1x1 conv, so we have to crush the
    # dimensions on input.
    # Everything except batch goes to depth, and therefore has to be known.
    shaped = tf.reshape(
        prev_layer, [-1, input_depth], name=name + '_reshape_in')
    output = slim.fully_connected(shaped, depth, activation_fn=fn, scope=name)
    # Width and height are collapsed to 1.
    self.reduction_factors[1] = None
    self.reduction_factors[2] = None
    return tf.reshape(
        output, [shapes.tensor_dim(prev_layer, 0), 1, 1, depth],
        name=name + '_reshape_out'), m.end()
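A minimal, self-contained sketch of what the regex above accepts; the spec string 'Fr{fc5}64' is a hypothetical example inferred from the pattern alone, not from the original model_str grammar:

import re

pattern = re.compile(r'(F)(s|t|r|l|m)({\w+})?(\d+)')
m = pattern.match('Fr{fc5}64', 0)
assert m is not None
print(m.group(2))  # 'r'     -> non-linearity code
print(m.group(3))  # '{fc5}' -> optional layer name
print(m.group(4))  # '64'    -> output depth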
Example #2
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=0.0001):
    '''Compute mean and std for batch then apply batch_normalization on batch.
    '''
    # Note: tf.nn.moments returns (mean, variance); the name `std` is a
    # misnomer, but tf.nn.batch_normalization expects the variance, so the
    # value threaded through below is correct.
    mean, std = tf.nn.moments(x, reduction_axes,
                              shift=None, name=None, keep_dims=False)
    if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
        normed = tf.nn.batch_normalization(x, mean, std,
                                           beta, gamma,
                                           epsilon)
    else:
        # need broadcasting
        target_shape = []
        for axis in range(ndim(x)):
            if axis in reduction_axes:
                target_shape.append(1)
            else:
                target_shape.append(tf.shape(x)[axis])
        target_shape = tf.pack(target_shape)

        broadcast_mean = tf.reshape(mean, target_shape)
        broadcast_std = tf.reshape(std, target_shape)
        broadcast_gamma = tf.reshape(gamma, target_shape)
        broadcast_beta = tf.reshape(beta, target_shape)
        normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_std,
                                           broadcast_beta, broadcast_gamma,
                                           epsilon)
    return normed, mean, std
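A NumPy sketch of the broadcasting branch above (an illustration, not the original TF code): the per-channel moments of a 4-D tensor are reshaped to rank 4 so they broadcast against x:

import numpy as np

x = np.random.rand(2, 3, 4, 5)
reduction_axes = (0, 2, 3)                 # per-channel statistics
mean = x.mean(axis=reduction_axes)         # shape (3,)
var = x.var(axis=reduction_axes)
target_shape = [1 if ax in reduction_axes else x.shape[ax]
                for ax in range(x.ndim)]   # [1, 3, 1, 1]
normed = (x - mean.reshape(target_shape)) / np.sqrt(var.reshape(target_shape) + 1e-4)
print(normed.shape)                        # (2, 3, 4, 5)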
Example #3
    def build_graph(self, nn_im_w, nn_im_h, num_colour_channels=3, weights=None, biases=None):
        num_outputs = 1  # a single scalar output
        self.nn_im_w = nn_im_w
        self.nn_im_h = nn_im_h

        if weights is None:
            weights = [None, None, None, None, None]
        if biases is None:
            biases = [None, None, None, None, None]

        with tf.device('/cpu:0'):
            # Placeholder variables for the input image and output images
            self.x = tf.placeholder(tf.float32, shape=[None, nn_im_w*nn_im_h*num_colour_channels])
            self.y_ = tf.placeholder(tf.float32, shape=[None, num_outputs])
            self.threshold = tf.placeholder(tf.float32)

            # Build the convolutional and pooling layers
            conv1_output_channels = 32
            conv2_output_channels = 16
            conv3_output_channels = 8

            conv_layer_1_input = tf.reshape(self.x, [-1, nn_im_h, nn_im_w, num_colour_channels]) #The resized input image
            self.build_conv_layer(conv_layer_1_input, num_colour_channels, conv1_output_channels, initial_weights=weights[0], initial_biases=biases[0]) # layer 1
            self.build_conv_layer(self.layers[0][0], conv1_output_channels, conv2_output_channels, initial_weights=weights[1], initial_biases=biases[1])# layer 2
            self.build_conv_layer(self.layers[1][0], conv2_output_channels, conv3_output_channels, initial_weights=weights[2], initial_biases=biases[2])# layer 3

            # Build the fully connected layer
            convnet_output_w = nn_im_w//8
            convnet_output_h = nn_im_h//8

            fully_connected_layer_input = tf.reshape(self.layers[2][0], [-1, convnet_output_w * convnet_output_h * conv3_output_channels])
            self.build_fully_connected_layer(fully_connected_layer_input, convnet_output_w, convnet_output_h, conv3_output_channels, initial_weights=weights[3], initial_biases=biases[3])

            # The dropout stage and readout layer
            self.keep_prob, self.h_drop = self.dropout(self.layers[3][0])
            self.y_conv,_,_ = self.build_readout_layer(self.h_drop, num_outputs, initial_weights=weights[4], initial_biases=biases[4])

            self.mean_error = tf.sqrt(tf.reduce_mean(tf.square(self.y_ - self.y_conv)))
            self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.mean_error)

            self.accuracy = (1.0 - tf.reduce_mean(tf.abs(self.y_ - tf.round(self.y_conv))))


            positive_examples = tf.greater_equal(self.y_, 0.5)
            negative_examples = tf.logical_not(positive_examples)
            positive_classifications = tf.greater_equal(self.y_conv, self.threshold)
            negative_classifications = tf.logical_not(positive_classifications)

            self.true_positive = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, positive_classifications),tf.int32)) # count the examples that are positive and classified as positive
            self.false_positive = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, positive_classifications),tf.int32)) # count the examples that are negative but classified as positive

            self.true_negative = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, negative_classifications),tf.int32)) # count the examples that are negative and classified as negative
            self.false_negative = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, negative_classifications),tf.int32)) # count the examples that are positive but classified as negative

            self.positive_count = tf.reduce_sum(tf.cast(positive_examples, tf.int32)) # count the examples that are positive
            self.negative_count = tf.reduce_sum(tf.cast(negative_examples, tf.int32)) # count the examples that are negative

            self.confusion_matrix = tf.reshape(tf.pack([self.true_positive, self.false_positive, self.false_negative, self.true_negative]), [2,2])

        self.sess.run(tf.initialize_all_variables())
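A NumPy sketch of the confusion-matrix bookkeeping above, with hypothetical labels and scores, producing the same 2x2 layout as confusion_matrix:

import numpy as np

y_true = np.array([[1.0], [0.0], [1.0], [0.0]])
y_conv = np.array([[0.9], [0.6], [0.2], [0.1]])   # hypothetical scores
threshold = 0.5
pos = y_true >= 0.5
cls = y_conv >= threshold
tp = int(np.sum(pos & cls))
fp = int(np.sum(~pos & cls))
fn = int(np.sum(pos & ~cls))
tn = int(np.sum(~pos & ~cls))
print(np.array([[tp, fp], [fn, tn]]))             # [[1 1] [1 1]]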
Example #4
    def project_bilstm_layer(self, lstm_outputs, name=None):
        """
        hidden layer between lstm layer and logits
        :param lstm_outputs: [batch_size, num_steps, emb_size] 
        :return: [batch_size, num_steps, num_tags]
        """
        with tf.variable_scope("project" if not name else name):
            with tf.variable_scope("hidden"):
                W = tf.get_variable("W", shape=[self.hidden_unit * 2, self.hidden_unit],
                                    dtype=tf.float32, initializer=self.initializers.xavier_initializer())

                b = tf.get_variable("b", shape=[self.hidden_unit], dtype=tf.float32,
                                    initializer=tf.zeros_initializer())
                output = tf.reshape(lstm_outputs, shape=[-1, self.hidden_unit * 2])
                hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))

            # project to score of tags
            with tf.variable_scope("logits"):
                W = tf.get_variable("W", shape=[self.hidden_unit, self.num_labels],
                                    dtype=tf.float32, initializer=self.initializers.xavier_initializer())

                b = tf.get_variable("b", shape=[self.num_labels], dtype=tf.float32,
                                    initializer=tf.zeros_initializer())

                pred = tf.nn.xw_plus_b(hidden, W, b)
            return tf.reshape(pred, [-1, self.seq_length, self.num_labels])
Example #5
 def make_net(self, input_images, input_measurements, input_actions, input_objectives, reuse=False):
     if reuse:
         tf.get_variable_scope().reuse_variables()
     
     self.fc_val_params = np.copy(self.fc_joint_params)
     self.fc_val_params['out_dims'][-1] = self.target_dim
     self.fc_adv_params = np.copy(self.fc_joint_params)
     self.fc_adv_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim
     p_img_conv = my_ops.conv_encoder(input_images, self.conv_params, 'p_img_conv', msra_coeff=0.9)
     p_img_fc = my_ops.fc_net(my_ops.flatten(p_img_conv), self.fc_img_params, 'p_img_fc', msra_coeff=0.9)
     p_meas_fc = my_ops.fc_net(input_measurements, self.fc_meas_params, 'p_meas_fc', msra_coeff=0.9)
     if isinstance(self.fc_obj_params, np.ndarray):
         p_obj_fc = my_ops.fc_net(input_objectives, self.fc_obj_params, 'p_obj_fc', msra_coeff=0.9)
         p_concat_fc = tf.concat([p_img_fc,p_meas_fc,p_obj_fc], 1)
     else:
         p_concat_fc = tf.concat([p_img_fc,p_meas_fc], 1)
         if self.random_objective_coeffs:
             raise Exception('Need fc_obj_params with randomized objectives')
         
     p_val_fc = my_ops.fc_net(p_concat_fc, self.fc_val_params, 'p_val_fc', last_linear=True, msra_coeff=0.9)
     p_adv_fc = my_ops.fc_net(p_concat_fc, self.fc_adv_params, 'p_adv_fc', last_linear=True, msra_coeff=0.9)
     
     adv_reshape = tf.reshape(p_adv_fc, [-1, len(self.net_discrete_actions), self.target_dim])
     
     pred_all_nomean = adv_reshape - tf.reduce_mean(adv_reshape, reduction_indices=1, keep_dims=True)
     pred_all = pred_all_nomean + tf.reshape(p_val_fc, [-1, 1, self.target_dim])
     pred_relevant = tf.boolean_mask(pred_all, tf.cast(input_actions, tf.bool))
     
     return pred_all, pred_relevant
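The value/advantage combination above is the dueling-network trick; a NumPy shape sketch under assumed dimensions:

import numpy as np

batch, n_actions, target_dim = 2, 4, 3                # assumed sizes
val = np.random.rand(batch, 1, target_dim)            # p_val_fc, reshaped
adv = np.random.rand(batch, n_actions, target_dim)    # adv_reshape
pred_all = (adv - adv.mean(axis=1, keepdims=True)) + val
print(pred_all.shape)                                 # (2, 4, 3)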
Example #6
def boston_input_fn():
    boston = tf.contrib.learn.datasets.load_boston()
    features = tf.cast(
        tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
    labels = tf.cast(
        tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
    return features, labels
Example #7
def iris_input_fn(num_epochs=None):
  iris = tf.contrib.learn.datasets.load_iris()
  features = tf.reshape(tf.constant(iris.data), [-1, 4])
  if num_epochs:
    features = tf.train.limit_epochs(features, num_epochs=num_epochs)
  target = tf.reshape(tf.constant(iris.target), [-1])
  return features, target
Example #8
def LGMRES_solver(mps,
                  direction,
                  left_dominant,
                  right_dominant,
                  inhom,
                  x0,
                  precision=1e-10,
                  nmax=2000,
                  **kwargs):
    # mps.D[0] has to equal mps.D[-1], so no distinction between
    # direction='l' and direction='r' has to be made here.
    if not tf.equal(mps.D[0], mps.D[-1]):
        raise ValueError(
            'in LGMRES_solver: mps.D[0] != mps.D[-1]; can only handle infinite MPS!'
        )
    inhom_numpy = tf.reshape(inhom, [mps.D[0] * mps.D[0]]).numpy()
    x0_numpy = tf.reshape(x0, [mps.D[0] * mps.D[0]]).numpy()
    mv = fct.partial(one_minus_pseudo_unitcell_transfer_op,
                     *[direction, mps, left_dominant, right_dominant])

    LOP = LinearOperator((int(mps.D[0])**2, int(mps.D[-1])**2),
                         matvec=mv,
                         dtype=mps.dtype.as_numpy_dtype)
    out, info = lgmres(
        A=LOP,
        b=inhom_numpy,
        x0=x0_numpy,
        tol=precision,
        maxiter=nmax,
        **kwargs)

    return tf.reshape(tf.convert_to_tensor(out), [mps.D[0], mps.D[0]]), info
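A minimal sketch of the same SciPy pattern with a dense stand-in matrix (the MPS-specific matvec replaced by a plain matrix product), assuming a SciPy version whose lgmres still accepts tol, as used above:

import numpy as np
from scipy.sparse.linalg import LinearOperator, lgmres

A = np.eye(4) + 0.1 * np.random.rand(4, 4)
LOP = LinearOperator((4, 4), matvec=lambda v: A @ v, dtype=np.float64)
out, info = lgmres(A=LOP, b=np.ones(4), x0=np.zeros(4), tol=1e-10, maxiter=2000)
print(info)   # 0 means converged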
Example #9
def create_output(decoder_output, rows, cols, targets, hparams):
  """Creates output from decoder output and vars.

  Args:
    decoder_output: Tensor of shape [batch, ...], where ... can be any rank such
      that the number of elements is batch * rows * cols * hparams.hidden_size.
    rows: Integer representing number of rows in a 2-D data point.
    cols: Integer representing number of columns in a 2-D data point.
    targets: Tensor of shape [batch, hparams.img_len, hparams.img_len,
      hparams.num_channels].
    hparams: tf.contrib.training.HParams set.

  Returns:
    Tensor of shape [batch, hparams.img_len, hparams.img_len,
    hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise
    [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256].
    In the special case of predict mode, it is a Tensor of rank 5.
  """
  decoded_image = postprocess_image(decoder_output, rows, cols, hparams)
  depth = common_layers.shape_list(decoded_image)[-1]
  batch, height, width, channels = common_layers.shape_list(targets)
  likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
  if hparams.mode == tf.estimator.ModeKeys.PREDICT:
    y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth])
    output = y[:, :height, :, :, :]
  elif likelihood == DistributionType.CAT:
    # Unpack the cols dimension of the Categorical.
    output = tf.reshape(decoded_image,
                        [batch, height, width, channels, depth])
  else:
    output = decoded_image
  return output
Example #10
def alex_net(_X, _dropout):
    # Reshape input picture
    _X = tf.reshape(_X, shape=[-1, 40, 40, 1])

    # First convolutional layer
    conv1 = conv2d('conv1', _X, wc1, bc1)
    pool1 = max_pool('pool1', conv1, k=2)
    norm1 = norm('norm1', pool1, lsize=4)
    norm1 = tf.nn.dropout(norm1, _dropout)

    # Second convolutional layer
    conv2 = conv2d('conv2', norm1, wc2, bc2)
    pool2 = max_pool('pool2', conv2, k=2)
    norm2 = norm('norm2', pool2, lsize=4)
    norm2 = tf.nn.dropout(norm2, _dropout)

    # Third convolutional layer
    conv3 = conv2d('conv3', norm2, wc3, bc3)
    pool3 = max_pool('pool3', conv3, k=2)
    norm3 = norm('norm3', pool3, lsize=4)
    norm3 = tf.nn.dropout(norm3, _dropout)

    # Reshape conv3 output to fit dense layer input
    dense1 = tf.reshape(norm3, [-1, wd1.get_shape().as_list()[0]])

    # Fully connected layers
    dense1 = tf.nn.relu(tf.matmul(dense1, wd1) + bd1, name='fc1')  # Relu activation
    dense2 = tf.nn.relu(tf.matmul(dense1, wd2) + bd2, name='fc2')  # Relu activation

    # Output, class prediction
    out = tf.matmul(dense2, wout) + bout
    return out
Example #11
def one_minus_pseudo_unitcell_transfer_op(direction, mps, left_dominant,
                                          right_dominant, vector):
    """
    calculates the action of (1 - unit-cell transfer operator) + |r)(l|
    Parameters:
    ---------------------------
    direction:  int or str 
                if (1,'l','left'): do left multiplication
                if (-1,'r','right'): do right multiplication
    mps:        InfiniteMPSCentralGauge object
                an infinite mps
    left_dominant:  tf.tensor of shape (mps.D[0],mps.D[0])
                    left dominant eigenvector of the unit-cell transfer operator of mps
    right_dominant: tf.tensor of shape (mps.D[-1],mps.D[-1])
                    right dominant eigenvector of the unit-cell transfer operator of mps
    vector:         tf.tensor of shape (mps.D[0]*mps.D[0]) or (mps.D[-1]*mps.D[-1])
                    the input vector
    Returns
    ---------------------------
    np.ndarray of shape (mps.D[0]*mps.D[0]) or (mps.D[-1]*mps.D[-1])

    """

    if direction in (1, 'l', 'left'):
        x = tf.reshape(tf.convert_to_tensor(vector), (mps.D[0], mps.D[0]))
        temp = x - mps.unitcell_transfer_op('left', x) + ncon(
            [x, right_dominant], [[1, 2], [1, 2]]) * left_dominant
        return tf.reshape(temp, [mps.D[-1] * mps.D[-1]]).numpy()

    if direction in (-1, 'r', 'right'):
        x = tf.reshape(tf.convert_to_tensor(vector), [mps.D[-1], mps.D[-1]])
        temp = x - mps.unitcell_transfer_op('right', x) + ncon(
            [left_dominant, x], [[1, 2], [1, 2]]) * right_dominant
        return tf.reshape(temp, [mps.D[0] * mps.D[0]]).numpy()

    raise ValueError("direction must be one of (1, 'l', 'left') or (-1, 'r', 'right')")
Example #12
def loss(logits, labels, batch_size=None):
  """Adds all losses for the model.

  Note the final loss is not returned. Instead, the list of losses is
  collected by slim.losses. The losses are accumulated in tower_loss() and
  summed to calculate the total loss.

  Args:
    logits: List of logits from inference(). Each entry is a 2-D float Tensor.
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]
    batch_size: integer
  """
  if not batch_size:
    batch_size = FLAGS.batch_size

  # Reshape the labels into a dense Tensor of
  # shape [FLAGS.batch_size, num_classes].
  sparse_labels = tf.reshape(labels, [batch_size, 1])
  indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
  concated = tf.concat(1, [indices, sparse_labels])
  num_classes = logits[0].get_shape()[-1].value
  dense_labels = tf.sparse_to_dense(concated,
                                    [batch_size, num_classes],
                                    1.0, 0.0)

  # Cross entropy loss for the main softmax prediction.
  slim.losses.cross_entropy_loss(logits[0],
                                 dense_labels,
                                 label_smoothing=0.1,
                                 weight=1.0)
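The indices/concat/sparse_to_dense sequence above just builds a dense one-hot label matrix; an equivalent NumPy sketch (illustration only):

import numpy as np

batch_size, num_classes = 4, 6
labels = np.array([1, 3, 0, 5])
dense_labels = np.zeros((batch_size, num_classes))
dense_labels[np.arange(batch_size), labels] = 1.0
print(dense_labels)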
Example #13
def forward_propagation(images):
  with tf.variable_scope('conv1') as scope:
      W_conv1 = weight_variable([5, 5, 3, 32])
      b_conv1 = bias_variable([32])
      image_matrix = tf.reshape(images, [-1, 1750, 1750, 3])
      h_conv1 = tf.nn.sigmoid(conv2d(image_matrix, W_conv1) + b_conv1)
      _activation_summary(h_conv1)
      h_pool1 = max_pool_5x5(h_conv1)

  with tf.variable_scope('conv2') as scope:
      W_conv2 = weight_variable([5, 5, 32, 64])
      b_conv2 = bias_variable([64])
      h_conv2 = tf.nn.sigmoid(conv2d(h_pool1, W_conv2) + b_conv2)
      _activation_summary(h_conv2)
      h_pool2 = max_pool_5x5(h_conv2)

  with tf.variable_scope('conv3') as scope:
      W_conv3 = weight_variable([5, 5, 64, 128])
      b_conv3 = bias_variable([128])
      h_conv3 = tf.nn.sigmoid(conv2d(h_pool2, W_conv3) + b_conv3)
      _activation_summary(h_conv3)
      h_pool3 = max_pool_5x5(h_conv3)

  with tf.variable_scope('local3') as scope:
      W_fc1 = weight_variable([14 * 14 * 128, 256])
      b_fc1 = bias_variable([256])
      h_pool3_flat = tf.reshape(h_pool3, [-1, 14 * 14 * 128])
      h_fc1 = tf.nn.sigmoid(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
      _activation_summary(h_fc1)
      keep_prob = tf.Variable(1.0)
      W_fc2 = weight_variable([256, 4])
      b_fc2 = bias_variable([4])
      y_conv = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
      _activation_summary(y_conv)
      return y_conv
Example #14
def BatchClipByL2norm(t, upper_bound, name=None):
  """Clip an array of tensors by L2 norm.

  Shrink each dimension-0 slice of tensor (for matrix it is each row) such
  that the l2 norm is at most upper_bound. Here we clip each row as it
  corresponds to each example in the batch.

  Args:
    t: the input tensor.
    upper_bound: the upper bound of the L2 norm.
    name: optional name.
  Returns:
    the clipped tensor.
  """

  assert upper_bound > 0
  with tf.op_scope([t, upper_bound], name, "batch_clip_by_l2norm") as name:
    saved_shape = tf.shape(t)
    batch_size = tf.slice(saved_shape, [0], [1])
    t2 = tf.reshape(t, tf.concat(0, [batch_size, [-1]]))
    upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]),
                              tf.constant(1.0/upper_bound))
    # Add a small number to avoid divide by 0
    l2norm_inv = tf.rsqrt(tf.reduce_sum(t2 * t2, [1]) + 0.000001)
    scale = tf.minimum(l2norm_inv, upper_bound_inv) * upper_bound
    clipped_t = tf.matmul(tf.diag(scale), t2)
    clipped_t = tf.reshape(clipped_t, saved_shape, name=name)
  return clipped_t
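A NumPy sketch of the per-row clipping math above: rows with norm below upper_bound pass through unchanged, larger rows are rescaled to upper_bound:

import numpy as np

t = np.random.randn(3, 5)
upper_bound = 1.0
t2 = t.reshape(t.shape[0], -1)
l2norm_inv = 1.0 / np.sqrt((t2 * t2).sum(axis=1) + 1e-6)
scale = np.minimum(l2norm_inv, 1.0 / upper_bound) * upper_bound
clipped = t2 * scale[:, None]
print(np.linalg.norm(clipped, axis=1))   # every row norm <= upper_bound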
Example #15
def SoftThreshold(t, threshold_ratio, name=None):
  """Soft-threshold a tensor by the mean value.

  Soft-threshold each dimension-0 vector (for matrix it is each column) by
  the mean of absolute value multiplied by the threshold_ratio factor. Here
  we soft threshold each column as it corresponds to each unit in a layer.

  Args:
    t: the input tensor.
    threshold_ratio: the threshold ratio.
    name: the optional name for the returned tensor.
  Returns:
    the thresholded tensor, where each entry is soft-thresholded by
    threshold_ratio times the mean of the absolute value of each column.
  """

  assert threshold_ratio >= 0
  with tf.op_scope([t, threshold_ratio], name, "soft_thresholding") as name:
    saved_shape = tf.shape(t)
    t2 = tf.reshape(t, tf.concat(0, [tf.slice(saved_shape, [0], [1]), [-1]]))
    t_abs = tf.abs(t2)
    t_x = tf.sign(t2) * tf.nn.relu(t_abs -
                                   (tf.reduce_mean(t_abs, [0],
                                                   keep_dims=True) *
                                    threshold_ratio))
    return tf.reshape(t_x, saved_shape, name=name)
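And the column-wise soft-thresholding in NumPy (a sketch, not the original op graph):

import numpy as np

t = np.random.randn(6, 3)
threshold_ratio = 0.5
thresh = np.abs(t).mean(axis=0, keepdims=True) * threshold_ratio
t_x = np.sign(t) * np.maximum(np.abs(t) - thresh, 0.0)
print(t_x.shape)   # (6, 3); entries below the threshold are driven to exactly 0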
Example #16
def get_idx_map(shape):
    """Get index map for a image.
    Args:
        shape: [B, T, H, W] or [B, H, W]
    Returns:
        idx: [B, T, H, W, 2], or [B, H, W, 2]
    """
    s = shape
    ndims = tf.shape(s)
    wdim = ndims - 1
    hdim = ndims - 2
    idx_shape = tf.concat(0, [s, tf.constant([1])])
    ones_h = tf.ones(hdim - 1, dtype='int32')
    ones_w = tf.ones(wdim - 1, dtype='int32')
    h_shape = tf.concat(0, [ones_h, tf.constant([-1]), tf.constant([1, 1])])
    w_shape = tf.concat(0, [ones_w, tf.constant([-1]), tf.constant([1])])

    idx_y = tf.zeros(idx_shape, dtype='float')
    idx_x = tf.zeros(idx_shape, dtype='float')

    h = tf.slice(s, ndims - 2, [1])
    w = tf.slice(s, ndims - 1, [1])
    idx_y += tf.reshape(tf.to_float(tf.range(h[0])), h_shape)
    idx_x += tf.reshape(tf.to_float(tf.range(w[0])), w_shape)
    idx = tf.concat(ndims[0], [idx_y, idx_x])

    return idx
Example #17
def tf_random_modifiers(flat_img, window_dims, name=None):
    float_img = tf.cast(flat_img, tf.float32)

    w, h = window_dims
    mod_image = tf.reshape(float_img, (h, w, 3))

    # # Define the modifier ops:
    # brightness_mod = lambda x: tf.image.random_brightness(x, max_delta=0.3)
    # contrast_mod = lambda x: tf.image.random_contrast(x, lower=0.2, upper=1.8)
    # saturation_mod = lambda x: tf.image.random_saturation(x, lower=0.2, upper=1.8)
    # hue_mod = lambda x: tf.image.random_hue(x, max_delta=0.025)
    # modifier_ops = [brightness_mod, contrast_mod, saturation_mod, hue_mod]
    # # Choose a random order for the modifiers:
    # perm = np.arange(len(modifier_ops))
    # np.random.shuffle(perm)
    # # Apply the modifiers in a random order:
    # for i in perm:
    #     mod_op = modifier_ops[i]
    #     mod_image = mod_op(mod_image)

    mod_image = tf.image.random_brightness(mod_image, max_delta=0.3)
    mod_image = tf.image.random_contrast(mod_image, lower=0.2, upper=1.8)
    mod_image = tf.image.random_saturation(mod_image, lower=0.2, upper=1.8)
    mod_image = tf.image.random_hue(mod_image, max_delta=0.025)

    # Subtract off the mean and divide by the variance of the pixels.
    final_image = tf.image.per_image_whitening(mod_image)

    final_flat_image = tf.reshape(final_image, (w*h*3,), name=name)
    print('final_flat_image.get_shape()', final_flat_image.get_shape())

    return final_flat_image
Example #18
def one_hot_mask(labels, num_classes, scope=None):
  """Compute 1-hot encodings for masks.

  Given a label image, this computes the one hot encoding at
  each pixel.

  Args:
    labels: (batch_size, width, height, 1) tensor containing labels.
    num_classes: number of classes
    scope: optional scope name

  Returns:
    Tensor of shape (batch_size, width, height, num_classes) with
    a 1-hot encoding.
  """
  with tf.name_scope(scope, "OneHotMask", [labels]):
    height, width, depth = _shape(labels)
    assert depth == 1
    sparse_labels = tf.to_int32(tf.reshape(labels, [-1, 1]))
    sparse_size, _ = _shape(sparse_labels)
    indices = tf.reshape(tf.range(0, sparse_size, 1), [-1, 1])
    concated = tf.concat_v2([indices, sparse_labels], 1)
    dense_result = tf.sparse_to_dense(concated, [sparse_size, num_classes], 1.0,
                                      0.0)
    result = tf.reshape(dense_result, [height, width, num_classes])
    return result
Example #19
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      # Defaults are not specified since both keys are required.
      features={
          'vector': tf.FixedLenFeature([], tf.string),
          'label': tf.FixedLenFeature([], tf.int64),
      })

  # features = tf.parse_single_example(serialized_example, dense_keys=['vector', 'label'], dense_types=[tf.string, tf.int64])
  # Convert from a scalar string tensor (whose single string has
  # length tf_model.IMAGE_PIXELS) to a uint8 tensor with shape
  # [tf_model.IMAGE_PIXELS].
  image = tf.decode_raw(features['vector'], tf.float32)
  image.set_shape([FEATURE_DIMENSIONALITY])
  if FLAGS.transpose_input:
    image = tf.reshape(image, FEATURE_INPUT_SHAPE)
    image = tf.transpose(image, perm=[0,2,1])
    image = tf.reshape(image, [-1])

  # print("Image shape is %s" %(image.shape))
  # OPTIONAL: Could reshape into a 28x28 image and apply distortions
  # here.  Since we are not applying any distortions in this
  # example, and the next step expects the image to be flattened
  # into a vector, we don't bother.
  # Convert from [0, 255] -> [-0.5, 0.5] floats.
  # image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
  # Convert label from a scalar uint8 tensor to an int32 scalar.
  label = tf.cast(features['label'], tf.int32)
  return image, label
Example #20
 def testPaddingCrossEntropyFactored(self):
   vocab_size = 19
   rows = 5
   cols = 4
   depth = 11
   label_smoothing = 0.1
   features = np.random.rand(rows, cols, depth)
   weights = np.random.rand(vocab_size, depth)
   labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
   with self.test_session() as session:
     features = tf.to_float(features)
     weights = tf.to_float(weights)
     labels = tf.to_int32(labels)
     logits = tf.matmul(
         tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True)
     logits = tf.reshape(logits, [rows, cols, vocab_size])
     loss_num, loss_den = common_layers.padded_cross_entropy(
         logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
     factored_logits = common_layers.FactoredTensor(features, weights)
     loss_num_f, loss_den_f = common_layers.padded_cross_entropy_factored(
         factored_logits,
         labels=labels,
         label_smoothing=label_smoothing,
         reduce_sum=False)
     num, den, num_f, den_f = session.run(
         [loss_num, loss_den, loss_num_f, loss_den_f])
   self.assertEqual(num.shape, (rows, cols))
   self.assertEqual(den.shape, (rows, cols))
   self.assertEqual(num_f.shape, (rows, cols))
   self.assertEqual(den_f.shape, (rows, cols))
   self.assertAllClose(num, num_f)
   self.assertAllClose(den, den_f)
Example #21
def conv_net(x, weights, biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = maxpool2d(conv1, k=2)

    # Convolution Layer
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = maxpool2d(conv2, k=2)

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
Example #22
    def get_reconstructed_image(self, real, imag, name=None):
        """
        :param real:
        :param imag:
        :param name:
        :return:
        """
        complex_k_space_label = tf.complex(real=tf.squeeze(real), imag=tf.squeeze(imag), name=name+"_complex_k_space")
        rec_image_complex = tf.expand_dims(tf.ifft2d(complex_k_space_label), axis=1)
        
        rec_image_real = tf.reshape(tf.real(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        rec_image_imag = tf.reshape(tf.imag(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])

        # Shifting
        top, bottom = tf.split(rec_image_real, num_or_size_splits=2, axis=2)
        top_left, top_right = tf.split(top, num_or_size_splits=2, axis=3)
        bottom_left, bottom_right = tf.split(bottom, num_or_size_splits=2, axis=3)

        top_shift = tf.concat(axis=3, values=[bottom_right, bottom_left])
        bottom_shift = tf.concat(axis=3, values=[top_right, top_left])
        shifted_image = tf.concat(axis=2, values=[top_shift, bottom_shift])


        # Shifting
        top_imag, bottom_imag = tf.split(rec_image_imag, num_or_size_splits=2, axis=2)
        top_left_imag, top_right_imag = tf.split(top_imag, num_or_size_splits=2, axis=3)
        bottom_left_imag, bottom_right_imag = tf.split(bottom_imag, num_or_size_splits=2, axis=3)

        top_shift_imag = tf.concat(axis=3, values=[bottom_right_imag, bottom_left_imag])
        bottom_shift_imag = tf.concat(axis=3, values=[top_right_imag, top_left_imag])
        shifted_image_imag = tf.concat(axis=2, values=[top_shift_imag, bottom_shift_imag])

        shifted_image_two_channels = tf.stack([shifted_image[:,0,:,:], shifted_image_imag[:,0,:,:]], axis=1)
        return shifted_image_two_channels
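For even image dimensions, each split/concat quadrant swap above is equivalent to np.fft.fftshift applied to a 2-D slice; a quick NumPy check:

import numpy as np

img = np.arange(16.0).reshape(4, 4)
top, bottom = np.split(img, 2, axis=0)
tl, tr = np.split(top, 2, axis=1)
bl, br = np.split(bottom, 2, axis=1)
swapped = np.block([[br, bl], [tr, tl]])   # same diagonal quadrant swap
assert np.array_equal(swapped, np.fft.fftshift(img))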
Example #23
def buildSpImageConverter(channelOrder, img_dtype):
    """
    Convert an imageIO byte-encoded image into an image tensor suitable as input to ConvNets.
    The name of the input must be a subset of those specified in `image.imageIO.imageSchema`.

    :param channelOrder: ordering of the color channels in the decoded image
    :param img_dtype: the type of data the underlying image bytes represent
    """
    with IsolatedSession() as issn:
        # Flat image data -> image dimensions
        # This has to conform to `imageIO.imageSchema`
        height = tf.placeholder(tf.int32, [], name="height")
        width = tf.placeholder(tf.int32, [], name="width")
        num_channels = tf.placeholder(tf.int32, [], name="nChannels")
        image_buffer = tf.placeholder(tf.string, [], name="data")

        # The image is packed into bytes with height as leading dimension
        # This is the default behavior of Python Image Library
        shape = tf.reshape(tf.stack([height, width, num_channels], axis=0),
                           shape=(3,), name='shape')
        if img_dtype == 'uint8':
            image_uint8 = tf.decode_raw(image_buffer, tf.uint8, name="decode_raw")
            image_float = tf.to_float(image_uint8)
        elif img_dtype == 'float32':
            image_float = tf.decode_raw(image_buffer, tf.float32, name="decode_raw")
        else:
            raise ValueError('''unsupported image data type "%s", currently only know how to
            handle uint8 and float32''' % img_dtype)
        image_reshaped = tf.reshape(image_float, shape, name="reshaped")
        image_reshaped = imageIO.fixColorChannelOrdering(channelOrder, image_reshaped)
        image_input = tf.expand_dims(image_reshaped, 0, name="image_input")
        gfn = issn.asGraphFunction([height, width, image_buffer, num_channels], [image_input])

    return gfn
Example #24
def conv_net(_X, _weights, _biases, _dropout):
    # Reshape input picture
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = max_pool(conv1, k=2)
    # Apply Dropout
    conv1 = tf.nn.dropout(conv1, _dropout)

    # Convolution Layer
    conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = max_pool(conv2, k=2)
    # Apply Dropout
    conv2 = tf.nn.dropout(conv2, _dropout)

    # Fully connected layer
    dense1 = tf.reshape(conv2, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv2 output to fit dense layer input
    dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1'])) # Relu activation
    dense1 = tf.nn.dropout(dense1, _dropout) # Apply Dropout

    # Output, class prediction
    out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])
    return out
Example #25
def knn_point(k, xyz1, xyz2):
    '''
    Input:
        k: int32, number of k in k-nn search
        xyz1: (batch_size, ndataset, c) float32 array, input points
        xyz2: (batch_size, npoint, c) float32 array, query points
    Output:
        val: (batch_size, npoint, k) float32 array, L2 distances
        idx: (batch_size, npoint, k) int32 array, indices to input points
    '''
    b = xyz1.get_shape()[0].value
    n = xyz1.get_shape()[1].value
    c = xyz1.get_shape()[2].value
    m = xyz2.get_shape()[1].value
    print(b, n, c, m)
    print(xyz1, (b, 1, n, c))
    xyz1 = tf.tile(tf.reshape(xyz1, (b,1,n,c)), [1,m,1,1])
    xyz2 = tf.tile(tf.reshape(xyz2, (b,m,1,c)), [1,1,n,1])
    dist = tf.reduce_sum((xyz1-xyz2)**2, -1)
    print(dist, k)
    outi, out = select_top_k(k, dist)
    idx = tf.slice(outi, [0,0,0], [-1,-1,k])
    val = tf.slice(out, [0,0,0], [-1,-1,k])
    print(idx, val)
    #val, idx = tf.nn.top_k(-dist, k=k) # ONLY SUPPORT CPU
    return val, idx
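A brute-force NumPy sketch of the same k-NN computation (no custom select_top_k op), with assumed small sizes:

import numpy as np

k = 3
xyz1 = np.random.rand(1, 8, 3)   # input points  (b, n, c)
xyz2 = np.random.rand(1, 4, 3)   # query points  (b, m, c)
diff = xyz2[:, :, None, :] - xyz1[:, None, :, :]
dist = (diff ** 2).sum(-1)                      # (b, m, n) squared L2
idx = np.argsort(dist, axis=-1)[:, :, :k]       # (b, m, k) indices
val = np.take_along_axis(dist, idx, axis=-1)    # (b, m, k) distances
print(idx.shape, val.shape)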
Example #26
  def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                  num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume we use Gaussian
    noise on a randomly sampled batch so we get better composition: 1. the per
    batch privacy is computed using the privacy amplification via sampling bound;
    2. the composition is done using the composition with Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.
    Returns:
      a TensorFlow operation for updating the privacy spending.
    """

    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
      amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                        self._total_examples)
      # Use privacy amplification via sampling bound.
      # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
      # TODO(liqzhang) Add a link to a document with formal statement
      # and proof.
      amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
          tf.exp(eps) - 1.0)), [1])
      amortize_delta = tf.reshape(amortize_ratio * delta, [1])
      return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                      tf.square(amortize_eps)),
                        tf.assign_add(self._delta_sum, amortize_delta)])
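A numeric sketch of the amplification-by-sampling bound used above (Lemma 2.2 of the cited paper), with a hypothetical batch/total-example ratio:

import math

eps, delta = 1.0, 1e-5
amortize_ratio = 256 / 60000.0   # hypothetical num_examples / total_examples
amortize_eps = math.log(1.0 + amortize_ratio * (math.exp(eps) - 1.0))
amortize_delta = amortize_ratio * delta
print(amortize_eps, amortize_delta)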
Example #27
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN
        # outputs.
        facts = tf.concat(facts, 2)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.transpose(facts, [1, 0, 2])
    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    # D value - hidden size of the RNN layer
    facts_size = facts.get_shape().as_list()[-1]
    query_size = query.get_shape().as_list()[-1]  # unused
    query = tf.layers.dense(
        query, facts_size, activation=None, name='f1_trans_shine' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat(
        [queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(
        din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
    d_layer_2_all = tf.layers.dense(
        d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
    d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
    output = d_layer_2_all
    return output
Example #28
    def read_data(self, filename_queue, has_3d=False):
        with tf.name_scope(None, 'read_data', [filename_queue]):
            reader = tf.TFRecordReader()
            _, example_serialized = reader.read(filename_queue)
            if has_3d:
                image, image_size, label, center, fname, pose, shape, gt3d, has_smpl3d = data_utils.parse_example_proto(
                    example_serialized, has_3d=has_3d)
                # Need to send pose because the image can get flipped.
                image, label, pose, gt3d = self.image_preprocessing(
                    image, image_size, label, center, pose=pose, gt3d=gt3d)

                # Convert pose to rotation.
                # Do not ignore the global!!
                rotations = batch_rodrigues(tf.reshape(pose, [-1, 3]))
                gt3d_flat = tf.reshape(gt3d, [-1])
                # Label 3d is:
                #   [rotations, shape-beta, 3Djoints]
                #   [216=24*3*3, 10, 42=14*3]
                label3d = tf.concat(
                    [tf.reshape(rotations, [-1]), shape, gt3d_flat], 0)
            else:
                image, image_size, label, center, fname = data_utils.parse_example_proto(
                    example_serialized)
                image, label = self.image_preprocessing(
                    image, image_size, label, center)

            # label should be K x 3
            label = tf.transpose(label)

            if has_3d:
                return image, label, label3d, has_smpl3d
            else:
                return image, label
Example #29
    def add_logits_op(self):
        """
        Adds logits to self
        """
        with tf.variable_scope("bi-lstm"):
            lstm_fwrd_cell = tf.contrib.rnn.LSTMCell(self.hidden_size)
            lstm_back_cell = tf.contrib.rnn.LSTMCell(self.hidden_size)
            (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(lstm_fwrd_cell,
                                                                        lstm_back_cell,
                                                                        self.word_embeddings,
                                                                        sequence_length=self.sequence_lengths,
                                                                        dtype=tf.float32)
            output = tf.concat([output_fw, output_bw], axis=-1)
            output = tf.nn.dropout(output, self.dropout)

        with tf.variable_scope("proj"):
            W = tf.get_variable("W", shape=[2*self.hidden_size, self.ntags],
                dtype=tf.float32)

            b = tf.get_variable("b", shape=[self.ntags], dtype=tf.float32,
                initializer=tf.zeros_initializer())

            ntime_steps = tf.shape(output)[1]
            output = tf.reshape(output, [-1, 2*self.hidden_size])
            pred = tf.matmul(output, W) + b
            self.logits = tf.reshape(pred, [-1, ntime_steps, self.ntags])
Example #30
def convAutoencoder(x, weights, bias, weights_key, bias_key):
    
    x = tf.reshape(x, shape = [-1, 32, 32, 3])
    print(weights_key, bias_key) 
    #encoder procedure
    encoder_conv_1 = conv2d(x, weights[weights_key[0]], bias[bias_key[0]])
    encoder_pool_1 = maxpool2d(encoder_conv_1)
    encoder_conv_2 = conv2d(encoder_pool_1, weights[weights_key[1]], bias[bias_key[1]])
    encoder_pool_2 = maxpool2d(encoder_conv_2)
    encoder_conv_3 = conv2d(encoder_pool_2, weights[weights_key[2]], bias[bias_key[2]])
    encoder_pool_3 = maxpool2d(encoder_conv_3)
    print(encoder_pool_3.get_shape())
    encoder_pool_3_reshape = tf.reshape(encoder_pool_3, shape = [-1, 1024])
    encoder_dense_1 = dense_layer(encoder_pool_3_reshape, weights[weights_key[3]], bias[bias_key[3]])


    #decoder_procedure
    decoder_dense_1 = dense_layer(encoder_dense_1, weights[weights_key[4]], bias[bias_key[4]])
    decoder_dense_1_reshape = tf.reshape(decoder_dense_1, shape = [-1, 4, 4, 64])
    decoder_upscale_3 = upscale2d(decoder_dense_1_reshape, [1, 2], scale = 2)
    decoder_conv_3 = conv2d(decoder_upscale_3, weights[weights_key[5]], bias[bias_key[5]])
    decoder_upscale_2 = upscale2d(decoder_conv_3, [1, 2], scale = 2)
    decoder_conv_2 = conv2d(decoder_upscale_2, weights[weights_key[6]], bias[bias_key[6]])
    decoder_upscale_1 = upscale2d(decoder_conv_2, [1, 2], scale = 2)
    decoder_conv_1 = conv2d(decoder_upscale_1, weights[weights_key[7]], bias[bias_key[7]])
    
    print(decoder_conv_1.get_shape())
    #output
    output = tf.reshape(decoder_conv_1, shape = [-1, 3072])
    print(output.get_shape())
    
    return output
Example #31
def loss(model, n, A_stencil, A_matrices, S_matrices, index=None, pos=-1., phase="Training", epoch=-1, grid_size=8,
         remove=True):
    with tf.device(DEVICE):
        A_matrices = tf.conj(A_matrices)
        S_matrices = tf.conj(S_matrices)
        pi = tf.constant(np.pi)
        theta_x = np.array(([i * 2 * pi / n for i in range(-n // (grid_size * 2) + 1, n // (grid_size * 2) + 1)]))
    with tf.device(DEVICE):
        if phase == "Test" and epoch == 0:
            P_stencil = model(A_stencil, True)
            P_matrix = utils.compute_p2LFA(P_stencil, n, grid_size)
            P_matrix = tf.transpose(P_matrix, [2, 0, 1, 3, 4])
            P_matrix_t = tf.transpose(P_matrix, [0, 1, 2, 4, 3], conjugate=True)
            A_c = tf.matmul(tf.matmul(P_matrix_t, A_matrices), P_matrix)

            index_to_remove = len(theta_x) * (-1 + n // (2 * grid_size)) + n // (2 * grid_size) - 1
            A_c = tf.reshape(A_c, (-1, int(theta_x.shape[0]) ** 2, (grid_size // 2) ** 2, (grid_size // 2) ** 2))
            A_c_removed = tf.concat([A_c[:, :index_to_remove], A_c[:, index_to_remove + 1:]], 1)
            P_matrix_t_reshape = tf.reshape(P_matrix_t,
                                            (-1, int(theta_x.shape[0]) ** 2, (grid_size // 2) ** 2, grid_size ** 2))
            P_matrix_reshape = tf.reshape(P_matrix,
                                          (-1, int(theta_x.shape[0]) ** 2, grid_size ** 2, (grid_size // 2) ** 2))
            A_matrices_reshaped = tf.reshape(A_matrices,
                                             (-1, int(theta_x.shape[0]) ** 2, grid_size ** 2, grid_size ** 2))
            A_matrices_removed = tf.concat(
                [A_matrices_reshaped[:, :index_to_remove], A_matrices_reshaped[:, index_to_remove + 1:]], 1)

            P_matrix_removed = tf.concat(
                [P_matrix_reshape[:, :index_to_remove], P_matrix_reshape[:, index_to_remove + 1:]], 1)
            P_matrix_t_removed = tf.concat(
                [P_matrix_t_reshape[:, :index_to_remove], P_matrix_t_reshape[:, index_to_remove + 1:]], 1)

            A_coarse_inv_removed = tf.matrix_solve(A_c_removed, P_matrix_t_removed)

            CGC_removed = tf.eye(grid_size ** 2, dtype=tf.complex128) \
                          - tf.matmul(tf.matmul(P_matrix_removed, A_coarse_inv_removed), A_matrices_removed)
            S_matrices_reshaped = tf.reshape(S_matrices,
                                             (-1, int(theta_x.shape[0]) ** 2, grid_size ** 2, grid_size ** 2))
            S_removed = tf.concat(
                [S_matrices_reshaped[:, :index_to_remove], S_matrices_reshaped[:, index_to_remove + 1:]], 1)
            iteration_matrix = tf.matmul(tf.matmul(CGC_removed, S_removed), S_removed)
            loss_test = tf.reduce_mean(tf.reduce_mean(tf.reduce_sum(tf.square(tf.abs(iteration_matrix)), [2, 3]), 1))
            return tf.constant([0.]), loss_test.numpy()
        if index is not None:
            P_stencil = model(A_stencil, index=index, pos=pos, phase=phase)
        else:
            P_stencil = model(A_stencil, phase=phase)

        if not (phase == "Test" and epoch == 0):
            P_matrix = utils.compute_p2LFA(P_stencil, n, grid_size)

            P_matrix = tf.transpose(P_matrix, [2, 0, 1, 3, 4])
            P_matrix_t = tf.transpose(P_matrix, [0, 1, 2, 4, 3], conjugate=True)

            A_c = tf.matmul(tf.matmul(P_matrix_t, A_matrices), P_matrix)
            index_to_remove = len(theta_x) * (-1 + n // (2 * grid_size)) + n // (2 * grid_size) - 1
            A_c = tf.reshape(A_c, (-1, int(theta_x.shape[0]) ** 2, (grid_size // 2) ** 2, (grid_size // 2) ** 2))
            A_c_removed = tf.concat([A_c[:, :index_to_remove], A_c[:, index_to_remove + 1:]], 1)
            P_matrix_t_reshape = tf.reshape(P_matrix_t,
                                            (-1, int(theta_x.shape[0]) ** 2, (grid_size // 2) ** 2, grid_size ** 2))
            P_matrix_reshape = tf.reshape(P_matrix,
                                          (-1, int(theta_x.shape[0]) ** 2, grid_size ** 2, (grid_size // 2) ** 2))
            A_matrices_reshaped = tf.reshape(A_matrices,
                                             (-1, int(theta_x.shape[0]) ** 2, grid_size ** 2, grid_size ** 2))
            A_matrices_removed = tf.concat(
                [A_matrices_reshaped[:, :index_to_remove], A_matrices_reshaped[:, index_to_remove + 1:]], 1)

            P_matrix_removed = tf.concat(
                [P_matrix_reshape[:, :index_to_remove], P_matrix_reshape[:, index_to_remove + 1:]], 1)
            P_matrix_t_removed = tf.concat(
                [P_matrix_t_reshape[:, :index_to_remove], P_matrix_t_reshape[:, index_to_remove + 1:]], 1)
            A_coarse_inv_removed = tf.matrix_solve(A_c_removed, P_matrix_t_removed)

            CGC_removed = tf.eye(grid_size ** 2, dtype=tf.complex128) \
                          - tf.matmul(tf.matmul(P_matrix_removed, A_coarse_inv_removed), A_matrices_removed)
            S_matrices_reshaped = tf.reshape(S_matrices,
                                             (-1, int(theta_x.shape[0]) ** 2, grid_size ** 2, grid_size ** 2))
            S_removed = tf.concat(
                [S_matrices_reshaped[:, :index_to_remove], S_matrices_reshaped[:, index_to_remove + 1:]], 1)
            iteration_matrix_all = tf.matmul(tf.matmul(CGC_removed, S_removed), S_removed)

            if remove:
                if phase != 'Test':
                    iteration_matrix = iteration_matrix_all
                    # Note: range(0) is empty, so this squaring loop is a no-op.
                    for _ in range(0):
                        iteration_matrix = tf.matmul(iteration_matrix_all, iteration_matrix_all)
                else:
                    iteration_matrix = iteration_matrix_all
                loss = tf.reduce_mean(
                    tf.reduce_max(tf.pow(tf.reduce_sum(tf.square(tf.abs(iteration_matrix)), [2, 3]), 1), 1))
            else:
                loss = tf.reduce_mean(
                    tf.reduce_mean(tf.reduce_sum(tf.square(tf.abs(iteration_matrix_all)), [2, 3]), 1))

                print("Real loss: ", loss.numpy())
            real_loss = loss.numpy()
            return loss, real_loss
Example #32
    def jacobian(self, x=None, mean_output=False, mc_num=1, denormalize=False):
        """
        | Calculate the Jacobian of the output with respect to the input; high-performance calculation updated on 15 April 2018
        |
        | Please notice that the de-normalization (if True) assumes the output depends on the input to first order,
        | in which case the equation is simply the Jacobian divided by the input scaling, usually a good approx. if you use ReLU all the way

        :param x: Input Data
        :type x: ndarray
        :param mean_output: False to get all jacobian, True to get the mean
        :type mean_output: boolean
        :param mc_num: Number of Monte Carlo integrations
        :type mc_num: int
        :param denormalize: De-normalize Jacobian
        :type denormalize: bool
        :return: An array of Jacobian
        :rtype: ndarray
        :History:
            | 2017-Nov-20 - Written - Henry Leung (University of Toronto)
            | 2018-Apr-15 - Updated - Henry Leung (University of Toronto)
        """
        self.has_model_check()
        if x is None:
            raise ValueError('Please provide data to calculate the jacobian')

        if mc_num < 1 or isinstance(mc_num, float):
            raise ValueError('mc_num must be a positive integer')

        if self.input_normalizer is not None:
            x_data = self.input_normalizer.normalize(x, calc=False)
        else:
            # Prevent shallow copy issue
            x_data = np.array(x)
            x_data -= self.input_mean
            x_data /= self.input_std

        try:
            input_tens = self.keras_model_predict.get_layer("input").input
            output_tens = self.keras_model_predict.get_layer("output").output
            input_shape_expectation = self.keras_model_predict.get_layer("input").input_shape
            output_shape_expectation = self.keras_model_predict.get_layer("output").output_shape
        except AttributeError:
            input_tens = self.keras_model.get_layer("input").input
            output_tens = self.keras_model.get_layer("output").output
            input_shape_expectation = self.keras_model.get_layer("input").input_shape
            output_shape_expectation = self.keras_model.get_layer("output").output_shape
        except ValueError:
            raise ValueError("astroNN expects input layer is named as 'input' and output layer is named as 'output', "
                             "but None is found.")

        if len(input_shape_expectation) == 1:
            input_shape_expectation = input_shape_expectation[0]

        # just in case only 1 data point is provided and mess up the shape issue
        if len(input_shape_expectation) == 3:
            x_data = np.atleast_3d(x_data)
        elif len(input_shape_expectation) == 4:
            if len(x_data.shape) < 4:
                x_data = x_data[:, :, :, np.newaxis]
        else:
            raise ValueError('Input data shape does not match neural network expectation')

        total_num = x_data.shape[0]

        grad_list = []
        for j in range(self._labels_shape):
            grad_list.append(tf.gradients(output_tens[:, j], input_tens))

        final_stack = tf.stack(tf.squeeze(grad_list))

        # Looping variables for tensorflow setup
        i = tf.constant(0)
        mc_num_tf = tf.constant(mc_num)
        #  To store final result
        l = tf.TensorArray(dtype=tf.float32, infer_shape=False, size=1, dynamic_size=True)

        def body(i, l):
            l = l.write(i, final_stack)
            return i + 1, l

        tf_index, loop = tf.while_loop(lambda i, *_: tf.less(i, mc_num_tf), body, [i, l])

        loops = tf.cond(tf.greater(mc_num_tf, 1), lambda: tf.reduce_mean(loop.stack(), axis=0), lambda: loop.stack())
        loops = tf.reshape(loops,
                           shape=[tf.shape(input_tens)[0], *output_shape_expectation[1:], *input_shape_expectation[1:]])
        start_time = time.time()

        jacobian = np.concatenate(
            [get_session().run(loops, feed_dict={input_tens: x_data[i:i + 1], tfk.backend.learning_phase(): 0}) for i
             in range(0, total_num)], axis=0)

        if mean_output is True:
            jacobian_master = np.mean(jacobian, axis=0)
        else:
            jacobian_master = np.array(jacobian)

        jacobian_master = np.squeeze(jacobian_master)

        if denormalize:
            if self.input_std is not None:
                jacobian_master = jacobian_master / np.squeeze(self.input_std)

            if self.labels_std is not None:
                try:
                    jacobian_master = jacobian_master * self.labels_std
                except ValueError:
                    jacobian_master = jacobian_master * self.labels_std.reshape(-1, 1)

        print(f'Finished all gradient calculation, {(time.time() - start_time):.{2}f} seconds elapsed')

        return jacobian_master
Example #33
def merge_states(x):
    """Smash the last two dimensions of x into a single dimension."""
    *start, a, b = shape_list(x)
    return tf.reshape(x, start + [a * b])
Example #34
def split_states(x, n):
    """Reshape the last dimension of x into [n, x.shape[-1]/n]."""
    *start, m = shape_list(x)
    return tf.reshape(x, start + [n, m // n])
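merge_states and split_states are inverses of each other; a quick shape check (shape_list is assumed to return the static shape as a Python list):

import tensorflow as tf

x = tf.zeros([2, 5, 8])
split = tf.reshape(x, [2, 5, 4, 2])     # what split_states(x, 4) produces
merged = tf.reshape(split, [2, 5, 8])   # what merge_states(split) gives back
print(merged.shape)                     # (2, 5, 8)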
Example #35
def flatten(x_tensor):
    x_shape = x_tensor.get_shape().as_list()
    x_tensor = tf.reshape(x_tensor, shape=[-1, x_shape[1] * x_shape[2] * x_shape[3]])

    return x_tensor
Example #36
def conv_to_fc(x):
    nh = np.prod([v.value for v in x.get_shape()[1:]])
    x = tf.reshape(x, [-1, nh])
    return x
Example #37
def reshape(x, h, w, c, data_format='NHWC'):
    if data_format == 'NCHW':
        x = tf.reshape(x, [-1, c, h, w])
    else:
        x = tf.reshape(x, [-1, h, w, c])
    return x
Example #38
def map_fun(args, ctx):
    from tensorflowonspark import TFNode
    from datetime import datetime
    import math
    import os
    import tensorflow as tf
    import time

    num_workers = args.cluster_size if args.driver_ps_nodes else args.cluster_size - args.num_ps
    worker_num = ctx.worker_num
    job_name = ctx.job_name
    task_index = ctx.task_index

    # Parameters
    IMAGE_PIXELS = 28
    hidden_units = 128

    # Get TF cluster and server instances
    cluster, server = TFNode.start_cluster_server(ctx, 1, args.rdma)

    def _parse_csv(ln):
        splits = tf.string_split([ln], delimiter='|')
        lbl = splits.values[0]
        img = splits.values[1]
        image_defaults = [[0.0] for col in range(IMAGE_PIXELS * IMAGE_PIXELS)]
        image = tf.stack(tf.decode_csv(img, record_defaults=image_defaults))
        norm = tf.constant(255, dtype=tf.float32, shape=(IMAGE_PIXELS * IMAGE_PIXELS,))
        normalized_image = tf.div(image, norm)
        label_value = tf.string_to_number(lbl, tf.int32)
        label = tf.one_hot(label_value, 10)
        return (normalized_image, label)

    def _parse_tfr(example_proto):
        feature_def = {
            "label": tf.FixedLenFeature(10, tf.int64),
            "image": tf.FixedLenFeature(IMAGE_PIXELS * IMAGE_PIXELS, tf.int64)
        }
        features = tf.parse_single_example(example_proto, feature_def)
        norm = tf.constant(255, dtype=tf.float32, shape=(IMAGE_PIXELS * IMAGE_PIXELS,))
        image = tf.div(tf.to_float(features['image']), norm)
        label = tf.to_float(features['label'])
        return (image, label)

    if job_name == "ps":
        server.join()
    elif job_name == "worker":
        # Assigns ops to the local worker by default.
        with tf.device(
                tf.train.replica_device_setter(
                    worker_device="/job:worker/task:%d" % task_index,
                    cluster=cluster)):

            # Dataset for input data
            image_dir = TFNode.hdfs_path(ctx, args.images_labels)
            file_pattern = os.path.join(image_dir, 'part-*')
            files = tf.gfile.Glob(file_pattern)

            if args.format == 'csv2':
                ds = tf.data.TextLineDataset(files)
                parse_fn = _parse_csv
            else:  # args.format == 'tfr'
                ds = tf.data.TFRecordDataset(files)
                parse_fn = _parse_tfr

            ds = ds.shard(num_workers, task_index).repeat(args.epochs).shuffle(
                args.shuffle_size)
            ds = ds.map(parse_fn).batch(args.batch_size)
            iterator = ds.make_initializable_iterator()
            x, y_ = iterator.get_next()

            # Variables of the hidden layer
            hid_w = tf.Variable(tf.truncated_normal(
                [IMAGE_PIXELS * IMAGE_PIXELS, hidden_units],
                stddev=1.0 / IMAGE_PIXELS),
                                name="hid_w")
            hid_b = tf.Variable(tf.zeros([hidden_units]), name="hid_b")
            tf.summary.histogram("hidden_weights", hid_w)

            # Variables of the softmax layer
            sm_w = tf.Variable(tf.truncated_normal([hidden_units, 10],
                                                   stddev=1.0 /
                                                   math.sqrt(hidden_units)),
                               name="sm_w")
            sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
            tf.summary.histogram("softmax_weights", sm_w)

            x_img = tf.reshape(x, [-1, IMAGE_PIXELS, IMAGE_PIXELS, 1])
            tf.summary.image("x_img", x_img)

            hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
            hid = tf.nn.relu(hid_lin)

            y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))

            global_step = tf.Variable(0)

            loss = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
            tf.summary.scalar("loss", loss)
            train_op = tf.train.AdagradOptimizer(0.01).minimize(
                loss, global_step=global_step)

            # Test trained model
            label = tf.argmax(y_, 1, name="label")
            prediction = tf.argmax(y, 1, name="prediction")
            correct_prediction = tf.equal(prediction, label)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                                      name="accuracy")
            tf.summary.scalar("acc", accuracy)

            saver = tf.train.Saver()
            summary_op = tf.summary.merge_all()
            init_op = tf.global_variables_initializer()

        # Create a "supervisor", which oversees the training process and stores model state into HDFS
        logdir = TFNode.hdfs_path(ctx, args.model)
        print("tensorflow model path: {0}".format(logdir))
        summary_writer = tf.summary.FileWriter("tensorboard_%d" % worker_num,
                                               graph=tf.get_default_graph())

        if args.mode == "train":
            sv = tf.train.Supervisor(is_chief=(task_index == 0),
                                     logdir=logdir,
                                     init_op=init_op,
                                     summary_op=None,
                                     saver=saver,
                                     global_step=global_step,
                                     stop_grace_secs=300,
                                     save_model_secs=10)
        else:
            sv = tf.train.Supervisor(is_chief=(task_index == 0),
                                     logdir=logdir,
                                     summary_op=None,
                                     saver=saver,
                                     global_step=global_step,
                                     stop_grace_secs=300,
                                     save_model_secs=0)
            output_dir = TFNode.hdfs_path(ctx, args.output)
            tf.gfile.MkDir(output_dir)
            output_file = tf.gfile.Open("{0}/part-{1:05d}".format(
                output_dir, worker_num),
                                        mode='w')

        # The supervisor takes care of session initialization, restoring from
        # a checkpoint, and closing when done or an error occurs.
        with sv.managed_session(server.target) as sess:
            print("{0} session ready".format(datetime.now().isoformat()))

            # Loop until the supervisor shuts down or args.steps steps have completed.
            sess.run(iterator.initializer)
            step = 0
            count = 0
            while not sv.should_stop() and step < args.steps:

                # Run a training step asynchronously.
                # See `tf.train.SyncReplicasOptimizer` for additional details on how to
                # perform *synchronous* training.

                # input comes from the tf.data iterator defined above
                if args.mode == "train":
                    if (step % 100 == 0):
                        print("{0} step: {1} accuracy: {2}".format(
                            datetime.now().isoformat(), step,
                            sess.run(accuracy)))
                    _, summary, step = sess.run(
                        [train_op, summary_op, global_step])
                    if sv.is_chief:
                        summary_writer.add_summary(summary, step)
                else:  # args.mode == "inference"
                    labels, pred, acc = sess.run([label, prediction, accuracy])
                    # print("label: {0}, pred: {1}".format(labels, pred))
                    print("acc: {0}".format(acc))
                    for i in range(len(labels)):
                        count += 1
                        output_file.write("{0} {1}\n".format(
                            labels[i], pred[i]))
                    print("count: {0}".format(count))

        if args.mode == "inference":
            output_file.close()
            # Delay chief worker from shutting down supervisor during inference, since it can load model, start session,
            # run inference and request stop before the other workers even start/sync their sessions.
            if task_index == 0:
                time.sleep(60)

        # Ask for all the services to stop.
        print("{0} stopping supervisor".format(datetime.now().isoformat()))
        sv.stop()
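
        # Note: tf.train.Supervisor was deprecated in later TF 1.x releases.
        # A hedged sketch of the equivalent setup with
        # tf.train.MonitoredTrainingSession (checkpointing moves into hooks):
        #
        #     with tf.train.MonitoredTrainingSession(
        #             master=server.target,
        #             is_chief=(task_index == 0),
        #             checkpoint_dir=logdir,
        #             save_checkpoint_secs=10) as mon_sess:
        #         while not mon_sess.should_stop():
        #             mon_sess.run(train_op)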
Example #39
    def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h, gtboxes_batch_r,
                                      gt_smooth_label, gpu_id=0):

        if self.is_training:
            gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])
            gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)

            gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])
            gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)

            gt_smooth_label = tf.reshape(gt_smooth_label, [-1, cfgs.ANGLE_RANGE])
            gt_smooth_label = tf.cast(gt_smooth_label, tf.float32)

        img_shape = tf.shape(input_img_batch)

        # 1. build base network
        feature_pyramid = self.build_base_network(input_img_batch)

        # 2. build rpn
        rpn_box_pred, rpn_cls_score, rpn_cls_prob, rpn_angle_cls = self.rpn_net(feature_pyramid)

        # 3. generate_anchors
        anchors = self.make_anchors(feature_pyramid)

        # 4. postprocess rpn proposals. such as: decode, clip, filter
        if self.is_training:
            with tf.variable_scope('build_loss'):
                labels, target_delta, anchor_states, target_boxes, target_smooth_label = tf.py_func(
                    func=anchor_target_layer,
                    inp=[gtboxes_batch_h, gtboxes_batch_r,
                         gt_smooth_label, anchors, gpu_id],
                    Tout=[tf.float32, tf.float32, tf.float32,
                          tf.float32, tf.float32])

                if self.method == 'H':
                    self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 0)
                else:
                    self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 1)

                cls_loss = losses.focal_loss(labels, rpn_cls_score, anchor_states)

                if cfgs.REG_LOSS_MODE == 0:
                    reg_loss = losses.iou_smooth_l1_loss(target_delta, rpn_box_pred, anchor_states, target_boxes,
                                                         anchors)
                elif cfgs.REG_LOSS_MODE == 1:
                    reg_loss = losses.smooth_l1_loss_atan(target_delta, rpn_box_pred, anchor_states)
                else:
                    reg_loss = losses.smooth_l1_loss(target_delta, rpn_box_pred, anchor_states)

                angle_cls_loss = losses.angle_focal_loss(target_smooth_label, rpn_angle_cls, anchor_states)

                self.losses_dict = {'cls_loss': cls_loss * cfgs.CLS_WEIGHT,
                                    'reg_loss': reg_loss * cfgs.REG_WEIGHT,
                                    'angle_cls_loss': angle_cls_loss * cfgs.ANGLE_WEIGHT}

        with tf.variable_scope('postprocess_detctions'):
            boxes, scores, category, boxes_angle = postprocess_detctions(rpn_bbox_pred=rpn_box_pred,
                                                                         rpn_cls_prob=rpn_cls_prob,
                                                                         rpn_angle_prob=tf.sigmoid(rpn_angle_cls),
                                                                         anchors=anchors,
                                                                         is_training=self.is_training)
            boxes = tf.stop_gradient(boxes)
            scores = tf.stop_gradient(scores)
            category = tf.stop_gradient(category)

        if self.is_training:
            return boxes, scores, category, boxes_angle, self.losses_dict
        else:
            return boxes, scores, category, boxes_angle
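
    # Hedged usage sketch (call-site names are assumed, not the source's): in
    # training mode the caller folds the returned loss dict into one scalar:
    #
    #     boxes, scores, category, boxes_angle, losses_dict = \
    #         detector.build_whole_detection_network(img_batch, gt_h, gt_r, gt_smooth)
    #     total_loss = tf.add_n(list(losses_dict.values()))
    #     train_op = tf.train.MomentumOptimizer(1e-3, 0.9).minimize(total_loss)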
Example #40
    # define network
    m = Pnetwork(grid_size=grid_size, device=DEVICE)

    root = tf.train.Checkpoint(optimizer=optimizer, model=m, optimizer_step=tf.train.get_or_create_global_step())

    with tf.device(DEVICE):
        pi = tf.constant(np.pi)
        ci = tf.to_complex128(1j)

    A_stencils_test, A_matrices_test, S_matrices_test, num_of_modes = utils.get_A_S_matrices(num_test_samples, np.pi,
                                                                                             grid_size, n_test)

    with tf.device(DEVICE):
        A_stencils_test = tf.convert_to_tensor(A_stencils_test, dtype=tf.double)
        A_matrices_test = tf.convert_to_tensor(A_matrices_test, dtype=tf.complex128)
        S_matrices_test = tf.reshape(tf.convert_to_tensor(S_matrices_test, dtype=tf.complex128),
                                     (-1, num_of_modes, num_of_modes, grid_size ** 2, grid_size ** 2))

    A_stencils_train = np.array(utils.two_d_stencil(num_training_samples))
    n_train_list = [16, 16, 32]
    initial_epsi = 1e-0

    numiter = -1
    for j in range(len(n_train_list)):
        A_stencils = A_stencils_train.copy()
        n_train = n_train_list[j]

        theta_x = np.array(
            [i * 2 * pi / n_train for i in range(-n_train // (2 * grid_size) + 1, n_train // (2 * grid_size) + 1)])
        theta_y = np.array(
            [i * 2 * pi / n_train for i in range(-n_train // (2 * grid_size) + 1, n_train // (2 * grid_size) + 1)])
Example #41
def alexnet(x, keep_dropout, train_phase):
    weights = {
        'wc1': tf.Variable(tf.random_normal([11, 11, 1, 96], stddev=np.sqrt(2./(11*11)))),
        'wc2': tf.Variable(tf.random_normal([5, 5, 96, 32], stddev=np.sqrt(2./(5*5*96)))),
        'wc3': tf.Variable(tf.random_normal([3, 3, 32, 384], stddev=np.sqrt(2./(3*3*256)))),
        'wc4': tf.Variable(tf.random_normal([3, 3, 384, 32], stddev=np.sqrt(2./(3*3*384)))),
        'wc5': tf.Variable(tf.random_normal([3, 3, 32, 32], stddev=np.sqrt(2./(3*3*32)))),

        'wf6': tf.Variable(tf.random_normal([7*7*32, 4096], stddev=np.sqrt(2./(7*7*32)))),  # was [32, 4096] in the source; pool5 flattens to 7*7*32 for a 224x224 input
        'wf7': tf.Variable(tf.random_normal([4096, 4096], stddev=np.sqrt(2./4096))),
        'wo': tf.Variable(tf.random_normal([4096, 3], stddev=np.sqrt(2./4096)))
    }

    biases = {
        'bo': tf.Variable(tf.ones(3))
    }

    # Conv + ReLU + Pool, 224->55->27
    image = tf.image.rgb_to_grayscale(x)
    conv1 = tf.nn.conv2d(image, weights['wc1'], strides=[1, 4, 4, 1], padding='SAME')
    conv1 = batch_norm_layer(conv1, train_phase, 'bn1')
    conv1 = tf.nn.relu(conv1)
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Conv + ReLU  + Pool, 27-> 13
    conv2 = tf.nn.conv2d(pool1, weights['wc2'], strides=[1, 1, 1, 1], padding='SAME')
    conv2 = batch_norm_layer(conv2, train_phase, 'bn2')
    conv2 = tf.nn.relu(conv2)
    pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Conv + ReLU, 13-> 13
    conv3 = tf.nn.conv2d(pool2, weights['wc3'], strides=[1, 1, 1, 1], padding='SAME')
    conv3 = batch_norm_layer(conv3, train_phase, 'bn3')
    conv3 = tf.nn.relu(conv3)

    # Conv + ReLU, 13-> 13
    conv4 = tf.nn.conv2d(conv3, weights['wc4'], strides=[1, 1, 1, 1], padding='SAME')
    conv4 = batch_norm_layer(conv4, train_phase, 'bn4')
    conv4 = tf.nn.relu(conv4)

    # Conv + ReLU + Pool, 13->6
    conv5 = tf.nn.conv2d(conv4, weights['wc5'], strides=[1, 1, 1, 1], padding='SAME')
    conv5 = batch_norm_layer(conv5, train_phase, 'bn5')
    conv5 = tf.nn.relu(conv5)
    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

    # FC + ReLU + Dropout
    fc6 = tf.reshape(pool5, [-1, weights['wf6'].get_shape().as_list()[0]])
    fc6 = tf.matmul(fc6, weights['wf6'])
    fc6 = batch_norm_layer(fc6, train_phase, 'bn6')
    fc6 = tf.nn.relu(fc6)
    fc6 = tf.nn.dropout(fc6, keep_dropout)
    
    # FC + ReLU + Dropout
    fc7 = tf.matmul(fc6, weights['wf7'])
    fc7 = batch_norm_layer(fc7, train_phase, 'bn7')
    fc7 = tf.nn.relu(fc7)
    fc7 = tf.nn.dropout(fc7, keep_dropout)

    # Output FC
    out = tf.add(tf.matmul(fc7, weights['wo']), biases['bo'])
    
    return out
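
# alexnet calls a batch_norm_layer helper that this excerpt never defines.
# A minimal plausible implementation (an assumption, not the source's): a thin
# wrapper over tf.contrib.layers.batch_norm keyed on the train_phase flag.
def batch_norm_layer(x, train_phase, scope_bn):
    return tf.contrib.layers.batch_norm(
        x, decay=0.9, center=True, scale=True,
        is_training=train_phase, updates_collections=None, scope=scope_bn)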

# The next two helpers were truncated in the source to a single stray
# "return tf.Variable(initial)" line; reconstructed here in the standard
# MNIST-tutorial form that the code below assumes.
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])


W_conv1 = weight_variable([5,5,1,32])
b_conv1 = bias_variable([32])

x_image = tf.reshape(x, [-1,28,28,1])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)



W_conv2 = weight_variable([5,5,32,64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)



W_fc1 = weight_variable([7*7*64, 1024])
Example #43
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data  # read the data
import matplotlib.pyplot as plt
import numpy as np
from time import time

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Build the model
# Each MNIST image has 28*28 = 784 pixels
# Build the input layer
x = tf.placeholder(tf.float32, [None, 784], name="X")
image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image('input', image_shaped_input, 10)


# Build the hidden layers
# Define a fully connected layer helper
def fcn_layer(
        inputs,  # input tensor
        input_dim,  # number of input neurons
        output_dim,  # number of output neurons
        activation=None):  # activation function
    W = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
    # initialize W from a truncated normal distribution
    b = tf.Variable(tf.zeros([output_dim]))
    # initialize b with zeros
    XWb = tf.matmul(inputs, W) + b  # build the expression inputs * W + b
    if activation is None:  # no activation function by default
        outputs = XWb
    else:
        outputs = activation(XWb)  # apply the activation function
    return outputs
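
# Example use of fcn_layer (layer sizes are illustrative, not the source's):
#
#     h1 = fcn_layer(inputs=x, input_dim=784, output_dim=256, activation=tf.nn.relu)
#     logits = fcn_layer(inputs=h1, input_dim=256, output_dim=10, activation=None)
#     pred = tf.nn.softmax(logits)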
Example #44
    def _module_fn():
        """
        Function building the module
        """

        feature_layer = tf.placeholder(
            tf.float32,
            shape=[None, None, None, None, nchannels],
            name='input')
        obs_layer = tf.placeholder(tf.float32,
                                   shape=[None, None, None, None, n_y],
                                   name='observations')

        # Builds the neural network
        net = slim.conv3d(feature_layer,
                          16,
                          5,
                          activation_fn=tf.nn.leaky_relu,
                          padding='valid')
        #net = wide_resnet(feature_layer, 8, activation_fn=tf.nn.leaky_relu, is_training=is_training)
        net = wide_resnet(net,
                          16,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = wide_resnet(net,
                          32,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = wide_resnet(net,
                          32,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = slim.conv3d(net, 32, 3, activation_fn=tf.nn.tanh)

        # Define the probabilistic layer
        net = slim.conv3d(net, n_mixture * 3 * n_y, 1, activation_fn=None)
        cube_size = tf.shape(obs_layer)[1]
        net = tf.reshape(
            net, [-1, cube_size, cube_size, cube_size, n_y, n_mixture * 3])
        #         net = tf.reshape(net, [None, None, None, None, n_y, n_mixture*3])
        loc, unconstrained_scale, logits = tf.split(net,
                                                    num_or_size_splits=3,
                                                    axis=-1)
        scale = tf.nn.softplus(unconstrained_scale)

        # Form mixture of discretized logistic distributions. Note we shift the
        # logistic distribution by -0.5. This lets the quantization capture "rounding"
        # intervals, `(x-0.5, x+0.5]`, and not "ceiling" intervals, `(x-1, x]`.
        discretized_logistic_dist = tfd.QuantizedDistribution(
            distribution=tfd.TransformedDistribution(
                distribution=tfd.Logistic(loc=loc, scale=scale),
                bijector=tfb.AffineScalar(shift=-0.5)),
            low=0.,
            high=2.**3 - 1)

        mixture_dist = tfd.MixtureSameFamily(
            mixture_distribution=tfd.Categorical(logits=logits),
            components_distribution=discretized_logistic_dist)

        # Define a function for sampling, and a function for estimating the log likelihood
        sample = tf.squeeze(mixture_dist.sample())
        loglik = mixture_dist.log_prob(obs_layer)
        hub.add_signature(inputs={
            'features': feature_layer,
            'labels': obs_layer
        },
                          outputs={
                              'sample': sample,
                              'loglikelihood': loglik
                          })
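
    # Hedged sketch of consuming this module with TF-Hub 1.x (assumes the
    # enclosing code exports _module_fn via hub.create_module_spec, as is
    # typical for this pattern):
    #
    #     spec = hub.create_module_spec(_module_fn)
    #     module = hub.Module(spec)
    #     out = module({'features': features, 'labels': labels}, as_dict=True)
    #     sample, loglik = out['sample'], out['loglikelihood']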
Example #45
    def relprop_flatten(self, a, r):
        return tf.reshape(r, tf.shape(a))
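
    # Shape-only illustration: relprop_flatten undoes a flatten while
    # backpropagating LRP relevance, e.g.
    #
    #     a = tf.zeros([8, 7, 7, 64])        # activations that fed the flatten
    #     r = tf.zeros([8, 7 * 7 * 64])      # relevance from the layer above
    #     r_restored = self.relprop_flatten(a, r)  # back to [8, 7, 7, 64]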
Example #46
    def __init__(self, num_words, num_chars, num_classes, num_steps, word_len, embedding_matrix=None):
        # Parameter
        self.config = Config()
        self.dropout_rate = self.config.model_para['dropout_rate']
        self.batch_size = self.config.model_para['batch_size']
        self.num_layers = self.config.model_para['lstm_layer_num']
        self.input_dim = self.config.model_para['input_dim']
        self.hidden_dim = self.config.model_para['hidden_dim']
        self.char_input_dim = self.config.model_para['char_input_dim']
        self.char_hidden_dim = self.config.model_para['char_hidden_dim']
        self.use_pa_learning = self.config.model_para['use_pa_learning']
        
        self.embedding_matrix = embedding_matrix
        
        self.word_len = word_len
        self.num_steps = num_steps
        self.num_words = num_words
        self.num_chars = num_chars
        self.num_classes = num_classes
        
        
        self.char_inputs = tf.placeholder(tf.int32, [None, self.word_len])

        with tf.variable_scope("character-based-emb"):
            # char embedding
            self.char_embedding = tf.get_variable("char_emb", [self.num_chars, self.char_input_dim])

            self.char_inputs_emb = tf.nn.embedding_lookup(self.char_embedding, self.char_inputs)
            self.char_inputs_emb = tf.transpose(self.char_inputs_emb, [1, 0, 2])
            self.char_inputs_emb = tf.reshape(self.char_inputs_emb, [-1, self.char_input_dim])
            self.char_inputs_emb = tf.split(self.char_inputs_emb, self.word_len, 0)
            
        # char forward and backward
        with tf.variable_scope("char-bi-lstm"):
            # char lstm cell
            char_lstm_cell_fw = rnn.LSTMCell(self.char_hidden_dim)
            char_lstm_cell_bw = rnn.LSTMCell(self.char_hidden_dim)

            # get the length of each word
            self.word_length = tf.reduce_sum(tf.sign(self.char_inputs), reduction_indices=1)
            self.word_length = tf.cast(self.word_length, tf.int32)

            char_outputs, f_output, r_output = tf.contrib.rnn.static_bidirectional_rnn(
                char_lstm_cell_fw, 
                char_lstm_cell_bw,
                self.char_inputs_emb, 
                dtype=tf.float32,
                sequence_length=self.word_length
            )
        final_word_output = tf.concat([f_output.h, r_output.h], -1)

        self.word_lstm_last_output = tf.reshape(final_word_output, [-1, self.num_steps, self.char_hidden_dim*2])
        
        # word input
        with tf.variable_scope("word-based-emb"):
            self.inputs = tf.placeholder(tf.int32, [None, self.num_steps])
            # self.targets = tf.placeholder(tf.int32, [None, self.num_steps])
            if self.use_pa_learning:
                self.targets = tf.placeholder(tf.float32, [None, self.num_steps+2, self.num_classes+1])
            else:
                self.targets = tf.placeholder(tf.int32, [None, self.num_steps])
            self.targets_transition = tf.placeholder(tf.int32, [None])
            self.keep_prob = tf.placeholder(tf.float32)

            if embedding_matrix is not None:
                self.embedding = tf.Variable(embedding_matrix, trainable=True, name="word_emb", dtype=tf.float32)
            else:
                self.embedding = tf.get_variable("word_emb", [self.num_words, self.input_dim])

            self.inputs_emb = tf.nn.embedding_lookup(self.embedding, self.inputs)
            self.inputs_emb = tf.concat([self.inputs_emb, self.word_lstm_last_output], -1)

            self.inputs_emb = tf.nn.dropout(self.inputs_emb, self.keep_prob)
            self.inputs_emb = tf.transpose(self.inputs_emb, [1, 0, 2])
            self.inputs_emb = tf.reshape(self.inputs_emb, [-1, self.input_dim+self.char_hidden_dim*2])
            self.inputs_emb = tf.split(self.inputs_emb, self.num_steps, 0)

            # word lstm cell
            lstm_cell_fw = rnn.LSTMCell(self.hidden_dim)
            lstm_cell_bw = rnn.LSTMCell(self.hidden_dim)

            # get the length of each sample
            self.length = tf.reduce_sum(tf.sign(self.inputs), reduction_indices=1)
            self.length = tf.cast(self.length, tf.int32) 
        
        # forward and backward
        with tf.variable_scope("word-bi-lstm"):
            self.outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(
                lstm_cell_fw, 
                lstm_cell_bw,
                self.inputs_emb, 
                dtype=tf.float32,
                sequence_length=self.length
            )

        # bidirect concat
        final_outputs = tf.reshape(tf.concat(self.outputs, 1), [-1, self.hidden_dim * 2])
        tanh_layer_w = tf.get_variable("tanh_layer_w", [self.hidden_dim * 2, self.hidden_dim])
        tanh_layer_b = tf.get_variable("tanh_layer_b", [self.hidden_dim])
        self.final_outputs = tf.tanh(tf.matmul(final_outputs, tanh_layer_w) + tanh_layer_b)

        
 
Example #47
    def train(self, configure):
        # data
        data = tools.Data(configure, epoch_walked)
        # network
        X = tf.placeholder(
            shape=[batch_size, input_shape[0], input_shape[1], input_shape[2]],
            dtype=tf.float32)
        Y = tf.placeholder(shape=[
            batch_size, output_shape[0], output_shape[1], output_shape[2]
        ],
                           dtype=tf.float32)
        lr = tf.placeholder(tf.float32)
        training = tf.placeholder(tf.bool)
        threshold = tf.placeholder(tf.float32)
        with tf.variable_scope('generator'):
            Y_pred, Y_pred_modi, Y_pred_nosig = self.ae_u(
                X, training, batch_size, threshold)
        with tf.variable_scope('discriminator'):
            XY_real_pair = self.dis(X, Y, training)
        with tf.variable_scope('discriminator', reuse=True):
            XY_fake_pair = self.dis(X, Y_pred, training)

        # accuracy
        block_acc = tf.placeholder(tf.float32)
        total_acc = tf.placeholder(tf.float32)
        train_sum = tf.summary.scalar("train_block_accuracy", block_acc)
        test_sum = tf.summary.scalar("total_test_accuracy", total_acc)
        train_merge_op = tf.summary.merge([train_sum])
        test_merge_op = tf.summary.merge([test_sum])

        # loss function
        # generator loss
        Y_ = tf.reshape(Y, shape=[batch_size, -1])
        Y_pred_modi_ = tf.reshape(Y_pred_modi, shape=[batch_size, -1])
        w = tf.placeholder(tf.float32)  # foreground weight
        g_loss = tf.reduce_mean(-tf.reduce_mean(
            w * Y_ * tf.log(Y_pred_modi_ + 1e-8), reduction_indices=[1]) -
                                tf.reduce_mean((1 - w) * (1 - Y_) *
                                               tf.log(1 - Y_pred_modi_ + 1e-8),
                                               reduction_indices=[1]))
        # discriminator loss
        gan_d_loss = tf.reduce_mean(XY_fake_pair) - tf.reduce_mean(
            XY_real_pair)
        alpha = tf.random_uniform(shape=[
            batch_size, input_shape[0] * input_shape[1] * input_shape[2]
        ],
                                  minval=0.0,
                                  maxval=1.0)
        Y_pred_ = tf.reshape(Y_pred, shape=[batch_size, -1])
        differences_ = Y_pred_ - Y_
        interpolates = Y_ + alpha * differences_
        with tf.variable_scope('discriminator', reuse=True):
            XY_fake_intep = self.dis(X, interpolates, training)
        gradients = tf.gradients(XY_fake_intep, [interpolates])[0]
        slopes = tf.sqrt(
            tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = tf.reduce_mean((slopes - 1.0)**2)
        gan_d_loss += 10 * gradient_penalty

        # generator loss with gan loss
        gan_g_loss = -tf.reduce_mean(XY_fake_pair)
        gan_g_w = 5
        ae_w = 100 - gan_g_w
        ae_gan_g_loss = ae_w * g_loss + gan_g_w * gan_g_loss

        # trainers
        ae_var = [
            var for var in tf.trainable_variables()
            if var.name.startswith('generator')
        ]
        dis_var = [
            var for var in tf.trainable_variables()
            if var.name.startswith('discriminator')
        ]
        ae_g_optim = tf.train.AdamOptimizer(learning_rate=lr,
                                            beta1=0.9,
                                            beta2=0.999,
                                            epsilon=1e-8).minimize(
                                                ae_gan_g_loss, var_list=ae_var)
        dis_optim = tf.train.AdamOptimizer(learning_rate=lr,
                                           beta1=0.9,
                                           beta2=0.999,
                                           epsilon=1e-8).minimize(
                                               gan_d_loss, var_list=dis_var)

        saver = tf.train.Saver(max_to_keep=1)
        # config = tf.ConfigProto(allow_soft_placement=True)
        # config.gpu_options.visible_device_list = GPU0

        with tf.Session() as sess:
            # define tensorboard writer
            sum_writer_train = tf.summary.FileWriter(self.train_sum_dir,
                                                     sess.graph)
            sum_write_test = tf.summary.FileWriter(self.test_sum_dir,
                                                   sess.graph)
            # load model data if pre-trained
            sess.run(
                tf.group(tf.global_variables_initializer(),
                         tf.local_variables_initializer()))
            if os.path.isfile(self.train_models_dir +
                              'model.cptk.data-00000-of-00001'):
                print "restoring saved model"
                saver.restore(sess, self.train_models_dir + 'model.cptk')
            learning_rate_g = ori_lr * pow(power, (epoch_walked // decay_step))  # floor division preserves the original Python 2 semantics
            # start training loop
            global_step = step_walked
            for epoch in range(epoch_walked, MAX_EPOCH):
                if epoch % 5 == 0 and epoch > 0:
                    del data
                    gc.collect()
                    data = tools.Data(configure, epoch)
                train_amount = len(data.train_numbers)
                test_amount = len(data.test_numbers)
                if train_amount >= test_amount and train_amount > 0 and test_amount > 0 and data.total_train_batch_num > 0 and data.total_test_seq_batch > 0:
                    # actual foreground weight
                    weight_for = 0.8
                    if epoch % 2 == 0 and epoch > 0:
                        print('********************** FULL TESTING ********************************')
                        time_begin = time.time()
                        origin_dir = read_dicoms(test_dir + "original1")
                        mask_dir = test_dir + "artery"
                        test_batch_size = batch_size
                        # test_data = tools.Test_data(dicom_dir,input_shape)
                        test_data = tools.Test_data(origin_dir, input_shape,
                                                    'vtk_data')
                        test_data.organize_blocks()
                        test_mask = read_dicoms(mask_dir)
                        array_mask = ST.GetArrayFromImage(test_mask)
                        array_mask = np.transpose(array_mask, (2, 1, 0))
                        print "mask shape: ", np.shape(array_mask)
                        block_numbers = list(test_data.blocks.keys())  # list() so it is indexable under Python 3
                        for i in range(0, len(block_numbers), test_batch_size):
                            batch_numbers = []
                            if i + test_batch_size < len(block_numbers):
                                temp_input = np.zeros([
                                    test_batch_size, input_shape[0],
                                    input_shape[1], input_shape[2]
                                ])
                                for j in range(test_batch_size):
                                    temp_num = block_numbers[i + j]
                                    temp_block = test_data.blocks[temp_num]
                                    batch_numbers.append(temp_num)
                                    block_array = temp_block.load_data()
                                    block_shape = np.shape(block_array)
                                    temp_input[j, 0:block_shape[0],
                                               0:block_shape[1],
                                               0:block_shape[2]] += block_array
                                Y_temp_pred, Y_temp_modi, Y_temp_pred_nosig = sess.run(
                                    [Y_pred, Y_pred_modi, Y_pred_nosig],
                                    feed_dict={
                                        X:
                                        temp_input,
                                        training:
                                        False,
                                        w:
                                        weight_for,
                                        threshold:
                                        upper_threshold + test_extra_threshold
                                    })
                                for j in range(test_batch_size):
                                    test_data.upload_result(
                                        batch_numbers[j],
                                        Y_temp_modi[j, :, :, :])
                            else:
                                temp_batch_size = len(block_numbers) - i
                                temp_input = np.zeros([
                                    temp_batch_size, input_shape[0],
                                    input_shape[1], input_shape[2]
                                ])
                                for j in range(temp_batch_size):
                                    temp_num = block_numbers[i + j]
                                    temp_block = test_data.blocks[temp_num]
                                    batch_numbers.append(temp_num)
                                    block_array = temp_block.load_data()
                                    block_shape = np.shape(block_array)
                                    temp_input[j, 0:block_shape[0],
                                               0:block_shape[1],
                                               0:block_shape[2]] += block_array
                                X_temp = tf.placeholder(shape=[
                                    temp_batch_size, input_shape[0],
                                    input_shape[1], input_shape[2]
                                ],
                                                        dtype=tf.float32)
                                with tf.variable_scope('generator',
                                                       reuse=True):
                                    Y_pred_temp, Y_pred_modi_temp, Y_pred_nosig_temp = self.ae_u(
                                        X_temp, training, temp_batch_size,
                                        threshold)
                                Y_temp_pred, Y_temp_modi, Y_temp_pred_nosig = sess.run(
                                    [
                                        Y_pred_temp, Y_pred_modi_temp,
                                        Y_pred_nosig_temp
                                    ],
                                    feed_dict={
                                        X_temp:
                                        temp_input,
                                        training:
                                        False,
                                        w:
                                        weight_for,
                                        threshold:
                                        upper_threshold + test_extra_threshold
                                    })
                                for j in range(temp_batch_size):
                                    test_data.upload_result(
                                        batch_numbers[j],
                                        Y_temp_modi[j, :, :, :])
                        test_result_array = test_data.get_result()
                        print "result shape: ", np.shape(test_result_array)
                        to_be_transformed = self.post_process(
                            test_result_array)
                        if epoch % 50 == 0:
                            self.output_img(to_be_transformed, test_data.space,
                                            epoch)
                        if epoch == 0:
                            mask_img = ST.GetImageFromArray(
                                np.transpose(array_mask, [2, 1, 0]))
                            mask_img.SetSpacing(test_data.space)
                            ST.WriteImage(mask_img,
                                          './test_result/test_mask.vtk')
                        test_IOU = 2 * np.sum(
                            to_be_transformed * array_mask) / (
                                np.sum(to_be_transformed) + np.sum(array_mask))
                        test_summary = sess.run(
                            test_merge_op, feed_dict={total_acc: test_IOU})
                        sum_write_test.add_summary(test_summary,
                                                   global_step=epoch * 6)
                        print "IOU accuracy: ", test_IOU
                        time_end = time.time()
                        print('******************** time of full testing: ' +
                              str(time_end - time_begin) + 's ********************')
                    data.shuffle_X_Y_pairs()
                    total_train_batch_num = data.total_train_batch_num
                    print "total_train_batch_num:", total_train_batch_num
                    for i in range(total_train_batch_num):
                        X_train_batch, Y_train_batch = data.load_X_Y_voxel_train_next_batch(
                        )
                        # calculate loss value
                        # print "calculate begin"
                        gan_d_loss_c, = sess.run(
                            [gan_d_loss],
                            feed_dict={
                                X: X_train_batch,
                                Y: Y_train_batch,
                                training: False,
                                w: weight_for,
                                threshold: upper_threshold
                            })
                        g_loss_c, gan_g_loss_c = sess.run(
                            [g_loss, gan_g_loss],
                            feed_dict={
                                X: X_train_batch,
                                Y: Y_train_batch,
                                training: False,
                                w: weight_for,
                                threshold: upper_threshold
                            })
                        # print "calculate ended"
                        if epoch % decay_step == 0 and epoch > 0 and i == 0:
                            learning_rate_g = learning_rate_g * power
                        sess.run(
                            [ae_g_optim],
                            feed_dict={
                                X: X_train_batch,
                                threshold: upper_threshold,
                                Y: Y_train_batch,
                                lr: learning_rate_g,
                                training: True,
                                w: weight_for
                            })
                        sess.run(
                            [dis_optim],
                            feed_dict={
                                X: X_train_batch,
                                threshold: upper_threshold,
                                Y: Y_train_batch,
                                lr: learning_rate_g,
                                training: True,
                                w: weight_for
                            })
                        # print "training ended"
                        global_step += 1
                        # output some results
                        if i % 2 == 0:
                            print "epoch:", epoch, " i:", i, " train ae loss:", g_loss_c, " gan g loss:", gan_g_loss_c, " gan d loss:", gan_d_loss_c, " learning rate: ", learning_rate_g
                        if i % 20 == 0 and epoch % 1 == 0:
                            try:
                                X_test_batch, Y_test_batch = data.load_X_Y_voxel_test_next_batch(
                                    fix_sample=False)
                                g_loss_t, gan_g_loss_t, gan_d_loss_t, Y_test_pred, Y_test_modi, Y_test_pred_nosig = \
                                    sess.run([g_loss, gan_g_loss, gan_d_loss, Y_pred, Y_pred_modi, Y_pred_nosig],
                                             feed_dict={X: X_test_batch, threshold: upper_threshold + test_extra_threshold,
                                                        Y: Y_test_batch, training: False, w: weight_for})
                                predict_result = np.float32(Y_test_modi > 0.01)
                                predict_result = np.reshape(
                                    predict_result, [
                                        batch_size, input_shape[0],
                                        input_shape[1], input_shape[2]
                                    ])
                                print(np.max(Y_test_pred))
                                print(np.min(Y_test_pred))
                                # IOU
                                predict_probablity = np.float32(
                                    (Y_test_modi - 0.01) > 0)
                                predict_probablity = np.reshape(
                                    predict_probablity, [
                                        batch_size, input_shape[0],
                                        input_shape[1], input_shape[2]
                                    ])
                                accuracy = 2 * np.sum(
                                    np.abs(predict_probablity *
                                           Y_test_batch)) / np.sum(
                                               np.abs(predict_result) +
                                               np.abs(Y_test_batch))
                                print "epoch:", epoch, " global step: ", global_step, "\nIOU accuracy: ", accuracy, "\ntest ae loss:", g_loss_t, " gan g loss:", gan_g_loss_t, " gan d loss:", gan_d_loss_t
                                print "weight of foreground : ", weight_for
                                train_summary = sess.run(
                                    train_merge_op,
                                    feed_dict={block_acc: accuracy})
                                sum_writer_train.add_summary(
                                    train_summary, global_step=global_step)
                            except Exception as e:
                                print(e)
                        #### model saving
                        if i % 30 == 0 and epoch % 1 == 0:
                            saver.save(sess,
                                       save_path=self.train_models_dir +
                                       'model.cptk')
                            print "epoch:", epoch, " i:", i, "regular model saved!"
                else:
                    print "bad data , next epoch", epoch
Example #48
                                         num_repeats=1,
                                         batch_size=128,
                                         num_parallel_calls=4,
                                         prefetch_buffer_size=10,
                                         compute_shape=True)

# Construct the graph
x = training_iterator.get_next()

h2 = tf.layers.flatten(x)
h1 = tf.layers.dense(h2, 2048, activation=None)
h0 = tf.layers.dense(h1, 128, activation=None)
h1_ = tf.layers.dense(h0, 2048, activation=None)
h2_ = tf.layers.dense(h1_, h2.shape[-1], activation=tf.nn.sigmoid)

reconstructed_images = tf.reshape(h2_, tf.shape(x))

with tf.name_scope('loss'):
    error = h2_ - h2
    mse = tf.reduce_mean(tf.square(error), name='mse')

optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(mse)

init = tf.global_variables_initializer()
saver = tf.train.Saver()
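
# Hedged training-loop sketch for the graph above (the step count is
# illustrative; assumes the training_iterator built earlier is one-shot or is
# initialized before the loop):
#
#     with tf.Session() as sess:
#         sess.run(init)
#         for step in range(1000):
#             _, mse_val = sess.run([train_op, mse])
#             if step % 100 == 0:
#                 print(step, mse_val)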


def main():

    # Create session configuration
    def create_compressor(self, g):
        size = tf.reshape(g, [-1]).shape.as_list()[0]
        if size < 1024:
            return IdenticalCompressor()
        else:
            return QSGDCompressor(g.shape, c_dim=256)
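
    # The dispatch key tf.reshape(g, [-1]).shape.as_list()[0] is simply g's
    # total element count, e.g. (compressor classes are the project's own):
    #
    #     tf.zeros([31, 31])  # 961 elements  -> IdenticalCompressor
    #     tf.zeros([64, 64])  # 4096 elements -> QSGDCompressor(shape, c_dim=256)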
Example #50
    def ae_u(self, X, training, batch_size, threshold):
        original = 16
        growth = 16
        dense_layer_num = 16
        with tf.variable_scope("input"):
            X = tf.reshape(X, [
                batch_size, input_shape[0], input_shape[1], input_shape[2], 1
            ])
            down_sample_input = tools.Ops.conv3d(X,
                                                 k=3,
                                                 out_c=original,
                                                 str=1,
                                                 name='down_sample_input')
            bn_input = tools.Ops.batch_norm(down_sample_input,
                                            "bn_input",
                                            training=training)
            relu_input = tools.Ops.xxlu(bn_input, name="relu_input")
            down_sample_1 = tools.Ops.conv3d(relu_input,
                                             k=2,
                                             out_c=original,
                                             str=2,
                                             name='down_sample_1')
        with tf.variable_scope("dense_block_1"):
            c_e = []
            s_e = []
            layers_e = []
            layers_e.append(down_sample_1)
            for i in range(dense_layer_num):
                c_e.append(original + growth * (i + 1))
                s_e.append(1)
            for j in range(dense_layer_num):
                layer = tools.Ops.batch_norm(layers_e[-1],
                                             'bn_dense_1_1_' + str(j),
                                             training=training)
                layer = tools.Ops.xxlu(layer, name='relu_1')
                layer = tools.Ops.conv3d(layer,
                                         k=1,
                                         out_c=growth,
                                         str=s_e[j],
                                         name='dense_1_1_' + str(j))
                layer = tools.Ops.batch_norm(layer,
                                             'bn_dense_1_2_' + str(j),
                                             training=training)
                layer = tools.Ops.xxlu(layer, name='relu_2')
                layer = tools.Ops.conv3d(layer,
                                         k=3,
                                         out_c=growth,
                                         str=s_e[j],
                                         name='dense_1_2_' + str(j))
                next_input = tf.concat([layer, layers_e[-1]], axis=4)
                layers_e.append(next_input)
        with tf.variable_scope("middle_down_sample"):
            mid_layer = tools.Ops.batch_norm(layers_e[-1],
                                             'bn_mid',
                                             training=training)
            mid_layer = tools.Ops.xxlu(mid_layer, name='relu')
            mid_layer = tools.Ops.conv3d(mid_layer,
                                         k=3,
                                         out_c=original +
                                         growth * dense_layer_num // 2,  # floor division: out_c must stay an integer under Python 3
                                         str=1,
                                         name='mid_conv')
            mid_layer_down = tools.Ops.conv3d(mid_layer,
                                              k=2,
                                              out_c=original +
                                               growth * dense_layer_num // 2,
                                              str=2,
                                              name='down_conv')
        with tf.variable_scope("dense_block_2"):
            c_d = []
            s_d = []
            layers_d = []
            layers_d.append(mid_layer_down)
            for i in range(dense_layer_num):
                c_d.append(original + growth * (dense_layer_num + i + 1))
                s_d.append(1)
            for j in range(dense_layer_num):
                layer = tools.Ops.batch_norm(layers_d[-1],
                                             'bn_dense_2_1_' + str(j),
                                             training=training)
                layer = tools.Ops.xxlu(layer, name='relu_1')
                layer = tools.Ops.conv3d(layer,
                                         k=1,
                                         out_c=growth,
                                         str=s_d[j],
                                         name='dense_2_1_' + str(j))
                layer = tools.Ops.batch_norm(layer,
                                             'bn_dense_2_2_' + str(j),
                                             training=training)
                layer = tools.Ops.xxlu(layer, name='relu_2')
                layer = tools.Ops.conv3d(layer,
                                         k=3,
                                         out_c=growth,
                                         str=s_d[j],
                                         name='dense_2_2_' + str(j))
                next_input = tf.concat([layer, layers_d[-1]], axis=4)
                layers_d.append(next_input)
        with tf.variable_scope("up_sample"):

            bn_1 = tools.Ops.batch_norm(layers_d[-1],
                                        'bn_after_dense_1',
                                        training=training)
            relu_1 = tools.Ops.xxlu(bn_1, name='relu_1')
            deconv_1 = tools.Ops.deconv3d(relu_1,
                                          k=2,
                                          out_c=128,
                                          str=2,
                                          name='deconv_up_sample_2')
            concat_up_1 = tf.concat([deconv_1, layers_e[-1]],
                                    axis=4,
                                    name="concat_up_1")

            bn_2 = tools.Ops.batch_norm(concat_up_1,
                                        'bn_after_dense_2',
                                        training=training)
            relu_2 = tools.Ops.xxlu(bn_2, name='relu_2')
            deconv_2 = tools.Ops.deconv3d(relu_2,
                                          k=2,
                                          out_c=64,
                                          str=2,
                                          name='deconv_up_sample_1')
            concat_up_2 = tf.concat([deconv_2, down_sample_input],
                                    axis=4,
                                    name="concat_up_2")

            bn_3 = tools.Ops.batch_norm(concat_up_2,
                                        'bn_after_dense_3',
                                        training=training)
            relu_3 = tools.Ops.xxlu(bn_3, name='relu_2')
            conv_up_1 = tools.Ops.conv3d(relu_3,
                                         k=3,
                                         out_c=32,
                                         str=1,
                                         name="conv_up_1")
            predict_map = tools.Ops.conv3d(conv_up_1,
                                           k=1,
                                           out_c=1,
                                           str=1,
                                           name='predict_map')
            predice_bn = tools.Ops.batch_norm(predict_map,
                                              "final_bn",
                                              training=training)
        with tf.variable_scope("output"):
            vox_no_sig = tools.Ops.xxlu(predice_bn, name="final_relu")
            vox_sig = tf.sigmoid(predict_map)
            vox_sig_modified = tf.maximum(vox_sig - threshold, 0.01)
        return vox_sig, vox_sig_modified, vox_no_sig
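
    # Numeric check of the output gating above (illustrative values): with
    # threshold 0.6, a sigmoid score of 0.9 passes as ~0.3 while 0.5 is floored:
    #
    #     tf.maximum(tf.constant([0.9, 0.5]) - 0.6, 0.01)  # -> [~0.3, 0.01]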
Example #51
    def _compile_cost(self, ac_seqs, get_pred_trajs=False):
        """
        Using num_particles, propagates each particle forward for MPC time horizon timesteps using ac_seqs.
        Calculates the average cost of the entire trajectory across all particles.

        """
        t, nopt = tf.constant(0), tf.shape(ac_seqs)[0]
        init_costs = tf.zeros([nopt, self.npart])
        ac_seqs = tf.reshape(ac_seqs, [-1, self.plan_hor, self.dU])
        ac_seqs = tf.reshape(
            tf.tile(
                tf.transpose(ac_seqs, [1, 0, 2])[:, :, None],
                [1, 1, self.npart, 1]), [self.plan_hor, -1, self.dU])
        init_obs = tf.tile(self.sy_cur_obs[None], [nopt * self.npart, 1])

        def continue_prediction(t, *args):
            return tf.less(t, self.plan_hor)

        if get_pred_trajs:
            pred_trajs = init_obs[None]

            def iteration(t, total_cost, cur_obs, pred_trajs):
                # ac_seq is mpc_horizon x num_particles x action_space
                cur_acs = ac_seqs[t]  # cur_acs is num_particles x action_space

                # _predict_next_obs uses learned mean and var to predict next state for each particle
                next_obs = self._predict_next_obs(
                    cur_obs,
                    cur_acs)  # next_obs is num_particles x state_space

                delta_cost = tf.reshape(  #delta_cost is 1 x num_particles, indicates predicted cost for this timestep of MPC horizon
                    self.obs_cost_fn(next_obs, cur_obs) +
                    self.ac_cost_fn(cur_acs), [-1, self.npart])

                #note, must have input and output that are part of tensorflow graph
                # def print_cost(t, delta_cost, next_obs, cur_obs):
                #     delta_cost = np.array(tf.identity(delta_cost))
                #     if t == 0:
                #         print(f"curr state {cur_obs}, NN predicted next state {next_obs}")
                #     # print(f"cur obs: {cur_obs}")
                #     # print(f"next obs: {next_obs}")
                #     return t

                # preserve shapes before call, restore after call
                # t_shape = t.get_shape()
                # t = tfe.py_func(func=print_cost, inp=[t, delta_cost, next_obs, cur_obs], Tout=t.dtype)
                # t = tf.convert_to_tensor(t)
                # t.set_shape(t_shape)
                # t=tf.squeeze(t)

                next_obs = self.obs_postproc2(next_obs)
                pred_trajs = tf.concat([pred_trajs, next_obs[None]], axis=0)
                return t + 1, total_cost + delta_cost, next_obs, pred_trajs  #this timestep's cost isn't preserved: it's added to the total trajectory cost

            _, costs, next_obs, pred_trajs = tf.while_loop(
                cond=continue_prediction,
                body=iteration,
                loop_vars=[t, init_costs, init_obs, pred_trajs],
                shape_invariants=[
                    t.get_shape(),
                    init_costs.get_shape(),
                    init_obs.get_shape(),
                    tf.TensorShape([None, None, self.dO])
                ])

            # Replace NaN costs with a very high cost, then average each
            # candidate's total trajectory cost over its particles
            costs = tf.reduce_mean(tf.where(tf.is_nan(costs),
                                            1e6 * tf.ones_like(costs), costs),
                                   axis=1)
            # pred_trajs is [plan_hor + 1, num_candidates, npart, dO]: each
            # particle's predicted trajectory over the MPC horizon
            pred_trajs = tf.reshape(
                pred_trajs, [self.plan_hor + 1, -1, self.npart, self.dO])

            return costs, pred_trajs
        else:

            def iteration(t, total_cost, cur_obs):
                cur_acs = ac_seqs[t]
                next_obs = self._predict_next_obs(cur_obs, cur_acs)
                delta_cost = tf.reshape(
                    self.obs_cost_fn(next_obs, cur_obs) +
                    self.ac_cost_fn(cur_acs), [-1, self.npart])
                return t + 1, total_cost + delta_cost, self.obs_postproc2(
                    next_obs)

            _, costs, next_obs = tf.while_loop(
                cond=continue_prediction,
                body=iteration,
                loop_vars=[t, init_costs, init_obs])

            # Replace NaN costs with a very high cost, then average over particles
            return tf.reduce_mean(tf.where(tf.is_nan(costs),
                                           1e6 * tf.ones_like(costs), costs),
                                  axis=1)
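
The reshaping at the top of _compile_cost is the key trick: each candidate action sequence is duplicated once per particle, so a single batched rollout evaluates nopt * npart trajectories at once. A minimal NumPy sketch of the same tiling, with toy sizes assumed (nopt=2, plan_hor=3, npart=4, dU=1):

import numpy as np

nopt, plan_hor, npart, dU = 2, 3, 4, 1
ac_seqs = np.arange(nopt * plan_hor * dU, dtype=np.float32).reshape(nopt, plan_hor, dU)

# [nopt, plan_hor, dU] -> [plan_hor, nopt, 1, dU] -> tile particles -> [plan_hor, nopt*npart, dU]
tiled = np.tile(ac_seqs.transpose(1, 0, 2)[:, :, None], (1, 1, npart, 1))
tiled = tiled.reshape(plan_hor, nopt * npart, dU)
print(tiled.shape)  # (3, 8, 1): at each timestep, every particle sees its candidate's action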
Example #52
def CNN(input_tensor, regularizer, keep_prob):  # build the network
    with tf.variable_scope('layer1_conv1'):  # layer 1, convolution: input 100x100x3, output 100x100x32
        W_conv1 = tf.get_variable("weight", [5, 5, 3, 32], initializer=tf.truncated_normal_initializer(stddev=0.1))
        b_conv1 = tf.get_variable("bias", [32], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, W_conv1, strides=[1, 1, 1, 1], padding="SAME")  # first convolution result
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, b_conv1))

    with tf.name_scope('layer2_pool1'):  # layer 2, pooling: input 100x100x32, output 50x50x32
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")  # 2x2 pooling kernel

    with tf.variable_scope('layer3_conv2'):  # layer 3, convolution: input 50x50x32, output 50x50x64
        W_conv2 = tf.get_variable("weight", [5, 5, 32, 64], initializer=tf.truncated_normal_initializer(stddev=0.1))
        b_conv2 = tf.get_variable("bias", [64], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, W_conv2, strides=[1, 1, 1, 1], padding="SAME")
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, b_conv2))

    with tf.name_scope('layer4_pool2'):  # layer 4, pooling: input 50x50x64, output 25x25x64
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    with tf.variable_scope('layer5_conv3'):  # layer 5, convolution: input 25x25x64, output 25x25x128
        W_conv3 = tf.get_variable("weight", [3, 3, 64, 128], initializer=tf.truncated_normal_initializer(stddev=0.1))
        b_conv3 = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, W_conv3, strides=[1, 1, 1, 1], padding="SAME")
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, b_conv3))

    with tf.name_scope('layer6_pool3'):  # layer 6, pooling: input 25x25x128, output 12x12x128
        pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")

    with tf.variable_scope('layer7_conv4'):  # layer 7, convolution: input 12x12x128, output 12x12x128
        W_conv4 = tf.get_variable("weight", [3, 3, 128, 128], initializer=tf.truncated_normal_initializer(stddev=0.1))
        b_conv4 = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv4 = tf.nn.conv2d(pool3, W_conv4, strides=[1, 1, 1, 1], padding="SAME")
        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, b_conv4))

    with tf.name_scope('layer8_pool4'):  # layer 8, pooling: input 12x12x128, output 6x6x128
        pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
        nodes = 6 * 6 * 128  # final pooled size of one image
        reshaped = tf.reshape(pool4, [-1, nodes])

    with tf.variable_scope('layer9_fc1'):  # layer 9, fully connected (input -> hidden 1): input 6*6*128, 1024 hidden units
        W_fc1 = tf.get_variable("weights", [nodes, 1024], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None: tf.add_to_collection("losses", regularizer(W_fc1))
        b_fc1 = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1))

        fc1 = tf.nn.relu(tf.matmul(reshaped, W_fc1) + b_fc1)
        fc1 = tf.nn.dropout(fc1, keep_prob=keep_prob)  # dropout during training to reduce overfitting

    with tf.variable_scope('layer10_fc2'):  # layer 10, fully connected (hidden 1 -> hidden 2): input 1024, output 512
        W_fc2 = tf.get_variable("weights", [1024, 512], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None: tf.add_to_collection("losses", regularizer(W_fc2))
        b_fc2 = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1))

        fc2 = tf.nn.relu(tf.matmul(fc1, W_fc2) + b_fc2)
        fc2 = tf.nn.dropout(fc2, keep_prob=keep_prob)

    with tf.variable_scope('layer11_fc3'):  # layer 11, fully connected (hidden 2 -> output): input 512, output 5, one per flower class
        W_fc3 = tf.get_variable("weights", [512, 5], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None: tf.add_to_collection("losses", regularizer(W_fc3))
        b_fc3 = tf.get_variable("bias", [5], initializer=tf.constant_initializer(0.1))

        output = tf.matmul(fc2, W_fc3) + b_fc3

    return output
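
A hypothetical wiring sketch for this network, assuming TF 1.x, 100x100x3 inputs (per the layer comments) and the five-class output; the placeholder names, regularizer scale, and learning rate are illustrative, not from the original source:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 100, 100, 3], name="x")   # input images
y_ = tf.placeholder(tf.int64, [None], name="labels")            # class indices, 0..4
regularizer = tf.contrib.layers.l2_regularizer(1e-4)

logits = CNN(x, regularizer, keep_prob=0.5)
cross_entropy = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=logits))
loss = cross_entropy + tf.add_n(tf.get_collection("losses"))    # add the L2 terms collected above
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)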
Example #53
b_conv2 = tf.Variable(tf.constant(0.01, shape=[10]), name="b_conv2")
h_conv2 = tf.nn.relu(conv2d_st2(h_pool1, W_conv2) + b_conv2, "conv2")  ## output size = 26x26x10
h_pool2 = max_pool_2x2(h_conv2)                                        ## output = 13x13x10


# 3rd convolutional layer: kernel dim = (3x3x10), output channels = 10
W_conv3 = tf.Variable(tf.truncated_normal([3, 3, 10, 10], stddev=0.03), name="w_conv3")
b_conv3 = tf.Variable(tf.constant(0.01, shape=[10]), name="b_conv3")
h_conv3 = tf.nn.relu(conv2d_st2(h_pool2, W_conv3) + b_conv3, "conv3")  ## output size = 6x6x10
h_pool3 = max_pool_2x2(h_conv3)                                        ## output = 3x3x10


# 1st fully connected layer: flatten the last convolutional output (3*3*10 = 90-dim) down to 20 units
W_fc1 = tf.Variable(tf.truncated_normal([3 * 3 * 10, 20], stddev=0.03), name="W_fc1")
b_fc1 = tf.Variable(tf.constant(0.01, shape=[20]), name="b_fc1")
h_pool3_flat = tf.reshape(h_pool3, [-1, 3 * 3 * 10])
h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1, "fc1")



################### audio feature processing
W_fc1_aud = tf.Variable(tf.truncated_normal([200, 20], stddev=0.03), name="W_fc1_aud")
b_fc1_aud = tf.Variable(tf.constant(0.01, shape=[20]), name="b_fc1_aud")
h_fc1_aud = tf.nn.relu(tf.matmul(x2, W_fc1_aud) + b_fc1_aud, "aud_fc1")



################### fusing visual and audio information by concatenating them
# add dropout after fully connected layer
fused_video_audio= tf.concat([ h_fc1, h_fc1_aud], 1, "fused_feature")
h_fc1_drop = tf.nn.dropout( fused_video_audio, keep_prob )
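
Since h_fc1 and h_fc1_aud are both [batch, 20], the concatenation yields a [batch, 40] fused feature. A quick stand-in shape check (toy batch size assumed):

import tensorflow as tf

a = tf.zeros([8, 20])  # stand-in for h_fc1 (visual features)
b = tf.zeros([8, 20])  # stand-in for h_fc1_aud (audio features)
fused = tf.concat([a, b], 1)
print(fused.get_shape().as_list())  # [8, 40]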
Example #54
    def _predict_next_obs(self, obs, acs):
        """
        Use learned mean and var to predict next state for each particle given a current state and action.
        Performs trajectory sampling or moment matching of potential action sequences
        """
        proc_obs = self.obs_preproc(obs)

        if self.model.is_tf_model:
            # TS Optimization: Expand so that particles are only passed through one of the networks.
            if self.prop_mode == "TS1":
                proc_obs = tf.reshape(
                    proc_obs,
                    [-1, self.npart, proc_obs.get_shape()[-1]])
                sort_idxs = tf.nn.top_k(tf.random_uniform(
                    [tf.shape(proc_obs)[0], self.npart]),
                                        k=self.npart).indices
                tmp = tf.tile(
                    tf.range(tf.shape(proc_obs)[0])[:, None],
                    [1, self.npart])[:, :, None]
                idxs = tf.concat([tmp, sort_idxs[:, :, None]], axis=-1)
                proc_obs = tf.gather_nd(proc_obs, idxs)
                proc_obs = tf.reshape(proc_obs, [-1, proc_obs.get_shape()[-1]])
            if self.prop_mode == "TS1" or self.prop_mode == "TSinf":
                proc_obs, acs = self._expand_to_ts_format(
                    proc_obs), self._expand_to_ts_format(acs)

            # Obtain model predictions
            inputs = tf.concat([proc_obs, acs], axis=-1)
            mean, var = self.model.create_prediction_tensors(inputs)

            # NB: a hook for inspecting the predicted mean/variance here
            # (e.g. via tfe.py_func) must keep its inputs and outputs inside
            # the TensorFlow graph and preserve tensor shapes.

            if self.model.is_probabilistic and not self.ign_var:
                predictions = mean + tf.compat.v1.random.normal(
                    shape=tf.shape(mean), mean=0, stddev=1) * tf.sqrt(var)
                if self.prop_mode == "MM":
                    model_out_dim = predictions.get_shape()[-1].value

                    predictions = tf.reshape(predictions,
                                             [-1, self.npart, model_out_dim])
                    prediction_mean = tf.reduce_mean(predictions,
                                                     axis=1,
                                                     keep_dims=True)
                    prediction_var = tf.reduce_mean(tf.square(predictions -
                                                              prediction_mean),
                                                    axis=1,
                                                    keep_dims=True)
                    z = tf.compat.v1.random.normal(shape=tf.shape(predictions),
                                                   mean=0,
                                                   stddev=1)
                    samples = prediction_mean + z * tf.sqrt(prediction_var)
                    predictions = tf.reshape(samples, [-1, model_out_dim])
            else:
                predictions = mean

            # TS Optimization: Remove additional dimension
            if self.prop_mode == "TS1" or self.prop_mode == "TSinf":
                predictions = self._flatten_to_matrix(predictions)
            if self.prop_mode == "TS1":
                predictions = tf.reshape(
                    predictions, [-1, self.npart,
                                  predictions.get_shape()[-1]])
                sort_idxs = tf.nn.top_k(-sort_idxs, k=self.npart).indices
                idxs = tf.concat([tmp, sort_idxs[:, :, None]], axis=-1)
                predictions = tf.gather_nd(predictions, idxs)
                predictions = tf.reshape(predictions,
                                         [-1, predictions.get_shape()[-1]])

            return self.obs_postproc(obs, predictions)
        else:
            raise NotImplementedError()
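
The TS1 branch draws a fresh random permutation of particles per candidate via tf.nn.top_k over uniform noise, then inverts it after prediction with top_k(-sort_idxs). A NumPy sketch of that permute/unpermute pair, with toy sizes assumed:

import numpy as np

nopt, npart = 3, 5
rng = np.random.default_rng(0)

perm = np.argsort(-rng.random((nopt, npart)), axis=1)  # mirrors tf.nn.top_k(random_uniform).indices
inv = np.argsort(perm, axis=1)                         # mirrors tf.nn.top_k(-sort_idxs).indices

x = np.arange(nopt * npart).reshape(nopt, npart)
shuffled = np.take_along_axis(x, perm, axis=1)         # scatter particles across bootstrap nets
restored = np.take_along_axis(shuffled, inv, axis=1)   # undo the shuffle after prediction
assert (restored == x).all()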
Example #55
def get_output(layers):
    # closure over self.X from the enclosing method: pass the input
    # through each layer in turn and flatten the result
    Z = self.X
    for layer in layers:
        Z = layer.forward(Z)
    return tf.reshape(Z, [-1])
                image_new = cv2.resize(input_image[:, :, i + vertical * 9],
                                       (resize_size, resize_size),
                                       interpolation=cv2.INTER_NEAREST)
                a.append(image_new)
            b.append(np.hstack(a))

    return np.vstack(b)


# cnn model
# placeholder
x = tf.placeholder("float", [None, 784])
y_ = tf.placeholder("float", [None, 10])

# first layer
x_image = tf.reshape(
    x, [-1, 28, 28, 1])  # [batch, in_height, in_width, in_channels]
W_conv1 = weight_init([5, 5, 1,
                       32])  # filter:[size=5x5,channel=1,filter_amount=32]
b_conv1 = bias_init([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# second layer
W_conv2 = weight_init([5, 5, 32, 64])  # weight_init returns a tf.Variable, trainable via SGD
b_conv2 = bias_init([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# neural net layer
# y = x * w + b : [-1, 7*7*64] * [7*7*64, 1024] + [1024]
W_fc1 = weight_init([7 * 7 * 64,
Example #57
def batchLoss(out_batch,                   # [batch_size, (1..2)] tf result
              target_disparity_batch,      # [batch_size]         tf placeholder
              gt_ds_batch,                 # [batch_size, 2]      tf placeholder
              absolute_disparity =     True, # when False there must be no activation on the disparity output!
              use_confidence =         True,
              lambda_conf_avg =        0.01,
              lambda_conf_pwr =        0.1,
              conf_pwr =               2.0,
              gt_conf_offset =         0.08,
              gt_conf_pwr =            1.0,
              error2_offset =          0.0025): # (0.05^2)
    """
    Confidence should come after ReLU. Disparity may as well, if absolute;
    there must be no activation if the output is residual disparity.
    """
    with tf.name_scope("BatchLoss"):
        tf_lambda_conf_avg = tf.constant(lambda_conf_avg, dtype=tf.float32, name="tf_lambda_conf_avg")
        tf_lambda_conf_pwr = tf.constant(lambda_conf_pwr, dtype=tf.float32, name="tf_lambda_conf_pwr")
        tf_conf_pwr =        tf.constant(conf_pwr,        dtype=tf.float32, name="tf_conf_pwr")
        tf_gt_conf_offset =  tf.constant(gt_conf_offset,  dtype=tf.float32, name="tf_gt_conf_offset")
        tf_gt_conf_pwr =     tf.constant(gt_conf_pwr,     dtype=tf.float32, name="tf_gt_conf_pwr")
        tf_num_tiles =       tf.shape(gt_ds_batch)[0]
        tf_0f =              tf.constant(0.0,             dtype=tf.float32, name="tf_0f")
        tf_1f =              tf.constant(1.0,             dtype=tf.float32, name="tf_1f")
        tf_maxw =            tf.constant(1.0,             dtype=tf.float32, name="tf_maxw")
        if gt_conf_pwr == 0:
            w = tf.ones((out_batch.shape[0]), dtype=tf.float32,name="w_ones")
        else:
            w_slice = tf.reshape(gt_ds_batch[:,1],[-1],                      name = "w_gt_slice")
            w_sub =   tf.subtract      (w_slice, tf_gt_conf_offset,          name = "w_sub")
            w_clip =  tf.maximum(w_sub, tf_0f,                               name = "w_clip")
            if gt_conf_pwr == 1.0:
                w = w_clip
            else:
                w = tf.pow(w_clip, tf_gt_conf_pwr, name = "w_pow")
    
        if use_confidence:
            tf_num_tilesf =  tf.cast(tf_num_tiles, dtype=tf.float32,         name="tf_num_tilesf")
            conf_slice =     tf.reshape(out_batch[:,1],[-1],                 name = "conf_slice")
            conf_sum =       tf.reduce_sum(conf_slice,                       name = "conf_sum")
            conf_avg =       tf.divide(conf_sum, tf_num_tilesf,              name = "conf_avg")
            conf_avg1 =      tf.subtract(conf_avg, tf_1f,                    name = "conf_avg1")
            conf_avg2 =      tf.square(conf_avg1,                            name = "conf_avg2")
            cost2 =          tf.multiply (conf_avg2, tf_lambda_conf_avg,     name = "cost2")

            iconf_avg =      tf.divide(tf_1f, conf_avg,                      name = "iconf_avg")
            nconf =          tf.multiply (conf_slice, iconf_avg,             name = "nconf") # normalized confidence
            nconf_pwr =      tf.pow(nconf, conf_pwr,                         name = "nconf_pwr")
            nconf_pwr_sum =  tf.reduce_sum(nconf_pwr,                        name = "nconf_pwr_sum")
            nconf_pwr_offs = tf.subtract(nconf_pwr_sum, tf_1f,               name = "nconf_pwr_offs")
            cost3 =          tf.multiply (conf_avg2, nconf_pwr_offs,         name = "cost3")
            w_all =          tf.multiply (w, nconf,                          name = "w_all")
        else:
            w_all = w
        # normalize weights
        w_sum =              tf.reduce_sum(w_all,                            name = "w_sum")
        iw_sum =             tf.divide(tf_1f, w_sum,                         name = "iw_sum")
        w_norm =             tf.multiply (w_all, iw_sum,                     name = "w_norm")
        
        disp_slice =         tf.reshape(out_batch[:,0],[-1],                 name = "disp_slice")
        d_gt_slice =         tf.reshape(gt_ds_batch[:,0],[-1],               name = "d_gt_slice")
        if absolute_disparity:
            out_diff =       tf.subtract(disp_slice, d_gt_slice,             name = "out_diff")
        else:
            td_flat =        tf.reshape(target_disparity_batch,[-1],         name = "td_flat")
            residual_disp =  tf.subtract(d_gt_slice, td_flat,                name = "residual_disp")
            out_diff =       tf.subtract(disp_slice, residual_disp,          name = "out_diff")
        out_diff2 =          tf.square(out_diff,                             name = "out_diff2")
        out_wdiff2 =         tf.multiply (out_diff2, w_norm,                 name = "out_wdiff2")
        
        cost1 =              tf.reduce_sum(out_wdiff2,                       name = "cost1")
        
        out_diff2_offset =   tf.subtract(out_diff2, error2_offset,           name = "out_diff2_offset")
        out_diff2_biased =   tf.maximum(out_diff2_offset, 0.0,               name = "out_diff2_biased")
        out_diff2_wbiased =  tf.multiply(out_diff2_biased, w_norm,           name = "out_diff2_wbiased")
        
        cost1b =             tf.reduce_sum(out_diff2_wbiased,                name = "cost1b")
        
        if use_confidence:
            cost12 =         tf.add(cost1b, cost2,                           name = "cost12")
            cost123 =        tf.add(cost12, cost3,                           name = "cost123")

            return cost123, disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, cost1
        else:
            return cost1b,  disp_slice, d_gt_slice, out_diff, out_diff2, w_norm, out_wdiff2, cost1
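
A minimal call sketch, assuming out_batch carries disparity in column 0 and confidence in column 1 (per the slices above); the placeholder names and optimizer are illustrative:

import tensorflow as tf

out_batch = tf.placeholder(tf.float32, [None, 2])      # [disparity, confidence] per tile
target_disparity = tf.placeholder(tf.float32, [None])
gt_ds = tf.placeholder(tf.float32, [None, 2])          # [gt disparity, gt confidence] per tile

cost, disp, d_gt, diff, diff2, w_norm, wdiff2, cost1 = batchLoss(
    out_batch, target_disparity, gt_ds,
    absolute_disparity=False, use_confidence=True)
train_op = tf.train.AdamOptimizer(1e-4).minimize(cost)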
    def __init__(self,
                 max_len,
                 filter_sizes,
                 pool_sizes,
                 num_filters,
                 l2_reg_lambda=0.0,
                 type_CNN=2):
        channel_num = 4

        # Placeholders for input, output and dropout
        self.input_tensor = tf.placeholder(
            tf.float32, [None, max_len, max_len, channel_num],
            name="input_tensor")
        self.input_y = tf.placeholder(tf.float32, [None, 2], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []

        input_tensor = tf.expand_dims(self.input_tensor,
                                      4)  # N x W x H x V  => N x W x H x V x C
        input_tensor = tf.transpose(
            input_tensor, perm=[3, 0, 1, 2,
                                4])  # N x W x H x V x C =>  V x N x W x H x C

        if type_CNN == 1:
            filter_shape1 = [
                filter_sizes[0], filter_sizes[1], 4, num_filters // 2
            ]
            p_size1 = [1, 2, 2, 1]
            filter_shape2 = [
                filter_sizes[2], filter_sizes[3], num_filters // 2, num_filters
            ]
            p_size2 = [1, 2, 2, 1]

            conv1 = self._conv("conv1", self.input_tensor, filter_shape1)
            pool1 = self._maxpool('pool1',
                                  conv1,
                                  ksize=p_size1,
                                  strides=[1, 1, 1, 1])
            conv2 = self._conv('conv2', pool1, filter_shape2)
            pool2 = self._maxpool('pool2',
                                  conv2,
                                  ksize=p_size2,
                                  strides=[1, 1, 1, 1])

            dim = np.prod(pool2.get_shape().as_list()[1:])
            y = tf.reshape(pool2, [-1, dim])
        else:
            for i in range(channel_num):
                # set reuse True for i > 0, for weight-sharing
                reuse_f = (i != 0)
                view = tf.gather(input_tensor, i)  # N x W x H x C

                filter_shape1 = [
                    filter_sizes[0], filter_sizes[1], 1, num_filters // 2
                ]
                p_size1 = [1, pool_sizes[0], pool_sizes[1], 1]

                conv1 = self._conv('conv1', view, filter_shape1, reuse=reuse_f)
                pool1 = self._maxpool('pool1',
                                      conv1,
                                      ksize=p_size1,
                                      strides=[1, 1, 1, 1])

                if len(filter_sizes) == 4:
                    filter_shape2 = [
                        filter_sizes[2], filter_sizes[3], num_filters // 2,
                        num_filters
                    ]
                    p_size2 = [1, pool_sizes[2], pool_sizes[3], 1]

                    conv2 = self._conv('conv2',
                                       pool1,
                                       filter_shape2,
                                       reuse=reuse_f)
                    pool2 = self._maxpool('pool2',
                                          conv2,
                                          ksize=p_size2,
                                          strides=[1, 1, 1, 1])

                    dim = np.prod(pool2.get_shape().as_list()[1:])
                    reshape = tf.reshape(pool2, [-1, dim])
                else:
                    dim = np.prod(pool1.get_shape().as_list()[1:])
                    reshape = tf.reshape(pool1, [-1, dim])

                pooled_outputs.append(reshape)

            with tf.name_scope("view_pooling"):
                x = tf.stack(pooled_outputs)  # 4 * N * dim
                x = tf.transpose(x, perm=[1, 2, 0])  # N * dim * 4
                reshape = tf.reshape(x, [-1, 4])  # (N * dim) * 4
                print(reshape.get_shape().as_list())

                Weights = tf.Variable(tf.truncated_normal([4, 1], 0, 0.1),
                                      name="W")

                y = tf.matmul(reshape, Weights, name="view_pooling")
                y = tf.reshape(y, [-1, dim])
                print(y.get_shape().as_list())
                print("DIM: " + str(dim))

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(y,
                                        self.dropout_keep_prob,
                                        name="hidden_output_drop")
            print(self.h_drop.get_shape().as_list())

        with tf.name_scope("fc1"):
            dim_ = dim / 2
            # dim_ = 100
            # W = tf.get_variable("W", [dim, dim_], initializer=tf.contrib.layers.xavier_initializer())
            W = tf.Variable(name="W",
                            initial_value=tf.truncated_normal(
                                shape=[dim, dim_], stddev=0.1))
            b = tf.Variable(tf.constant(0.1, shape=[dim_]), name="b")

            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.fc1 = tf.nn.relu(tf.matmul(self.h_drop, W) + b)
            self.fc_drop1 = tf.nn.dropout(self.fc1, self.dropout_keep_prob)


        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            # W = tf.get_variable("W_output", [dim_, 2], initializer=tf.contrib.layers.xavier_initializer())
            W = tf.Variable(name="W_output",
                            initial_value=tf.truncated_normal(shape=[dim_, 2],
                                                              stddev=0.1))
            b = tf.Variable(tf.constant(0.1, shape=[2]), name="b")

            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.fc_drop1, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate Mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(
                logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions,
                                           tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions,
                                                   "float"),
                                           name="accuracy")
    def _network2(self, network_name, input, num_actions):
        self.network_name = network_name

        # Layer 1 (Convolutional)
        layer_name = 'conv1'
        size = 3
        channels = 6
        filters = 16
        stride = 1
        self.w1 = tf.Variable(tf.random_normal([size, size, channels, filters],
                                               stddev=0.01),
                              name=self.network_name + '_' + layer_name +
                              '_weights')
        self.b1 = tf.Variable(tf.constant(0.1, shape=[filters]),
                              name=self.network_name + '_' + layer_name +
                              '_biases')
        self.c1 = tf.nn.conv2d(self.x,
                               self.w1,
                               strides=[1, stride, stride, 1],
                               padding='SAME',
                               name=self.network_name + '_' + layer_name +
                               '_convs')
        self.o1 = tf.nn.relu(tf.add(self.c1, self.b1),
                             name=self.network_name + '_' + layer_name +
                             '_activations')

        # Layer 2 (Convolutional)
        layer_name = 'conv2'
        size = 3
        channels = 16
        filters = 32
        stride = 1
        self.w2 = tf.Variable(tf.random_normal([size, size, channels, filters],
                                               stddev=0.01),
                              name=self.network_name + '_' + layer_name +
                              '_weights')
        self.b2 = tf.Variable(tf.constant(0.1, shape=[filters]),
                              name=self.network_name + '_' + layer_name +
                              '_biases')
        self.c2 = tf.nn.conv2d(self.o1,
                               self.w2,
                               strides=[1, stride, stride, 1],
                               padding='SAME',
                               name=self.network_name + '_' + layer_name +
                               '_convs')
        self.o2 = tf.nn.relu(tf.add(self.c2, self.b2),
                             name=self.network_name + '_' + layer_name +
                             '_activations')

        o2_shape = self.o2.get_shape().as_list()

        # Layer 3 (Fully connected)
        layer_name = 'fc3'
        hiddens = 256
        dim = o2_shape[1] * o2_shape[2] * o2_shape[3]
        self.o2_flat = tf.reshape(self.o2, [-1, dim],
                                  name=self.network_name + '_' + layer_name +
                                  '_input_flat')
        self.w3 = tf.Variable(tf.random_normal([dim, hiddens], stddev=0.01),
                              name=self.network_name + '_' + layer_name +
                              '_weights')
        self.b3 = tf.Variable(tf.constant(0.1, shape=[hiddens]),
                              name=self.network_name + '_' + layer_name +
                              '_biases')
        self.ip3 = tf.add(tf.matmul(self.o2_flat, self.w3),
                          self.b3,
                          name=self.network_name + '_' + layer_name + '_ips')
        self.o3 = tf.nn.relu(self.ip3,
                             name=self.network_name + '_' + layer_name +
                             '_activations')

        # Layer 4
        layer_name = 'fc4'
        hiddens = 4
        dim = 256
        self.w4 = tf.Variable(tf.random_normal([dim, hiddens], stddev=0.01),
                              name=self.network_name + '_' + layer_name +
                              '_weights')
        self.b4 = tf.Variable(tf.constant(0.1, shape=[hiddens]),
                              name=self.network_name + '_' + layer_name +
                              '_biases')
        self.y = tf.add(tf.matmul(self.o3, self.w4),
                        self.b4,
                        name=self.network_name + '_' + layer_name + '_outputs')

        #Q,Cost,Optimizer
        self.discount = tf.constant(self.params['discount'])
        self.yj = tf.add(
            self.rewards,
            tf.multiply(1.0 - self.terminals,
                        tf.multiply(self.discount, self.q_t)))
        self.Q_pred = tf.reduce_sum(tf.multiply(self.y, self.actions),
                                    reduction_indices=1)

        return self.Q_pred
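
The yj tensor above is the standard one-step Q-learning target, y_j = r + (1 - done) * gamma * Q(s', a'). A NumPy sketch with assumed values:

import numpy as np

gamma = 0.95
rewards   = np.array([1.0, 0.0, -1.0])
terminals = np.array([0.0, 0.0, 1.0])  # 1.0 where the episode ended
q_t       = np.array([2.0, 3.0, 4.0])  # target-network Q values for s'

yj = rewards + (1.0 - terminals) * gamma * q_t
print(yj)  # [ 2.9   2.85 -1.  ]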
Example #60
conv1_stride = 1
conv1_pad = "SAME"

conv2_fmaps = 64
conv2_ksize = 3
conv2_stride = 2
conv2_pad = "SAME"

pool3_fmaps = conv2_fmaps

n_fc1 = 64
n_outputs = 10

with tf.name_scope("inputs"):
    X = tf.placeholder(tf.float32, shape=[None, n_inputs], name="X")  # [n, 28*28]
    X_reshaped = tf.reshape(X, shape=[-1, height, width,
                                      channels])  # [n, 28, 28, 1]
    y = tf.placeholder(tf.int32, shape=[None], name="y")  # [n]

conv1 = tf.layers.conv2d(
    X_reshaped,
    filters=conv1_fmaps,
    kernel_size=conv1_ksize,  # kernel shape [3,3,1,32], strides [1,1,1,1] ==> [n,28,28,32]
    strides=conv1_stride,
    padding=conv1_pad,
    activation=tf.nn.relu,
    name="conv1")
conv2 = tf.layers.conv2d(
    conv1,
    filters=conv2_fmaps,
    kernel_size=