Example #1
def cell_locate(size, bbox, S):

    """ 
    Locate the grid cell that contains the center of the ground-truth box.
    """
    x = tf.cast(tf.slice(bbox, [0,0], [-1,1]), tf.float32)
    y = tf.cast(tf.slice(bbox, [0,1], [-1,1]), tf.float32)
    w = tf.cast(tf.slice(bbox, [0,2], [-1,1]), tf.float32)
    h = tf.cast(tf.slice(bbox, [0,3], [-1,1]), tf.float32)


    height, width = size

    cell_w = width / S
    cell_h = height / S

    center_y = tf.add(y, tf.mul(h, 0.5))
    center_x = tf.add(x, tf.mul(w, 0.5))

    cell_coord_x = tf.cast(tf.div(center_x, cell_w), tf.int32)
    cell_coord_y = tf.cast(tf.div(center_y, cell_h), tf.int32)

    cell_num = tf.add(tf.mul(cell_coord_y, S), cell_coord_x)

    return cell_num
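For a quick sanity check of the arithmetic above, here is a minimal NumPy sketch of the same cell-index computation (the 448x448 image size, the S = 7 grid and the sample box are made-up values; boxes are assumed to be laid out as [x, y, w, h], matching the slices above):

import numpy as np

size = (448, 448)                           # (height, width) -- arbitrary toy size
S = 7                                       # grid cells per side
bbox = np.array([[100., 200., 50., 80.]])   # one box: x, y, w, h

x, y, w, h = bbox[:, 0], bbox[:, 1], bbox[:, 2], bbox[:, 3]
cell_h, cell_w = size[0] / S, size[1] / S
center_x, center_y = x + 0.5 * w, y + 0.5 * h
cell_num = (center_y // cell_h).astype(int) * S + (center_x // cell_w).astype(int)
print(cell_num)                             # [22] for this toy box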
Example #2
    def sample(self, projected_output):
        """Return integer ID tensor representing the sampled word.
        
        Args:
            projected_output: Tensor [1, 1, state_size], representing a single
                decoder timestep output. 
        """
        # TODO: We really need a tf.control_dependencies check here (for rank).
        with tf.name_scope('decoder_sampler', values=[projected_output]):

            # Protect against extra size-1 dimensions; grab the 1D tensor
            # of size state_size.
            logits = tf.squeeze(projected_output)
            if self.temperature < 0.02:
                return tf.argmax(logits, axis=0)

            # Scale the logits by temperature. Note that tf.multinomial below
            # expects logits (it applies softmax internally), so the explicit
            # normalization that follows is not the tensor actually sampled from.
            probabilities = tf.div(logits, self.temperature)
            projected_output = tf.div(
                tf.exp(probabilities),
                tf.reduce_sum(tf.exp(probabilities), axis=-1))

            # Sample 1 time from the probability distribution.
            sample_ID = tf.squeeze(
                tf.multinomial(tf.expand_dims(probabilities, 0), 1))
        return sample_ID
Example #3
    def scaled_squared_distance(self, X, Y):
        """ Computes the scaled squared distance.

        Parameters
        ----------
        X : np or tf nd.array. shape = (x_samples, n_dim)
            One of the design matrices
        Y : np or tf nd.array. shape = (y_samples, n_dim)
            One of the design matrices
        
        Returns
        -------
        NA : tf nd.array. shape = (x_samples, y_samples)
            Scaled squared distance matrix M where M[i, j] is the sq distance
            between X[i] and Y[j]
        """
        # Scale X and Y accordingly
        Xs, Ys = (tf.div(X, self.length_scales), tf.div(Y, self.length_scales))
        # Create matrix of ones
        Xo = tf.ones(tf.pack([tf.shape(X)[0], 1]))
        Yo = tf.ones(tf.pack([1, tf.shape(Y)[0]]))
        # Precompute squared norms for rows of each matrix
        Xsqn = tf.reshape(tf.reduce_sum(tf.square(Xs), 1), tf.shape(Xo))
        Ysqn = tf.reshape(tf.reduce_sum(tf.square(Ys), 1), tf.shape(Yo))
        # Precompute "interaction" norm
        XYn = tf.matmul(Xs, tf.transpose(Ys))
        # Return the matrix of squared distances
        return tf.matmul(Xsqn, Yo) + tf.matmul(Xo, Ysqn) - 2*XYn
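The return line above relies on the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y applied to the length-scaled inputs. A small NumPy sketch verifying the expansion (the shapes and the length_scales value are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
X, Y = rng.normal(size=(5, 3)), rng.normal(size=(4, 3))
length_scales = np.array([1.0, 2.0, 0.5])           # made-up scales

Xs, Ys = X / length_scales, Y / length_scales
expanded = (Xs ** 2).sum(1)[:, None] + (Ys ** 2).sum(1)[None, :] - 2 * Xs @ Ys.T
direct = ((Xs[:, None, :] - Ys[None, :, :]) ** 2).sum(-1)
assert np.allclose(expanded, direct)                # same (x_samples, y_samples) matrix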
Example #4
def loglik_discrete(a, b, y_, u_, output_collection=(), name=None):
    """Returns element-wise Weibull censored discrete log-likelihood.

    Unit-discretized Weibull log-likelihood; loss = -loglikelihood.

    .. note::
        All input values must be of same type and shape.

    :param a: alpha. Positive nonzero `Tensor`.
    :type a: `float32` or `float64`.
    :param b: beta. Positive nonzero `Tensor`.
    :type b: `float32` or `float64`.
    :param y_: time to event. Positive nonzero `Tensor`.
    :type y_: `float32` or `float64`.
    :param u_: indicator. 0.0 if right censored, 1.0 if uncensored `Tensor`.
    :type u_: `float32` or `float64`.
    :param output_collection: name of the collection to collect the result of this op.
    :type output_collection: Tuple of Strings.
    :param String name: name of the operation.
    :return: A `Tensor` of log-likelihoods of same shape as a, b, y_, u_.
    """

    with tf.name_scope(name, "weibull_loglik_discrete", [a, b, y_, u_]):
        hazard0 = tf.pow(tf.div(y_ + 1e-35, a), b)  # 1e-9 safe, really
        hazard1 = tf.pow(tf.div(y_ + 1.0, a), b)
        loglik = tf.multiply(u_, tf.log(
            tf.exp(hazard1 - hazard0) - 1.0)) - hazard1

        tf.add_to_collection(output_collection, loglik)
    return(loglik)
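For a quick numeric check outside a TensorFlow session, the same expression transcribes directly to NumPy (the alpha, beta, time and censoring values below are arbitrary toy data):

import numpy as np

a, b = 2.0, 1.5                        # alpha, beta
y_ = np.array([0., 1., 3.])            # time to event
u_ = np.array([1., 1., 0.])            # 1.0 = uncensored, 0.0 = right censored

hazard0 = ((y_ + 1e-35) / a) ** b
hazard1 = ((y_ + 1.0) / a) ** b
loglik = u_ * np.log(np.exp(hazard1 - hazard0) - 1.0) - hazard1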
Example #5
 def __init__(self, label, clauses, save_path=""):
     print "defining the knowledge base", label
     self.label = label
     self.clauses = clauses
     self.parameters = [par for cl in self.clauses for par in cl.parameters]
     if not self.clauses:
         self.tensor = tf.constant(1.0)
     else:
         clauses_value_tensor = tf.concat(0, [cl.tensor for cl in clauses])
         if default_clauses_aggregator == "min":
             print "clauses aggregator is min"
             self.tensor = tf.reduce_min(clauses_value_tensor)
         if default_clauses_aggregator == "mean":
             print "clauses aggregator is mean"
             self.tensor = tf.reduce_mean(clauses_value_tensor)
         if default_clauses_aggregator == "hmean":
             print "clauses aggregator is hmean"
             self.tensor = tf.div(tf.to_float(tf.size(clauses_value_tensor)), tf.reduce_sum(tf.inv(clauses_value_tensor), keep_dims=True))
         if default_clauses_aggregator == "wmean":
             print "clauses aggregator is weighted mean"
             weights_tensor = tf.constant([cl.weight for cl in clauses])
             self.tensor = tf.div(tf.reduce_sum(tf.mul(weights_tensor, clauses_value_tensor)), tf.reduce_sum(weights_tensor))
     if default_positive_fact_penality != 0:
         self.loss = smooth(self.parameters) + \
                     tf.mul(default_positive_fact_penality, self.penalize_positive_facts()) - \
                     PR(self.tensor)
     else:
         self.loss = smooth(self.parameters) - PR(self.tensor)
     self.save_path = save_path
     self.train_op = train_op(self.loss, default_optimizer)
     self.saver = tf.train.Saver(max_to_keep=20)
     print "knowledge base", label, "is defined"
Example #6
def allreduce(tensor, average=True):
  """Perform an MPI allreduce on a tf.Tensor or tf.IndexedSlices.

  Arguments:
  tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
          The shape of the input must be identical across all ranks.
  average: If True, computes the average over all ranks.
           Otherwise, computes the sum over all ranks.

  This function performs a bandwidth-optimal ring allreduce on the input
  tensor. If the input is an tf.IndexedSlices, the function instead does an
  allgather on the values and the indices, effectively doing an allreduce on
  the represented tensor.
  """
  if isinstance(tensor, tf.IndexedSlices):
    # For IndexedSlices, do two allgathers instead of an allreduce.
    mpi_size = tf.cast(size(), tensor.values.dtype)
    values = allgather(tensor.values)
    indices = allgather(tensor.indices)

    # To make this operation into an average, divide all gathered values by
    # the MPI size.
    new_values = tf.div(values, mpi_size) if average else values
    return tf.IndexedSlices(new_values, indices,
                            dense_shape=tensor.dense_shape)
  else:
    mpi_size = tf.cast(size(), tensor.dtype)
    summed_tensor = _allreduce(tensor)
    new_tensor = (tf.div(summed_tensor, mpi_size)
                  if average else summed_tensor)
    return new_tensor
Example #7
def tf_bivariate_normal(y, mu, sigma, rho, n_mixtures, batch_size):
    mu = tf.verify_tensor_all_finite(mu, "Mu not finite!")
    y = tf.verify_tensor_all_finite(y, "Y not finite!")
    delta = tf.sub(tf.tile(tf.expand_dims(y, 1), [1, n_mixtures, 1]), mu)
    delta = tf.verify_tensor_all_finite(delta, "Delta not finite!")
    sigma = tf.verify_tensor_all_finite(sigma, "Sigma not finite!")
    s = tf.reduce_prod(sigma, 2)
    s = tf.verify_tensor_all_finite(s, "S not finite!")
    # -1 <= rho <= 1
    z = tf.reduce_sum(tf.square(tf.div(delta, sigma + epsilon) + epsilon), 2) - \
        2 * tf.div(tf.mul(rho, tf.reduce_prod(delta, 2)), s + epsilon)
    
    z = tf.verify_tensor_all_finite(z, "Z not finite!")
    # 0 < negRho <= 1
    rho = tf.verify_tensor_all_finite(rho, "rho in bivariate normal not finite!")
    negRho = tf.clip_by_value(1 - tf.square(rho), epsilon, 1.0)
    negRho = tf.verify_tensor_all_finite(negRho, "negRho not finite!")
    # Note that if negRho goes near zero, or z goes really large, this explodes.
    negRho = tf.verify_tensor_all_finite(negRho, "negRho in bivariate normal not finite!")
    
    result = tf.clip_by_value(tf.exp(tf.div(-z, 2 * negRho)), 1.0e-8, 1.0e8)
    result = tf.verify_tensor_all_finite(result, "Result in bivariate normal not finite!")
    denom = 2 * np.pi * tf.mul(s, tf.sqrt(negRho))
    denom = tf.verify_tensor_all_finite(denom, "Denom in bivariate normal not finite!")
    result = tf.clip_by_value(tf.div(result, denom + epsilon), epsilon, 1.0)
    result = tf.verify_tensor_all_finite(result, "Result2 in bivariate normal not finite!")
    return result, delta
Example #8
    def getBandWidth(self,input_x,input_y,n_source,n_target,dim):
        ''' calculate bandwidth
        gamma = 1/E(||x-y||) 
        :param input_x:
        :param input_y:
        :param sigma:
        :param n_source:
        :param n_target:
        :param dim:
        :return: gamma
        '''
        x = tf.cast(input_x, tf.float32)
        y = tf.cast(input_y, tf.float32)
        counter = tf.constant(float(n_source))
        sum_up = tf.constant(.0)
        shape = [1, dim]
        for s in range(n_source):
            list1 = tf.slice(x, [s, 0], shape)
            list2 = tf.slice(y, [s, 0], shape)

            # mean absolute difference between the two rows
            # (sqrt of an elementwise square is just the absolute value)
            squared = tf.square(tf.sub(list1, list2))
            norm = tf.reduce_sum(tf.sqrt(squared))
            norm = tf.div(norm, tf.constant(float(dim)))

            sum_up  = tf.add(sum_up,tf.to_float(norm))


        gamma = tf.div(counter,sum_up)

        return gamma
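Because tf.sqrt(tf.square(.)) is just an absolute value, the loop above accumulates the per-row mean absolute difference between x and y and returns n_source divided by their sum. A vectorized NumPy sketch of the same computation (toy shapes):

import numpy as np

rng = np.random.default_rng(0)
n_source, dim = 6, 4
x = rng.normal(size=(n_source, dim))
y = rng.normal(size=(n_source, dim))

per_row = np.abs(x - y).mean(axis=1)   # matches sum(sqrt(square(diff))) / dim per row
gamma = n_source / per_row.sum()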
Example #9
def filters_bank(M, N, J, L=8):
    filters = {}
    filters['psi'] = []

    offset_unpad = 0
    for j in range(J):
        for theta in range(L):
            psi = {}
            psi['j'] = j
            psi['theta'] = theta
            psi_signal = morlet_2d(M, N, 0.8 * 2**j, (int(L - L / 2 - 1) - theta) * np.pi / L, 3.0 / 4.0 * np.pi / 2**j,offset=offset_unpad)  # The 5 is here just to match the LUA implementation :)
            psi_signal_fourier = fft.fft2(psi_signal)
            for res in range(j + 1):
                psi_signal_fourier_res = crop_freq(psi_signal_fourier, res)
                psi[res] = tf.constant(np.stack((np.real(psi_signal_fourier_res), np.imag(psi_signal_fourier_res)), axis=2))
                psi[res] = tf.div(psi[res], (M * N // 2**(2 * j)), name="psi_theta%s_j%s" % (theta, j))
            filters['psi'].append(psi)

    filters['phi'] = {}
    phi_signal = gabor_2d(M, N, 0.8 * 2**(J - 1), 0, 0, offset=offset_unpad)
    phi_signal_fourier = fft.fft2(phi_signal)
    filters['phi']['j'] = J
    for res in range(J):
        phi_signal_fourier_res = crop_freq(phi_signal_fourier, res)
        filters['phi'][res] = tf.constant(np.stack((np.real(phi_signal_fourier_res), np.imag(phi_signal_fourier_res)), axis=2))
        filters['phi'][res] = tf.div(filters['phi'][res], (M * N // 2 ** (2 * J)), name="phi_res%s" % res)

    return filters
Example #10
    def __init__(self, action1_bounds, action2_bounds, session):
        self.graph = session.graph
        with self.graph.as_default():
            self.sess = session

            self.action_bounds = [[action1_bounds[1], action2_bounds[1]],
                                  [action1_bounds[0], action2_bounds[0]]]

            self.action_size = len(self.action_bounds[0])
            self.action_input = tf.placeholder(tf.float32, [None, self.action_size])

            self.p_max = tf.constant(self.action_bounds[0], dtype=tf.float32)
            self.p_min = tf.constant(self.action_bounds[1], dtype=tf.float32)

            self.p_range = tf.constant([x - y for x, y in zip(self.action_bounds[0], self.action_bounds[1])],
                                       dtype=tf.float32)

            self.p_diff_max = tf.div(-self.action_input + self.p_max, self.p_range)
            self.p_diff_min = tf.div(self.action_input - self.p_min, self.p_range)

            self.zeros_act_grad_filter = tf.zeros([self.action_size])
            self.act_grad = tf.placeholder(tf.float32, [None, self.action_size])

            self.grad_inverter = tf.select(tf.greater(self.act_grad, self.zeros_act_grad_filter),
                                           tf.mul(self.act_grad, self.p_diff_max),
                                           tf.mul(self.act_grad, self.p_diff_min))
Example #11
def cosineface_losses(embedding, labels, out_num, w_init=None, s=30., m=0.4):
    '''
    :param embedding: the input embedding vectors
    :param labels: the input labels; the shape should be e.g. (batch_size, 1)
    :param s: scalar value, default is 30
    :param out_num: output class num
    :param m: the margin value, default is 0.4
    :return: the final calculated output; this output is fed directly into tf.nn.softmax
    '''
    with tf.variable_scope('cosineface_loss'):
        # inputs and weights norm
        embedding_norm = tf.norm(embedding, axis=1, keep_dims=True)
        embedding = tf.div(embedding, embedding_norm, name='norm_embedding')
        weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),
                                  initializer=w_init, dtype=tf.float32)
        weights_norm = tf.norm(weights, axis=0, keep_dims=True)
        weights = tf.div(weights, weights_norm, name='norm_weights')
        # cos_theta - m
        cos_t = tf.matmul(embedding, weights, name='cos_t')
        cos_t_m = tf.subtract(cos_t, m, name='cos_t_m')

        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')
        inv_mask = tf.subtract(1., mask, name='inverse_mask')

        output = tf.add(s * tf.multiply(cos_t, inv_mask), s * tf.multiply(cos_t_m, mask), name='cosineface_loss_output')
    return output
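The masked sum at the end implements the CosFace-style logit s * (cos theta - m) for the target class and s * cos theta for every other class. A tiny NumPy mirror of that step (toy cosines, 3 classes):

import numpy as np

s, m = 30.0, 0.4
cos_t = np.array([[0.9, 0.1, -0.2]])    # cosine similarities for one sample
mask = np.array([[1., 0., 0.]])         # one-hot label

output = s * (cos_t * (1. - mask) + (cos_t - m) * mask)
# -> [[15.  3. -6.]]: only the target-class logit is penalized before softmax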
Example #12
 def compute_auc(tp, fn, tn, fp, name):
   """Computes the roc-auc or pr-auc based on confusion counts."""
   rec = tf.div(tp + epsilon, tp + fn + epsilon)
   if curve == 'ROC':
     fp_rate = tf.div(fp, fp + tn + epsilon)
     x = fp_rate
     y = rec
   elif curve == 'R':  # recall auc
     x = tf.linspace(1., 0., num_thresholds)
     y = rec
   else:  # curve == 'PR'.
     prec = tf.div(tp + epsilon, tp + fp + epsilon)
     x = rec
     y = prec
   if summation_method == 'trapezoidal':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   (y[:num_thresholds - 1] + y[1:]) / 2.),
       name=name)
   elif summation_method == 'minoring':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   tf.minimum(y[:num_thresholds - 1], y[1:])),
       name=name)
   elif summation_method == 'majoring':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   tf.maximum(y[:num_thresholds - 1], y[1:])),
       name=name)
   else:
     raise ValueError('Invalid summation_method: %s' % summation_method)
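The 'trapezoidal' branch is the usual trapezoidal rule over adjacent thresholds, area = sum_i (x_i - x_{i+1}) * (y_i + y_{i+1}) / 2. A quick NumPy check on a toy curve:

import numpy as np

x = np.array([1.0, 0.6, 0.3, 0.0])      # e.g. recall, decreasing across thresholds
y = np.array([0.5, 0.7, 0.9, 1.0])      # e.g. precision
auc = np.sum((x[:-1] - x[1:]) * (y[:-1] + y[1:]) / 2.0)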
Example #13
    def cross_entropy(u, label_u, alpha=0.5, normed=False):

        label_ip = tf.cast(
            tf.matmul(label_u, tf.transpose(label_u)), tf.float32)
        s = tf.clip_by_value(label_ip, 0.0, 1.0)

        # compute balance param
        # s_t \in {-1, 1}
        s_t = tf.multiply(tf.add(s, tf.constant(-0.5)), tf.constant(2.0))
        sum_1 = tf.reduce_sum(s)
        sum_all = tf.reduce_sum(tf.abs(s_t))
        balance_param = tf.add(tf.abs(tf.add(s, tf.constant(-1.0))),
                               tf.multiply(tf.div(sum_all, sum_1), s))

        if normed:
            # ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
            ip_1 = tf.matmul(u, tf.transpose(u))

            def reduce_shaper(t):
                return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])
            mod_1 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(u)),
                                      reduce_shaper(tf.square(u)), transpose_b=True))
            ip = tf.div(ip_1, mod_1)
        else:
            ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
        ones = tf.ones([tf.shape(u)[0], tf.shape(u)[0]])
        return tf.reduce_mean(tf.multiply(tf.log(ones + tf.exp(alpha * ip)) - s * alpha * ip, balance_param))
Example #14
def batch_sample_with_temperature(a, temperature=1.0):
    '''this function is like sample_with_temperature except it can handle batch input a of [batch_size x logits]
        this function takes logits input, and produces a specific number from the array. This is all done on the gpu
        because this function uses tensorflow
        As you increase the temperature, you will get more diversified output but with more errors (usually grammatical if you're
            doing text)
    args:
        Logits -- this must be a 2d array [batch_size x logits]
        Temperature -- how much variance you want in output
    returns:
        Selected number from distribution
    '''

    '''
    Equation can be found here: https://en.wikipedia.org/wiki/Softmax_function (under reinforcement learning)
        Karpathy did it here as well: https://github.com/karpathy/char-rnn/blob/4297a9bf69726823d944ad971555e91204f12ca8/sample.lua'''
    '''a is [batch_size x logits]'''
    with tf.op_scope([a,temperature], "batch_sample_with_temperature"):

        exponent_raised = tf.exp(tf.div(a, temperature)) #start by reduction of temperature, and get rid of negative numbers with exponent
        matrix_X = tf.div(exponent_raised, tf.reduce_sum(exponent_raised, reduction_indices = 1)) #this will yield probabilities!
        matrix_U = tf.random_uniform(tf.shape(a), minval = 0, maxval = 1)
        final_number = tf.argmax(tf.sub(matrix_X, matrix_U), dimension = 1) #you want dimension = 1 because you are argmaxing across rows.

    return final_number
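A plain NumPy sketch of batched temperature sampling: the softmax step mirrors the code above, while the draw here uses rng.choice instead of the argmax(X - U) trick (the logits are toy values):

import numpy as np

rng = np.random.default_rng(0)
logits = np.array([[2.0, 1.0, 0.1],
                   [0.5, 0.5, 3.0]])
temperature = 0.7

scaled = np.exp(logits / temperature)
probs = scaled / scaled.sum(axis=1, keepdims=True)
samples = np.array([rng.choice(probs.shape[1], p=p) for p in probs])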
Example #15
    def getkernel(self,input_x,input_y,n_source,n_target,dim,sigma):
        '''

        :param x: sourceMatrix
        :param y: targetMatrix
        :param n_source: # of source samples
        :param n_target: # of target samples
        :param dim: # of input dimension(features)
        :return: a scalar showing the MMD
        '''
        # ---------------------------------------
        # x = tf.convert_to_tensor(input_x,dtype=tf.float32)
        # y = tf.convert_to_tensor(input_y, dtype=tf.float32)


        x = tf.cast(input_x,tf.float32)
        y = tf.cast(input_y, tf.float32)


        k_ss = k_st = k_tt = tf.constant(0.)
        n_ss = n_st = n_tt = tf.constant(0.)
        flag = tf.constant(1.)
        signal = tf.constant(-2.0)
        shape = [1,dim]
        for s in range(n_source):
            for s_ in range(n_source):
                list1 = tf.slice(x, [s, 0], shape)
                list2 = tf.slice(x, [s_, 0], shape)
                k_ss = tf.add(self.gaussiankernel(list1,list2,sigma),k_ss)
                n_ss = tf.add(n_ss,flag)


        for t in range(n_target):
            for t_ in range(n_target):
                list1 = tf.slice(y, [t, 0], shape)
                list2 = tf.slice(y, [t_, 0], shape)
                k_tt = tf.add(self.gaussiankernel(list1, list2, sigma), k_tt)
                n_tt = tf.add(n_tt, flag)


        for s in range(n_source):
            for t in range(n_target):
                list1 = tf.slice(x, [s, 0], shape)
                list2 = tf.slice(y, [t, 0], shape)
                k_st = tf.add(self.gaussiankernel(list1, list2, sigma), k_st)
                n_st = tf.add(n_st, flag)




        term1 = tf.div(k_ss,n_ss )
        term2 = tf.div( k_tt, n_tt)
        term3 = tf.mul(signal, tf.div(k_st,n_st))
        term4 = tf.add(term1,term2)

        kernel = tf.add(term3, term4)


        return kernel
Example #16
 def gaussian_cost(t, o):
     s = 1.0  # For now take unit variance
     norm = tf.sub(o, t)
     z = tf.square(tf.div(norm, s))
     result = tf.exp(tf.div(-z, 2.0))
     denom = 2.0 * np.pi * s
     p = tf.div(result, denom)
     return -tf.log(p)
Example #17
  def build_search_images(self):
    """Crop search images from the input image based on the last target position

    1. The input image is scaled such that the area of target&context takes up to (scale_factor * z_image_size) ^ 2
    2. Crop an image patch as large as x_image_size centered at the target center.
    3. If the cropped image region is beyond the boundary of the input image, mean values are padded.
    """
    model_config = self.model_config
    track_config = self.track_config

    size_z = model_config['z_image_size']
    size_x = track_config['x_image_size']
    context_amount = 0.5

    num_scales = track_config['num_scales']
    scales = np.arange(num_scales) - get_center(num_scales)
    assert np.sum(scales) == 0, 'scales should be symmetric'
    search_factors = [track_config['scale_step'] ** x for x in scales]

    frame_sz = tf.shape(self.image)
    target_yx = self.target_bbox_feed[0:2]
    target_size = self.target_bbox_feed[2:4]
    avg_chan = tf.reduce_mean(self.image, axis=(0, 1), name='avg_chan')

    # Compute base values
    base_z_size = target_size
    base_z_context_size = base_z_size + context_amount * tf.reduce_sum(base_z_size)
    base_s_z = tf.sqrt(tf.reduce_prod(base_z_context_size))  # Canonical size
    base_scale_z = tf.div(tf.to_float(size_z), base_s_z)
    d_search = (size_x - size_z) / 2.0
    base_pad = tf.div(d_search, base_scale_z)
    base_s_x = base_s_z + 2 * base_pad
    base_scale_x = tf.div(tf.to_float(size_x), base_s_x)

    boxes = []
    for factor in search_factors:
      s_x = factor * base_s_x
      frame_sz_1 = tf.to_float(frame_sz[0:2] - 1)
      topleft = tf.div(target_yx - get_center(s_x), frame_sz_1)
      bottomright = tf.div(target_yx + get_center(s_x), frame_sz_1)
      box = tf.concat([topleft, bottomright], axis=0)
      boxes.append(box)
    boxes = tf.stack(boxes)

    scale_xs = []
    for factor in search_factors:
      scale_x = base_scale_x / factor
      scale_xs.append(scale_x)
    self.scale_xs = tf.stack(scale_xs)

    # Note we use different padding values for each image
    # while the original implementation uses only the average value
    # of the first image for all images.
    image_minus_avg = tf.expand_dims(self.image - avg_chan, 0)
    image_cropped = tf.image.crop_and_resize(image_minus_avg, boxes,
                                             box_ind=tf.zeros((track_config['num_scales']), tf.int32),
                                             crop_size=[size_x, size_x])
    self.search_images = image_cropped + avg_chan
Example #18
    def __init__(self, num_neurons, norm_constants, file_restore=None):
        """
        Creates variables for a three layer network
        :param num_neurons: Tuple of number of neurons per layer
        :param norm_constants: List of normalization constants for (x, y, force)
        """
        self.sess = sess = tf.InteractiveSession()
        with tf.name_scope('Input'):
            self.x = x = tf.placeholder(tf.float32, shape=[None, num_neurons[0]], name='X')
            norm_x = tf.constant(norm_constants, name='normX')
            norm_input = tf.div(x, norm_x)
            self.y_ = y_ = tf.placeholder(tf.float32, shape=[None, num_neurons[2]], name='y_')
            norm_y = tf.constant(norm_constants[0:2] * int(num_neurons[2]/2), name='normY')
            norm_desired_output = tf.div(y_, norm_y)
            tf.histogram_summary('Input/x', x)
            tf.histogram_summary('Input/normalized_x', norm_input)
            tf.histogram_summary('Input/y_', y_)
            tf.histogram_summary('Input/normalized_y_', norm_desired_output)

        with tf.name_scope('Hidden'):
            W1 = tf.Variable(tf.random_uniform([num_neurons[0], num_neurons[1]], -1.0, 1.0), name='W1')
            b1 = tf.Variable(tf.constant(0.1, shape=(num_neurons[1],)), name='b1')
            h = tf.nn.sigmoid(tf.matmul(norm_input, W1) + b1, name='h')
            tf.histogram_summary('Hidden/W1', W1)
            tf.histogram_summary('Hidden/b1', b1)
            tf.histogram_summary('Hidden/h', h)

        with tf.name_scope('Output'):
            W2 = tf.Variable(tf.random_uniform([num_neurons[1], num_neurons[2]], -1.0, 1.0), name='W2')
            b2 = tf.Variable(tf.constant(0.1, shape=(num_neurons[2],)), name='b2')
            self.y = y = tf.nn.sigmoid(tf.matmul(h, W2) + b2, name='y')
            self.out = tf.mul(y, norm_y)
            tf.histogram_summary('Output/W2', W2)
            tf.histogram_summary('Output/b2', b2)
            tf.histogram_summary('Output/y', y)
            tf.histogram_summary('Output/out', self.out)

        with tf.name_scope('Error'):
            self.error = tf.reduce_mean(tf.nn.l2_loss(tf.sub(y, norm_desired_output)), name='Error')
            #tf.summary.scalar("Error", self.error)
            self.error_summary = tf.scalar_summary("Error", self.error)

        # Merge all the summaries
        #self.merged = tf.summary.merge_all()
        self.merged = tf.merge_all_summaries()
        self.train_writer = tf.train.SummaryWriter(LOG_DIR + '/train', sess.graph)
        self.val_writer = tf.train.SummaryWriter(LOG_DIR + '/val', sess.graph)

        # Prepare for saving network state
        self.saver = tf.train.Saver()
        if file_restore is None:
            sess.run(tf.initialize_all_variables())
        else:
            self.saver.restore(self.sess, file_restore)
            print_msg("Model restored from ", file_restore)

        self.trained_cycles = 0
Example #19
 def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
   # eq # 24 and 25 of http://arxiv.org/abs/1308.0850
   norm1 = tf.sub(x1, mu1)
   norm2 = tf.sub(x2, mu2)
   s1s2 = tf.mul(s1, s2)
   z = tf.square(tf.div(norm1, s1))+tf.square(tf.div(norm2, s2))-2*tf.div(tf.mul(rho, tf.mul(norm1, norm2)), s1s2)
   negRho = 1-tf.square(rho)
   result = tf.exp(tf.div(-z,2*negRho))
   denom = 2*np.pi*tf.mul(s1s2, tf.sqrt(negRho))
   result = tf.div(result, denom)
   return result
Example #20
def Moment(k, tensor, standardize=False, reduction_indices=None, mask=None):
  """Compute the k-th central moment of a tensor, possibly standardized.

  Args:
    k: Which moment to compute. 1 = mean, 2 = variance, etc.
    tensor: Input tensor.
    standardize: If True, returns the standardized moment, i.e. the central
      moment divided by the k-th power of the standard deviation.
    reduction_indices: Axes to reduce across. If None, reduce to a scalar.
    mask: Mask to apply to tensor.

  Returns:
    The mean and the requested moment.
  """
  warnings.warn("Moment is deprecated. "
                "Will be removed in DeepChem 1.4.", DeprecationWarning)
  if reduction_indices is not None:
    reduction_indices = np.atleast_1d(reduction_indices).tolist()

  # get the divisor
  if mask is not None:
    tensor = Mask(tensor, mask)
    ones = tf.constant(1, dtype=tf.float32, shape=tensor.get_shape())
    divisor = tf.reduce_sum(
        Mask(ones, mask), axis=reduction_indices, keep_dims=True)
  elif reduction_indices is None:
    divisor = tf.constant(np.prod(tensor.get_shape().as_list()), tensor.dtype)
  else:
    divisor = 1.0
    for i in range(len(tensor.get_shape())):
      if i in reduction_indices:
        divisor *= tensor.get_shape()[i].value
    divisor = tf.constant(divisor, tensor.dtype)

  # compute the requested central moment
  # note that mean is a raw moment, not a central moment
  mean = tf.div(
      tf.reduce_sum(tensor, axis=reduction_indices, keep_dims=True), divisor)
  delta = tensor - mean
  if mask is not None:
    delta = Mask(delta, mask)
  moment = tf.div(
      tf.reduce_sum(
          math_ops.pow(delta, k), axis=reduction_indices, keep_dims=True),
      divisor)
  moment = tf.squeeze(moment, reduction_indices)
  if standardize:
    moment = tf.multiply(
        moment,
        math_ops.pow(
            tf.rsqrt(Moment(2, tensor, reduction_indices=reduction_indices)[1]),
            k))

  return tf.squeeze(mean, reduction_indices), moment
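For reference, the k-th central moment is E[(X - mean)^k], and the standardized version divides by sigma^k (skewness for k = 3). A NumPy check with toy data:

import numpy as np

x = np.random.default_rng(0).normal(size=1000)
k = 3
mean = x.mean()
central = ((x - mean) ** k).mean()
standardized = central / x.std() ** k   # central moment / sigma^k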
Example #21
 def get_pi_mu_sigma(points, T): #M step
     Tk = tf.reduce_sum(T, 0, keep_dims=True) # S(t_ik, i) [1, k]
     pi = Tk/n  # pi_k [1,k]
     tmp = tf.tile(tf.reshape(T, [n,k,1,1]), [1,1,p,1]) # (T_ik)_p [n,k,p,1]
     tmp2 = tf.tile(tf.reshape(points, [n,1,p,1]), [1,k,1,1]) # (x_i)_k [n,k,p,1]
     mu = tf.div(tf.reduce_sum(tf.mul(tmp,tmp2), 0), tf.tile(tf.reshape(Tk, [k,1,1]), [1,p,1]))# mu_k [k,p,1]
     diff_tmp = tf.sub(tf.tile(points, [k,1,1]), tf.tile(mu, [n,1,1])) # x_i-u_k [n*k, p, 1]
     tmp_mat = tf.reshape(tf.batch_matmul( diff_tmp, tf.transpose(diff_tmp,perm=[0,2,1]) ), [n,k,p,p]) # (x_i-u_k)(x_i-u_k)t [k,n,p,p]
     sigma = tf.reduce_sum(tf.mul(tmp_mat, tf.tile(tf.reshape(T, [n,k,1,1]), [1,1,p,p])), 0) # S_k [k, p, p]
     sigma = tf.div(sigma, tf.tile(tf.reshape(Tk, [k,1,1]), [1,p,p]))
     return pi, mu, sigma
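The same GMM M-step written as a NumPy sketch with explicit shapes (n points, k components, p dimensions are toy sizes; T holds the responsibilities t_ik):

import numpy as np

rng = np.random.default_rng(0)
n, k, p = 8, 3, 2
points = rng.normal(size=(n, p))
T = rng.dirichlet(np.ones(k), size=n)                  # responsibilities, rows sum to 1

Tk = T.sum(axis=0)                                     # [k]
pi = Tk / n                                            # [k]
mu = (T.T @ points) / Tk[:, None]                      # [k, p]
diff = points[:, None, :] - mu[None, :, :]             # [n, k, p]
sigma = np.einsum('nk,nki,nkj->kij', T, diff, diff) / Tk[:, None, None]  # [k, p, p]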
Example #22
 def content_loss(self, layers):
     activations = [self.activations_for_layer(i) for i in layers]
     activation_diffs = [
         tf.sub(
             tf.tile(tf.slice(a, [self.num_style, 0, 0, 0], [self.num_content, -1, -1, -1]), [self.num_synthesized - self.num_content + 1, 1, 1, 1]),
             tf.slice(a, [self.num_style + self.num_content, 0, 0, 0], [self.num_content, -1, -1, -1]))
         for a in activations]
     # This normalizer is in JCJohnson's paper, but not Gatys' I think?
     Ns = [a.get_shape().as_list()[1] * a.get_shape().as_list()[2] * a.get_shape().as_list()[3] for a in activations]
     content_loss = tf.div(tf.add_n([tf.div(tf.reduce_sum(tf.square(a)), n) for a, n in zip(activation_diffs, Ns)]), 2.0)
     return content_loss
Example #23
def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
  #Inspired from Hardmaru's implementation on Github
  norm1 = tf.sub(x1, mu1)
  norm2 = tf.sub(x2, mu2)
  s1s2 = tf.mul(s1, s2)
  z = tf.square(tf.div(norm1, s1))+tf.square(tf.div(norm2, s2))-2*tf.div(tf.mul(rho, tf.mul(norm1, norm2)), s1s2)
  negRho = 1-tf.square(rho)
  result = tf.exp(tf.div(-z,2*negRho))
  denom = 2*np.pi*tf.mul(s1s2, tf.sqrt(negRho))
  result = tf.div(result, denom)
  return result
Example #24
 def create_cost_bending(self, c):
   c1 = tf.slice(c, [0,1,0], [-1,-1,-1]);
   c2 = tf.slice(c, [0,0,0], [-1,self.npoints-1,-1]);
   dc = tf.sub(c1,c2);
   dc1 = tf.slice(dc, [0,1,0], [-1,-1,-1]);
   dc2 = tf.slice(dc, [0,0,0], [-1,self.npoints-2,-1]);
   dn1 = tf.sqrt(tf.reduce_sum(tf.square(dc1), reduction_indices =2));
   dn2 = tf.sqrt(tf.reduce_sum(tf.square(dc2), reduction_indices =2));
   dp = tf.reduce_sum(tf.mul(dc1, dc2), reduction_indices =2);
   dp = tf.div(tf.div(dp, dn1), dn2);
   return tf.mul(tf.constant(-1.0, "float32"), tf.reduce_mean(dp));
Example #25
 def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
   """Returns result of eq # 24 and 25 of http://arxiv.org/abs/1308.0850."""
   norm1 = tf.subtract(x1, mu1)
   norm2 = tf.subtract(x2, mu2)
   s1s2 = tf.multiply(s1, s2)
   z = (tf.square(tf.div(norm1, s1)) + tf.square(tf.div(norm2, s2)) -
        2 * tf.div(tf.multiply(rho, tf.multiply(norm1, norm2)), s1s2))
   neg_rho = 1 - tf.square(rho)
   result = tf.exp(tf.div(-z, 2 * neg_rho))
   denom = 2 * np.pi * tf.multiply(s1s2, tf.sqrt(neg_rho))
   result = tf.div(result, denom)
   return result
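This is the bivariate density N(x | mu, sigma, rho) = exp(-Z / (2 (1 - rho^2))) / (2 pi s1 s2 sqrt(1 - rho^2)), with Z as in the code. A NumPy transcription with arbitrary toy parameters:

import numpy as np

x1, x2 = 0.3, -0.1
mu1, mu2, s1, s2, rho = 0.0, 0.0, 1.0, 0.5, 0.2

z = ((x1 - mu1) / s1) ** 2 + ((x2 - mu2) / s2) ** 2 \
    - 2 * rho * (x1 - mu1) * (x2 - mu2) / (s1 * s2)
neg_rho = 1 - rho ** 2
density = np.exp(-z / (2 * neg_rho)) / (2 * np.pi * s1 * s2 * np.sqrt(neg_rho))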
Example #26
def create_cost_bending(self, c):
  c_shape = c.get_shape().as_list();
  c1 = tf.slice(c, [1,0], [-1,-1]);
  c2 = tf.slice(c, [0,0], [c_shape[0]-1,-1]);
  dc = tf.sub(c1,c2);
  dc1 = tf.slice(dc, [1,0], [-1,-1]);
  dc2 = tf.slice(dc, [0,0], [c_shape[0]-2,-1]);
  dn1 = tf.sqrt(tf.reduce_sum(tf.square(dc1), reduction_indices = 1));
  dn2 = tf.sqrt(tf.reduce_sum(tf.square(dc2), reduction_indices = 1));
  dp = tf.reduce_sum(tf.mul(dc1, dc2), reduction_indices = 1);
  dp = tf.div(tf.div(dp, dn1), dn2);
  return tf.mul(tf.constant(-1.0, "float32"), tf.reduce_mean(dp));
Example #27
def elloss(feature_class,feature_location,groundtruth_class,groundtruth_location,groundtruth_positives,groundtruth_count):
    softmax_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=feature_class,
                                                                           labels=groundtruth_class)
    loss_location = tf.div(tf.reduce_sum(tf.multiply(
        tf.reduce_sum(smooth_L1(tf.subtract(groundtruth_location, feature_location)),
                      reduction_indices=2), groundtruth_positives), reduction_indices=1),
        tf.reduce_sum(groundtruth_positives, reduction_indices=1))
    loss_class = tf.div(
        tf.reduce_sum(tf.multiply(softmax_cross_entropy, groundtruth_count), reduction_indices=1),
        tf.reduce_sum(groundtruth_count, reduction_indices=1))
    loss_all = tf.reduce_sum(tf.add(loss_class, loss_location*5))
    return loss_all,loss_class,loss_location
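The smooth_L1 helper used above is defined elsewhere in that codebase and is not shown in this snippet; a common definition, given purely as an assumption for readers, is the Huber-style function below (NumPy sketch):

import numpy as np

def smooth_L1(x):
    # 0.5 * x^2 for |x| < 1, |x| - 0.5 otherwise (a common choice; the repo's
    # actual definition is not shown in this snippet)
    absx = np.abs(x)
    return np.where(absx < 1.0, 0.5 * np.square(x), absx - 0.5)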
Example #28
def tf_1d_normal(x3,mu3,s3):
  """ 3D normal distribution Under assumption that x3 is uncorrelated with x1 and x2
  input
  - x3, mu3: input value and mean
  - s3: standard deviation of x3
  """
  norm3 = tf.sub(x3, mu3)
  z = tf.square(tf.div(norm3, s3))
  result = tf.exp(tf.div(-z,2))
  denom = 2.0*np.pi*s3
  px3 = tf.div(result, denom)  #probability in x3 dimension
  return px3
Example #29
    def critic_network(self, input_state):
        w1 = tf.Variable(tf.div(tf.random_normal(
            [self.input_dim, self.hidden_units]),
            np.sqrt(self.input_dim)), name='w1')
        b1 = tf.Variable(
            tf.constant(0.0, shape=[self.hidden_units]), name='b1')
        h1 = tf.nn.relu(tf.matmul(input_state, w1) + b1)
        w2 = tf.Variable(tf.div(tf.random_normal(
            [self.hidden_units, 1]), np.sqrt(self.hidden_units)), name='w2')
        b2 = tf.Variable(tf.constant(0.0, shape=[1]), name='b2')
        state_value = tf.matmul(h1, w2) + b2

        return state_value
Example #30
    def network(self, input_state):
        hidden_unit = 100
        w1 = tf.Variable(tf.div(tf.random_normal(
            [self.state_dim, hidden_unit]), np.sqrt(self.state_dim)))
        b1 = tf.Variable(tf.constant(0.0, shape=[hidden_unit]))
        hidden = tf.nn.relu(tf.matmul(input_state, w1) + b1)

        w2 = tf.Variable(tf.div(tf.random_normal(
            [hidden_unit, self.action_dim]), np.sqrt(hidden_unit)))
        b2 = tf.Variable(tf.constant(0.0, shape=[self.action_dim]))
        output_Q = tf.matmul(hidden, w2) + b2

        return output_Q
Example #31
    def create_inference_IFM(self):
        # None * M * K
        self.nonzero_embeddings = tf.nn.embedding_lookup(
            self.weights['feature_embeddings'], self.train_features)
        element_wise_product_list = []
        interactions = []
        for i in range(0, self.num_field):
            for j in range(i + 1, self.num_field):
                # (M * K) MUL (M * K) -> M * K
                element_wise_product_list.append(
                    tf.multiply(self.nonzero_embeddings[:, i, :],
                                self.nonzero_embeddings[:, j, :]))
                #  H MUL H -> [M*(M-1)/2] * H
                interactions.append(
                    tf.multiply(self.weights['interaction'][i, :],
                                self.weights['interaction'][j, :]))

        # (M'*(M'-1)/2) * None * K
        self.element_wise_product = tf.stack(element_wise_product_list)
        # None * (M'*(M'-1)/2) * K
        self.element_wise_product = tf.transpose(self.element_wise_product,
                                                 perm=[1, 0, 2],
                                                 name="element_wise_product")
        num_interactions = self.num_field * (self.num_field - 1) / 2
        #
        # [None * (M'*(M'-1)/2)) * K]  *  (K * A) -> (None * (M'*(M'-1)/2)) * A
        self.attention_mul = tf.tensordot(self.element_wise_product,
                                          self.weights['attention_W'],
                                          axes=1)
        self.attention_mul = self.attention_mul / self.temp
        # (None * (M'*(M'-1)/2)) * A
        self.attention_relu = tf.nn.relu(self.attention_mul +
                                         self.weights['attention_b'])
        # (None * (M'*(M'-1)/2)) * 1
        self.attention_exp = tf.exp(
            tf.reduce_sum(tf.multiply(self.weights['attention_p'],
                                      self.attention_relu),
                          axis=2,
                          keep_dims=True))
        # None * 1 * 1
        self.attention_sum = tf.reduce_sum(self.attention_exp,
                                           1,
                                           keep_dims=True)
        # None * (M'*(M'-1)) * 1
        self.attention_out = tf.div(self.attention_exp,
                                    self.attention_sum,
                                    name="attention_out")
        # Or employing softmax function instead of above three steps
        # None * (M'*(M'-1)/2) * 1
        # self.attention_out = tf.transpose(tf.nn.softmax(tf.transpose(self.attention_relu,perm=[0,2,1])),perm=[0,2,1], name="attention_out")
        # self.attention_out = tf.nn.softmax(self.attention_relu, name="attention_out")
        # dropout -> None * (M'*(M'-1)) * 1
        self.attention_out = tf.nn.dropout(self.attention_out,
                                           self.dropout_keep[0])

        # None * (M'*(M'-1)/2) * K   MUL  None * (M'*(M'-1)/2) * 1  -> (None * (M'*(M'-1)/2)) * K
        self.AFM = tf.multiply(self.element_wise_product, self.attention_out)
        # None * [(M'*(M' - 1) / 2) * K]
        self.AFM = tf.reshape(self.AFM, [
            -1,
            tf.cast(num_interactions * self.hidden_factor, dtype=tf.int32)
        ])
        # None * [(M'*(M' - 1) / 2) * K]
        self.AFM = tf.nn.dropout(self.AFM, self.dropout_keep[1])

        # _____interaction____
        # M*(M-1)/2 * H
        self.field_interactions = tf.stack(interactions)
        # M*(M-1)/2 * H  *  H * K -> M*(M-1)/2 * K
        self.attention_interaction = tf.matmul(self.field_interactions,
                                               self.weights['factor'])
        # [M*(M-1)/2 * K] * 1
        self.attention_interaction = tf.reshape(self.attention_interaction, [
            tf.cast(num_interactions * self.hidden_factor, dtype=tf.int32), 1
        ])

        if self.INN == True:
            # [M*(M-1)/2 * K]
            self.attention_interaction = tf.reshape(
                self.attention_interaction, [
                    tf.cast(num_interactions * self.hidden_factor,
                            dtype=tf.int32)
                ])
            # None * [(M'*(M' - 1) / 2) * K]
            self.AFM = self.AFM * self.attention_interaction
            regularizer = tf.contrib.layers.l2_regularizer(
                scale=self.lamda_attention1)
            # None * 5
            hidden1 = tf.layers.dense(self.AFM,
                                      5,
                                      tf.nn.relu,
                                      kernel_regularizer=regularizer)
            # None * 1
            self.IFM = tf.layers.dense(hidden1, 1)
        else:
            # None * [(M'*(M' - 1) / 2) * K]  dot [M*(M-1)/2 * K] * 1 -> None * 1
            self.IFM = tf.tensordot(self.AFM,
                                    self.attention_interaction,
                                    axes=1)
        # None
        self.IFM = tf.reduce_sum(self.AFM, reduction_indices=[1])
        # None
        self.prediction = self.IFM
        # None * 1
        Bilinear = tf.expand_dims(self.prediction, -1)
        # None * features_M * 1 -> None * 1 * 1
        Feature_bias = tf.reduce_sum(tf.nn.embedding_lookup(
            self.weights['feature_bias'], self.train_features),
                                     axis=1)
        # None * 1
        Bais = self.weights['bias'] * tf.ones_like(self.train_labels)
        self.out = tf.add_n([Bilinear, Feature_bias, Bais], name='out')
Example #32
    def __init__(self, user_count, item_count, cate_count, cate_list):

        self.u = tf.placeholder(tf.int32, [
            None,
        ])  # [B]
        self.i = tf.placeholder(tf.int32, [
            None,
        ])  # [B]
        self.j = tf.placeholder(tf.int32, [
            None,
        ])  # [B]
        self.y = tf.placeholder(tf.float32, [
            None,
        ])  # [B]
        self.hist_i = tf.placeholder(tf.int32, [None, None])  # [B, T]
        self.sl = tf.placeholder(tf.int32, [
            None,
        ])  # [B]
        self.lr = tf.placeholder(tf.float64, [])

        hidden_units = 128

        user_emb_w = tf.get_variable("user_emb_w", [user_count, hidden_units])
        item_emb_w = tf.get_variable("item_emb_w",
                                     [item_count, hidden_units // 2])
        item_b = tf.get_variable("item_b", [item_count],
                                 initializer=tf.constant_initializer(0.0))
        cate_emb_w = tf.get_variable("cate_emb_w",
                                     [cate_count, hidden_units // 2])
        cate_list = tf.convert_to_tensor(cate_list, dtype=tf.int64)

        #u_emb = tf.nn.embedding_lookup(user_emb_w, self.u)
        ic = tf.gather(cate_list, self.i)
        i_emb = tf.concat(values=[
            tf.nn.embedding_lookup(item_emb_w, self.i),
            tf.nn.embedding_lookup(cate_emb_w, ic),
        ],
                          axis=1)
        i_b = tf.gather(item_b, self.i)

        jc = tf.gather(cate_list, self.j)
        j_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, self.j),
            tf.nn.embedding_lookup(cate_emb_w, jc),
        ],
                          axis=1)
        j_b = tf.gather(item_b, self.j)

        hc = tf.gather(cate_list, self.hist_i)
        h_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, self.hist_i),
            tf.nn.embedding_lookup(cate_emb_w, hc),
        ],
                          axis=2)

        #-- sum begin --------
        # mask the zero padding part
        mask = tf.sequence_mask(self.sl, tf.shape(h_emb)[1],
                                dtype=tf.float32)  # [B, T]
        mask = tf.expand_dims(mask, -1)  # [B, T, 1]
        mask = tf.tile(mask, [1, 1, tf.shape(h_emb)[2]])  # [B, T, H]
        h_emb *= mask  # [B, T, H]
        hist = h_emb
        hist = tf.reduce_sum(hist, 1)
        hist = tf.div(
            hist,
            tf.cast(tf.tile(tf.expand_dims(self.sl, 1), [1, 128]), tf.float32))
        print(h_emb.get_shape().as_list())
        #-- sum end ---------

        hist = tf.layers.batch_normalization(inputs=hist)
        hist = tf.reshape(hist, [-1, hidden_units])
        hist = tf.layers.dense(hist, hidden_units)

        u_emb = hist
        #pos user_item -- fcn begin -------
        din_i = tf.concat([u_emb, i_emb], axis=-1)
        din_i = tf.layers.batch_normalization(inputs=din_i, name='b1')
        d_layer_1_i = tf.layers.dense(din_i,
                                      80,
                                      activation=tf.nn.sigmoid,
                                      name='f1')
        d_layer_2_i = tf.layers.dense(d_layer_1_i,
                                      40,
                                      activation=tf.nn.sigmoid,
                                      name='f2')
        d_layer_3_i = tf.layers.dense(d_layer_2_i,
                                      1,
                                      activation=None,
                                      name='f3')

        #user_item fm part
        d_layer_fm_i = tf.concat([
            tf.reduce_sum(u_emb * i_emb, axis=-1, keep_dims=True),
            tf.gather(u_emb, [0], axis=-1) + tf.gather(i_emb, [0], axis=-1)
        ],
                                 axis=-1)
        d_layer_fm_i = tf.layers.dense(d_layer_fm_i,
                                       1,
                                       activation=None,
                                       name='f_fm')

        #neg user_item fcn begin
        #din_i = tf.concat([u_emb, i_emb], axis=-1)
        din_j = tf.concat([u_emb, j_emb], axis=-1)
        din_j = tf.layers.batch_normalization(inputs=din_j,
                                              name='b1',
                                              reuse=True)
        d_layer_1_j = tf.layers.dense(din_j,
                                      80,
                                      activation=tf.nn.sigmoid,
                                      name='f1',
                                      reuse=True)
        d_layer_2_j = tf.layers.dense(d_layer_1_j,
                                      40,
                                      activation=tf.nn.sigmoid,
                                      name='f2',
                                      reuse=True)
        d_layer_3_j = tf.layers.dense(d_layer_2_j,
                                      1,
                                      activation=None,
                                      name='f3',
                                      reuse=True)

        #user_cat fm part
        d_layer_fm_j = tf.concat([
            tf.reduce_sum(u_emb * j_emb, axis=-1, keep_dims=True),
            tf.gather(u_emb, [0], axis=-1) + tf.gather(j_emb, [0], axis=-1)
        ],
                                 axis=-1)
        d_layer_fm_j = tf.layers.dense(d_layer_fm_j,
                                       1,
                                       activation=None,
                                       name='f_fm',
                                       reuse=True)

        d_layer_3_i = tf.reshape(d_layer_3_i, [-1])
        d_layer_3_j = tf.reshape(d_layer_3_j, [-1])
        d_layer_fm_i = tf.reshape(d_layer_fm_i, [-1])
        d_layer_fm_j = tf.reshape(d_layer_fm_j, [-1])

        x = i_b - j_b + d_layer_3_i - d_layer_3_j + d_layer_fm_i - d_layer_fm_j  # [B]
        self.logits = i_b + d_layer_3_i + d_layer_fm_i

        u_emb_all = tf.expand_dims(u_emb, 1)
        u_emb_all = tf.tile(u_emb_all, [1, item_count, 1])
        # logits for all item:
        all_emb = tf.concat(
            [item_emb_w,
             tf.nn.embedding_lookup(cate_emb_w, cate_list)],
            axis=1)
        all_emb = tf.expand_dims(all_emb, 0)
        all_emb = tf.tile(all_emb, [512, 1, 1])
        din_all = tf.concat([u_emb_all, all_emb], axis=-1)
        din_all = tf.layers.batch_normalization(inputs=din_all,
                                                name='b1',
                                                reuse=True)
        d_layer_1_all = tf.layers.dense(din_all,
                                        80,
                                        activation=tf.nn.sigmoid,
                                        name='f1',
                                        reuse=True)
        d_layer_2_all = tf.layers.dense(d_layer_1_all,
                                        40,
                                        activation=tf.nn.sigmoid,
                                        name='f2',
                                        reuse=True)
        d_layer_3_all = tf.layers.dense(d_layer_2_all,
                                        1,
                                        activation=None,
                                        name='f3',
                                        reuse=True)
        d_layer_fm_all = tf.concat([
            tf.reduce_sum(u_emb_all * all_emb, axis=-1, keep_dims=True),
            tf.gather(u_emb_all, [0], axis=-1) +
            tf.gather(all_emb, [0], axis=-1)
        ],
                                   axis=-1)
        d_layer_fm_all = tf.layers.dense(d_layer_fm_all,
                                         1,
                                         activation=None,
                                         name='f_fm',
                                         reuse=True)
        d_layer_3_all = tf.reshape(d_layer_3_all, [-1, item_count])
        d_layer_fm_all = tf.reshape(d_layer_fm_all, [-1, item_count])
        self.logits_all = tf.sigmoid(item_b + d_layer_3_all + d_layer_fm_all)
        #-- fcn end -------

        self.mf_auc = tf.reduce_mean(tf.to_float(x > 0))
        self.score_i = tf.sigmoid(i_b + d_layer_3_i + d_layer_fm_i)
        self.score_j = tf.sigmoid(j_b + d_layer_3_j + d_layer_fm_j)
        self.score_i = tf.reshape(self.score_i, [-1, 1])
        self.score_j = tf.reshape(self.score_j, [-1, 1])
        self.p_and_n = tf.concat([self.score_i, self.score_j], axis=-1)
        print(self.p_and_n.get_shape().as_list())

        # Step variable
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        self.global_epoch_step = \
            tf.Variable(0, trainable=False, name='global_epoch_step')
        self.global_epoch_step_op = \
            tf.assign(self.global_epoch_step, self.global_epoch_step+1)

        regulation_rate = 0.0
        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,
                                                    labels=self.y))

        trainable_params = tf.trainable_variables()
        self.opt = tf.train.AdamOptimizer(learning_rate=self.lr)
        # deepFM get worse result with SGD
        #self.opt = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
        gradients = tf.gradients(self.loss, trainable_params)
        clip_gradients, _ = tf.clip_by_global_norm(gradients, 5)
        self.train_op = self.opt.apply_gradients(zip(clip_gradients,
                                                     trainable_params),
                                                 global_step=self.global_step)
Example #33
def cifar10(
        path,  # pylint: disable=invalid-name
        conv_channels=None,
        linear_layers=None,
        batch_norm=True,
        batch_size=128,
        num_threads=4,
        min_queue_examples=1000,
        mode="train"):
    """Cifar10 classification with a convolutional network."""

    # Data.
    _maybe_download_cifar10(path)

    # Read images and labels from disk.
    if mode == "train":
        filenames = [
            os.path.join(path, CIFAR10_FOLDER, "data_batch_{}.bin".format(i))
            for i in xrange(1, 6)
        ]
    elif mode == "test":
        filenames = [os.path.join(path, "test_batch.bin")]
    else:
        raise ValueError("Mode {} not recognised".format(mode))

    depth = 3
    height = 32
    width = 32
    label_bytes = 1
    image_bytes = depth * height * width
    record_bytes = label_bytes + image_bytes
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    _, record = reader.read(tf.train.string_input_producer(filenames))
    record_bytes = tf.decode_raw(record, tf.uint8)

    label = tf.cast(tf.slice(record_bytes, [0], [label_bytes]), tf.int32)
    raw_image = tf.slice(record_bytes, [label_bytes], [image_bytes])
    image = tf.cast(tf.reshape(raw_image, [depth, height, width]), tf.float32)
    # height x width x depth.
    image = tf.transpose(image, [1, 2, 0])
    image = tf.div(image, 255)

    queue = tf.RandomShuffleQueue(
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples,
        dtypes=[tf.float32, tf.int32],
        shapes=[image.get_shape(), label.get_shape()])
    enqueue_ops = [queue.enqueue([image, label]) for _ in xrange(num_threads)]
    tf.train.add_queue_runner(tf.train.QueueRunner(queue, enqueue_ops))

    # Network.
    def _conv_activation(x):  # pylint: disable=invalid-name
        return tf.nn.max_pool(tf.nn.relu(x),
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding="SAME")

    conv = snt.nets.ConvNet2D(output_channels=conv_channels,
                              kernel_shapes=[5],
                              strides=[1],
                              paddings=[snt.SAME],
                              activation=_conv_activation,
                              activate_final=True,
                              initializers=_nn_initializers,
                              use_batch_norm=batch_norm)

    if batch_norm:
        linear_activation = lambda x: tf.nn.relu(snt.BatchNorm()(x))
    else:
        linear_activation = tf.nn.relu

    mlp = snt.nets.MLP(list(linear_layers) + [10],
                       activation=linear_activation,
                       initializers=_nn_initializers)
    network = snt.Sequential([conv, snt.BatchFlatten(), mlp])

    def build():
        image_batch, label_batch = queue.dequeue_many(batch_size)
        label_batch = tf.reshape(label_batch, [batch_size])

        output = network(image_batch)
        return _xent_loss(output, label_batch)

    return build
Example #34
 def softmax(self, target, axis, name=None):
     max_axis = tf.reduce_max(target, axis, keepdims=True)
     target_exp = tf.exp(target - max_axis)
     normalize = tf.reduce_sum(target_exp, axis, keepdims=True)
     softmax = tf.div(target_exp, normalize, name)
     return softmax
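Subtracting the row maximum before exponentiating is the standard numerical-stability trick; it leaves the result unchanged because softmax is shift-invariant. A NumPy illustration with large logits:

import numpy as np

x = np.array([1000.0, 1001.0, 1002.0])
shifted = np.exp(x - x.max())           # no overflow
print(shifted / shifted.sum())          # [0.09003057 0.24472847 0.66524096]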
Example #35
def ow_pooling(x,
               weights,
               padding='SAME',
               strides=(2, 2),
               pool_size=(2, 2),
               norm='None',
               sort=True):

    _, height, width, channels = x.get_shape().as_list()
    pad_bottom = pool_size[0] * height % pool_size[0]
    pad_right = pool_size[1] * width % pool_size[1]

    if (padding == 'SAME'):  # Complete size to pad 'SAME'
        paddings = tf.constant([[0, 0], [0, pad_bottom], [0, pad_right],
                                [0, 0]])
        x = tf.pad(x, paddings, "CONSTANT")

    # Extract pooling regions
    stride = [1, strides[0], strides[1], 1]
    ksize = [1, pool_size[0], pool_size[1], 1]

    x = tf.extract_image_patches(x,
                                 ksizes=ksize,
                                 strides=stride,
                                 rates=[1, 1, 1, 1],
                                 padding='SAME')

    _, pool_height, pool_width, elems = x.get_shape().as_list()

    # Extract pooling regions for each channel
    elems = int(elems / channels)
    x = tf.reshape(
        x, [-1, pool_height, pool_width, elems, channels])  # Reshape tensor
    x = tf.transpose(x, perm=[0, 1, 2, 4, 3])

    # Sort values for pooling
    if sort:
        if ((pool_size[0] == 2) and (pool_size[1] == 2)):
            x = sort_p2x2(x)
        elif ((pool_size[0] == 3) and (pool_size[1] == 3)):
            x = sort_p3x3(x)
        else:
            x = sort_p(x)

    if norm == 'w_norm':
        assign_op = weights.assign(
            tf.div(weights, tf.reduce_sum(tf.abs(weights))))
        with tf.control_dependencies([assign_op]):
            x = weights * x
    elif norm == 'w_norm_p':
        assign_op = weights.assign(
            tf.div(tf.maximum(weights, 0.0001),
                   tf.reduce_sum(tf.maximum(weights, 0.0001))))
        with tf.control_dependencies([assign_op]):
            x = weights * x
    elif norm == 'w2_norm':
        assign_op = weights.assign(
            tf.div(
                weights,
                tf.transpose(
                    tf_repeat([tf.reduce_sum(tf.abs(weights), 1)],
                              [tf.shape(weights)[1], 1]))))
        with tf.control_dependencies([assign_op]):
            x = weights * x
    elif norm == 'w2_norm_p':
        assign_op = weights.assign(
            tf.div(
                tf.maximum(weights, 0.0001),
                tf.transpose(
                    tf_repeat([tf.reduce_sum(tf.maximum(weights, 0.0001), 1)],
                              [tf.shape(weights)[1], 1]))))
        with tf.control_dependencies([assign_op]):
            x = weights * x
    else:
        x = weights * x

    x = tf.reduce_sum(x, 4)  # Reduce the 4th dimension
    return x
Example #36
    def body(depth_index, state1, state2, state3, depth_image, max_prob_image,
             exp_sum, incre):
        """Loop body."""

        # calculate cost
        ave_feature = ref_tower.get_output()
        ave_feature2 = tf.square(ref_tower.get_output())
        for view in range(0, FLAGS.view_num - 1):
            homographies = view_homographies[view]
            homographies = tf.transpose(homographies, perm=[1, 0, 2, 3])
            homography = homographies[depth_index]
            # warped_view_feature = homography_warping(view_towers[view].get_output(), homography)
            warped_view_feature = tf_transform_homography(
                view_towers[view].get_output(), homography)
            ave_feature = ave_feature + warped_view_feature
            ave_feature2 = ave_feature2 + tf.square(warped_view_feature)
        ave_feature = ave_feature / FLAGS.view_num
        ave_feature2 = ave_feature2 / FLAGS.view_num
        cost = ave_feature2 - tf.square(ave_feature)
        cost.set_shape(
            [FLAGS.batch_size, feature_shape[1], feature_shape[2], 32])

        # gru
        reg_cost1, state1 = conv_gru1(-cost, state1, scope='conv_gru1')
        reg_cost2, state2 = conv_gru2(reg_cost1, state2, scope='conv_gru2')
        reg_cost3, state3 = conv_gru3(reg_cost2, state3, scope='conv_gru3')
        reg_cost = tf.layers.conv2d(reg_cost3,
                                    1,
                                    3,
                                    padding='same',
                                    reuse=tf.AUTO_REUSE,
                                    name='prob_conv')
        prob = tf.exp(reg_cost)

        # index
        d_idx = tf.cast(depth_index, tf.float32)
        if inverse_depth:
            inv_depth_start = tf.div(1.0, depth_start)
            inv_depth_end = tf.div(1.0, depth_end)
            inv_interval = (inv_depth_start - inv_depth_end) / (
                tf.cast(depth_num, 'float32') - 1)
            inv_depth = inv_depth_start - d_idx * inv_interval
            depth = tf.div(1.0, inv_depth)
        else:
            depth = depth_start + d_idx * depth_interval
        temp_depth_image = tf.reshape(depth, [FLAGS.batch_size, 1, 1, 1])
        temp_depth_image = tf.tile(temp_depth_image,
                                   [1, feature_shape[1], feature_shape[2], 1])

        # update the best
        update_flag_image = tf.cast(tf.less(max_prob_image, prob),
                                    dtype='float32')
        new_max_prob_image = update_flag_image * prob + (
            1 - update_flag_image) * max_prob_image
        new_depth_image = update_flag_image * temp_depth_image + (
            1 - update_flag_image) * depth_image
        max_prob_image = tf.assign(max_prob_image, new_max_prob_image)
        depth_image = tf.assign(depth_image, new_depth_image)

        # update counter
        exp_sum = tf.assign_add(exp_sum, prob)
        depth_index = tf.add(depth_index, incre)

        return depth_index, state1, state2, state3, depth_image, max_prob_image, exp_sum, incre
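
The inverse_depth branch above spaces the depth samples uniformly in 1/depth rather than in depth. A plain-Python sketch with made-up values shows the resulting sample positions:

depth_start, depth_end, depth_num = 2.0, 10.0, 5
inv_depth_start, inv_depth_end = 1.0 / depth_start, 1.0 / depth_end
inv_interval = (inv_depth_start - inv_depth_end) / (depth_num - 1)
depths = [1.0 / (inv_depth_start - i * inv_interval) for i in range(depth_num)]
print(depths)  # [2.0, 2.5, 3.33..., 5.0, 10.0] -- samples are denser near depth_start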
Example #37
0
def _div_maybe_zero(total_loss, num_present):
  """Normalizes the total loss with the number of present pixels."""
  return tf.to_float(num_present > 0) * tf.div(total_loss,
                                               tf.maximum(1e-5, num_present))
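
A small usage sketch for _div_maybe_zero, assuming TensorFlow 1.x and toy values: when no pixels are present the loss is zeroed out instead of being blown up by the 1e-5 floor.

import tensorflow as tf

total_loss = tf.constant(12.0)
num_present = tf.constant(0.0)  # e.g. a batch with no labeled pixels
safe_mean = _div_maybe_zero(total_loss, num_present)

with tf.Session() as sess:
    print(sess.run(safe_mean))  # 0.0, not 12.0 / 1e-5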
    def __init__(self, sess):
        self.sess = sess
        self.global_step = tf.Variable(0.0,
                                       name='global_step',
                                       dtype=tf.float32,
                                       trainable=False)

        #for data input
        self.pipline_data_train = cdata.get_pipline_data_train(
            img_size, batchsize)
        self.pipline_data_test = cdata.get_pipline_data_test(
            img_size, batchsize_test)

        # 3 placeholders: imgs, noise and the training flag
        self.imgs_pla = tf.placeholder(
            tf.float32,
            [batchsize, img_size_h, img_size_w, G_group_img_num * img_channel],
            name='imgs_in')
        self.training = tf.placeholder(tf.bool,
                                       name='training_in')  # not used here, kept for compatibility
        self.timerates_pla = tf.placeholder(tf.float32, [batchsize],
                                            name='timerates_in')
        self.timerates_expand = tf.expand_dims(self.timerates_pla, -1)
        self.timerates_expand = tf.expand_dims(self.timerates_expand, -1)
        self.timerates_expand = tf.expand_dims(self.timerates_expand,
                                               -1)  #12*1*1*1

        print('placeholders:\n', 'img_placeholder:', self.imgs_pla,
              self.timerates_pla)
        #img_placeholder: Tensor("imgs_in:0", shape=(10, 180, 320, 9), dtype=float32) Tensor("timerates_in:0", shape=(10,), dtype=float32)

        self.frame0 = self.imgs_pla[:, :, :, :img_channel]
        self.frame1 = self.imgs_pla[:, :, :, img_channel:img_channel * 2]
        self.frame2 = self.imgs_pla[:, :, :, img_channel * 2:]

        with tf.variable_scope("first_unet", reuse=tf.AUTO_REUSE) as scope:
            firstinput = tf.concat([self.frame0, self.frame2], -1)
            #self.first_opticalflow=my_unet( firstinput, 4,training=self.training , withbias=True, withbn=False)  # note: used directly as the optical flow
            self.first_opticalflow = my_unet_split(
                firstinput,
                4,
                training=self.training,
                withbias=True,
                withbn=True)  # note: used directly as the optical flow

        self.first_opticalflow_0_1 = self.first_opticalflow[:, :, :, :2]
        self.first_opticalflow_0_1 = tf.identity(self.first_opticalflow_0_1,
                                                 name="first_opticalflow_0_1")
        print('first_opticalflow_0_1:', self.first_opticalflow_0_1)
        self.first_opticalflow_1_0 = self.first_opticalflow[:, :, :, 2:]
        self.first_opticalflow_1_0 = tf.identity(self.first_opticalflow_1_0,
                                                 name="first_opticalflow_1_0")
        print('first_opticalflow_1_0:', self.first_opticalflow_1_0)
        #first_opticalflow_0_1: Tensor("first_opticalflow_0_1:0", shape=(10, 180, 320, 2), dtype=float32)
        #first_opticalflow_1_0: Tensor("first_opticalflow_1_0:0", shape=(10, 180, 320, 2), dtype=float32)

        # shape of the output optical flow
        self.flow_size_h = self.first_opticalflow_0_1.get_shape().as_list()[1]
        self.flow_size_w = self.first_opticalflow_0_1.get_shape().as_list()[2]
        self.flow_channel = self.first_opticalflow_0_1.get_shape().as_list(
        )[-1]

        ########################################################
        self.step2_flow_channel = 5
        self.flow_shape = [
            self.flow_size_h, self.flow_size_w, self.step2_flow_channel
        ]
        # CPU-side parameters used when loading data, for augmentation and for keeping track of temporal order
        self.last_flow_init_np = np.zeros(self.flow_shape, dtype=np.float32)
        print(self.last_flow_init_np.shape)  #(180, 320, 5)
        ##############################################################

        self.last_optical_flow = tf.placeholder(tf.float32,
                                                self.flow_shape,
                                                name='second_last_flow')

        # initialize the zero state for train and test
        self.last_flow_new_train = self.last_flow_init_np
        self.last_flow_new_test = self.last_flow_init_np

        # derive the intermediate-frame optical flows from the bidirectional flows
        self.first_opticalflow_t_0=tf.add( -(1-self.timerates_expand)*self.timerates_expand*self.first_opticalflow_0_1 ,\
                                      self.timerates_expand*self.timerates_expand*self.first_opticalflow_1_0 , name="first_opticalflow_t_0")
        self.first_opticalflow_t_2=tf.add( (1-self.timerates_expand)*(1-self.timerates_expand)*self.first_opticalflow_0_1 ,\
                                      self.timerates_expand*(self.timerates_expand-1)*self.first_opticalflow_1_0, name="first_opticalflow_t_2")

        # synthesize the frame at time t in two ways
        self.first_img_flow_2_t = self.warp_op(
            self.frame2, -self.first_opticalflow_t_2)  #!!!
        self.first_img_flow_0_t = self.warp_op(
            self.frame0, -self.first_opticalflow_t_0)  #!!!

        # the paper does not use the first stage's intermediate frame, but output it here anyway to inspect the result
        self.first_output = tf.add(
            self.timerates_expand * self.first_img_flow_2_t,
            (1 - self.timerates_expand) * self.first_img_flow_0_t,
            name="first_outputimg")
        print('first output img:', self.first_output)
        #first output img: Tensor("first_outputimg:0", shape=(10, 180, 320, 3), dtype=float32)

        # warp each of the two frames into the other using the optical flows
        self.first_img_flow_2_0 = self.warp_op(
            self.frame2, -self.first_opticalflow_0_1)  #frame2->frame0
        self.first_img_flow_0_2 = self.warp_op(
            self.frame0, -self.first_opticalflow_1_0)  #frame0->frame2

        ####################################################################################################################
        # second U-Net
        with tf.variable_scope("second_unet", reuse=tf.AUTO_REUSE) as scope:
            secinput=tf.concat([self.frame0[0], self.frame2[0], \
                                self.first_opticalflow_0_1[0], self.first_opticalflow_1_0[0], \
                                self.first_opticalflow_t_2[0], self.first_opticalflow_t_0[0],\
                                self.first_img_flow_2_t[0], self.first_img_flow_0_t[0],\
                                self.last_optical_flow], -1)
            secinput = tf.expand_dims(secinput, 0)
            print(
                "secinput:", secinput
            )  #secinput: Tensor("second_unet/ExpandDims:0", shape=(1, 180, 320, 25), dtype=float32)

            step2_withbn = False
            new_step2_flow = my_unet(
                secinput,
                self.step2_flow_channel,
                training=self.training,
                withbias=True,
                withbn=step2_withbn)  # note: used directly as the optical flow
            kep_step2_flow = [new_step2_flow]
            print("new_step2_flow:", new_step2_flow)
            #new_step2_flow: Tensor("second_unet/unet_end0_relu/LeakyRelu:0", shape=(1, 180, 320, 5), dtype=float32)

            for ti in range(1, batchsize):
                secinput=tf.concat([self.frame0[ti], self.frame2[ti], \
                                self.first_opticalflow_0_1[ti], self.first_opticalflow_1_0[ti], \
                                self.first_opticalflow_t_2[ti], self.first_opticalflow_t_0[ti],\
                                self.first_img_flow_2_t[ti], self.first_img_flow_0_t[ti],\
                                new_step2_flow[0] ], -1)
                secinput = tf.expand_dims(secinput, 0)
                new_step2_flow = my_unet(secinput,
                                         self.step2_flow_channel,
                                         withbias=True,
                                         withbn=step2_withbn)
                kep_step2_flow.append(new_step2_flow)

            self.second_batch_last_flow = new_step2_flow[0]
            #self.second_batch_last_flow=tf.identity(self.second_batch_last_flow, name="second_batch_last_flow")
            print(
                "second_batch_last_flow:", self.second_batch_last_flow
            )  #Tensor("second_unet/strided_slice_89:0", shape=(180, 320, 5), dtype=float32)
            self.second_opticalflow = tf.concat(kep_step2_flow, 0)
            print(
                "self.second_opticalflow:", self.second_opticalflow
            )  #self.second_opticalflow: Tensor("second_unet/concat_60:0", shape=(10, 180, 320, 5), dtype=float32)
        self.second_opticalflow_t_0 = tf.add(
            self.second_opticalflow[:, :, :, :2],
            self.first_opticalflow_t_0,
            name="second_opticalflow_t_0")
        self.second_opticalflow_t_1 = tf.add(self.second_opticalflow[:, :, :,
                                                                     2:4],
                                             self.first_opticalflow_t_2,
                                             name="second_opticalflow_t_1")
        print('second_opticalflow_t_0:', self.second_opticalflow_t_0)
        print('second_opticalflow_t_1:', self.second_opticalflow_t_1)
        #second_opticalflow_t_0: Tensor("second_opticalflow_t_0:0", shape=(10, 180, 320, 2), dtype=float32)
        #second_opticalflow_t_1: Tensor("second_opticalflow_t_1:0", shape=(10, 180, 320, 2), dtype=float32)

        self.vmap_t_0 = tf.expand_dims(
            tf.sigmoid(self.second_opticalflow[:, :, :, -1]), -1)
        self.vmap_t_1 = 1 - self.vmap_t_0

        # synthesize the frame at time t in two ways
        self.second_img_flow_1_t = self.warp_op(
            self.frame2, -self.second_opticalflow_t_1)  #!!!
        self.second_img_flow_0_t = self.warp_op(
            self.frame0, -self.second_opticalflow_t_0)  #!!!

        # final output image
        print(self.timerates_expand, self.vmap_t_0, self.second_img_flow_0_t)
        #Tensor("ExpandDims_2:0", shape=(6, 1, 1, 1), dtype=float32) Tensor("Sigmoid:0", shape=(6, 180, 320, 1), dtype=float32)
        #Tensor("dense_image_warp_5/Reshape_1:0", shape=(6, 180, 320, 3), dtype=float32)
        self.second_output=tf.div(  ( (1-self.timerates_expand)*self.vmap_t_0*self.second_img_flow_0_t+self.timerates_expand*self.vmap_t_1*self.second_img_flow_1_t),  \
                             ((1-self.timerates_expand)*self.vmap_t_0+self.timerates_expand*self.vmap_t_1) , name="second_outputimg" )
        print('second output img:', self.second_output)
        #second output img: Tensor("second_outputimg:0", shape=(10, 180, 320, 3), dtype=float32)

        # build the discriminator network
        self.D_1_net_F, self.D_1_net_F_logit = Discriminator_net(
            self.second_output, name="D1", training=self.training)
        self.D_1_net_T, self.D_1_net_T_logit = Discriminator_net(
            self.frame1, name="D1", training=self.training)
        # discriminator loss
        self.D_1_net_loss_sum, _, _ = self.D_loss_TandF_logits(
            self.D_1_net_T_logit, self.D_1_net_F_logit, "D_1_net")

        # compute the losses
        self.second_L1_loss_interframe,self.first_warp_loss,self.second_contex_loss,self.second_local_var_loss_all,self.second_global_var_loss_all,self.second_ssim,self.second_psnr,\
                self.first_L1_loss_interframe, self.first_ssim, self.first_psnr, self.second_GAN_loss_mean_D1=self.loss_cal_all()

        # total loss for training G
        self.G_loss_all = 100 * self.second_L1_loss_interframe + 30 * (
            self.first_L1_loss_interframe +
            self.first_warp_loss) + 0.05 * self.second_contex_loss
        #self.second_global_var_loss_all
        #+ self.second_GAN_loss_mean_D1*0.03

        # total loss for training D
        self.D_loss_all = self.D_1_net_loss_sum

        #####################################
        self.last_label_train = '#'
        self.last_label_test = '#'
        self.state_random_row_train = 0
        self.state_random_col_train = 0
        self.state_flip_train = False

        self.state_random_row_test = 0
        self.state_random_col_test = 0
        self.state_flip_test = False

        # for compatibility
        self.batchsize_inputimg = batchsize
        self.img_size_w = img_size_w
        self.img_size_h = img_size_h

        t_vars = tf.trainable_variables()
        print("trainable vars cnt:", len(t_vars))
        self.first_para = [
            var for var in t_vars if var.name.startswith('first')
        ]
        self.sec_para = [
            var for var in t_vars if var.name.startswith('second')
        ]
        self.vgg_para = [var for var in t_vars if var.name.startswith('VGG')]
        self.D_para = [var for var in t_vars if var.name.startswith('D')]
        print("first param len:", len(self.first_para))
        print("second param len:", len(self.sec_para))
        print("VGG param len:", len(self.vgg_para))
        print("D param len:", len(self.D_para))
        print(self.vgg_para)
        '''
        trainable vars cnt: 144
        first param len: 46
        second param len: 46
        VGG param len: 52
        '''

        # G training step
        self.lr_rate = tf.train.exponential_decay(base_lr,
                                                  global_step=self.global_step,
                                                  decay_steps=decay_steps,
                                                  decay_rate=decay_rate)
        self.train_op_G = tf.train.AdamOptimizer(self.lr_rate, beta1=beta1, name="superslomo_adam_G").minimize(self.G_loss_all,  \
                                                                                              global_step=self.global_step  , var_list=self.first_para+self.sec_para  )

        # weight clipping
        self.clip_D = [
            p.assign(tf.clip_by_value(p, weightclip_min, weightclip_max))
            for p in self.D_para
        ]

        # D training step
        self.train_op_D = tf.train.AdamOptimizer(
            self.lr_rate, beta1=beta1,
            name="superslomo_adam_D").minimize(self.D_loss_all,
                                               var_list=self.D_para)

        # initialize all variables once the graph is built
        self.sess.run(tf.global_variables_initializer())
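
The second_outputimg line above fuses the two warped frames with time- and visibility-weights and renormalizes by the weight sum. A toy sketch of that blend, assuming TensorFlow 1.x and made-up per-pixel values:

import tensorflow as tf

t = tf.constant(0.25)       # time rate of the intermediate frame
v0 = tf.constant([[0.8]])   # visibility of frame0 at this pixel
v1 = 1.0 - v0
g0 = tf.constant([[10.0]])  # frame0 warped to time t
g1 = tf.constant([[20.0]])  # frame2 warped to time t

blend = tf.div((1 - t) * v0 * g0 + t * v1 * g1,
               (1 - t) * v0 + t * v1)

with tf.Session() as sess:
    print(sess.run(blend))  # ~10.77, pulled toward g0 because v0 and (1 - t) are large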
def detect_visualize(tfrecords, bbox_priors, checkpoint_path, cfg):

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    graph = tf.Graph()

    #logdir = os.path.dirname(checkpoint_path)
    #feature_cache_file = os.path.join(logdir, "feature_cache_created")
    #if not os.path.exists(feature_cache_file):
    feature_cache_file = None

    # Force all Variables to reside on the CPU.
    with graph.as_default():
        if feature_cache_file:
            with open(feature_cache_file) as f:
                feat_shape = [int(a) for a in f.read().split(' ')]
            batched_features, batched_bboxes, batched_num_bboxes, image_ids = inputs.input_nodes_precomputed_features(
                tfrecords=tfrecords,
                max_num_bboxes=cfg.MAX_NUM_BBOXES,
                num_epochs=1,
                batch_size=cfg.BATCH_SIZE,
                num_threads=cfg.NUM_INPUT_THREADS,
                capacity=cfg.QUEUE_CAPACITY,
                min_after_dequeue=cfg.QUEUE_MIN,
                cfg=cfg,
                feat_shape=feat_shape)
        else:
            batched_images, batched_offsets, batched_dims, batched_is_flipped, batched_bbox_restrictions, batched_max_to_keep, batched_heights_widths, batched_image_ids = input_nodes(
                tfrecords=tfrecords,
                num_epochs=1,
                batch_size=cfg.BATCH_SIZE,
                num_threads=cfg.NUM_INPUT_THREADS,
                capacity=cfg.QUEUE_CAPACITY,
                cfg=cfg)

        batch_norm_params = {
            # Decay for the batch_norm moving averages.
            'decay': cfg.BATCHNORM_MOVING_AVERAGE_DECAY,
            # epsilon to prevent 0s in variance.
            'epsilon': 0.001,
            'variables_collections': [tf.GraphKeys.MOVING_AVERAGE_VARIABLES],
            'is_training': False
        }
        with slim.arg_scope([slim.conv2d],
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params,
                            weights_regularizer=slim.l2_regularizer(0.00004),
                            biases_regularizer=slim.l2_regularizer(0.00004)):

            locations, confidences, inception_vars = model.build(
                inputs=batched_images,
                num_bboxes_per_cell=cfg.NUM_BBOXES_PER_CELL,
                reuse=False,
                scope='')

        ema = tf.train.ExponentialMovingAverage(decay=cfg.MOVING_AVERAGE_DECAY)
        shadow_vars = {
            ema.average_name(var): var
            for var in slim.get_model_variables()
        }

        # Restore the parameters
        saver = tf.train.Saver(shadow_vars, reshape=True)

        fetches = [
            locations, confidences, batched_offsets, batched_dims,
            batched_is_flipped, batched_bbox_restrictions, batched_max_to_keep,
            batched_heights_widths, batched_image_ids, batched_images
        ]

        coord = tf.train.Coordinator()

        sess_config = tf.ConfigProto(
            log_device_placement=False,
            #device_filters = device_filters,
            allow_soft_placement=True,
            gpu_options=tf.GPUOptions(
                per_process_gpu_memory_fraction=cfg.SESSION_CONFIG.
                PER_PROCESS_GPU_MEMORY_FRACTION))
        sess = tf.Session(graph=graph, config=sess_config)

        # Little utility to convert the float images to uint8
        image_to_convert = tf.placeholder(tf.float32)
        convert_image_to_uint8 = tf.image.convert_image_dtype(
            tf.add(tf.div(image_to_convert, 2.0), 0.5), tf.uint8)

        detection_results = []
        with sess.as_default():

            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:

                if tf.gfile.IsDirectory(checkpoint_path):
                    checkpoint_path = tf.train.latest_checkpoint(
                        checkpoint_path)

                if checkpoint_path is None:
                    print "ERROR: No checkpoint file found."
                    return

                # Restores from checkpoint
                saver.restore(sess, checkpoint_path)
                # Assuming model_checkpoint_path looks something like:
                #   /my-favorite-path/cifar10_train/model.ckpt-0,
                # extract global_step from it.
                global_step = int(
                    checkpoint_path.split('/')[-1].split('-')[-1])
                print "Found model for global step: %d" % (global_step, )

                print_str = ', '.join(['Step: %d', 'Time/image (ms): %.1f'])

                plt.ion()
                original_image = None
                current_image_id = None

                step = 0
                done = False
                while not coord.should_stop() and not done:

                    t = time.time()
                    outputs = sess.run(fetches)
                    dt = time.time() - t

                    locs = outputs[0]
                    confs = outputs[1]
                    patch_offsets = outputs[2]
                    patch_dims = outputs[3]
                    patch_is_flipped = outputs[4]
                    patch_bbox_restrictions = outputs[5]
                    patch_max_to_keep = outputs[6]
                    image_height_widths = outputs[7]
                    image_ids = outputs[8]

                    images = outputs[9]

                    for b in range(cfg.BATCH_SIZE):

                        print "Patch Dims: ", patch_dims[b]
                        print "Patch Offset: ", patch_offsets[b]
                        print "Max to keep: ", patch_max_to_keep[b]
                        print "Patch restrictions: ", patch_bbox_restrictions[
                            b]
                        print "Image HxW: ", image_height_widths[b]
                        print

                        if current_image_id is None or current_image_id != image_ids[
                                b]:
                            original_image = images[b]
                            original_image = sess.run(
                                convert_image_to_uint8,
                                {image_to_convert: images[b]})
                            current_image_id = image_ids[b]

                        img_id = int(np.asscalar(image_ids[b]))

                        predicted_bboxes = locs[b] + bbox_priors
                        predicted_bboxes = np.clip(predicted_bboxes, 0., 1.)
                        predicted_confs = confs[b]

                        # Keep only the predictions that are completely contained in the [0.1, 0.1, 0.9, 0.9] square
                        # for this patch
                        #if patch_restrict[b]:
                        #  filtered_bboxes, filtered_confs = filter_proposals(predicted_bboxes, predicted_confs)
                        #else:
                        #  filtered_bboxes = predicted_bboxes
                        #  filtered_confs = predicted_confs
                        filtered_bboxes, filtered_confs = filter_proposals(
                            predicted_bboxes, predicted_confs,
                            patch_bbox_restrictions[b])

                        # No valid predictions?
                        if filtered_bboxes.shape[0] == 0:
                            continue

                        # Lets get rid of some of the predictions
                        num_preds_to_keep = np.asscalar(patch_max_to_keep[b])
                        sorted_idxs = np.argsort(filtered_confs.ravel())[::-1]
                        sorted_idxs = sorted_idxs[:num_preds_to_keep]
                        filtered_bboxes = filtered_bboxes[sorted_idxs]
                        filtered_confs = filtered_confs[sorted_idxs]

                        plt.figure('Cropped Size')
                        uint8_image = sess.run(convert_image_to_uint8,
                                               {image_to_convert: images[b]})
                        plt.imshow(uint8_image)
                        num_detections_to_render = min(
                            filtered_bboxes.shape[0], 10)
                        for i in range(num_detections_to_render):

                            loc = filtered_bboxes[i].ravel()
                            conf = filtered_confs[i]

                            #print "Location: ", loc
                            #print "Conf: ", conf

                            # Plot the predicted location in red
                            xmin, ymin, xmax, ymax = loc * cfg.INPUT_SIZE
                            plt.plot([xmin, xmax, xmax, xmin, xmin],
                                     [ymin, ymin, ymax, ymax, ymin], 'r-')

                        # Convert the bounding boxes to the original image dimensions
                        converted_bboxes = convert_proposals(
                            bboxes=filtered_bboxes,
                            offset=patch_offsets[b],
                            patch_dims=patch_dims[b],
                            image_dims=image_height_widths[b],
                            is_flipped=patch_is_flipped[b])

                        plt.figure('Resized')
                        plt.imshow(original_image)
                        num_detections_to_render = min(
                            converted_bboxes.shape[0], 10)
                        for i in range(num_detections_to_render):

                            loc = converted_bboxes[i].ravel()
                            conf = filtered_confs[i]

                            #print "Location: ", loc
                            #print "Conf: ", conf

                            # Plot the predicted location in red
                            xmin, ymin, xmax, ymax = loc * cfg.INPUT_SIZE
                            plt.plot([xmin, xmax, xmax, xmin, xmin],
                                     [ymin, ymin, ymax, ymax, ymin], 'r-')

                        r = input("press button: ")
                        if r != "":
                            done = True
                            break

                        plt.close('all')

                    step += 1
                    print(print_str % (step, (dt / cfg.BATCH_SIZE) * 1000))

            except tf.errors.OutOfRangeError as e:
                pass

            coord.request_stop()
            coord.join(threads)
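
The small utility above maps network inputs in [-1, 1] back to [0, 1] with x/2 + 0.5 before converting to uint8 for plotting. A minimal sketch of that conversion, assuming TensorFlow 1.x:

import tensorflow as tf

image_to_convert = tf.placeholder(tf.float32)
convert_image_to_uint8 = tf.image.convert_image_dtype(
    tf.add(tf.div(image_to_convert, 2.0), 0.5), tf.uint8)

with tf.Session() as sess:
    print(sess.run(convert_image_to_uint8,
                   {image_to_convert: [[-1.0, 0.0, 1.0]]}))  # [[0, ~128, 255]]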
Example #40
0
c = tf.constant(3.0, name='constant_c')
d = tf.constant(100.2, name='constant_d')

square = tf.square(a, name='square_a')
power = tf.pow(b,c, name='pow_b_c')
sqrt = tf.sqrt(d, name='sqrt_d')
final_sum = tf.add_n([square, power, sqrt], name='final_sum')
 '''

x = tf.placeholder(tf.int32, shape=[3], name='x')
y = tf.placeholder(tf.int32, shape=[3], name='y')
sum_x = tf.reduce_sum(x, name='sum_x')
prod_y = tf.reduce_prod(y, name='prod_y')

final_div = tf.div(sum_x, prod_y, name="final_div")
final_mean = tf.reduce_mean([sum_x, prod_y], name="final_mean")

sess = tf.Session()
''' print("square of a: ", sess.run(square))
print( "power of b ^ c: ", sess.run(power))
print( "square root d: ", sess.run(square))

print( "Sum of above : ", sess.run(final_sum)) '''

print("sum(x): ", sess.run(sum_x, feed_dict={x: [100, 200, 300]}))
print("prod(y): ", sess.run(prod_y, feed_dict={y: [1, 2, 3]}))
print("sum(x)/prod(y): ",
      sess.run(final_div, feed_dict={
          x: [10, 20, 30],
          y: [1, 2, 3]
Example #41
0
def get_train_model(num_channels, label_len, b, img_size):
    inputs = tf.placeholder(tf.float32,
                            shape=(b, img_size[0], img_size[1], num_channels))

    # targets = tf.sparse_placeholder(tf.int32)

    seq_len = tf.placeholder(tf.int32, [None])

    x = inputs

    x = conv(x, num_channels, 64, ksize=[3, 3])
    x = tf.layers.batch_normalization(x, training=True)
    x = tf.nn.relu(x)
    x = tf.nn.max_pool(x,
                       ksize=[1, 3, 3, 1],
                       strides=[1, 1, 1, 1],
                       padding='SAME')
    x = small_basic_block(x, 64, 64)
    x2 = x
    x = tf.layers.batch_normalization(x, training=True)
    x = tf.nn.relu(x)

    x = tf.nn.max_pool(x,
                       ksize=[1, 3, 3, 1],
                       strides=[1, 2, 1, 1],
                       padding='SAME')

    x = small_basic_block(x, 64, 256)
    x = tf.layers.batch_normalization(x, training=True)
    x = tf.nn.relu(x)

    x = small_basic_block(x, 256, 256)
    x3 = x
    x = tf.layers.batch_normalization(x, training=True)
    x = tf.nn.relu(x)

    x = tf.nn.max_pool(x,
                       ksize=[1, 3, 3, 1],
                       strides=[1, 2, 1, 1],
                       padding='SAME')

    x = tf.layers.dropout(x)

    x = conv(x, 256, 256, ksize=[4, 1])
    x = tf.layers.dropout(x)

    x = tf.layers.batch_normalization(x, training=True)
    x = tf.nn.relu(x)

    x = conv(x, 256, NUM_CHARS + 1, ksize=[1, 13], pad='SAME')
    x = tf.nn.relu(x)
    cx = tf.reduce_mean(tf.square(x))
    x = tf.div(x, cx)

    x1 = tf.nn.avg_pool(inputs,
                        ksize=[1, 4, 1, 1],
                        strides=[1, 4, 1, 1],
                        padding='SAME')
    cx1 = tf.reduce_mean(tf.square(x1))
    x1 = tf.div(x1, cx1)

    x2 = tf.nn.avg_pool(x2,
                        ksize=[1, 4, 1, 1],
                        strides=[1, 4, 1, 1],
                        padding='SAME')
    cx2 = tf.reduce_mean(tf.square(x2))
    x2 = tf.div(x2, cx2)

    x3 = tf.nn.avg_pool(x3,
                        ksize=[1, 2, 1, 1],
                        strides=[1, 2, 1, 1],
                        padding='SAME')
    cx3 = tf.reduce_mean(tf.square(x3))
    x3 = tf.div(x3, cx3)

    x = tf.concat([x, x1, x2, x3], 3)
    x = conv(x, x.get_shape().as_list()[3], NUM_CHARS + 1, ksize=(1, 1))
    logits = tf.reduce_mean(x, axis=2)

    return logits, inputs, seq_len
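
Each branch above divides a feature map by the mean of its squared activations, so maps taken from different depths are concatenated on a comparable scale. A toy sketch of that normalization, assuming TensorFlow 1.x:

import tensorflow as tf

x = tf.constant([[1.0, 2.0, 3.0, 4.0]])
cx = tf.reduce_mean(tf.square(x))  # (1 + 4 + 9 + 16) / 4 = 7.5
x_scaled = tf.div(x, cx)

with tf.Session() as sess:
    print(sess.run(x_scaled))      # [[0.1333 0.2667 0.4    0.5333]]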
Example #42
0
    def __init__(self,
                 sess,
                 state_space_size,
                 action_space_size,
                 batch_size,
                 ra_length,
                 history_length,
                 embedding_size,
                 tau,
                 learning_rate,
                 scope='actor'):
        self.sess = sess
        self.state_space_size = state_space_size
        self.action_space_size = action_space_size
        self.batch_size = batch_size
        self.ra_length = ra_length
        self.history_length = history_length
        self.embedding_size = embedding_size
        self.tau = tau
        self.learning_rate = learning_rate
        self.scope = scope

        with tf.variable_scope(self.scope):
            # Build Actor network
            self.action_weights, self.state, self.sequence_length = self._build_net(
                'estimator_actor')
            self.network_params = tf.trainable_variables()

            # Build target Actor network
            self.target_action_weights, self.target_state, self.target_sequence_length = self._build_net(
                'target_actor')
            self.target_network_params = tf.trainable_variables()[len(
                self.network_params
            ):]  # tf.trainable_variables() now also contains the target network's variables, so slicing off the first len(network_params) keeps only the target actor's parameters

            # Initialize target network weights with network weights (θ^π′ ← θ^π)
            self.init_target_network_params = [
                self.target_network_params[i].assign(self.network_params[i])
                for i in range(len(self.target_network_params))
            ]

            # Update target network weights (θ^π′ ← τθ^π + (1 − τ)θ^π′)
            self.update_target_network_params = [
                self.target_network_params[i].assign(
                    tf.multiply(self.tau, self.network_params[i]) +
                    tf.multiply(1 - self.tau, self.target_network_params[i]))
                for i in range(len(self.target_network_params))
            ]

            # Gradient computation from Critic's action_gradients
            self.action_gradients = tf.placeholder(
                tf.float32, [None, self.action_space_size])
            gradients = tf.gradients(
                tf.reshape(self.action_weights,
                           [self.batch_size, self.action_space_size],
                           name='42222222222'), self.network_params,
                self.action_gradients)
            params_gradients = list(
                map(
                    lambda x: tf.div(x, self.batch_size * self.
                                     action_space_size), gradients))

            # Compute ∇_a.Q(s, a|θ^µ).∇_θ^π.f_θ^π(s)
            self.optimizer = tf.train.AdamOptimizer(
                self.learning_rate).apply_gradients(
                    zip(params_gradients, self.network_params))
Example #43
0
def div(x, y, name=None):
    if tf.__version__ < '2.0.0':
        return tf.div(x, y, name=name)
    else:
        return tf.divide(x, y, name=name)
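
A quick usage sketch for the wrapper above: the same call resolves to tf.div on a 1.x install and to tf.divide on 2.x, so downstream code does not have to branch on the version itself.

import tensorflow as tf

ratio = div(tf.constant(6.0), tf.constant(4.0), name='ratio')
# 1.x: a graph tensor named 'ratio'; 2.x: an eager tensor holding 1.5
print(ratio)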
Example #44
0
def ssd_losses(logits,
               localisations,
               glabels,
               glocalisations,
               gscores,
               match_threshold=0.5,
               negative_ratio=3.,
               alpha=0.2,
               label_smoothing=0.,
               batch_size=16,
               scope=None):
    '''Loss functions for training the text box network.
	Arguments:
	  logits: (list of) predictions logits Tensors;                x
	  localisations: (list of) localisations Tensors;              l
	  glocalisations: (list of) groundtruth localisations Tensors; g
	  gscores: (list of) groundtruth score Tensors;                c
	'''
    # from ssd loss
    with tf.name_scope(scope, 'txt_losses'):
        lshape = tfe.get_shape(logits[0], 5)
        num_classes = lshape[-1]
        batch_size = batch_size

        l_cross_pos = []
        l_cross_neg = []
        l_loc = []

        # Flatten out all vectors!
        flogits = logits
        fgscores = gscores
        flocalisations = localisations
        fglocalisations = glocalisations
        fglabels = glabels
        # for i in range(len(logits)):
        # 	flogits.append(tf.reshape(logits[i], [-1, num_classes]))
        # 	fgscores.append(tf.reshape(gscores[i], [-1]))
        # 	fglabels.append(tf.reshape(glabels[i], [-1]))
        # 	flocalisations.append(tf.reshape(localisations[i], [-1, 12]))
        # 	fglocalisations.append(tf.reshape(glocalisations[i], [-1, 12]))
        # And concat the crap!
        glabels = tf.concat(fglabels, axis=1)
        logits = tf.concat(flogits, axis=1)  # x
        gscores = tf.concat(fgscores, axis=1)  # c
        localisations = tf.concat(flocalisations, axis=1)  # l
        glocalisations = tf.concat(fglocalisations, axis=1)  # g
        dtype = logits.dtype

        # Compute positive matching mask...
        pmask = gscores > match_threshold  # positive mask
        # pmask = tf.concat(axis=0, values=[pmask[:tf.argmax(gscores, axis=0)], [True], pmask[tf.argmax(gscores, axis=0) + 1:]])

        ipmask = tf.cast(pmask, tf.int32)  # int positive mask
        fpmask = tf.cast(pmask, dtype)  # float positive mask
        n_positives = tf.reduce_sum(fpmask)  # calculate all number

        # Hard negative mining...
        # conf loss ??
        no_classes = tf.cast(pmask, tf.int32)
        predictions = slim.softmax(logits)  #
        nmask = tf.logical_and(tf.logical_not(pmask), gscores > -0.5)  #
        fnmask = tf.cast(nmask, dtype)
        nvalues = tf.where(nmask, predictions[:, :, 0], 1. - fnmask)
        nvalues_flat = tf.reshape(nvalues, [-1])
        # Number of negative entries to select.
        max_neg_entries = tf.cast(tf.reduce_sum(fnmask), tf.int32)
        n_neg = tf.cast(negative_ratio * n_positives, tf.int32) + batch_size
        n_neg = tf.minimum(n_neg, max_neg_entries)

        val, idxes = tf.nn.top_k(-nvalues_flat, k=n_neg)
        max_hard_pred = -val[-1]
        # Final negative mask.
        nmask = tf.logical_and(nmask, nvalues < max_hard_pred)
        fnmask = tf.cast(nmask, dtype)
        inmask = tf.cast(nmask, tf.int32)
        # Add cross-entropy loss.
        # logits [batch_size, num_classes] labels [batch_size] ~ 0,num_class
        with tf.name_scope('cross_entropy_pos'):
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=glabels)
            loss = tf.div(tf.reduce_sum(loss * fpmask),
                          batch_size,
                          name='value')
            tf.losses.add_loss(loss)
            l_cross_pos.append(loss)

        with tf.name_scope('cross_entropy_neg'):
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=no_classes)
            loss = tf.div(tf.reduce_sum(loss * fnmask),
                          batch_size,
                          name='value')
            tf.losses.add_loss(loss)
            l_cross_neg.append(loss)

        # Add localization loss: smooth L1, L2, ...
        with tf.name_scope('localization'):
            # Weights Tensor: positive mask + random negative.
            weights = tf.expand_dims(alpha * fpmask, axis=-1)
            # localisations = tf.Print(localisations, [localisations, tf.shape(localisations)], "pre is:         ", summarize=20)
            # glocalisations = tf.Print(glocalisations, [glocalisations,  tf.shape(glocalisations)], "gt is :         ",summarize=20)
            loss = custom_layers.abs_smooth(localisations - glocalisations)
            loss = tf.div(tf.reduce_sum(loss * weights),
                          batch_size,
                          name='value')
            tf.losses.add_loss(loss)
            l_loc.append(loss)

        with tf.name_scope('total'):
            total_cross_pos = tf.add_n(l_cross_pos, 'cross_entropy_pos')
            total_cross_neg = tf.add_n(l_cross_neg, 'cross_entropy_neg')
            total_cross = tf.add(total_cross_pos, total_cross_neg,
                                 'cross_entropy')
            total_loc = tf.add_n(l_loc, 'localization')

            # Add to EXTRA LOSSES TF.collection
            tf.add_to_collection('EXTRA_LOSSES', total_cross_pos)
            tf.add_to_collection('EXTRA_LOSSES', total_cross_neg)
            tf.add_to_collection('EXTRA_LOSSES', total_cross)
            tf.add_to_collection('EXTRA_LOSSES', total_loc)
Example #45
0
def get_losses(logits,
               localisations,
               gclasses,
               glocalisations,
               gscores,
               match_threshold=0,
               negative_ratio=2.5,
               alpha=1.,
               scope=None):
    """Loss functions for training the SSD 300 VGG network.

    This function defines the different loss components of the SSD, and
    adds them to the TF loss collection.

    Arguments:
      logits: (list of) predictions logits Tensors;
      localisations: (list of) localisations Tensors;
      gclasses: (list of) groundtruth labels Tensors;
      glocalisations: (list of) groundtruth localisations Tensors;
      gscores: (list of) groundtruth score Tensors;
    """
    with tf.name_scope(scope, 'ssd_losses'):
        lshape = tfe.get_shape(logits[0], 5)
        num_classes = lshape[-1]
        # batch_size = lshape[0]

        # Flatten out all vectors!
        flogits = []
        fgclasses = []
        fgscores = []
        flocalisations = []
        fglocalisations = []
        for i in range(len(logits)):
            flogits.append(tf.reshape(logits[i], [-1, num_classes]))
            fgclasses.append(tf.reshape(gclasses[i], [-1]))
            fgscores.append(tf.reshape(gscores[i], [-1]))
            flocalisations.append(tf.reshape(localisations[i], [-1, 4]))
            fglocalisations.append(tf.reshape(glocalisations[i], [-1, 4]))
        # And concat the crap!
        logits = tf.concat(flogits, axis=0)
        gclasses = tf.concat(fgclasses, axis=0)
        gscores = tf.concat(fgscores, axis=0)
        localisations = tf.concat(flocalisations, axis=0)
        glocalisations = tf.concat(fglocalisations, axis=0)
        dtype = logits.dtype

        # Compute positive matching mask...
        pmask = gclasses > match_threshold
        fpmask = tf.cast(pmask, dtype)
        n_positives = tf.reduce_sum(fpmask)

        # Hard negative mining...
        # for no_classes, we only care that a false positive's label is 0,
        # which is why pmask suffices for our needs
        no_classes = tf.cast(pmask, tf.int32)
        predictions = slim.softmax(logits)
        nmask = tf.logical_not(pmask)

        fnmask = tf.cast(nmask, dtype)
        nvalues = tf.where(nmask, predictions[:, 0], 1. - fnmask)
        nvalues_flat = tf.reshape(nvalues, [-1])
        # Number of negative entries to select.
        max_neg_entries = tf.cast(tf.reduce_sum(fnmask), tf.int32)

        n_neg = tf.cast(negative_ratio * n_positives, tf.int32)
        n_neg = tf.minimum(n_neg, max_neg_entries)
        # avoid n_neg being zero, which would cause an error in top_k later on
        n_neg = tf.maximum(n_neg, 1)

        val, idxes = tf.nn.top_k(-nvalues_flat, k=n_neg)
        max_hard_pred = -val[-1]
        # Final negative mask, hard negative mining
        nmask = tf.logical_and(nmask, nvalues <= max_hard_pred)
        fnmask = tf.cast(nmask, dtype)

        # Add cross-entropy loss.
        with tf.name_scope('cross_entropy_pos'):
            total_cross_pos = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=gclasses)
            total_cross_pos = tf.reduce_sum(total_cross_pos * fpmask,
                                            name="cross_entropy_pos")
            tf.losses.add_loss(total_cross_pos)

        with tf.name_scope('cross_entropy_neg'):
            total_cross_neg = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=no_classes)
            total_cross_neg = tf.reduce_sum(total_cross_neg * fnmask,
                                            name="cross_entropy_neg")
            tf.losses.add_loss(total_cross_neg)

        # Add localization loss: smooth L1, L2, ...
        with tf.name_scope('localization'):
            # Weights Tensor: positive mask + random negative.
            weights = tf.expand_dims(alpha * fpmask, axis=-1)
            total_loc = custom_layers.abs_smooth_2(localisations -
                                                   glocalisations)
            total_loc = tf.reduce_sum(total_loc * weights, name="localization")
            tf.losses.add_loss(total_loc)

        total_cross = tf.add(total_cross_pos, total_cross_neg, 'cross_entropy')

        # Add to EXTRA LOSSES TF.collection
        tf.add_to_collection('EXTRA_LOSSES', total_cross_pos)
        tf.add_to_collection('EXTRA_LOSSES', total_cross_neg)
        tf.add_to_collection('EXTRA_LOSSES', total_cross)
        tf.add_to_collection('EXTRA_LOSSES', total_loc)

        # stick with the original paper in terms of defining the model loss
        model_loss = tf.get_collection(tf.GraphKeys.LOSSES)
        model_loss = tf.add_n(model_loss)
        model_loss = array_ops.where(tf.equal(n_positives, 0),
                                     array_ops.zeros_like(model_loss),
                                     tf.div(1.0, n_positives) * model_loss)
        # Add regularization loss
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        regularization_loss = tf.add_n(regularization_losses,
                                       name='regularization_loss')

        # if model loss is zero, no need to do gradient update on this batch
        total_loss = array_ops.where(tf.equal(n_positives, 0),
                                     array_ops.zeros_like(model_loss),
                                     tf.add(model_loss, regularization_loss))

        # debugging info
        tf.summary.scalar("postive_num", n_positives)
        tf.summary.scalar("negative_num", n_neg)
        tf.summary.scalar("regularization_loss", regularization_loss)
        #             with tf.name_scope('variables_loc'):
        #                 selected_p = tf.boolean_mask(glocalisations, pmask)
        #                 p_mean, p_variance = tf.nn.moments(selected_p, [0])
        #                 tf.summary.scalar("mean_cx", p_mean[0])
        #                 tf.summary.scalar("mean_cy", p_mean[1])
        #                 tf.summary.scalar("mean_w", p_mean[2])
        #                 tf.summary.scalar("mean_h", p_mean[3])
        #
        #                 tf.summary.scalar("var_cx", p_variance[0])
        #                 tf.summary.scalar("var_cy", p_variance[1])
        #                 tf.summary.scalar("var_w", p_variance[2])
        #                 tf.summary.scalar("var_h", p_variance[3])

        return total_loss
Example #46
0
def log_loss_(label, difference_):
    predicts = difference_
    labels_ = tf.div(tf.add(label, 1), 2)
    loss_ = tf.losses.log_loss(labels=labels_, predictions=predicts)
    return loss_
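
The remapping above shifts {-1, +1} targets to {0, 1} with (label + 1) / 2 so they can be fed to tf.losses.log_loss. A tiny sketch with made-up labels, assuming TensorFlow 1.x:

import tensorflow as tf

label = tf.constant([-1.0, 1.0, -1.0])
labels_01 = tf.div(tf.add(label, 1.0), 2.0)

with tf.Session() as sess:
    print(sess.run(labels_01))  # [0. 1. 0.]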
Example #47
0
def build_graph(mode, config, sequence_example_file=None):
    """Builds the TensorFlow graph.

  Args:
    mode: 'train', 'eval', or 'generate'. Only mode related ops are added to
        the graph.
    config: A MelodyRnnConfig containing the MelodyEncoderDecoder and HParams to
        use.
    sequence_example_file: A string path to a TFRecord file containing
        tf.train.SequenceExample protos. Only needed for training and
        evaluation.

  Returns:
    A tf.Graph instance which contains the TF ops.

  Raises:
    ValueError: If mode is not 'train', 'eval', or 'generate', or if
        sequence_example_file does not match a file when mode is 'train' or
        'eval'.
  """
    if mode not in ('train', 'eval', 'generate'):
        raise ValueError("The mode parameter must be 'train', 'eval', "
                         "or 'generate'. The mode parameter was: %s" % mode)

    hparams = config.hparams
    encoder_decoder = config.encoder_decoder

    tf.logging.info('hparams = %s', hparams.values())

    input_size = encoder_decoder.input_size
    num_classes = encoder_decoder.num_classes
    no_event_label = encoder_decoder.default_event_label

    with tf.Graph().as_default() as graph:
        inputs, labels, lengths, = None, None, None
        state_is_tuple = True

        if mode == 'train' or mode == 'eval':
            inputs, labels, lengths = magenta.common.get_padded_batch(
                [sequence_example_file], hparams.batch_size, input_size)

        elif mode == 'generate':
            inputs = tf.placeholder(tf.float32,
                                    [hparams.batch_size, None, input_size])
            # If state_is_tuple is True, the output RNN cell state will be a tuple
            # instead of a tensor. During training and evaluation this improves
            # performance. However, during generation, the RNN cell state is fed
            # back into the graph with a feed dict. Feed dicts require passed in
            # values to be tensors and not tuples, so state_is_tuple is set to False.
            state_is_tuple = False

        cells = []
        for num_units in hparams.rnn_layer_sizes:
            cell = tf.nn.rnn_cell.BasicLSTMCell(num_units,
                                                state_is_tuple=state_is_tuple)
            cell = tf.nn.rnn_cell.DropoutWrapper(
                cell, output_keep_prob=hparams.dropout_keep_prob)
            cells.append(cell)

        cell = tf.nn.rnn_cell.MultiRNNCell(cells,
                                           state_is_tuple=state_is_tuple)
        if hparams.attn_length:
            cell = tf.contrib.rnn.AttentionCellWrapper(
                cell, hparams.attn_length, state_is_tuple=state_is_tuple)
        initial_state = cell.zero_state(hparams.batch_size, tf.float32)

        outputs, final_state = tf.nn.dynamic_rnn(cell,
                                                 inputs,
                                                 lengths,
                                                 initial_state,
                                                 parallel_iterations=1,
                                                 swap_memory=True)

        outputs_flat = tf.reshape(outputs, [-1, hparams.rnn_layer_sizes[-1]])
        logits_flat = tf.contrib.layers.linear(outputs_flat, num_classes)

        if mode == 'train' or mode == 'eval':
            if hparams.skip_first_n_losses:
                logits = tf.reshape(logits_flat,
                                    [hparams.batch_size, -1, num_classes])
                logits = logits[:, hparams.skip_first_n_losses:, :]
                logits_flat = tf.reshape(logits, [-1, num_classes])
                labels = labels[:, hparams.skip_first_n_losses:]

            labels_flat = tf.reshape(labels, [-1])
            softmax_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits_flat, labels_flat)
            loss = tf.reduce_mean(softmax_cross_entropy)
            perplexity = tf.reduce_mean(tf.exp(softmax_cross_entropy))

            correct_predictions = tf.to_float(
                tf.nn.in_top_k(logits_flat, labels_flat, 1))
            accuracy = tf.reduce_mean(correct_predictions) * 100

            event_positions = tf.to_float(
                tf.not_equal(labels_flat, no_event_label))
            event_accuracy = tf.truediv(
                tf.reduce_sum(tf.mul(correct_predictions, event_positions)),
                tf.reduce_sum(event_positions)) * 100

            no_event_positions = tf.to_float(
                tf.equal(labels_flat, no_event_label))
            no_event_accuracy = tf.truediv(
                tf.reduce_sum(tf.mul(correct_predictions, no_event_positions)),
                tf.reduce_sum(no_event_positions)) * 100

            global_step = tf.Variable(0, trainable=False, name='global_step')

            tf.add_to_collection('loss', loss)
            tf.add_to_collection('perplexity', perplexity)
            tf.add_to_collection('accuracy', accuracy)
            tf.add_to_collection('global_step', global_step)

            summaries = [
                tf.scalar_summary('loss', loss),
                tf.scalar_summary('perplexity', perplexity),
                tf.scalar_summary('accuracy', accuracy),
                tf.scalar_summary('event_accuracy', event_accuracy),
                tf.scalar_summary('no_event_accuracy', no_event_accuracy),
            ]

            if mode == 'train':
                learning_rate = tf.train.exponential_decay(
                    hparams.initial_learning_rate,
                    global_step,
                    hparams.decay_steps,
                    hparams.decay_rate,
                    staircase=True,
                    name='learning_rate')

                opt = tf.train.AdamOptimizer(learning_rate)
                params = tf.trainable_variables()
                gradients = tf.gradients(loss, params)
                clipped_gradients, _ = tf.clip_by_global_norm(
                    gradients, hparams.clip_norm)
                train_op = opt.apply_gradients(zip(clipped_gradients, params),
                                               global_step)
                tf.add_to_collection('learning_rate', learning_rate)
                tf.add_to_collection('train_op', train_op)

                summaries.append(
                    tf.scalar_summary('learning_rate', learning_rate))

            if mode == 'eval':
                summary_op = tf.merge_summary(summaries)
                tf.add_to_collection('summary_op', summary_op)

        elif mode == 'generate':
            temperature = tf.placeholder(tf.float32, [])
            softmax_flat = tf.nn.softmax(
                tf.div(logits_flat, tf.fill([num_classes], temperature)))
            softmax = tf.reshape(softmax_flat,
                                 [hparams.batch_size, -1, num_classes])

            tf.add_to_collection('inputs', inputs)
            tf.add_to_collection('initial_state', initial_state)
            tf.add_to_collection('final_state', final_state)
            tf.add_to_collection('temperature', temperature)
            tf.add_to_collection('softmax', softmax)

    return graph
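
In generate mode the logits are divided by a temperature before the softmax: temperatures above 1 flatten the distribution and temperatures below 1 sharpen it. A standalone sketch, assuming TensorFlow 1.x and toy logits:

import tensorflow as tf

logits = tf.constant([2.0, 1.0, 0.0])
temperature = tf.placeholder(tf.float32, [])
softmax = tf.nn.softmax(tf.div(logits, temperature))

with tf.Session() as sess:
    print(sess.run(softmax, {temperature: 1.0}))  # peaked:  ~[0.665 0.245 0.090]
    print(sess.run(softmax, {temperature: 5.0}))  # flatter: ~[0.40  0.33  0.27]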
Example #48
0
hidden3num=2000
W3 = tf.Variable(tf.truncated_normal([hidden2num, hidden3num], stddev=0.1))
b3 = tf.Variable(tf.zeros([hidden3num]) + 0.1)
L3 = tf.matmul(L2, W3) + b3


hidden4num=2000
W4=tf.Variable(tf.truncated_normal([hidden3num,hidden4num], stddev=0.1))
b4=tf.Variable(tf.zeros([hidden4num])+0.1)

prediction=tf.matmul(L3,W4)+b4

# loss function and training method
# loss = tf.reduce_max(tf.square(tf.subtract(Y,prediction)))
loss = tf.reduce_mean(tf.abs(tf.subtract(np.float32(1), tf.div(prediction, Y))))
maxLoss = tf.reduce_max(tf.abs(tf.subtract(np.float32(1), tf.div(prediction, Y))))
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
init = tf.global_variables_initializer()
minAccuracy = tf.reduce_min(prediction / Y)


f=open("out.txt", "w")
sess.run(init)
for __epo in range(10000):
    for __batch in range(Nbatch):
        # feed samples and labels into the program batch by batch
        if __batch == Nbatch - 1:  # put the leftover samples in the last batch
            batch_xs = xTrain[__batch * batch_size:]
            batch_ys = yTrain[__batch * batch_size:]
        else:
Example #49
0
 def FProp(self, theta, current_step):
     p = self.params
     num_decays = tf.floor(
         tf.div(tf.cast(current_step, tf.float32),
                float(p.num_steps_per_decay)))
     return tf.pow(p.decay, num_decays)
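
The schedule above multiplies the rate by decay once every num_steps_per_decay steps, via floor(step / num_steps_per_decay). The same arithmetic in plain Python with made-up numbers:

decay, num_steps_per_decay = 0.5, 100
for step in (0, 99, 100, 250):
    print(step, decay ** (step // num_steps_per_decay))
# 0 -> 1.0, 99 -> 1.0, 100 -> 0.5, 250 -> 0.25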
def accuracy(logits, labels):
    predict = tf.equal(tf.argmax(logits, axis=1, output_type=tf.int32), labels)
    right_num = tf.reduce_sum(tf.cast(predict, tf.int32))
    total_num = labels.shape[0]
    return tf.div(tf.cast(right_num, tf.float32), total_num)
Example #51
0
    def __init__(self,
                 vocab_size,
                 seq_length,
                 eos_idx,
                 reference=None,
                 hypothesis=None,
                 input_type=ONEHOT_HARD,
                 ngram_lengths=None,
                 parallel_iterations=1,
                 combinators=None):

        # either all inputs are given value or none of them is
        inputs = [reference, hypothesis]
        assert None not in inputs or all(i is None for i in inputs)
        assert input_type in [ONEHOT_HARD, ONEHOT_SOFT, TOKENS]
        self.input_type = input_type
        self.parallel_iterations = parallel_iterations
        self.combinators = combinators

        if reference is None:  # hypothesis is also None (asserted above)
            if input_type in [ONEHOT_SOFT, ONEHOT_HARD]:
                onehot_shape = (None, seq_length, vocab_size)
                hypothesis = tf.placeholder(tf.float32,
                                            shape=onehot_shape,
                                            name='hypothesis')
                reference = tf.placeholder(tf.float32,
                                           shape=onehot_shape,
                                           name='reference')
            elif input_type == TOKENS:
                hypothesis = tf.placeholder(tf.int32,
                                            shape=(None, seq_length),
                                            name='hypothesis')
                reference = tf.placeholder(tf.int32,
                                           shape=(None, seq_length),
                                           name='reference')

        if input_type in [ONEHOT_SOFT, ONEHOT_HARD]:
            reference_onehot = reference
            hypothesis_onehot = hypothesis
        elif input_type == TOKENS:
            reference_onehot = tf.one_hot(reference,
                                          depth=vocab_size,
                                          axis=-1,
                                          dtype=tf.float32)
            hypothesis_onehot = tf.one_hot(hypothesis,
                                           depth=vocab_size,
                                           axis=-1,
                                           dtype=tf.float32)

        if ngram_lengths is None:
            ngram_lengths = [1, 2, 3, 4]

        self.vocab_size = vocab_size
        self.seq_length = seq_length
        self.hypothesis = hypothesis
        self.reference = reference
        self.hypothesis_onehot = hypothesis_onehot
        self.reference_onehot = reference_onehot

        # count the total ngram count in the reference, based on their length
        ref_length_mask = self.compute_length_mask('ref_mask',
                                                   reference_onehot,
                                                   self.seq_length, eos_idx)
        ref_lengths = tf.reduce_sum(ref_length_mask, axis=1)
        tpfn = sum([
            tf.maximum(ref_lengths - float(n - 1), 0.) for n in ngram_lengths
        ])
        self.ref_lengths = ref_lengths

        # total number of n-grams in the hypothesis, per n-gram length
        hyp_length_mask = self.compute_length_mask('hyp_mask',
                                                   hypothesis_onehot,
                                                   self.seq_length, eos_idx)
        hyp_lengths = tf.reduce_sum(hyp_length_mask, axis=1)

        tpfp = sum([
            tf.maximum(hyp_lengths - float(n - 1), 0.) for n in ngram_lengths
        ])
        self.tpfn = tpfn
        self.tpfp = tpfp
        self.hyp_lengths = hyp_lengths

        # count the ngram matches between hypothesis and reference
        self.ngrams = self.build_ngrams(ngram_lengths, reference_onehot,
                                        ref_length_mask, hypothesis_onehot,
                                        hyp_length_mask)

        n_match = tf.reduce_sum(self.ngrams, axis=1)
        self.sentence_n_match = n_match

        dividend = tf.maximum(tpfp, tpfn)
        self.sentence_n_all = dividend

        # avoid division by zero: where the denominator is 0, score 0/1 = 0
        ones = tf.ones_like(n_match)
        zeros = tf.zeros_like(n_match)
        fixed_nmatch = tf.where(dividend > 0., n_match, zeros)
        fixed_dividend = tf.where(dividend > 0., dividend, ones)
        self.sentence_gleu_score = tf.div(fixed_nmatch, fixed_dividend)

        self.batch_n_match = tf.reduce_sum(n_match)
        self.batch_n_all = tf.reduce_sum(dividend)
        self.batch_gleu_score = self.batch_n_match / (1e-7 + self.batch_n_all)
        self.batch_score = self.batch_gleu_score
        self.sentence_score = self.sentence_gleu_score

        # store result and byproduct tensors for easier debugging
        self.results = {
            'gleu': self.sentence_gleu_score,
            'tpfp': tpfp,
            'tpfn': tpfn,
            'nmatch': n_match
        }
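For orientation, the graph above computes sentence-level GLEU as (matched n-grams) / max(total hypothesis n-grams, total reference n-grams). A minimal pure-Python sketch of that formula on token lists (illustrative only; it does not reproduce the class's masking or placeholders):

from collections import Counter

def sentence_gleu(reference, hypothesis, ngram_lengths=(1, 2, 3, 4)):
    # n-gram matches divided by max(total hyp n-grams, total ref n-grams)
    def ngrams(tokens, n):
        return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))
    matches = total_ref = total_hyp = 0
    for n in ngram_lengths:
        matches += sum((ngrams(reference, n) & ngrams(hypothesis, n)).values())
        total_ref += max(len(reference) - n + 1, 0)
        total_hyp += max(len(hypothesis) - n + 1, 0)
    denom = max(total_ref, total_hyp)
    return matches / denom if denom > 0 else 0.0

# sentence_gleu("the cat sat".split(), "the cat sat".split()) -> 1.0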
Example #52
0
import tensorflow as tf

graph = tf.Graph()

with graph.as_default():
    in_1 = tf.placeholder(tf.float32,shape=[],name="input_a")
    in_2 = tf.placeholder(tf.float32,shape=[],name="input_b")
    const = tf.constant(3,dtype=tf.float32,name="static_value")

    with tf.name_scope("Transformation"):

        with tf.name_scope("A"):
            A_mul = tf.multiply(in_1,const)
            A_out = tf.subtract(A_mul,in_1)
        
        with tf.name_scope("B"):
            B_mul = tf.multiply(in_2,const)
            B_out = tf.subtract(B_mul,in_2)

        with tf.name_scope("C"):
            C_div = tf.div(A_out,B_out)
            C_out = tf.add(C_div,const)

        with tf.name_scope("D"):
            D_div = tf.div(B_out,A_out)
            D_out = tf.add(D_div,const)
        
    out = tf.maximum(C_out,D_out)
    
    writer = tf.summary.FileWriter("./namescope_graph",graph=graph)
    writer.close()
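To sanity-check the graph above, it can be run in a session (and the written event file inspected by pointing TensorBoard at the ./namescope_graph directory); a minimal sketch with arbitrary feed values:

with tf.Session(graph=graph) as sess:
    # A_out = 5*3-5 = 10, B_out = 2*3-2 = 4, so C_out = 10/4+3 = 5.5 and D_out = 4/10+3 = 3.4
    print(sess.run(out, feed_dict={in_1: 5.0, in_2: 2.0}))  # -> 5.5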
Example #53
0
    def _init_graph(self):
        # Initialize the TensorFlow computation graph: inputs, variables, model, loss and optimizer
        
        self.graph = tf.Graph()
        with self.graph.as_default():  # uses CPU by default
            
            tf.set_random_seed(self.random_seed)
            # Input data
            self.train_features = tf.placeholder(tf.int32, shape=[None, None], name="train_features")  # None * features_M
            self.train_labels = tf.placeholder(tf.float32, shape=[None, 1], name="train_labels")  # None * 1
            self.dropout_keep = tf.placeholder(tf.float32, shape=[None], name="dropout_keep")
            self.train_phase = tf.placeholder(tf.bool, name="train_phase")

            # Variables
            self.weights = self._initialize_weights()        
        
        
            # Model definition
            self.nonzero_embeddings = tf.nn.embedding_lookup(self.weights['feature_embeddings'], self.train_features) # None * M' * K; M' = fields, K = em_factor
            # Pair-wise Interaction Layer
            element_wise_product_list = []
            for i in range(0, self.fields):
                for j in range(i+1, self.fields):
                    element_wise_product_list.append(tf.multiply(self.nonzero_embeddings[:,i,:], self.nonzero_embeddings[:,j,:]))
            # stack the list into a single tensor; the list holds M'*(M'-1)/2 tensors of shape None * K
            self.element_wise_product = tf.stack(element_wise_product_list) # (M'*(M'-1)/2) * None * K
            self.element_wise_product = tf.transpose(self.element_wise_product, perm=[1,0,2], name="element_wise_product") # None * (M'*(M'-1)/2) * K
            self.interactions = tf.reduce_sum(self.element_wise_product, 2, name="interactions")  # None * (M'*(M'-1)/2)
            
            # _________ Attention mechanism _____________
            num_interactions = int(self.fields*(self.fields-1)/2)
            if self.attention:
                self.attention_mul = tf.reshape(tf.matmul(tf.reshape(self.element_wise_product, shape=[-1, self.em_factor]), \
                    self.weights['attention_W']), shape=[-1, num_interactions, self.attention_factor])
                # the inner reshape flattens None * (M'*(M'-1)/2) * K into (None*(M'*(M'-1)/2)) * K, because attention_W is a 2-D tensor
                # the outer reshape then restores the shape to None * (M'*(M'-1)/2) * attention_factor
                self.attention_exp = tf.exp(tf.reduce_sum(tf.multiply(self.weights['attention_p'], tf.nn.relu(self.attention_mul + \
                    self.weights['attention_b'])), 2, keep_dims=True)) # None * (M'*(M'-1)/2) * 1
                self.attention_sum = tf.reduce_sum(self.attention_exp, 1, keep_dims=True) # None * 1 * 1
                self.attention_out = tf.div(self.attention_exp, self.attention_sum, name="attention_out") # None * (M'*(M'-1)/2) * 1
            # the attention sub-network uses neither dropout nor BN; L2 regularization is applied to its weights
            
            # _________ Attention-based pooling layer _____________
            if self.attention:
                self.AFM = tf.reduce_sum(tf.multiply(self.attention_out, self.element_wise_product), 1, name="afm") # None * K
            else:
                self.AFM = tf.reduce_sum(self.element_wise_product, 1, name="afm") # None * K
            
            # apply batch normalization to the attention-pooled output
            if self.bn:
                self.AFM = self.batch_norm_layer(self.AFM, train_phase=self.train_phase, scope_bn='bn_fm')                
            # apply dropout to the attention-pooled output
            self.AFM = tf.nn.dropout(self.AFM, self.dropout_keep[0]) # dropout
            
            # ___________ Output layer ___________________
            self.Bilinear = tf.matmul(self.AFM, self.weights['prediction']) # None * 1
            #Bilinear = tf.reduce_sum(self.Bilinear, 1, keep_dims=True)  # None * 1
            self.Feature_bias = tf.reduce_sum(tf.nn.embedding_lookup(self.weights['feature_bias'], self.train_features) , 1)  # None * 1
            Bias = self.weights['bias'] * tf.ones_like(self.train_labels)  # None * 1
            self.out = tf.add_n([self.Bilinear, self.Feature_bias, Bias], name="out_afm")  # None * 1   
        
            # Compute the loss
            if self.attention and self.lamda_attention > 0:
                self.loss = tf.nn.l2_loss(tf.subtract(self.train_labels, self.out)) + tf.contrib.layers.l2_regularizer(self.lamda_attention)(self.weights['attention_W'])  # regularizer
            else:
                self.loss = tf.nn.l2_loss(tf.subtract(self.train_labels, self.out))

            if self.lamda_em > 0:
                self.loss = self.loss + tf.contrib.layers.l2_regularizer(self.lamda_em)(self.weights['feature_embeddings'])  # regularizer
                
            # Optimizer
            if self.optimizer_type == 'AdamOptimizer':
                self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8).minimize(self.loss)
            elif self.optimizer_type == 'AdagradOptimizer':
                self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate, initial_accumulator_value=1e-8).minimize(self.loss)
            elif self.optimizer_type == 'GradientDescentOptimizer':
                self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
            elif self.optimizer_type == 'MomentumOptimizer':
                self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).minimize(self.loss)

            # Initialization
            self.saver = tf.train.Saver()
            init = tf.global_variables_initializer()
            self.sess = tf.Session()
            self.sess.run(init)

            # Number of parameters
            total_parameters = 0
            for variable in self.weights.values():
                shape = variable.get_shape() # shape is an array of tf.Dimension
                variable_parameters = 1
                for dim in shape:
                    variable_parameters *= dim.value
                total_parameters += variable_parameters
            if self.verbose > 0:
                print ("#params: %d" %total_parameters)              
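The attention branch above is a softmax over the M'*(M'-1)/2 pairwise interaction vectors followed by a weighted sum. A minimal NumPy sketch of that pooling step for a single example (the weight shapes are illustrative, not the class's initializer):

import numpy as np

def afm_attention_pool(element_wise_product, W, b, p):
    # element_wise_product: (num_pairs, K); W: (K, t); b: (t,); p: (t,)
    scores = np.maximum(element_wise_product @ W + b, 0.0) @ p    # ReLU projection -> (num_pairs,)
    weights = np.exp(scores) / np.sum(np.exp(scores))             # softmax over the pairs (attention_out)
    return (weights[:, None] * element_wise_product).sum(axis=0)  # attention-weighted sum -> (K,), the AFM vector

# e.g. fields=4 gives 6 pairs; with K=8 and attention_factor t=4:
# pooled = afm_attention_pool(np.random.randn(6, 8), np.random.randn(8, 4), np.zeros(4), np.ones(4))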
Example #54
0
    def inference(self):
        self.input_x1_embed = tf.nn.embedding_lookup(self.Embedding,
                                                     self.input_x1)
        self.input_x2_embed = tf.nn.embedding_lookup(self.Embedding,
                                                     self.input_x2)

        with tf.variable_scope("lstm") as scope:
            lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_size)
            lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_size)
            outputs1, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell,
                                                          lstm_bw_cell,
                                                          self.input_x1_embed,
                                                          dtype=tf.float32)
            self.outputs1_rnn = tf.concat(outputs1, axis=2)

            scope.reuse_variables()
            outputs2, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell,
                                                          lstm_bw_cell,
                                                          self.input_x2_embed,
                                                          dtype=tf.float32)

            self.outputs2_rnn = tf.concat(outputs2, axis=2)

        with tf.variable_scope("cos"):
            self.dot_wise = tf.matmul(self.outputs1_rnn,
                                      self.outputs2_rnn,
                                      transpose_b=True,
                                      name="matual")
            self.norm1 = tf.sqrt(tf.reduce_sum(tf.square(self.outputs1_rnn),
                                               axis=2,
                                               keepdims=True),
                                 name="norm1")
            self.norm2 = tf.sqrt(tf.reduce_sum(tf.square(self.outputs2_rnn),
                                               axis=2,
                                               keepdims=True),
                                 name="norm1")
            self.norm = tf.matmul(self.norm1,
                                  self.norm2,
                                  transpose_b=True,
                                  name="matual_norm")
            self.cos = tf.div(self.dot_wise, self.norm, name="cos")
            self.kmax = tf.nn.top_k(self.cos, k=8, name="k-max-pool")
            self.inputs = tf.layers.flatten(self.kmax[0], name="flatten")
        with tf.variable_scope("outputs"):
            self.fc1 = tf.layers.dense(self.inputs, 256, activation=tf.nn.relu)
            self.fc1 = tf.nn.dropout(self.fc1,
                                     keep_prob=self.dropout_keep_prob)

            self.fc2 = tf.layers.dense(self.fc1, 128, activation=tf.nn.relu)
            self.fc2 = tf.nn.dropout(self.fc2,
                                     keep_prob=self.dropout_keep_prob)

            self.fc3 = tf.layers.dense(self.fc2, 32, activation=tf.nn.relu)
            self.fc3 = tf.nn.dropout(self.fc3,
                                     keep_prob=self.dropout_keep_prob)

            self.logits = tf.squeeze(tf.layers.dense(self.fc3,
                                                     1,
                                                     activation=tf.nn.sigmoid),
                                     axis=1,
                                     name="predict")
        return self.logits
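The "cos" scope above builds a timestep-by-timestep cosine-similarity matrix between the two BiLSTM outputs before k-max pooling. A minimal NumPy sketch of that similarity matrix (batch dimension omitted; the small epsilon guarding zero norms is an addition, the TF code above divides directly):

import numpy as np

def cosine_matrix(out1, out2, eps=1e-8):
    # out1: (T1, 2H), out2: (T2, 2H) -> (T1, T2) matrix of cosine similarities
    dot = out1 @ out2.T
    norms = np.linalg.norm(out1, axis=1, keepdims=True) @ np.linalg.norm(out2, axis=1, keepdims=True).T
    return dot / (norms + eps)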
Example #55
0
    def build_model(self):
        #placeholder
        self.u = tf.placeholder(tf.int32, [
            None,
        ])  # user idx [B]
        self.hist_i = tf.placeholder(tf.int32,
                                     [None, None])  # history click[B, T]
        self.sl = tf.placeholder(tf.int32, [
            None,
        ])  # history len [B]
        self.last = tf.placeholder(tf.int32, [
            None,
        ])  # last click[B]
        self.basic = tf.placeholder(
            tf.float32, [None, None])  #user basic feature[B,basic_size]
        self.sub_sample = tf.placeholder(
            tf.int32,
            [None, None])  # softmax layer (pos_click, neg_list) [B, sub_size]
        self.y = tf.placeholder(tf.float32, [None, None])  # label one hot[B]
        self.lr = tf.placeholder(tf.float64, [])

        #emb variable
        item_emb_w = tf.get_variable("item_emb_w",
                                     [self.item_count, self.embedding_size])
        item_b = tf.get_variable("item_b", [self.item_count],
                                 initializer=tf.constant_initializer(0.0))
        brand_emb_w = tf.get_variable("brand_emb_w",
                                      [self.brand_count, self.embedding_size])
        msort_emb_w = tf.get_variable("msort_emb_w",
                                      [self.msort_count, self.embedding_size])

        brand_list = tf.convert_to_tensor(self.brand_list, dtype=tf.int32)
        msort_list = tf.convert_to_tensor(self.msort_list, dtype=tf.int32)

        # history seq
        hist_b = tf.gather(brand_list, self.hist_i)
        hist_m = tf.gather(msort_list, self.hist_i)

        h_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, self.hist_i),
            tf.nn.embedding_lookup(brand_emb_w, hist_b),
            tf.nn.embedding_lookup(msort_emb_w, hist_m)
        ],
                          axis=2)
        # history mask
        mask = tf.sequence_mask(self.sl, tf.shape(h_emb)[1],
                                dtype=tf.float32)  #[B,T]
        mask = tf.expand_dims(mask, -1)  #[B,T,1]
        mask = tf.tile(mask, [1, 1, tf.shape(h_emb)[2]])  #[B,T,3*e]

        h_emb *= mask  #[B,T,3*e]
        hist = tf.reduce_sum(h_emb, 1)  #[B,3*e]
        hist = tf.div(hist,
                      tf.cast(
                          tf.tile(tf.expand_dims(self.sl, 1),
                                  [1, 3 * self.embedding_size]),
                          tf.float32))  #[B,3*e]
        #last
        last_b = tf.gather(brand_list, self.last)
        last_m = tf.gather(msort_list, self.last)
        l_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, self.last),
            tf.nn.embedding_lookup(brand_emb_w, last_b),
            tf.nn.embedding_lookup(msort_emb_w, last_m)
        ],
                          axis=1)
        #net input
        self.input = tf.concat([hist, l_emb], axis=-1)
        # print('',)

        # dd net
        bn = tf.layers.batch_normalization(inputs=self.input, name='b1')
        layer_1 = tf.layers.dense(bn, 1024, activation=tf.nn.relu, name='f1')
        layer_2 = tf.layers.dense(layer_1,
                                  512,
                                  activation=tf.nn.relu,
                                  name='f2')
        layer_3 = tf.layers.dense(layer_2,
                                  3 * self.embedding_size,
                                  activation=tf.nn.relu,
                                  name='f3')

        #softmax
        if self.is_training:
            sa_b = tf.gather(brand_list, self.sub_sample)
            sa_m = tf.gather(msort_list, self.sub_sample)

            sample_w = tf.concat([
                tf.nn.embedding_lookup(item_emb_w, self.sub_sample),
                tf.nn.embedding_lookup(brand_emb_w, sa_b),
                tf.nn.embedding_lookup(msort_emb_w, sa_m)
            ],
                                 axis=2)  #[B,sample,3*e]
            #sample_w=tf.nn.embedding_lookup(item_emb_w,self.sub_sample)
            sample_b = tf.nn.embedding_lookup(item_b,
                                              self.sub_sample)  #[B,sample]
            user_v = tf.expand_dims(layer_3, 1)  #[B,1,3*e]
            sample_w = tf.transpose(sample_w, perm=[0, 2, 1])  #[B,3*e,sample]
            self.logits = tf.squeeze(tf.matmul(user_v, sample_w),
                                     axis=1) + sample_b

            # Step variable
            self.global_step = tf.Variable(0,
                                           trainable=False,
                                           name='global_step')
            self.global_epoch_step = tf.Variable(0,
                                                 trainable=False,
                                                 name='global_epoch_step')
            self.global_epoch_step_op = tf.assign(self.global_epoch_step,
                                                  self.global_epoch_step + 1)
            '''
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                logits=self.logits,
                labels=self.y)
           )
        '''
            self.yhat = tf.nn.softmax(self.logits)

            self.loss = tf.reduce_mean(-self.y * tf.log(self.yhat + 1e-24))

            trainable_params = tf.trainable_variables()
            self.opt = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
            gradients = tf.gradients(self.loss, trainable_params)
            clip_gradients, _ = tf.clip_by_global_norm(gradients, 5)
            self.train_op = self.opt.apply_gradients(
                zip(clip_gradients, trainable_params),
                global_step=self.global_step)

        else:
            all_emb = tf.concat([
                item_emb_w,
                tf.nn.embedding_lookup(brand_emb_w, brand_list),
                tf.nn.embedding_lookup(msort_emb_w, msort_list)
            ],
                                axis=1)
            self.logits = tf.matmul(layer_3, all_emb,
                                    transpose_b=True) + item_b
            self.output = tf.nn.softmax(self.logits)
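The history pooling above is a masked mean: padded timesteps are zeroed by tf.sequence_mask and the summed embedding is divided by the true history length. A minimal NumPy sketch for a single user (batch dimension omitted):

import numpy as np

def masked_mean_history(h_emb, seq_len):
    # h_emb: (T, 3*e) history embeddings incl. padding; seq_len: true history length
    mask = (np.arange(h_emb.shape[0]) < seq_len)[:, None]   # (T, 1), like tf.sequence_mask
    return (h_emb * mask).sum(axis=0) / float(seq_len)      # matches hist = sum(h_emb * mask) / sl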
Example #56
0
    return tf.Variable(initial, name=name)


# create a bias variable of the given shape
def bias_variable(shape, name):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)


# the input images arrive as a matrix with 3072 columns (32*32*3)
# there are 10 label classes
# reshape back to the original image size
x = tf.placeholder(tf.float32, [None, 3072])
y_ = tf.placeholder(tf.float32, [None, 10])
x_image = tf.reshape(x, [-1, 32, 32, 3])
x_image = tf.div(x_image, 255.)

conv1_1 = tf.layers.conv2d(inputs=x_image,
                           filters=64,
                           kernel_size=[3, 3],
                           padding='same',
                           use_bias=True,
                           activation=tf.nn.relu)
#conv1_2: out_channels=64
conv1_2 = tf.layers.conv2d(inputs=conv1_1,
                           filters=64,
                           kernel_size=[3, 3],
                           padding='same',
                           use_bias=True,
                           activation=tf.nn.relu)
conv1_3 = tf.layers.conv2d(inputs=conv1_2,
Example #57
0
    def pathint_stabilization(self, adam_optimizer,
                              previous_weights_mu_minus_1):
        # Zenke method

        optimizer_task = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        small_omega_var = {}

        reset_small_omega_ops = []
        update_small_omega_ops = []
        update_big_omega_ops = []
        initialize_prev_weights_ops = []

        for var in self.variables:

            small_omega_var[var.op.name] = tf.Variable(tf.zeros(
                var.get_shape()),
                                                       trainable=False)
            reset_small_omega_ops.append(
                tf.assign(small_omega_var[var.op.name],
                          small_omega_var[var.op.name] * 0.0))
            update_big_omega_ops.append( tf.assign_add( self.big_omega_var[var.op.name], tf.div(tf.nn.relu(small_omega_var[var.op.name]), \
             (p.par['omega_xi'] + tf.square(var-previous_weights_mu_minus_1[var.op.name])))))

        # After each task is complete, call update_big_omega and reset_small_omega
        self.update_big_omega = tf.group(*update_big_omega_ops)

        # Reset_small_omega also makes a backup of the final weights, used as hook in the auxiliary loss
        self.reset_small_omega = tf.group(*reset_small_omega_ops)

        # This is called every batch
        with tf.control_dependencies([self.train_op]):
            self.delta_grads = adam_optimizer.return_delta_grads()
            self.gradients = optimizer_task.compute_gradients(self.total_loss)
            self.grads = adam_optimizer.return_delta_grads()
            for grad, var in self.gradients:
                update_small_omega_ops.append(
                    tf.assign_add(small_omega_var[var.op.name],
                                  -self.delta_grads[var.op.name] * grad))
            self.update_small_omega = tf.group(
                *update_small_omega_ops
            )  # 1) update small_omega after each train!
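For reference, update_big_omega_ops above follows the path-integral importance update from Zenke et al.: Omega += relu(omega) / (xi + (theta - theta_prev)^2), accumulated per weight after each task. A minimal NumPy sketch (xi=0.1 is illustrative; the source reads it from p.par['omega_xi']):

import numpy as np

def accumulate_big_omega(big_omega, small_omega, theta, theta_prev, xi=0.1):
    # per-weight importance update, as in update_big_omega_ops above
    return big_omega + np.maximum(small_omega, 0.0) / (xi + (theta - theta_prev) ** 2)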
Example #58
0
    def build_model(self):

        vox_real_ = tf.placeholder(tf.int32, [
            self.batch_size, self.vox_shape[0], self.vox_shape[1],
            self.vox_shape[2]
        ])
        vox_real = tf.one_hot(vox_real_, self.n_class)
        vox_real = tf.cast(vox_real, tf.float32)
        Z = tf.placeholder(tf.float32, [
            self.batch_size, self.start_vox_size[0], self.start_vox_size[1],
            self.start_vox_size[2], self.dim_z
        ])

        filter_bilateral = tf.placeholder(
            tf.float32, [self.batch_size] +
            [self.vox_shape[0], self.vox_shape[1], self.vox_shape[2], 4])
        mean, sigma = self.encoder(vox_real)
        Z_encode = mean

        #code_discriminator
        p_code_encode, h_code_encode = self.code_discriminator(Z_encode)
        p_code_real, h_code_real = self.code_discriminator(Z)

        code_encode_loss = tf.reduce_mean(
            tf.reduce_sum(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    h_code_encode, tf.ones_like(h_code_encode)), [1]))
        code_discrim_loss = tf.reduce_mean(tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(h_code_real, tf.ones_like(h_code_real)), [1]))\
         + tf.reduce_mean(tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(h_code_encode, tf.zeros_like(h_code_encode)), [1]))

        #reconstruction
        vox_gen_decode, _ = self.generate(Z_encode)

        batch_mean_vox_real = tf.reduce_mean(vox_real, [0, 1, 2, 3])
        ones = tf.ones_like(batch_mean_vox_real)
        inverse = tf.div(ones, tf.add(batch_mean_vox_real, ones))
        weight = inverse * tf.div(1., tf.reduce_sum(inverse))
        recons_loss = -tf.reduce_sum(
            self.lamda_gamma * vox_real * tf.log(1e-6 + vox_gen_decode) +
            (1 - self.lamda_gamma) *
            (1 - vox_real) * tf.log(1e-6 + 1 - vox_gen_decode), [1, 2, 3])
        recons_loss = tf.reduce_mean(tf.reduce_sum(recons_loss * weight, 1))

        #Refiner
        vox_after_refine_dec = self.refine(vox_gen_decode)

        recons_loss_refine = -tf.reduce_sum(
            self.lamda_gamma * vox_real * tf.log(1e-6 + vox_after_refine_dec) +
            (1 - self.lamda_gamma) *
            (1 - vox_real) * tf.log(1e-6 + 1 - vox_after_refine_dec),
            [1, 2, 3])
        recons_loss_refine = tf.reduce_mean(
            tf.reduce_sum(recons_loss_refine * weight, 1))

        #GAN_generate
        vox_gen, _ = self.generate(Z)
        vox_after_refine_gen = self.refine(vox_gen)

        p_real, h_real = self.discriminate(vox_real)
        p_gen, h_gen = self.discriminate(vox_gen)
        p_gen_dec, h_gen_dec = self.discriminate(vox_gen_decode)
        p_gen_ref, h_gen_ref = self.discriminate(vox_after_refine_gen)
        p_gen_dec_ref, h_gen_dec_ref = self.discriminate(vox_after_refine_dec)

        #Standard_GAN_Loss
        discrim_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(h_real, tf.ones_like(h_real)))\
         + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(h_gen, tf.zeros_like(h_gen))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(h_gen_dec, tf.zeros_like(h_gen_dec)))

        gen_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                h_gen, tf.ones_like(h_gen))) + tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        h_gen_dec, tf.ones_like(h_gen_dec)))

        #for refine
        discrim_loss_refine = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(h_real, tf.ones_like(h_real)))\
         + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(h_gen_ref, tf.zeros_like(h_gen_ref))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(h_gen_dec_ref, tf.zeros_like(h_gen_dec_ref)))

        gen_loss_refine = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                h_gen_ref, tf.ones_like(h_gen_ref))) + tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        h_gen_dec_ref, tf.ones_like(h_gen_dec_ref)))
        """
        #LS_GAN_Loss
        a=-1
        b=1
        c=0

        discrim_loss = tf.reduce_mean(0.5*((h_real-b)**2) + 0.5*((h_gen-a)**2) + 0.5*((h_gen_dec-a)**2))
        gen_loss = tf.reduce_mean(0.5*((h_gen-c)**2) + 0.5*((h_gen_dec-c)**2))
        """

        #Cost
        cost_enc = code_encode_loss + self.lamda_recons * recons_loss
        cost_gen = self.lamda_recons * recons_loss + gen_loss
        cost_discrim = discrim_loss
        cost_code = code_discrim_loss
        cost_gen_ref = self.lamda_recons * recons_loss_refine + gen_loss_refine
        cost_discrim_ref = discrim_loss_refine

        tf.summary.scalar("recons_loss", tf.reduce_mean(recons_loss))
        tf.summary.scalar("gen_loss", tf.reduce_mean(gen_loss))
        tf.summary.scalar("discrim_loss", tf.reduce_mean(discrim_loss))
        tf.summary.scalar("code_encode_loss", tf.reduce_mean(code_encode_loss))
        tf.summary.scalar("code_discrim_loss",
                          tf.reduce_mean(code_discrim_loss))

        summary_op = tf.summary.merge_all()

        return Z, Z_encode, vox_real_, vox_gen, vox_gen_decode, vox_after_refine_dec, vox_after_refine_gen,\
         recons_loss, code_encode_loss, gen_loss, discrim_loss, recons_loss_refine, gen_loss_refine, discrim_loss_refine,\
          cost_enc, cost_code, cost_gen, cost_discrim, cost_gen_ref, cost_discrim_ref, summary_op
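The reconstruction loss above re-weights voxel classes by inverse frequency: weight_c = (1 / (mean_c + 1)) / sum_c (1 / (mean_c + 1)). A minimal NumPy sketch with illustrative class frequencies:

import numpy as np

batch_mean_vox = np.array([0.90, 0.07, 0.03])   # per-class occupancy, averaged over batch and volume
inverse = 1.0 / (batch_mean_vox + 1.0)          # rarer classes get larger values
weight = inverse / inverse.sum()                # normalized per-class weights, as in build_model above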
Example #59
0
    def integral_Guaussian(self, mu, theta):
        a = -4.0 / math.sqrt(2.0 * math.pi) / theta
        exp_mu = tf.exp(a * mu)
        ig = tf.div(exp_mu, exp_mu + 1) * -1.0 + 1
        return ig
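Algebraically, the return value above simplifies to a logistic sigmoid: 1 - exp(a*mu)/(exp(a*mu)+1) = 1/(1+exp(a*mu)) with a = -4/(sqrt(2*pi)*theta), i.e. a logistic approximation to a Gaussian CDF with scale theta. A quick standalone check:

import math

def integral_gaussian(mu, theta):
    # equal to integral_Guaussian above: 1 / (1 + exp(a * mu)) with a = -4 / (sqrt(2*pi) * theta)
    a = -4.0 / math.sqrt(2.0 * math.pi) / theta
    return 1.0 / (1.0 + math.exp(a * mu))

# integral_gaussian(0.0, 1.0) -> 0.5; large positive mu -> ~1.0; large negative mu -> ~0.0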
Example #60
0
    def __build_graph(self):
        self.__graph = tf.Graph()
        with self.__graph.as_default(), self.__graph.device(_device_for_node):
            count_max = tf.constant([self.cooccurrence_cap],
                                    dtype=tf.float32,
                                    name='max_cooccurrence_count')
            scaling_factor = tf.constant([self.scaling_factor],
                                         dtype=tf.float32,
                                         name="scaling_factor")

            self.__focal_input = tf.placeholder(tf.int32,
                                                shape=[self.batch_size],
                                                name="focal_words")
            print(self.__focal_input.graph)
            self.__context_input = tf.placeholder(tf.int32,
                                                  shape=[self.batch_size],
                                                  name="context_words")
            self.__cooccurrence_count = tf.placeholder(
                tf.float32, shape=[self.batch_size], name="cooccurrence_count")

            if self.preload:
                focal_embeddings = tf.Variable(self.init_embeddings,
                                               name="focal_embeddings")
                context_embeddings = tf.Variable(self.init_embeddings,
                                                 name="context_embeddings")
                print(focal_embeddings.graph)
            else:
                focal_embeddings = tf.Variable(tf.random_uniform(
                    [self.vocab_size, self.embedding_size], 1.0, -1.0),
                                               name="focal_embeddings")
                context_embeddings = tf.Variable(tf.random_uniform(
                    [self.vocab_size, self.embedding_size], 1.0, -1.0),
                                                 name="context_embeddings")

            focal_biases = tf.Variable(tf.random_uniform([self.vocab_size],
                                                         1.0, -1.0),
                                       name='focal_biases')
            context_biases = tf.Variable(tf.random_uniform([self.vocab_size],
                                                           1.0, -1.0),
                                         name="context_biases")

            focal_embedding = tf.nn.embedding_lookup([focal_embeddings],
                                                     self.__focal_input)
            context_embedding = tf.nn.embedding_lookup([context_embeddings],
                                                       self.__context_input)
            focal_bias = tf.nn.embedding_lookup([focal_biases],
                                                self.__focal_input)
            context_bias = tf.nn.embedding_lookup([context_biases],
                                                  self.__context_input)

            weighting_factor = tf.minimum(
                1.0,
                tf.pow(tf.div(self.__cooccurrence_count, count_max),
                       scaling_factor))

            embedding_product = tf.reduce_sum(
                tf.multiply(focal_embedding, context_embedding), 1)

            log_cooccurrences = tf.log(tf.to_float(self.__cooccurrence_count))

            distance_expr = tf.square(
                tf.add_n([
                    embedding_product, focal_bias, context_bias,
                    tf.negative(log_cooccurrences)
                ]))

            single_losses = tf.multiply(weighting_factor, distance_expr)
            self.__total_loss = tf.reduce_sum(single_losses)
            tf.summary.scalar("GloVe_loss", self.__total_loss)
            self.__optimizer = tf.train.AdagradOptimizer(
                self.learning_rate).minimize(self.__total_loss)
            self.__summary = tf.summary.merge_all()

            self.__combined_embeddings = tf.add(focal_embeddings,
                                                context_embeddings,
                                                name="combined_embeddings")