def triplet_loss(y_true, y_pred, alpha=0.2):
    """
    Implementation of the triplet loss as defined by formula (3)

    Arguments:
    y_true -- true labels, required when you define a loss in Keras; not used in this function.
    y_pred -- python list containing three objects:
            anchor -- the encodings for the anchor images, of shape (None, 128)
            positive -- the encodings for the positive images, of shape (None, 128)
            negative -- the encodings for the negative images, of shape (None, 128)

    Returns:
    loss -- real number, value of the loss
    """

    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]

    # (M samples, dimension per image)
    # Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)),
                             axis=-1)
    # Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)),
                             axis=-1)
    # Step 3: subtract the two previous distances and add alpha.
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))

    return loss
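
# Quick sanity check of the loss above -- a sketch, not from the original
# snippet; it assumes TF 1.x (graph mode) and uses tiny made-up 2-D encodings.
_a = tf.constant([[1.0, 0.0], [0.0, 1.0]])   # anchor encodings
_p = tf.constant([[1.0, 0.1], [0.0, 0.9]])   # positives, close to the anchors
_n = tf.constant([[0.0, 1.0], [1.0, 0.0]])   # negatives, far from the anchors
with tf.Session() as sess:
    # Prints 0.0 here: the negatives are far enough away that the margin holds.
    print(sess.run(triplet_loss(None, [_a, _p, _n], alpha=0.2)))
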
def _smmoth_l1(sigma, box_pred, box_targets, box_inside_weights,
               box_outside_weights):
    """
        loss = bbox_outside_weights*smoothL1(inside_weights * (box_pred - box_targets))
        smoothL1(x) = 0.5*(sigma*x)^2, if |x| < 1 / sigma^2
                       |x| - 0.5 / sigma^2, otherwise
    """
    sigma2 = sigma * sigma
    inside_mul = tf.multiply(box_inside_weights,
                             tf.subtract(box_pred, box_targets))

    smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2),
                             tf.float32)
    smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul),
                                    0.5 * sigma2)
    smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 * sigma2)

    smooth_l1_result = tf.add(
        tf.multiply(smooth_l1_option1, smooth_l1_sign),
        tf.multiply(smooth_l1_option2,
                    tf.abs(tf.subtract(smooth_l1_sign, 1.0))))

    outside_mul = tf.multiply(box_outside_weights, smooth_l1_result)

    return outside_mul
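
# Tiny numeric check of the piecewise smooth-L1 definition above (a sketch,
# assuming TF 1.x; the residuals and unit weights below are made up).
_pred = tf.constant([[0.05, 2.0]])   # one small residual, one large residual
_target = tf.zeros_like(_pred)
_ones = tf.ones_like(_pred)
_smooth = _smmoth_l1(1.0, _pred, _target, _ones, _ones)
with tf.Session() as sess:
    # expected: [0.5 * 0.05**2, 2.0 - 0.5] = [0.00125, 1.5]
    print(sess.run(_smooth))
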
Example n. 3
    def __init__(self, sequence_length, batch_size, embedding_size, epoch,
                 filter_sizes, num_filters):

        self.sequence_length, self.batch_size, self.embedding_size, self.epoch, self.filter_sizes, self.num_filters = \
            sequence_length, batch_size, embedding_size, epoch, filter_sizes, num_filters

        self.q, self.qp = self.load_data()
        self.qn = list(self.qp)
        random.shuffle(self.qn)  # random.shuffle works in place and returns None
        self.dropout_keep_prob = 0.2
        self.margin = 0.02

        x_qp = tf.placeholder(
            tf.float32,
            [self.batch_size, self.sequence_length, self.embedding_size])
        x_qn = tf.placeholder(
            tf.float32,
            [self.batch_size, self.sequence_length, self.embedding_size])
        x_a = tf.placeholder(
            tf.float32,
            [self.batch_size, self.sequence_length, self.embedding_size])

        qp_conv = self.conv(x_qp)
        qn_conv = self.conv(x_qn)
        a_conv = self.conv(x_a)

        cosin_q_qp = self.coscin(qp_conv, a_conv)
        cosin_q_qn = self.coscin(qn_conv, a_conv)
        zeros = tf.constant(0., shape=[self.batch_size])
        margins = tf.constant(self.margin, shape=[self.batch_size])

        losses = tf.maximum(
            zeros, tf.subtract(margins, tf.subtract(cosin_q_qp, cosin_q_qn)))
        self.loss = tf.reduce_sum(losses)
Example n. 4
def triplet_loss_gor(anchor, positive, negative, alpha, gor_alfa,
                     embedding_dim):
    """Calculate the triplet loss according to the FaceNet paper
    
    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
  
    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        m1 = tf.pow(tf.reduce_sum(tf.multiply(anchor, negative), 1), 2)

        mean = tf.pow(
            tf.reduce_mean(tf.reduce_sum(tf.multiply(anchor, negative), 1)), 2)

        gor = mean + tf.sqrt(
            tf.abs(tf.subtract(tf.reduce_mean(m1), 1 / embedding_dim)))

        basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0) + gor * gor_alfa

    return loss
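
# Hedged usage sketch of triplet_loss_gor (TF 1.x assumed); the placeholder
# names and hyper-parameter values are illustrative, not from the source.
emb_dim = 128
anchor_ph = tf.placeholder(tf.float32, [None, emb_dim])
positive_ph = tf.placeholder(tf.float32, [None, emb_dim])
negative_ph = tf.placeholder(tf.float32, [None, emb_dim])
gor_loss = triplet_loss_gor(anchor_ph, positive_ph, negative_ph,
                            alpha=0.2, gor_alfa=1.0, embedding_dim=emb_dim)
# In practice the three inputs would be outputs of an embedding network, and an
# optimizer would minimize gor_loss over that network's variables.
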
Example n. 5
        def build_graph():
            with tf.variable_scope("linear_layer"):
                w = tf.get_variable(
                    "w",
                    shape=[self.feature_size, self.class_num],
                    initializer=tf.truncated_normal_initializer(mean=0,
                                                                stddev=1e-2))
                b = tf.get_variable("b",
                                    shape=[self.class_num],
                                    initializer=tf.zeros_initializer())
                self.linear_output = tf.matmul(self.X, w) + b

            with tf.variable_scope("interaction_layer"):
                v = tf.get_variable(
                    "v",
                    shape=[self.feature_size, self.hidden_size],
                    initializer=tf.truncated_normal_initializer(mean=0,
                                                                stddev=1e-2))
                # sum_{i=1}^n sum_{j=i+1}^n <V_i, V_j> x_i x_j
                #     = 1/2 * sum_{f=1}^k ( (sum_{i=1}^n v_{i,f} x_i)^2 - sum_{i=1}^n v_{i,f}^2 x_i^2 )
                # self.interaction_terms = tf.multiply(0.5,
                #                                  tf.reduce_sum(
                #                                      tf.subtract(
                #                                          tf.pow(tf.sparse_tensor_dense_matmul(self.X, v), 2),
                #                                          tf.sparse_tensor_dense_matmul(tf.pow(self.X, 2), tf.pow(v, 2))),
                #                                      1, keep_dims=True))
                pow_part = tf.pow(tf.sparse_tensor_dense_matmul(self.X, v), 2)
                square_mul_part = tf.sparse_tensor_dense_matmul(
                    tf.pow(self.X, 2), tf.pow(v, 2))
                subtract_v = tf.subtract(pow_part, square_mul_part)
                self.interaction_terms = tf.multiply(
                    0.5, tf.reduce_sum(subtract_v, axis=1, keep_dims=True))
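
# The commented FM identity above can be verified with a small dense example.
# Illustrative NumPy-only sketch; X and v are random stand-ins for the sparse
# tensors used in the layer.
import numpy as np

_rng = np.random.RandomState(0)
X = _rng.rand(4, 6)   # 4 samples, 6 features
v = _rng.rand(6, 3)   # 6 features, k = 3 factors

# explicit pairwise interactions: sum_{i<j} <v_i, v_j> x_i x_j
lhs = np.zeros(4)
for i in range(6):
    for j in range(i + 1, 6):
        lhs += v[i].dot(v[j]) * X[:, i] * X[:, j]

# vectorised form used by the interaction layer: 1/2 * sum_f ((Xv)^2 - X^2 v^2)
rhs = 0.5 * np.sum(np.dot(X, v) ** 2 - np.dot(X ** 2, v ** 2), axis=1)

assert np.allclose(lhs, rhs)
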
Example n. 6
    def __init__(self, scope, globalAC=None):
        if scope == GLOBAL_NET_SCOPE:
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_params, self.c_params = self._build_net(scope)[-2:]
        else:
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1],
                                               'Vtarget')

                mu, sigma, self.v, self.a_params, self.c_params = self._build_net(
                    scope)

                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_loss = tf.reduce_mean(tf.square(td))

                with tf.name_scope('wrap_a_out'):
                    mu, sigma = mu * A_BOUND[1], sigma + 1e-4

                normal_dist = tf.distributions.Normal(mu, sigma)

                with tf.name_scope('a_loss'):
                    log_prob = normal_dist.log_prob(self.a_his)
                    exp_v = log_prob * td
                    entropy = normal_dist.entropy()
                    self.exp_v = ENTROPY_BETA * entropy + exp_v
                    self.a_loss = tf.reduce_mean(-self.exp_v)

                with tf.name_scope('choose_a'):
                    self.A = tf.clip_by_value(
                        tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0],
                        A_BOUND[1])

                with tf.name_scope('local_grad'):
                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
                    self.c_grads = tf.gradients(self.c_loss, self.c_params)

            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    self.pull_a_params_op = [
                        l_p.assign(g_p)
                        for l_p, g_p in zip(self.a_params, globalAC.a_params)
                    ]
                    self.pull_c_params_op = [
                        l_p.assign(g_p)
                        for l_p, g_p in zip(self.c_params, globalAC.c_params)
                    ]

                with tf.name_scope('push'):
                    self.update_a_op = OPT_A.apply_gradients(
                        zip(self.a_grads, globalAC.a_params))
                    self.update_c_op = OPT_C.apply_gradients(
                        zip(self.c_grads, globalAC.c_params))
def triplet_loss(y_true, y_pred, alpha=0.2):
    """
    Implementation of the triplet loss as defined by formula (3)

    Arguments:
    y_true -- true labels, required when you define a loss in Keras; not used in this function.
    y_pred -- python list containing three objects:
            anchor -- the encodings for the anchor images, of shape (None, 128)
            positive -- the encodings for the positive images, of shape (None, 128)
            negative -- the encodings for the negative images, of shape (None, 128)

    Returns:
    loss -- real number, value of the loss
    """
    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]

    distance_1 = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)))
    distance_2 = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)))
    dis = tf.add(tf.subtract(distance_1, distance_2), alpha)
    loss = tf.maximum(dis, 0.0)

    return loss
Example n. 8
    def __init__(self,n_input,n_hidden,transfer_function=tf.nn.softplus,optimizer=tf.train.AdamOptimizer(),scale=0.1):
        self.n_input=n_input
        self.n_hidden=n_hidden
        self.transfer=transfer_function
        self.scale=tf.placeholder(tf.float32)
        self.training_scale=scale
        network_weights=self._initialize_dweights()
        self.weights=network_weights
        self.x=tf.placeholder(tf.float32,[None,self.n_input])
        self.hidden=self.transfer(tf.add(tf.matmul(self.x+scale*tf.random_normal((n_input,)),
                                                   self.weights['w1']),self.weights['b1']))
        self.reconstruction=tf.add(tf.matmul(self.hidden,self.weights['w2']),self.weights['b2'])

# define the loss function of the autoencoder
        self.cost=0.5*tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction,self.x),2.0))
        self.optimizer=optimizer.minimize(self.cost)
        init=tf.global_variables_initializer()
        self.sess=tf.Session()
        self.sess.run(init)
Example n. 9
  def loss( self ):
#logits
    logits_class = tf.reshape( self.logits[ :,:self.boundary1 ], [self.batchsize, self.cell, self.cell, self.object_classes] )    #batchsize*7*7*20

    logits_p = tf.reshape( self.logits[ :, self.boundary1:self.boundary2 ], [self.batchsize, self.cell, self.cell, self.cell_boxes] )    #batchsize*7*7*2, object exist or not

    logits_boxes = tf.reshape( self.logits[ :,self.boundary2: ], [self.batchsize, self.cell, self.cell, self.cell_boxes, 4] )    #batchsize*7*7*2*4

#labels
    labels_response = tf.reshape( self.labels[...,0], [self.batchsize, self.cell, self.cell, 1] )   #exist or not exist

    label_boxes = tf.reshape( self.labels[..., 1:5], [self.batchsize, self.cell, self.cell, 1, 4] )
    label_boxes = tf.tile( label_boxes, [1,1,1,2,1] )  #every cell has two boxes

    label_classes = tf.reshape( self.labels[..., 5:], [self.batchsize, self.cell, self.cell, self.object_classes] )

#boxes loss
    box_loss_square = tf.square( tf.sqrt( label_boxes[...,2:3] ) - tf.sqrt( logits_boxes[...,2:3] ) )
    box_loss_delta  = tf.reduce_sum( tf.multiply( box_loss_square, labels_response ) )
    box_loss = self.lambda_coord * box_loss_delta

#confidence loss
    iou = self.calc_iou( label_boxes, logits_boxes )     # 7*7*2
    conf_obj_loss = tf.multiply( iou, logits_p )
    conf_obj_loss_tmp = tf.square( tf.subtract( labels_response, conf_obj_loss ) )
    conf_obj_loss = tf.multiply( labels_response, conf_obj_loss_tmp )
    conf_obj_loss = tf.reduce_sum( conf_obj_loss )

    mask = tf.subtract( tf.ones_like( labels_response ), labels_response ) # no object is 1, or is 0
    conf_noobj_loss = tf.multiply( mask, conf_obj_loss_tmp )
    conf_noobj_loss = tf.reduce_sum( conf_noobj_loss )
    conf_noobj_loss = self.lambda_noobj*conf_noobj_loss

    conf_loss = conf_obj_loss + conf_noobj_loss

#class loss
    class_loss = tf.subtract( label_classes, logits_class )
    class_loss = tf.reduce_sum( tf.square( class_loss ), 4 )
    class_loss = tf.multiply( labels_response, class_loss )
    class_loss = tf.reduce_sum( class_loss )

    self.loss = box_loss + conf_loss + class_loss
    return self.loss
Example n. 10
	def ConstructSVM(self, column_number, value, batch_size, constant_epsilon = 0.5, learning_rate = 0.075):
		ops.reset_default_graph()
		
		self.session = tf.Session()

		self.batch_size = batch_size
		self.placeholder_data = tf.placeholder(dtype = tf.float32, shape = [None, column_number])
		self.placeholder_value = tf.placeholder(dtype = tf.float32, shape = [None, 1]);
		
		self.A = tf.Variable(tf.random_normal(shape = [column_number, 1]), name = 'A')
		self.B = tf.Variable(tf.random_normal(shape = [1, 1]), name = 'B')

		self.epsilon = tf.constant(constant_epsilon)

		self.model = tf.add(tf.matmul(self.placeholder_data, self.A), self.B)

		self.loss = tf.reduce_mean(tf.maximum(0., tf.subtract(tf.abs(tf.subtract(self.model, self.placeholder_value)), self.epsilon)))
		
		self.optimizer = tf.train.GradientDescentOptimizer(learning_rate)
Example n. 11
    def __init__(self,h_size):
        self.scalarInput = tf.placeholder(shape=[None, 21168], dtype = tf.float32)
        self.imageIn = tf.reshape(self.scalarInput, shape=[-1, 84, 84, 3])
        self.conv1 = tf.contrib.layers.convolution2d(
            inputs=self.imageIn, num_outputs = 32,
            kernel_size = [8,8], stride = [4,4],
            padding = 'VALID', biases_initializer = None)
        self.conv2 = tf.contrib.layers.convolution2d(
            inputs = self.conv1, num_outputs = 64,
            kernel_size = [4,4], stride = [2,2],
            padding = 'VALID', biases_initializer = None)
        self.conv3 = tf.contrib.layers.convolution2d(
            inputs=self.conv2, num_outputs=64,
            kernel_size=[3,3], stride=[1,1],
            padding='VALID', biases_initializer=None)
        self.conv4 = tf.contrib.layers.convolution2d(
            inputs=self.conv3, num_outputs=512,
            kernel_size=[7,7], stride=[1,1],
            padding='VALID', biases_initializer=None)

        self.streamAC, self.streamVC = tf.split(self.conv4,2,3)
        self.streamA = tf.contrib.layers.flatten(self.streamAC)
        self.streamV = tf.contrib.layers.flatten(self.streamVC)
        self.AW = tf.Variable(tf.random_normal([h_size//2, env.actions]))
        self.VW = tf.Variable(tf.random_normal([h_size//2, 1]))
        self.Advantage = tf.matmul(self.streamA, self.AW)
        self.Value = tf.matmul(self.streamV, self.VW)

        self.Qout = self.Value + tf.subtract(self.Advantage,tf.reduce_mean(self.Advantage, reduction_indices = 1, keep_dims = True))
        self.predict = tf.argmax(self.Qout, 1)

        self.targetQ = tf.placeholder(shape=[None], dtype=tf.float32)
        self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
        self.actions_onehot = tf.one_hot(self.actions,env.actions, dtype=tf.float32)
        self.Q = tf.reduce_sum(tf.multiply(self.Qout, self.actions_onehot), reduction_indices=1)
        self.td_error = tf.square(self.targetQ - self.Q)
        self.loss = tf.reduce_mean(self.td_error)
        self.trainer = tf.train.AdamOptimizer(learning_rate=0.0001)
        self.updateModel = self.trainer.minimize(self.loss)
Example n. 12
class2_x = [x[0] for i, x in enumerate(x_vals) if y_vals[i] == -1]
class2_y = [x[1] for i, x in enumerate(x_vals) if y_vals[i] == -1]

batch_size = 350

x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32)

b = tf.Variable(tf.random_normal(shape=[1, batch_size]))

gamma = tf.constant(-50.0)
dist = tf.reduce_sum(tf.square(x_data), 1)
dist = tf.reshape(dist, [-1, 1])
sq_dists = tf.add(
    tf.subtract(dist, tf.multiply(2, tf.matmul(x_data,
                                                tf.transpose(x_data)))),
    tf.transpose(dist))
my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))

first_term = tf.reduce_mean(b)
b_vec_cross = tf.matmul(tf.transpose(b), b)
y_target_cross = tf.matmul(y_target, tf.transpose(y_target))
second_term = tf.reduce_sum(
    tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)))
loss = tf.negative(tf.subtract(first_term, second_term))

rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
pred_sq_dist = tf.add(
    tf.subtract(
        rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))),
    tf.transpose(rB))
    def _init_graph(self):
        '''
        Init a tensorflow Graph containing: input data, variables, model, loss, optimizer
        '''
        self.graph = tf.Graph()
        with self.graph.as_default():  # , tf.device('/cpu:0'):
            # Set graph level random seed
            tf.set_random_seed(self.random_seed)
            # Input data.
            self.train_features = tf.placeholder(tf.int32,
                                                 shape=[None, None
                                                        ])  # None * features_M
            self.train_labels = tf.placeholder(tf.float32,
                                               shape=[None, 1])  # None * 1
            self.dropout_keep = tf.placeholder(tf.float32, shape=[None])
            self.train_phase = tf.placeholder(tf.bool)

            # Variables.
            self.weights = self._initialize_weights()

            # Model.
            # _________ sum_square part _____________
            # get the summed up embeddings of features.
            nonzero_embeddings = tf.nn.embedding_lookup(
                self.weights['feature_embeddings'], self.train_features)
            self.summed_features_emb = tf.reduce_sum(nonzero_embeddings,
                                                     1)  # None * K
            # get the element-multiplication
            self.summed_features_emb_square = tf.square(
                self.summed_features_emb)  # None * K

            # _________ square_sum part _____________
            self.squared_features_emb = tf.square(nonzero_embeddings)
            self.squared_sum_features_emb = tf.reduce_sum(
                self.squared_features_emb, 1)  # None * K

            # ________ FM __________
            self.FM = 0.5 * tf.subtract(
                self.summed_features_emb_square,
                self.squared_sum_features_emb)  # None * K
            if self.batch_norm:
                self.FM = self.batch_norm_layer(self.FM,
                                                train_phase=self.train_phase,
                                                scope_bn='bn_fm')
            self.FM = tf.nn.dropout(
                self.FM, self.dropout_keep[-1]
            )  # dropout at the bilinear interaction layer

            # ________ Deep Layers __________
            for i in range(0, len(self.layers)):
                self.FM = tf.add(
                    tf.matmul(self.FM, self.weights['layer_%d' % i]),
                    self.weights['bias_%d' % i])  # None * layer[i] * 1
                if self.batch_norm:
                    self.FM = self.batch_norm_layer(
                        self.FM,
                        train_phase=self.train_phase,
                        scope_bn='bn_%d' % i)  # None * layer[i] * 1
                self.FM = self.activation_function(self.FM)
                self.FM = tf.nn.dropout(
                    self.FM,
                    self.dropout_keep[i])  # dropout at each Deep layer
            self.FM = tf.matmul(self.FM,
                                self.weights['prediction'])  # None * 1

            # _________out _________
            Bilinear = tf.reduce_sum(self.FM, 1, keep_dims=True)  # None * 1
            self.Feature_bias = tf.reduce_sum(
                tf.nn.embedding_lookup(self.weights['feature_bias'],
                                       self.train_features), 1)  # None * 1
            Bias = self.weights['bias'] * tf.ones_like(
                self.train_labels)  # None * 1
            self.out = tf.add_n([Bilinear, self.Feature_bias,
                                 Bias])  # None * 1

            # Compute the loss.
            if self.loss_type == 'square_loss':
                if self.lamda_bilinear > 0:
                    self.loss = tf.nn.l2_loss(
                        tf.subtract(self.train_labels, self.out)
                    ) + tf.contrib.layers.l2_regularizer(self.lamda_bilinear)(
                        self.weights['feature_embeddings'])  # regularizer
                else:
                    self.loss = tf.nn.l2_loss(
                        tf.subtract(self.train_labels, self.out))
            elif self.loss_type == 'log_loss':
                self.out = tf.sigmoid(self.out)
                if self.lamda_bilinear > 0:
                    self.loss = tf.contrib.losses.log_loss(
                        self.out,
                        self.train_labels,
                        weights=1.0,
                        epsilon=1e-07,
                        scope=None
                    ) + tf.contrib.layers.l2_regularizer(self.lamda_bilinear)(
                        self.weights['feature_embeddings'])  # regularizer
                else:
                    self.loss = tf.contrib.losses.log_loss(self.out,
                                                           self.train_labels,
                                                           weights=1.0,
                                                           epsilon=1e-07,
                                                           scope=None)

            # Optimizer.
            if self.optimizer_type == 'AdamOptimizer':
                self.optimizer = tf.train.AdamOptimizer(
                    learning_rate=self.learning_rate,
                    beta1=0.9,
                    beta2=0.999,
                    epsilon=1e-8).minimize(self.loss)
            elif self.optimizer_type == 'AdagradOptimizer':
                self.optimizer = tf.train.AdagradOptimizer(
                    learning_rate=self.learning_rate,
                    initial_accumulator_value=1e-8).minimize(self.loss)
            elif self.optimizer_type == 'GradientDescentOptimizer':
                self.optimizer = tf.train.GradientDescentOptimizer(
                    learning_rate=self.learning_rate).minimize(self.loss)
            elif self.optimizer_type == 'MomentumOptimizer':
                self.optimizer = tf.train.MomentumOptimizer(
                    learning_rate=self.learning_rate,
                    momentum=0.95).minimize(self.loss)

            # init
            self.saver = tf.train.Saver()
            init = tf.global_variables_initializer()
            self.sess = tf.Session()
            self.sess.run(init)

            # number of params
            total_parameters = 0
            for variable in self.weights.values():
                shape = variable.get_shape(
                )  # shape is an array of tf.Dimension
                variable_parameters = 1
                for dim in shape:
                    variable_parameters *= dim.value
                total_parameters += variable_parameters
            if self.verbose > 0:
                print "#params: %d" % total_parameters
Example n. 14
out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y),
              lambda: tf.subtract(x, y))

###############################################################################
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
# Return x + y if x < y, x - y if x > y, 0 otherwise.
# Hint: Look up tf.case().
###############################################################################

# YOUR CODE
x = tf.random_uniform([], -1, 1)
y = tf.random_uniform([], -1, 1)
out = tf.case(
    {
        tf.less(x, y): lambda: tf.add(x, y),
        tf.greater(x, y): lambda: tf.subtract(x, y)
    },
    default=lambda: tf.constant(0.0),
    exclusive=True)
###############################################################################
# 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]]
# and y as a tensor of zeros with the same shape as x.
# Return a boolean tensor that yields Trues if x equals y element-wise.
# Hint: Look up tf.equal().
###############################################################################

# YOUR CODE
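# One possible answer (a sketch, not the original notebook's solution):
x = tf.constant([[0, -2, -1], [0, 1, 2]])
y = tf.zeros_like(x)
out = tf.equal(x, y)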

###############################################################################
# 1d: Create the tensor x of value
# [29.05088806,  27.61298943,  31.19073486,  29.35532951,
Example n. 15
def compute_z(a, b, c):
    r1 = tf.subtract(a, b)
    r2 = tf.multiply(2, r1)
    z  = tf.add(r2, c) 
    return z
Example n. 16
def main():
	# define dataset object
	train_set = dataset(train_path, batch_size)
	# define input 
	train_input = tf.placeholder(
		tf.float32, 
		shape=(image_size,image_size,batch_size)
		)
	train_label = tf.placeholder(
		tf.float32, 
		shape=(image_size,image_size,batch_size)
		)

	# the model and loss
	shared_model = tf.make_template('shared_model', model)
	train_output, weights = shared_model(train_input)
	loss = tf.reduce_sum(tf.nn.l2_loss(tf.subtract(train_output, train_label)))

	# weight regularization.
	for w in weights:
		loss += tf.nn.l2_loss(w)*1e-4

	# record loss
	tf.summary.scalar("loss", loss)

	# training step and learning rate
	global_step = tf.Variable(0, trainable=False)
	learning_rate = tf.train.exponential_decay(
		base_learning_rate, 
		global_step*batch_size,
		train_set.instance_num*lr_step_size,
		lr_decay,
		staircase=True
		)
	tf.summary.scalar("learning_rate", learning_rate)

	# Optimizer
	optimizer = tf.train.AdamOptimizer(learning_rate)
	opt = optimizer.minimize(loss, global_step=global_step)

	saver = tf.train.Saver(weights, max_to_keep=0)
	config = tf.ConfigProto()

	# training
	with tf.Session(config=config) as sess:
		#TensorBoard open log with "tensorboard --logdir=logs"
		if not os.path.exists('logs'):
			os.mkdir('logs')
		merged = tf.summary.merge_all()
		file_writer = tf.summary.FileWriter('logs',sess.graph)

		# var initialization
		tf.initialize_all_variables().run()

		if model_path:
			print "restore model..."
			saver.restore(sess,model_path)
			print "successfully restore previous model."

		# train
		for epoch in xrange(0,max_epoch):
			for step in range(train_set.instance_num//batch_size):
				data, label = train_set.next_batch()
				feed_dict = {train_input : data, train_label : label}
				_,l,output,lr,g_step = sess.run([opt, loss, train_output, learning_rate, global_step],feed_dict=feed_dict)
				print "[epoch %2.4f] loss %.4f\t lr %.5f" % (epoch+(float(step)*batch_size/train_set.instance_num), np.sum(l)/batch_size, lr)
				del data, label

			saver.save(sess, "./checkpoints/VDSR_const_clip_0.01_epoch_%03d.ckpt" % epoch ,global_step=global_step)
data3 = tf.constant(2.5, dtype=tf.float32)
print(data1)
print(data2)
sess = tf.Session()
print(sess.run(data1))

init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(data3))

###############################################
init = tf.global_variables_initializer()
sess = tf.Session()
with sess:
    sess.run(init)


###############################################
import tensorflow as tf
data1 = tf.constant(2, dtype=tf.int32)
data2 = tf.constant(4, dtype=tf.int32)
dataAdd = tf.add(data1, data2)
dataMul = tf.multiply(data1, data2)
dataMinus = tf.subtract(data1, data2)
dataDiv = tf.divide(data1, data2)
with tf.Session() as sess:
    print(sess.run(dataAdd))
    print(sess.run(dataMul))
    print(sess.run(dataMinus))
    print(sess.run(dataDiv))
Example n. 18
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split

a = tf.constant(5.5)
b = tf.constant(8.6)

sess = tf.InteractiveSession()
print(sess.run(a))
print("sub a from b:",
      sess.run(tf.subtract(b, a)))  ## tf.add, multiply, or divide

## matrices

#matrices ops, like numpy
#tf.random_normal()
#tf.random_uniform()
#tf.ones()
#tf.zeros()
#tf.ones_like() generate a matrix of ones with the same shape as the matrix passed in

#### variable needs to be initialized upon using them by
# tf.global_variables_initializer()

### dot product / multiplication

# sess.run(tf.matmul(matrix11, matrix2))

### placeholders
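
# A minimal sketch of the patterns noted above (TF 1.x; it reuses the
# InteractiveSession `sess` created earlier, and the values are made up).
w = tf.Variable(tf.ones([2, 2]))               # variables must be initialized
x = tf.placeholder(tf.float32, shape=[2, 2])   # placeholders are fed at run time

sess.run(tf.global_variables_initializer())
print(sess.run(tf.matmul(x, w), feed_dict={x: [[1., 2.], [3., 4.]]}))
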
Example n. 19
	def ms_error(labels, logits):
		return tf.square(tf.subtract(labels, logits))
Example n. 20
    def _init_graph(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            tf.set_random_seed(self.random_seed)

            # 1. for input data
            self.feat_index = tf.placeholder(tf.int32,
                                             shape=[None, None],
                                             name='feat_index')
            self.feat_value = tf.placeholder(tf.float32,
                                             shape=[None, None],
                                             name='feat_value')

            self.numeric_value = tf.placeholder(tf.float32,
                                                shape=[None, None],
                                                name='numeric_value')

            self.label = tf.placeholder(tf.float32,
                                        shape=[None, 1],
                                        name='label')
            self.dropout_keep_deep = tf.placeholder(tf.float32,
                                                    shape=[None],
                                                    name='dropout_keep_deep')
            self.train_phase = tf.placeholder(tf.bool, name='train_phase')

            # 2. weights
            self.weights = self._initialize_weights()

            # 3. define the computing graph
            self.embeddings = tf.nn.embedding_lookup(
                self.weights['feature_embeddings'], self.feat_index)
            feat_value = tf.reshape(self.feat_value,
                                    shape=[-1, self.field_size, 1])
            self.embeddings = tf.multiply(self.embeddings, feat_value)

            self.x0 = tf.concat([
                self.numeric_value,
                tf.reshape(self.embeddings,
                           shape=[-1, self.field_size * self.embedding_size])
            ],
                                axis=1)

            with tf.name_scope('deep_part'):
                self.y_deep = self.x0  # should x0 also get dropout?

                for i in range(len(self.deep_layers)):
                    self.y_deep = tf.add(
                        tf.matmul(self.y_deep,
                                  self.weights['deep_layer_%d' % i]),
                        self.weights['deep_bias_%d' % i])
                    self.y_deep = self.deep_layers_activation(self.y_deep)
                    self.y_deep = tf.nn.dropout(self.y_deep,
                                                self.dropout_keep_deep[i])  #

            with tf.name_scope('cross_part'):
                self._x0 = tf.reshape(self.x0, (-1, self.total_size, 1))
                x_l = self._x0

                for l in range(self.cross_layer_num):
                    # cross_layer: x_l+1 = x_0 * x_l.T * W_l + b_l + x_l
                    # the vectors are treated as column vectors by default
                    # x_l = tf.tensordot(tf.matmul(self._x0, x_l, transpose_b=True),
                    # self.weights['cross_layer_%d' % l], 1) + self.weights['cross_bias_%d' % l] + x_l
                    # The expression above can be simplified: first compute x_l.T * w_l (a scalar), then combine it with x_0.
                    xlT_w = tf.tensordot(x_l,
                                         self.weights['cross_layer_%d' % l],
                                         axes=(1, 0))
                    print(xlT_w)
                    print(self.weights['cross_layer_%d' % l])
                    x_l = tf.matmul(self._x0, xlT_w) + \
                        self.weights['cross_bias_%d' % l] + x_l

                self.cross_part_out = tf.reshape(x_l, (-1, self.total_size))

            ## concat_part
            concat_input = tf.concat([self.cross_part_out, self.y_deep],
                                     axis=1)
            ## last layer
            self.out = tf.add(
                tf.matmul(concat_input, self.weights['concat_projection']),
                self.weights['concat_bias'])

            # 4. loss function
            if self.loss_type == "logloss":
                self.out = tf.nn.sigmoid(self.out)
                self.loss = tf.losses.log_loss(self.label, self.out)
            elif self.loss_type == "mse":
                self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))

            ## regularization
            if self.l2_reg > 0.0:
                self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg)(
                    self.weights['concat_projection'])

                for i in range(len(self.deep_layers)):
                    self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg)(
                        self.weights['deep_layer_%d' % i])

                for i in range(self.cross_layer_num):
                    self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg)(
                        self.weights['cross_layer_%d' % i])

            # 5 optimizer
            if self.optimizer_type == "adam":
                self.optimizer = tf.train.AdamOptimizer(
                    learning_rate=self.learning_rate,
                    beta1=0.9,
                    beta2=0.999,
                    epsilon=1e-8).minimize(self.loss)
            elif self.optimizer_type == 'adagrad':
                self.optimizer = tf.train.AdagradOptimizer(
                    learning_rate=self.learning_rate,
                    initial_accumulator_value=1e-8).minimize(self.loss)
            elif self.optimizer_type == 'gd':
                self.optimizer = tf.train.GradientDescentOptimizer(
                    learning_rate=self.learning_rate).minimize(self.loss)
            elif self.optimizer_type == 'momentum':
                self.optimizer = tf.train.MomentumOptimizer(
                    learning_rate=self.learning_rate,
                    momentum=0.5).minimize(self.loss)

            # init
            init = tf.global_variables_initializer()
            self.sess = tf.Session()
            self.sess.run(init)

            ## count the number of parameters
            total_parameters = 0
            for variable in self.weights.values():
                shape = variable.shape
                temp = 1
                for dim in shape:
                    temp *= dim
                total_parameters += temp

            if self.verbose > 0:
                print("#params: %d" % total_parameters)
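
# The simplification described in the cross-layer comments above (compute
# x_l.T * w_l first, then scale x_0 by that scalar) can be checked with plain
# NumPy. Illustrative sketch only; shapes and values are made up.
import numpy as np

_rng = np.random.RandomState(1)
x0 = _rng.rand(5, 1)   # column vector x_0
xl = _rng.rand(5, 1)   # column vector x_l
w = _rng.rand(5, 1)    # cross-layer weight w_l

naive = np.dot(np.dot(x0, xl.T), w)          # builds a 5x5 matrix first
simplified = x0 * np.dot(xl.T, w)[0, 0]      # x_l.T * w_l is a scalar

assert np.allclose(naive, simplified)
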
Example n. 21


# 39 Depth of a binary tree
def getHeight(root):
	if root is None:
		return 0

	lh = getHeight(root.left)
	lr = getHeight(root.right)

	return max(lh, lr) + 1


# 52 Build the product array (product of all elements except self)
def productExceptSelf(nums):
	res, right = [1 for _ in range(len(nums))], 1

	for i in range(1, len(nums)):
		res[i] = res[i-1] * nums[i-1]

	for i in range(len(nums)-1, 0, -1):
		res[i] = res[i] * right
		right = right * nums[i]

	return res


# 53 Regular expression matching; the initial conditions and the zero-or-more case are the trickiest
def isMatch(s, p):
	dp = [[False for _ in range(len(p) + 1)] for _ in range(len(s)+1)]
	dp[0][0] = True

	for i in range(2, len(p) + 1):
		if p[i-1] == "*":
			dp[0][i] = dp[0][i-2]

	for i in range(1, len(s)+1):
		for j in range(1, len(p) + 1):
			if s[i-1] == p[j-1]:
				dp[i][j] = dp[i-1][j-1]

			elif p[j-1] == ".":
				dp[i][j] = dp[i-1][j-1]

			elif p[j-1] == "*": # dp[i][j-1]: '*' matches once; dp[i][j-2]: '*' matches zero times
				dp[i][j] = dp[i][j-1] or dp[i][j-2] or (dp[i-1][j] and (s[i-1] == p[j-2] or p[j-2] == "."))

	return dp[len(s)][len(p)]


# 56 Entry node of the cycle in a linked list
def detectCycle(head):
	slow, fast = head, head

	while fast and fast.next:
		slow, fast = slow.next, fast.next.next

		if slow == fast:
			break

	if fast is None or fast.next is None:
		return None

	slow = head

	while slow != fast:
		slow, fast = slow.next, fast.next

	return slow


# 57 Remove duplicate nodes from a linked list
def deleteDuplicates(head):
	if head is None:
		return None

	dumpy = ListNode(0)

	dumpy.next = head

	cur1, cur2 = dumpy.next, head.next

	while cur2:
		if cur2.val != cur1.val:
			cur1.next = cur2
			cur1 = cur1.next
		cur2 = cur2.next

	cur1.next = None
	return dumpy.next


# 60 Print a binary tree level by level
def levelOrder(root):
	res, queue = [], []

	if root is None:
		return res

	queue.append(root)

	while queue:
		out, new_queue = [], []

		for node in queue:
			out.append(node.val)

			if node.left:
				new_queue.append(node.left)

			if node.right:
				new_queue.append(node.right)

		res.append(out)
		queue = new_queue

	return res

# 61 Print a binary tree in zigzag order
def zigZag(root):
	res, queue, level = [], [], 0

	if root is None:
		return res

	queue.append(root)

	while queue:
		new_queue, out = [], []

		for node in queue:
			out.append(node.val)

			if node.left:
				new_queue.append(node.left)

			if node.right:
				new_queue.append(node.right)

		res.append(out[::-1]) if level % 2 else res.append(out)
		queue = new_queue

	return res


# 62 Serialize a binary tree
class Codec:
	def serialize(self, root):
		res = []

		if root is None:
			res.append("#")
			return ",".join(res)

		res.append(str(root.val))
		res.append(self.serialize(root.left))
		res.append(self.serialize(root.right))

		return ",".join(res)

	def deserialize(self, data):
		data = iter(data.split(","))

		def build():
			val = next(data)

			if val == "#":
				return None

			root = TreeNode(val)

			root.left = build()

			root.right = build()

			return root

		return build()

# 63 The k-th node of a binary search tree
def kthSmallest(root, k):
	st, p, cnt = [], root, 1

	while st or p:
		while p:
			st.append(p)
			p = p.left

		p = st.pop()

		if cnt == k:
			return p.val

		p, cnt = p.right, cnt + 1


# K-means algorithm
def kmeans(data, k):
	m = len(data) # number of data points
	n = len(data[0]) # dimensionality of the data
	cluster_center = np.zeros((k, n)) # each row is one cluster center

	init_list = np.random.randint(low=0, high=m, size=k)

	for index, j in enumerate(init_list):
		cluster_center[index] = data[j][:] # randomly pick k points as the initial centers

	# cluster assignment for each point
	cluster = np.zeros(m, dtype=int) - 1

	cc = np.zeros((k, n)) # cluster centers for the next round

	c_number = np.zeros(k) # number of samples assigned to each cluster center


	for times in range(1000):
		for i in range(m):
			c = nearest(data[i], cluster_center) # distance from point i to every center; returns the index of the nearest one
			cluster[i] = c # point i belongs to cluster center c
			c_number[c] += 1 # one more sample for center c
			cc[c] += data[i]

		for i in range(k):
			cluster_center[i] = cc[i] / c_number[i]

		cc.flat, c_number.flat = 0, 0

	return cluster


def nearest(data, cluster_center):
	nearest_center_index = 0
	distance = float("inf")

	for index, cluster_center_one in enumerate(cluster_center):
		dis = np.sum((data - cluster_center_one) ** 2)

		if dis < distance:
			nearest_center_index = index
			distance = dis

	return nearest_center_index


# Median of two sorted arrays
def topK(A, s1, e1, B, s2, e2, k):
	l1, l2 = e1- s1 + 1, e2 - s2 + 1
	i, j = s1 + min(k//2, l1) - 1, s2 + min(k//2, l2) - 1

	if l1 == 0 and l2 > 0:
		return B[s2 + k - 1]

	if l2 == 0 and l1 > 0:
		return A[s1 + k - 1]

	if k == 1:
		return min(A[s1], B[s2])

	if A[i] > B[j]:
		return topK(A, s1, e1, B, j+1, e2, k-(j-s2+1))
	else:
		return topK(A, i+1, e1, B, s2, e2, k-(i-s1+1))


def findMedianSortedArrays(nums1, nums2):
	if len(nums1) > len(nums2):
		nums1, nums2 = nums2, nums1
	m, n = len(nums1), len(nums2)

	if (m + n) % 2 == 0:
		return (topK(nums1, 0, m-1, nums2, 0, n-1, (m+n)//2) + topK(nums1, 0, m-1, nums2, 0, n-1, (m+n)//2+1))/2.0
	else:
		return topK(nums1, 0, m-1, nums2, 0, n-1, (m+n+1)//2)

# KNN implemented in TensorFlow
import tensorflow as tf
import numpy as np

# Build Graph
tr = tf.placeholder(tf.float32, [None, 784])
te = tf.placeholder(tf.float32, [784])

distance = tf.reduce_sum(tf.square(tf.subtract(te, tr)), axis=1)
pred = tf.nn.top_k(distance, k)

init = tf.global_variables_initializer()

with tf.Session() as sess:
	sess.run(init)

	for i in range(len(testdata)):
		nn_index = sess.run(pred, feed_dict={tr:traindata, te:testdata[i, :]})


# Quicksort on a linked list
def sortList(head):
	if head is None or head.next is None:
		return head

	small, large, cur = ListNode(0), ListNode(0), head.next
	sp, lp = small, large

	while cur:
		if cur.val <= head.val:
			sp.next = cur
			sp = sp.next
		else:
			lp.next = cur
			lp = lp.next
		cur = cur.next

	sp.next, lp.next = None, None

	small, large = sortList(small.next), sortList(large.next)

	sp = small

	if sp:
		while sp.next:
			sp = sp.next
		sp.next = head
		head.next = large
		return small
	else:
		head.next = large
		return head

def maxPooling(nums, k):
	from collections import deque
	q, res = deque(), []

	for i, x in enumerate(nums):
		if q and i - q[0] >= k:
			q.popleft()

		while q and nums[q[-1]] <= x:
			q.pop()

		q.append(i)

		if i >= k - 1:
			res.append(nums[q[0]])
	return res
	
Example n. 22
#########################################################

## TF v2 style



a = tf.constant(1, name='a')
b = tf.constant(2, name='b')
c = tf.constant(3, name='c')

z = 2 * (a - b) + c

tf.print("my result: ", z)


'''

tf.subtract(a, b)
tf.multiply(2, r1)  ## elementwise
tf.add(a, b)

'''
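
# Same expression built from the ops listed above (TF 2.x eager mode assumed);
# z_ops is an illustrative name, not from the original.
r1 = tf.subtract(a, b)     # a - b
r2 = tf.multiply(2, r1)    # elementwise scale by 2
z_ops = tf.add(r2, c)      # equals z above: 2 * (a - b) + c
tf.print("my result: ", z_ops)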

#########################################################

## function decorators
## autograph - transforms python code into tensorflow graph code


@tf.function
def compute_z(a, b, c):