Code example #1
File: utils.py Project: Johannes-brahms/Yolo
def IoU(bbox, gt):

    # bbox = [x, y, w, h], with (x, y) the top-left corner

    shape = [-1, 1]

    x1 = tf.maximum(tf.cast(bbox[0], tf.float32), tf.reshape(tf.cast(gt[:,0], tf.float32), shape))
    y1 = tf.maximum(tf.cast(bbox[1], tf.float32), tf.reshape(tf.cast(gt[:,1], tf.float32), shape))
    x2 = tf.minimum(tf.cast(bbox[2] + bbox[0], tf.float32), tf.reshape(tf.cast(gt[:,2] + gt[:,0], tf.float32), shape))
    y2 = tf.minimum(tf.cast(bbox[3] + bbox[1], tf.float32), tf.reshape(tf.cast(gt[:,3] + gt[:,1], tf.float32), shape))


    # Clamp the intersection edges at zero so disjoint boxes contribute no
    # area (otherwise a negative width times a negative height would yield
    # a spurious positive intersection).
    inter_w = tf.maximum(tf.sub(x2, x1), 0.0)

    inter_h = tf.maximum(tf.sub(y2, y1), 0.0)

    inter = tf.cast(inter_w * inter_h, tf.float32)

    bounding_box = tf.cast(tf.mul(bbox[2],bbox[3]), tf.float32)

    ground_truth = tf.reshape(tf.cast(tf.mul(gt[:,2],gt[:,3]), tf.float32), shape)

    #iou = tf.div(inter,tf.sub(tf.add(bounding_box,tf.reshape(ground_truth,shape)),inter))

    iou = inter / (bounding_box + ground_truth - inter)

    # Zero out any negative IoU values (redundant after the clamp above,
    # but kept from the original as a safety net).

    mask_less = tf.cast(tf.logical_not(tf.less(iou, tf.zeros_like(iou))), tf.float32)
    #mask_great = tf.cast(tf.logical_not(tf.greater(iou, tf.ones_like(iou))), tf.float32)

    iou = tf.mul(iou, mask_less)
    #iou = tf.mul(iou, positive_mask)
    
    return iou
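For reference, a minimal NumPy sketch (not from any of the projects listed here) of the same IoU computation for one [x, y, w, h] box against an (N, 4) array of ground-truth boxes:

import numpy as np

def iou_numpy(bbox, gt):
    # bbox: [x, y, w, h] with (x, y) the top-left corner; gt: (N, 4) array
    x1 = np.maximum(bbox[0], gt[:, 0])
    y1 = np.maximum(bbox[1], gt[:, 1])
    x2 = np.minimum(bbox[0] + bbox[2], gt[:, 0] + gt[:, 2])
    y2 = np.minimum(bbox[1] + bbox[3], gt[:, 1] + gt[:, 3])
    inter = np.maximum(x2 - x1, 0.0) * np.maximum(y2 - y1, 0.0)
    union = bbox[2] * bbox[3] + gt[:, 2] * gt[:, 3] - inter
    return inter / union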
Code example #2
File: regression.py Project: bgshin/cnntweets
    def __init__(self, num_features, num_output, l2_reg_lambda=0.0, neg_output=False):
        self.input_x = tf.placeholder(tf.float32, [None, num_features], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_output], name="input_y")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        with tf.name_scope("softmax"):
            filter_shape = [num_features, num_output]
            W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1))
            b = tf.Variable(tf.constant(0.1, shape=[num_output]))

            self.raw_scores = tf.nn.xw_plus_b(self.input_x, W, b, name="scores")
            if neg_output:
                self.scores = tf.nn.elu(self.raw_scores, name="elu")

            else:
                self.scores = tf.nn.relu(self.raw_scores, name="relu")


            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)

        with tf.name_scope("loss"):
            self.losses = tf.square(tf.sub(self.scores, self.input_y))
            self.avgloss = tf.reduce_mean(tf.abs(tf.sub(self.scores, self.input_y)))
            self.loss = tf.reduce_mean(self.losses) + l2_reg_lambda * l2_loss
Code example #3
File: yolo_utils.py Project: Johannes-brahms/Yolo
def convert_to_one(bbox, width, height, S):

    x, y, w, h = bbox

    x = tf.cast(x, tf.float32)
    y = tf.cast(y, tf.float32)
    w = tf.cast(w, tf.float32)
    h = tf.cast(h, tf.float32)

    # Box center: (2x + w) / 2 = x + w / 2, and likewise for y
    global_center_x = tf.mul(tf.add(tf.mul(x, 2), w), 0.5)
    global_center_y = tf.mul(tf.add(tf.mul(y, 2), h), 0.5)

    w = tf.div(w, width)
    h = tf.div(h, height)

    cell_w = tf.cast(tf.div(tf.cast(width, tf.int32), S), tf.float32)
    cell_h = tf.cast(tf.div(tf.cast(height, tf.int32), S), tf.float32)

    cell_coord_x = tf.cast(tf.cast(tf.div(global_center_x, cell_w), tf.int32), tf.float32)
    cell_coord_y = tf.cast(tf.cast(tf.div(global_center_y, cell_h), tf.int32), tf.float32)

    offset_x = tf.div(tf.sub(global_center_x, tf.mul(cell_coord_x, cell_w)), cell_w)
    offset_y = tf.div(tf.sub(global_center_y, tf.mul(cell_coord_y, cell_h)), cell_h)

    assert offset_x.dtype == tf.float32 and \
            offset_y.dtype == tf.float32 and \
            w.dtype == tf.float32 and \
            h.dtype == tf.float32

    bbox = [offset_x, offset_y, w, h]

    return bbox
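As an illustration only (the helper name is hypothetical), the same encoding in plain NumPy: the box center is assigned to a grid cell and re-expressed as an offset within that cell, with width and height normalized by the image size. It assumes width and height divide evenly by S, as the integer division in the original implies:

import numpy as np

def convert_to_one_np(bbox, width, height, S):
    x, y, w, h = [float(v) for v in bbox]
    center_x = x + w / 2.0
    center_y = y + h / 2.0
    cell_w, cell_h = width / float(S), height / float(S)
    offset_x = (center_x % cell_w) / cell_w  # position inside the cell, in [0, 1)
    offset_y = (center_y % cell_h) / cell_h
    return [offset_x, offset_y, w / width, h / height]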
Code example #4
File: error_bars_loss.py Project: lishali/clusternet
def r_loss(communities = 2, group_size = 10, seed=None, p=0.4, q=0.05, r=1.0, projection_dim=2):
    """testing to see if the loss will decrease backproping through very simple function"""
    B = np.asarray(balanced_stochastic_blockmodel(communities, group_size, p, q, seed)).astype(np.double)
    B = tf.cast(B, tf.float64)
    Diag = tf.diag(tf.reduce_sum(B,0))
    Diag = tf.cast(Diag, tf.float64)

    #r_grid = tf.linspace(r_min, r_max, grid_size)
    r = tf.cast(r, tf.float64)
    
    BH = (tf.square(r)-1)*tf.diag(tf.ones(shape=[communities*group_size], dtype=tf.float64))-tf.mul(r, B)+Diag 
    
    with tf.Session() as sess:
        eigenval, eigenvec = tf.self_adjoint_eig(BH)
        eigenvec_proj = tf.slice(eigenvec, [0,0], [communities*group_size, projection_dim])
                
        true_assignment_a = tf.concat(0, [-1*tf.ones([group_size], dtype=tf.float64),
                                      tf.ones([group_size], dtype=tf.float64)])
        true_assignment_b = -1*true_assignment_a
        true_assignment_a = tf.expand_dims(true_assignment_a, 1)
        true_assignment_b = tf.expand_dims(true_assignment_b, 1)

            
        projected_a = tf.matmul(tf.matmul(eigenvec_proj, tf.transpose(eigenvec_proj)), true_assignment_a)#tf.transpose(true_assignment_a))
        projected_b = tf.matmul(tf.matmul(eigenvec_proj, tf.transpose(eigenvec_proj)), true_assignment_b)#tf.transpose(true_assignment_b))
            
        loss = tf.minimum(tf.reduce_sum(tf.square(tf.sub(projected_a, true_assignment_a))),
                              tf.reduce_sum(tf.square(tf.sub(projected_b, true_assignment_b))))
            

        d = sess.run(loss)
    return d
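The matrix constructed above is the Bethe Hessian BH(r) = (r^2 - 1) I - r B + D, where D is the degree matrix of B. A small NumPy sketch of the same construction:

import numpy as np

def bethe_hessian(B, r):
    # B: (n, n) adjacency matrix; D: diagonal matrix of its column sums
    D = np.diag(B.sum(axis=0))
    n = B.shape[0]
    return (r ** 2 - 1.0) * np.eye(n) - r * B + D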
Code example #5
    def _build_loss(self):

        with tf.variable_scope("loss"):

            # Compute targets y_j = r_j + (1 - terminal_j) * discount * best_qvalue
            self.tf_discount = tf.constant(self.discount)
            self.qtarget = tf.add(self.pl_rewards, tf.mul(1.0-self.pl_terminals, tf.mul(self.tf_discount, self.pl_qtargets)))

            # Select Q-values for given actions
            self.actions_one_hot = tf.one_hot(self.pl_actions, self.num_actions, 1.0, 0.0)
            self.qvalue_pred = tf.reduce_sum(tf.mul(self.qvalues, self.actions_one_hot), reduction_indices=1)

            # Difference between target and predicted Q-network output
            self.delta = tf.sub(self.qtarget, self.qvalue_pred)

            if self.clip_delta > 0:
                # Perform clipping of the error term, default clipping is to (-1, +1) range
                self.quadratic_part = tf.minimum(tf.abs(self.delta), tf.constant(self.clip_delta))
                self.linear_part    = tf.sub(tf.abs(self.delta), self.quadratic_part)
                self.delta_square   = tf.mul(tf.constant(0.5), tf.square(self.quadratic_part)) + (self.clip_delta*self.linear_part)
                #self.delta_clipped = tf.clip_by_value(self.delta, -1.0*self.clip_delta, self.clip_delta)
                #self.delta_square  = tf.square(self.delta_clipped)
            else:
                # No error clipping
                self.delta_square  = tf.square(self.delta)

        # Actual loss
        if self.batch_accumulator == "sum":
           self.loss = tf.reduce_sum(self.delta_square)
        else:
           self.loss = tf.reduce_mean(self.delta_square)

        # Running average of the loss for TensorBoard
        self.loss_moving_avg    = tf.train.ExponentialMovingAverage(decay=0.999)
        self.loss_moving_avg_op = self.loss_moving_avg.apply([self.loss])
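The clipping branch above implements the Huber-style error common in DQN implementations: quadratic inside the clip range, linear outside. A NumPy sketch of the same arithmetic (function name is illustrative):

import numpy as np

def clipped_error(delta, clip=1.0):
    quadratic = np.minimum(np.abs(delta), clip)  # |delta| capped at clip
    linear = np.abs(delta) - quadratic           # overshoot beyond the clip range
    return 0.5 * quadratic ** 2 + clip * linear

# e.g. clipped_error(np.array([0.5, -3.0])) -> [0.125, 2.5]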
Code example #6
def lossFunction(logits, labels, scale_factor):
    print "TrackNet:  building loss function..."
    logit_trans, logit_rot = tf.split(1,2,logits)
    label_trans, label_rot = tf.split(1,2,labels)
    trans_loss = tf.nn.l2_loss(tf.sub(logit_trans, label_trans))
    rot_loss = tf.mul(scale_factor, tf.nn.l2_loss(tf.sub(logit_rot, label_rot)))
    return tf.add(trans_loss,rot_loss)
Code example #7
File: knn.py Project: AidanGG/tensorflow_tmva
def metric_single(training, test, scale_frac, scales):
    """Calculates the distance between a training and test instance."""
    if scale_frac == 0:
        distance = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(training, test)), reduction_indices=1, keep_dims=True))
    else:
        distance = tf.sqrt(
            tf.reduce_sum(tf.square(tf.div(tf.sub(training, test), scales)), reduction_indices=1, keep_dims=True)
        )
    return distance
Code example #8
    def __init__(self, inputX, C=None, hidden_dims=[300,150,300], lambda1=0.01, lambda2=0.01, activation='tanh', \
                weight_init='uniform', noise=None, learning_rate=0.1, optimizer='Adam'):

        self.noise = noise
        n_sample, n_feat = inputX.shape

        # len(hidden_dims) must be odd so a single middle layer exists
        # (after the reconstruction layer is appended below, the total
        # number of layers M is even)
        assert len(hidden_dims) % 2 == 1

        # Copy before appending so the shared default argument is not
        # mutated, then add the reconstruction layer
        hidden_dims = hidden_dims + [n_feat]

        # self.depth = len(dims)

        # This is the actual data matrix, not a TensorFlow symbolic variable
        self.inputX = inputX

        if C is None:
            # Transpose the matrix first, and get the whole matrix of C
            self.inputC = sparseCoefRecovery(inputX.T)
        else:
            self.inputC = C

        self.C = tf.placeholder(dtype=tf.float32, shape=[None, None], name='C')

        self.hidden_layers = []
        self.X = self._add_noise(tf.placeholder(dtype=tf.float32, shape=[None, n_feat], name='X'))

        input_hidden = self.X
        weights, biases = init_layer_weight(hidden_dims, inputX, weight_init)

        # J3 regularization term
        J3_list = []
        for init_w, init_b in zip(weights, biases):
            self.hidden_layers.append(DenseLayer(input_hidden, init_w, init_b, activation=activation))
            input_hidden = self.hidden_layers[-1].output
            J3_list.append(tf.reduce_mean(tf.square(self.hidden_layers[-1].w)))
            J3_list.append(tf.reduce_mean(tf.square(self.hidden_layers[-1].b)))

        J3 = lambda2 * tf.add_n(J3_list)

        self.H_M = self.hidden_layers[-1].output
        # H(M/2) the output of the mid layer
        self.H_M_2 = self.hidden_layers[(len(hidden_dims)-1)/2].output

        # calculate loss J1
        # J1 = tf.nn.l2_loss(tf.sub(self.X, self.H_M))

        J1 = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(self.X, self.H_M))))

        # calculate loss J2
        J2 = lambda1 * tf.sqrt(tf.reduce_mean(tf.square(tf.sub(tf.transpose(self.H_M_2), \
                                     tf.matmul(tf.transpose(self.H_M_2), self.C)))))

        self.cost = J1 + J2 + J3

        self.optimizer = optimize(self.cost, learning_rate, optimizer)
Code example #9
def comU(a, b, tag = 2):

    fea = []
    fea.append(cosine_distance(a, b))
    #fea.append(tf.sqrt(tf.reduce_sum(tf.square(tf.sub(a,b)), axis=1)))
    fea.append(tf.sqrt(tf.reduce_sum(tf.square(tf.sub(a,b)), axis=1)))
    if tag == 2:
        fea.append(tf.reduce_max(tf.abs(tf.sub(a, b)), axis=1))
    #print 'fea=', fea
    return tf.pack(fea, axis=1)
Code example #10
def binary_cross_entropy(prediction, target):
    """
    let o=prediction, t=target
    -(t*log(o) + (1-t)*log(1-o))
    
    Adds a small (1e-12) value to the logarithms to avoid log(0)
    """
    op1 = tf.mul(target, tf.log(prediction + 1e-12))
    op2 = tf.mul(tf.sub(1., target), tf.log(tf.sub(1., prediction) + 1e-12))
    return tf.neg(tf.add(op1, op2))
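A NumPy sketch of the same formula, handy for spot-checking values (the epsilon guards log(0), as in the docstring):

import numpy as np

def binary_cross_entropy_np(prediction, target, eps=1e-12):
    return -(target * np.log(prediction + eps)
             + (1.0 - target) * np.log(1.0 - prediction + eps))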
Code example #11
def norm(name, input_layer):
    """
    Batch-normalizes the layer as in http://arxiv.org/abs/1502.03167
    This is important since it allows the different scales to talk to each other when they get joined.
    """
    mean, variance = tf.nn.moments(input_layer, [0, 1, 2])
    variance_epsilon = 0.01  # TODO: Check what this value should be
    inv = tf.rsqrt(variance + variance_epsilon)
    scale = tf.Variable(tf.random_uniform([1]), name="scale")  # TODO: How should these initialize?
    offset = tf.Variable(tf.random_uniform([1]), name="offset")
    return tf.sub(tf.mul(tf.mul(scale, inv), tf.sub(input_layer, mean)), offset, name=name)
Code example #12
def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
  # Inspired by Hardmaru's implementation on GitHub
  norm1 = tf.sub(x1, mu1)
  norm2 = tf.sub(x2, mu2)
  s1s2 = tf.mul(s1, s2)
  z = tf.square(tf.div(norm1, s1))+tf.square(tf.div(norm2, s2))-2*tf.div(tf.mul(rho, tf.mul(norm1, norm2)), s1s2)
  negRho = 1-tf.square(rho)
  result = tf.exp(tf.div(-z,2*negRho))
  denom = 2*np.pi*tf.mul(s1s2, tf.sqrt(negRho))
  result = tf.div(result, denom)
  return result
Code example #13
 def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
   # eq # 24 and 25 of http://arxiv.org/abs/1308.0850
   norm1 = tf.sub(x1, mu1)
   norm2 = tf.sub(x2, mu2)
   s1s2 = tf.mul(s1, s2)
   z = tf.square(tf.div(norm1, s1))+tf.square(tf.div(norm2, s2))-2*tf.div(tf.mul(rho, tf.mul(norm1, norm2)), s1s2)
   negRho = 1-tf.square(rho)
   result = tf.exp(tf.div(-z,2*negRho))
   denom = 2*np.pi*tf.mul(s1s2, tf.sqrt(negRho))
   result = tf.div(result, denom)
   return result
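Examples #12 and #13 implement the same density, so a plain NumPy version (illustrative only) can serve as a numerical cross-check:

import numpy as np

def np_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
    # Bivariate normal density, eq. 24-25 of http://arxiv.org/abs/1308.0850
    norm1, norm2 = x1 - mu1, x2 - mu2
    z = (norm1 / s1) ** 2 + (norm2 / s2) ** 2 \
        - 2.0 * rho * norm1 * norm2 / (s1 * s2)
    neg_rho = 1.0 - rho ** 2
    return np.exp(-z / (2.0 * neg_rho)) / (2.0 * np.pi * s1 * s2 * np.sqrt(neg_rho))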
Code example #14
	def mem_body(self, step, story_len, facts, q_double, mem_state_double):
		print ("!!!!!!!!!!!!!!!!!!!!!")
		z = tf.concat(1, [tf.mul(tf.gather(facts, step), q_double), tf.mul(tf.gather(facts, step), mem_state_double), 
			tf.abs(tf.sub(tf.gather(facts, step), q_double)), tf.abs(tf.sub(tf.gather(facts, step), mem_state_double))])
		# record Z (all episodic memory states)
		def f1(): return seq2seq.feedforward_nn(z, self.attention_ff_size, self.attention_ff_l1_size, self.attention_ff_l2_size)
		def f2(): return tf.concat(0, [tf.reshape(tf.to_float(self.episodic_array),[-1]), tf.reshape(seq2seq.feedforward_nn(z, self.attention_ff_size, self.attention_ff_l1_size, self.attention_ff_l2_size),[-1])])
		
		self.episodic_array = tf.cond(tf.less(step,1), f1, f2)
		print (self.episodic_array)
		print ('=-=-=-=-=', tf.to_float(self.episodic_array), seq2seq.feedforward_nn(z, self.attention_ff_size, self.attention_ff_l1_size, self.attention_ff_l2_size))
		step =tf.add(step, 1)
		return step, story_len, facts, q_double, mem_state_double
Code example #15
 def loss_with_step(self):
     margin = 5.0
     labels_t = self.y_
     labels_f = tf.sub(1.0, self.y_, name="1-yi")          # labels_ = !labels;
     eucd2 = tf.pow(tf.sub(self.o1, self.o2), 2)
     eucd2 = tf.reduce_sum(eucd2, 1)
     eucd = tf.sqrt(eucd2+1e-6, name="eucd")
     C = tf.constant(margin, name="C")
     pos = tf.mul(labels_t, eucd, name="y_x_eucd")
     neg = tf.mul(labels_f, tf.maximum(0.0, tf.sub(C, eucd)), name="Ny_C-eucd")
     losses = tf.add(pos, neg, name="losses")
     loss = tf.reduce_mean(losses, name="loss")
     return loss
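A NumPy sketch (illustrative) of the same contrastive loss; under this code's convention y = 1 marks similar pairs and y = 0 dissimilar ones:

import numpy as np

def contrastive_loss_np(o1, o2, y, margin=5.0):
    eucd = np.sqrt(np.sum((o1 - o2) ** 2, axis=1) + 1e-6)
    pos = y * eucd                                    # similar pairs: pull together
    neg = (1.0 - y) * np.maximum(0.0, margin - eucd)  # dissimilar: push past the margin
    return np.mean(pos + neg)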
Code example #16
def spatial_batch_norm(input_layer, name='spatial_batch_norm'):
    """
    Batch-normalizes the layer as in http://arxiv.org/abs/1502.03167
    This is important since it allows the different scales to talk to each other when they get joined.
    """
    mean, variance = tf.nn.moments(input_layer, [0, 1, 2])
    variance_epsilon = 0.01  # TODO: Check what this value should be
    inv = tf.rsqrt(variance + variance_epsilon)
    num_channels = input_layer.get_shape().as_list()[3]  # TODO: Clean this up
    scale = tf.Variable(tf.random_uniform([num_channels]), name='scale')  # TODO: How should these initialize?
    offset = tf.Variable(tf.random_uniform([num_channels]), name='offset')
    return_val = tf.sub(tf.mul(tf.mul(scale, inv), tf.sub(input_layer, mean)), offset, name=name)
    return return_val
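Stripped of the TensorFlow plumbing, the normalization is (x - mean) / sqrt(var + eps), scaled and, in this code, shifted by subtracting the offset. A NumPy sketch over an NHWC batch:

import numpy as np

def batch_norm_np(x, scale, offset, eps=0.01):
    # x: (N, H, W, C); moments taken over N, H, W as in the code above
    mean = x.mean(axis=(0, 1, 2))
    var = x.var(axis=(0, 1, 2))
    return scale * (x - mean) / np.sqrt(var + eps) - offset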
Code example #17
    def getNeighborWeights(self, transformedCoordinates, clampedCoordinatesList):
        flooredCoordinates = tf.slice(clampedCoordinatesList[0], [0, 1], [tf.shape(clampedCoordinatesList[0])[0], 3])

        if self.isVerbose:
            transformedCoordinates = tf.Print(transformedCoordinates, [transformedCoordinates], summarize=1000)
            flooredCoordinates     = tf.Print(flooredCoordinates, [flooredCoordinates], summarize=1000)
        
        deltas = tf.sub(transformedCoordinates, flooredCoordinates)
        
        if self.isVerbose:
            deltas = tf.Print(deltas, [deltas], summarize=1000)

        deltaW = self.sliceIndex(deltas, 2)
        deltaH = self.sliceIndex(deltas, 1)
        deltaC = self.sliceIndex(deltas, 0)

        if self.isVerbose:
            deltaW = tf.Print(deltaW, [deltaW], summarize=1000)
            deltaH = tf.Print(deltaH, [deltaH], summarize=1000)
            deltaC = tf.Print(deltaC, [deltaC], summarize=1000)

        # declared once so the weight expressions below read concisely
        ConstantOne = tf.constant([1], dtype=tf.float32)

        W_lll = tf.mul(tf.mul(tf.sub(ConstantOne, deltaW) , tf.sub(ConstantOne, deltaH)) , tf.sub(ConstantOne, deltaC))
        W_llu = tf.mul(tf.mul(tf.sub(ConstantOne, deltaW) , tf.sub(ConstantOne, deltaH)) , deltaC                     )
        W_lul = tf.mul(tf.mul(tf.sub(ConstantOne, deltaW) , deltaH                     ) , tf.sub(ConstantOne, deltaC))
        W_luu = tf.mul(tf.mul(tf.sub(ConstantOne, deltaW) , deltaH                     ) , deltaC                     )
        W_ull = tf.mul(tf.mul(deltaW                      , tf.sub(ConstantOne, deltaH)) , tf.sub(ConstantOne, deltaC))
        W_ulu = tf.mul(tf.mul(deltaW                      , tf.sub(ConstantOne, deltaH)) , deltaC                     )
        W_uul = tf.mul(tf.mul(deltaW                      , deltaH                     ) , tf.sub(ConstantOne, deltaC))
        W_uuu = tf.mul(tf.mul(deltaW                      , deltaH                     ) , deltaC                     )

        if self.isVerbose:
            W_lll = tf.Print(W_lll, [W_lll], summarize=1000)
            W_llu = tf.Print(W_llu, [W_llu], summarize=1000)
            W_lul = tf.Print(W_lul, [W_lul], summarize=1000)
            W_luu = tf.Print(W_luu, [W_luu], summarize=1000)
            W_ull = tf.Print(W_ull, [W_ull], summarize=1000)
            W_ulu = tf.Print(W_ulu, [W_ulu], summarize=1000)
            W_uul = tf.Print(W_uul, [W_uul], summarize=1000)
            W_uuu = tf.Print(W_uuu, [W_uuu], summarize=1000)

        weightList = []

        weightList.append(W_lll) 
        weightList.append(W_llu) 
        weightList.append(W_lul) 
        weightList.append(W_luu) 
        weightList.append(W_ull) 
        weightList.append(W_ulu) 
        weightList.append(W_uul) 
        weightList.append(W_uuu) 
       

        return weightList
Code example #18
File: yolo_utils.py Project: Johannes-brahms/Yolo
def convert_to_reality(bbox, width, height, S):

    relative_center_x, relative_center_y, global_w, global_h = bbox

    w = tf.cast(tf.cast(tf.mul(global_w, width), tf.int32), tf.float32)
    h = tf.cast(tf.cast(tf.mul(global_h, height), tf.int32), tf.float32)

    index = tf.reshape(tf.range(S * S),[-1,1])

    cell_coord_y = tf.cast(tf.div(index, S), tf.float32)
    cell_coord_x = tf.cast(tf.mod(index, S), tf.float32)


    S = tf.cast(S, tf.float32)

    width = tf.cast(width, tf.float32)
    height = tf.cast(height, tf.float32)


    cell_w = tf.cast(width / S, tf.float32)
    cell_h = tf.cast(height / S, tf.float32)

    
    #real_x_left_up = tf.reshape((cell_coord_x + relative_center_x) * cell_w - w / 2,[-1])

    #real_y_left_up = tf.reshape((cell_coord_y + relative_center_y) * cell_h - h / 2, [-1])

    real_x_left_up = tf.sub(tf.add(tf.reshape(tf.mul(cell_coord_x, cell_w), [-1]), relative_center_x * cell_w), tf.cast(w * 0.5, tf.float32))
    real_y_left_up = tf.sub(tf.add(tf.reshape(tf.mul(cell_coord_y, cell_h), [-1]), relative_center_y * cell_h), tf.cast(h * 0.5, tf.float32))


    real_x_left_up = tf.cast(tf.nn.relu(real_x_left_up), tf.int32)
    real_y_left_up = tf.cast(tf.nn.relu(real_y_left_up), tf.int32)
    w = tf.cast(w, tf.int32)
    h = tf.cast(h, tf.int32)

    print 'real x ', relative_center_x.get_shape()
    print 'real w' , w.get_shape()



    """
    assert real_x_left_up.dtype == tf.int32 and \
           real_y_left_up.dtype == tf.int32 and \
            w.dtype == tf.int32 and \
            h.dtype == tf.int32
    """
    bbox = [real_x_left_up, real_y_left_up, w, h]

    return bbox
Code example #19
File: test_K-means.py Project: forfish/cestlavie
def kMeansCluster(vector_values, num_clusters, max_num_steps, stop_coeficient = 0.0):
  vectors = tf.constant(vector_values)
  centroids = tf.Variable(tf.slice(tf.random_shuffle(vectors),
                                   [0,0],[num_clusters,-1]))
  old_centroids = tf.Variable(tf.zeros([num_clusters,2]))
  centroid_distance = tf.Variable(tf.zeros([num_clusters,2]))

  expanded_vectors = tf.expand_dims(vectors, 0)
  expanded_centroids = tf.expand_dims(centroids, 1)

  print expanded_vectors.get_shape()
  print expanded_centroids.get_shape()

  distances = tf.reduce_sum(
    tf.square(tf.sub(expanded_vectors, expanded_centroids)), 2)
  assignments = tf.argmin(distances, 0)

  means = tf.concat(0, [
    tf.reduce_mean(
        tf.gather(vectors,
                  tf.reshape(
                    tf.where(
                      tf.equal(assignments, c)
                    ),[1,-1])
                 ),reduction_indices=[1])
    for c in xrange(num_clusters)])

  save_old_centroids = tf.assign(old_centroids, centroids)

  update_centroids = tf.assign(centroids, means)
  init_op = tf.initialize_all_variables()

  performance = tf.assign(centroid_distance, tf.sub(centroids, old_centroids))
  check_stop = tf.reduce_sum(tf.abs(performance))

  with tf.Session() as sess:
    sess.run(init_op)
    for step in xrange(max_num_steps):
      print "Running step " + str(step)
      sess.run(save_old_centroids)
      _, centroid_values, assignment_values = sess.run([update_centroids,
                                                        centroids,
                                                        assignments])
      sess.run(check_stop)
      current_stop_coeficient = check_stop.eval()
      print "coeficient:", current_stop_coeficient
      if current_stop_coeficient <= stop_coeficient:
        break

    return centroid_values, assignment_values
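A hedged usage sketch (assuming the function is in scope; it prints tensor shapes and the per-step stop coefficient as it runs):

import numpy as np

points = np.random.randn(200, 2).astype(np.float32)
centroids, assignments = kMeansCluster(points, num_clusters=3, max_num_steps=50)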
Code example #20
File: bondvolution.py Project: rbharath/deepchem
def mol_conv_layer(atoms, cH_params, aux_params, layer):
    #Sum all neighbors using adjacency matrix
    atom_sum_neigh = sum_neigh(atoms, aux_params, layer)

    # Partition the atom matrix by degree of atoms
    # THIS CREATES PROBLEMS WITH GRADIENTS. NEED TO USE SLICING
    indices = tf.sub(deg_list_ph, tf.constant(1,dtype=tf.int32))
    
    atom_partitions = tf.dynamic_partition(atom_sum_neigh, indices, max_deg)

    # Get collection of modified atom features
    new_rel_atoms_collection = []
    for deg in range(1,6):
        # Obtain relevant atoms for this degree
        rel_atoms = atom_partitions[deg-1]

        # Apply hidden affine to relevant atoms and append
        if bool_separate_conv_depths:
            out = affine(rel_atoms, cH_params['W'+str(deg)+'_'+str(layer)], cH_params['b'+str(deg)+'_'+str(layer)])
        else:
            out = affine(rel_atoms, cH_params['W'+str(deg)], cH_params['b'+str(deg)])
        new_rel_atoms_collection.append(out)

    # Combine all atoms back into the list
    # NOTE: FOR NOW USE CONCATENATION. MEANS WE CANNOT USE ARBITARY deg_list ORDER
    hidden_atoms = tf.concat(0, new_rel_atoms_collection)

    # Apply relu
    activated_atoms = tf.nn.relu(hidden_atoms)

    return activated_atoms
Code example #21
 def testCond_2(self):
   with self.test_session():
     x = tf.constant(10)
     r = tf.cond(tf.less(1, 0), lambda: tf.add(x, 1), lambda: tf.sub(x, 1))
     result = r.eval()
   self.assertTrue(check_op_order(x.graph))
   self.assertAllEqual(9, result)
Code example #22
File: image_processing.py Project: mclumd/baxNet
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
  """Decode and preprocess one image for evaluation or training.

  Args:
    image_buffer: JPEG encoded string Tensor
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax].
    train: boolean
    thread_id: integer indicating preprocessing thread

  Returns:
    3-D float Tensor containing an appropriately scaled image

  Raises:
    ValueError: if user does not provide bounding box
  """
  if bbox is None:
    raise ValueError('Please supply a bounding box.')

  image = decode_jpeg(image_buffer)
  height = FLAGS.image_size
  width = FLAGS.image_size

  if train:
    image = distort_image(image, height, width, bbox, thread_id)
  else:
    image = eval_image(image, height, width)

  # Finally, rescale to [-1,1] instead of [0, 1)
  image = tf.sub(image, 0.5)
  image = tf.mul(image, 2.0)
  return image
Code example #23
    def get_mixture_coef(output):
      # returns the tf slices containing mdn dist params
      # ie, eq 18 -> 23 of http://arxiv.org/abs/1308.0850
      z = output
      z_eos = z[:, 0:1]
      z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr = tf.split(1, 6, z[:, 1:])

      # process output z's into MDN paramters

      # end of stroke signal
      z_eos = tf.sigmoid(z_eos) # should be negated, but doesn't matter.

      # softmax all the pi's:
      max_pi = tf.reduce_max(z_pi, 1, keep_dims=True)
      z_pi = tf.sub(z_pi, max_pi)
      z_pi = tf.exp(z_pi)
      normalize_pi = tf.inv(tf.reduce_sum(z_pi, 1, keep_dims=True))
      z_pi = tf.mul(normalize_pi, z_pi)

      # exponentiate the sigmas and also make corr between -1 and 1.
      z_sigma1 = tf.exp(z_sigma1)
      z_sigma2 = tf.exp(z_sigma2)
      z_corr = tf.tanh(z_corr)

      return [z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr, z_eos]
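The pi-processing above is a numerically stable softmax: subtract the per-row maximum before exponentiating, then normalize. The same step in NumPy:

import numpy as np

def stable_softmax(z_pi):
    z = z_pi - z_pi.max(axis=1, keepdims=True)  # guard against overflow in exp
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)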
Code example #24
File: bondvolution.py Project: rbharath/deepchem
def bond_conv_layer(activated_atoms, bv_params, layer):
    flow_depth = flow_layer_depths[layer]
    
    next_activated_atoms = tf.zeros(tf.pack([N_atoms_ph, flow_depth]))

    for deg in range(1, 6):
        indices = tf.sub(deg_list_ph, tf.constant(1,dtype=tf.int32))
        flow_param = bv_params['A_flow'+str(layer)+'_'+str(deg)]
        flow_map = tf.gather(flow_param, type_adj_ph)

        multiples = tf.pack([N_atoms_ph, 1, 1])
        activated_atoms_dim = tf.expand_dims(tf.tile(tf.expand_dims(activated_atoms, 0), multiples), 2)

        adj_mul = tf.batch_matmul(activated_atoms_dim, flow_map)
        adj_mul = tf.squeeze(adj_mul, [2])

        deg_mask = tf.to_float(tf.equal(deg_list_ph, deg))

        multiples = tf.pack([1, N_atoms_ph, flow_depth])
        deg_list_dim = tf.tile(tf.expand_dims(tf.expand_dims(deg_mask, 1), 1), multiples)

        multiples = tf.pack([N_atoms_ph, N_atoms_ph, 1])
        biases = tf.tile(bv_params['b_flow'+str(layer)+'_'+str(deg)], multiples)
        filtered_atoms = tf.add(tf.mul(adj_mul, deg_list_dim), biases)

        next_activated_atoms = next_activated_atoms + tf.reduce_sum(filtered_atoms, 1)
        
    next_activated_atoms = tf.nn.relu(next_activated_atoms)
    return next_activated_atoms
Code example #25
File: inception_export.py Project: damienmg/serving
def preprocess_image(image_buffer):
  """Preprocess JPEG encoded bytes to 3D float Tensor."""

  # Decode the string as an RGB JPEG.
  # Note that the resulting image contains an unknown height and width
  # that is set dynamically by decode_jpeg. In other words, the height
  # and width of image is unknown at compile-time.
  image = tf.image.decode_jpeg(image_buffer, channels=3)
  # After this point, all image pixels reside in [0,1)
  # until the very end, when they're rescaled to (-1, 1).  The various
  # adjust_* ops all require this range for dtype float.
  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  # Crop the central region of the image with an area containing 87.5% of
  # the original image.
  image = tf.image.central_crop(image, central_fraction=0.875)
  # Resize the image to the original height and width.
  image = tf.expand_dims(image, 0)
  image = tf.image.resize_bilinear(image,
                                   [FLAGS.image_size, FLAGS.image_size],
                                   align_corners=False)
  image = tf.squeeze(image, [0])
  # Finally, rescale to [-1,1] instead of [0, 1)
  image = tf.sub(image, 0.5)
  image = tf.mul(image, 2.0)
  return image
Code example #26
def w(input_data, cu, kappas_t_1, config):
	
	batch_size = config.batch_size
	mixture_size = config.mixture_size
	vocab_length = config.vocab_length

	# split along dim of mixture size * 3
	hat_alphas_t, hat_betas_t, hat_kappas_t = tf.split(1, 3, input_data)

	alphas_t = tf.exp(hat_alphas_t)
	betas_t = tf.exp(hat_betas_t)
	kappas_t = tf.add(kappas_t_1, tf.exp(hat_kappas_t))

	speech_length = tf.shape(cu)[1]

	u = tf.linspace(1.0, tf.cast(speech_length,tf.float32) , speech_length)
	u = tf.expand_dims(u, 0)
	u = tf.expand_dims(u, 0)
	u = tf.tile(u, [batch_size, mixture_size, 1])

	alphas_t_expanded = tf.tile(tf.expand_dims(alphas_t, -1), [1, 1, speech_length])
	betas_t_expanded = tf.tile(tf.expand_dims(betas_t, -1), [1, 1, speech_length])
	kappas_t_expanded = tf.tile(tf.expand_dims(kappas_t, -1), [1, 1, speech_length])

	calc = tf.square(tf.sub(kappas_t_expanded, u))
	calc = tf.mul(calc, tf.neg(betas_t_expanded))
	calc = tf.exp(calc)
	calc = tf.mul(calc, alphas_t_expanded)

	phi_t = tf.expand_dims(tf.reduce_sum(calc, 1), 1)

	output = tf.squeeze(tf.batch_matmul(phi_t, cu), [1])

	return output, kappas_t, phi_t
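The window weights follow the mixture-of-Gaussians attention of Graves' handwriting-synthesis paper: phi(u) = sum_k alpha_k * exp(-beta_k * (kappa_k - u)^2). A NumPy sketch for a single batch element (the shapes are assumptions):

import numpy as np

def window_phi(alphas, betas, kappas, U):
    # alphas, betas, kappas: (K,) mixture parameters; U: sequence length
    u = np.arange(1, U + 1, dtype=np.float32)  # positions 1..U, as tf.linspace above
    return np.sum(alphas[:, None]
                  * np.exp(-betas[:, None] * (kappas[:, None] - u[None, :]) ** 2),
                  axis=0)  # shape (U,)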
Code example #27
File: fast0tag.py Project: agude/attalos
        def fztloss( f, pVecs, nVecs ):
            """
            Tensorized cost function from Fast Zero-Shot Learning paper

            Args:
                f: The output from the network, a tensor of shape (# images, word embedding size)
                pVecs: The vector embeddings of the ground truth tags, a tensor
                    of shape (# images, # positive tags, word embedding size)
                nVecs: The vector embeddings of negatively sampled tags, a tensor
                    of shape (# images, # negative samples, word embedding size)

            Returns:
                Scalar tensor representing the batch cost
            """
            posmul = tf.mul(pVecs, f)
            negmul = tf.mul(nVecs, f)

            tfpos = tf.reduce_sum(posmul, reduction_indices=2)
            tfneg = tf.reduce_sum(negmul, reduction_indices=2)

            tfpos = tf.transpose(tfpos, [1,0])
            tfneg = tf.transpose(tfneg, [1,0])

            negexpan = tf.tile( tf.expand_dims(tfneg, -1), [1, 1, tf.shape(tfpos)[1]] )
            posexpan = tf.tile( tf.transpose(tf.expand_dims(tfpos, -1), [0,2,1]), [1, tf.shape(tfneg)[1], 1])
            differences = tf.sub(negexpan, posexpan)  

            return tf.reduce_sum(tf.reduce_sum(tf.log(1 + tf.exp(differences)), reduction_indices=[1,2]))
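The final reduction is a softplus rank loss over every (negative, positive) score pair, sum of log(1 + exp(s_neg - s_pos)). In NumPy for a single image (names illustrative):

import numpy as np

def fztloss_np(pos_scores, neg_scores):
    # pos_scores: (P,) dot products with positive tags; neg_scores: (N,)
    diffs = neg_scores[:, None] - pos_scores[None, :]  # (N, P) pairwise differences
    return np.sum(np.log1p(np.exp(diffs)))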
Code example #28
def distance_model(batch_x1, batch_x2):
    with tf.variable_scope("siamese") as scope:
        model1 = build_model(batch_x1)
        scope.reuse_variables()
        model2 = build_model(batch_x2)
    distance = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(model1, model2), 2), 1, keep_dims=True))
    return model1, model2, distance
Code example #29
File: seq2seq_model.py Project: PhiphyZhou/protein
def square_loss(outputs, targets):
    '''
    Loss function - square loss (rmsd^2)

    Args: outputs, targets
        The shape of both outputs and targets is a list of tensors: 
        [sequence_len, tensor(batch_size,feature_size)]

    Returns: 
        batch_loss: 1D tensor with the size of batch_size. 
                    Each element is the loss value of that batch

    '''
    if len(outputs) != len(targets):
        raise ValueError("Outputs length must be equal to the targets length,"
                                     " %d != %d." % (len(outputs), len(targets))) 
    with tf.device("/cpu:0"):
        frame_loss = [] # list of batch losses of single frames
        for i in xrange(len(outputs)):
            a = tf.sub(outputs[i],targets[i])
            b = tf.square(a)
            c = tf.reduce_sum(b,1)
            n = tf.constant(a.get_shape().dims[1].value/3,dtype=tf.float32) # number of atoms
            frame_loss.append(tf.div(c,n)) 
        frame_loss = tf.pack(frame_loss)
        # average over the whole sequence to get the batch losses
        batch_loss = tf.reduce_mean(frame_loss,0)
        return batch_loss
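Per frame, the quantity is the squared RMSD: the summed squared coordinate error divided by the number of atoms (three coordinates per atom). A NumPy sketch for a single frame:

import numpy as np

def frame_loss_np(outputs, targets):
    # outputs, targets: (batch_size, feature_size) with feature_size = 3 * n_atoms
    diff2 = np.sum((outputs - targets) ** 2, axis=1)
    n_atoms = outputs.shape[1] / 3.0
    return diff2 / n_atoms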
Code example #30
File: helper.py Project: wbaek/tensorflow-tutorials
def _multichannel_image_summary(name, images, perm=[0, 3, 1, 2], max_summary_images=16):
    _min = tf.reduce_min(images)
    _max = tf.reduce_max(images)
    _ = tf.mul(tf.div(tf.sub(images, _min), tf.sub(_max, _min)), 255.0)  # min-max scale to [0, 255]
    _ = tf.transpose(_, perm=perm)
    shape = _.get_shape().as_list()
    tf.image_summary(name, tf.reshape(tf.transpose(_, perm=perm), [reduce(lambda x,y:x*y, shape)/(shape[3]*shape[2]), shape[2], shape[3], 1]), max_images=max_summary_images)
Code example #31
    def __init__(self, config):
        ### Initialize setting
        print("initializing")
        np.set_printoptions(precision=4)
        self.stage = config['stage']
        self.device = config['device']
        self.output_dim = config['output_dim']
        self.n_class = config['label_dim']

        self.subspace_num = config['n_subspace']
        self.subcenter_num = config['n_subcenter']
        self.code_batch_size = config['code_batch_size']
        self.cq_lambda = config['cq_lambda']
        self.max_iter_update_Cb = config['max_iter_update_Cb']
        self.max_iter_update_b = config['max_iter_update_b']
        #self.centers_device = config['centers_device']

        self.batch_size = config['batch_size']
        self.max_iter = config['max_iter']
        self.img_model = config['img_model']
        self.loss_type = config['loss_type']
        self.console_log = (config['console_log'] == 1)
        self.learning_rate = config['learning_rate']
        self.learning_rate_decay_factor = config['learning_rate_decay_factor']
        self.decay_step = config['decay_step']

        self.finetune_all = config['finetune_all']

        self.margin_param = config['margin_param']
        self.wordvec_dict = config['wordvec_dict']
        self.partlabel = config['partlabel']
        ### Format as 'path/to/save/dir/lr_{$0}_output_dim{$1}_iter_{$2}'
        self.save_dir = config['save_dir'] + self.loss_type + '_lr_' + str(
            self.learning_rate) + '_cqlambda_' + str(
                self.cq_lambda) + '_subspace_' + str(
                    self.subspace_num) + '_margin_' + str(
                        self.margin_param) + '_partlabel_' + str(
                            self.partlabel) + '_iter_' + str(
                                self.max_iter) + '_output_' + str(
                                    self.output_dim) + '_'

        ### Setup session
        print("launching session")
        configProto = tf.ConfigProto()
        configProto.gpu_options.allow_growth = True
        configProto.allow_soft_placement = True
        self.sess = tf.Session(config=configProto)

        ### Create variables and placeholders

        with tf.device(self.device):
            self.img = tf.placeholder(tf.float32,
                                      [self.batch_size, 256, 256, 3])
            self.img_label = tf.placeholder(tf.float32,
                                            [self.batch_size, self.n_class])

            self.img_last_layer, self.img_output, self.C = \
                self.load_model(config['model_weights'])

            ### Centers shared in different modalities (image & text)
            ### Binary codes for different modalities (image & text)
            self.img_output_all = tf.placeholder(tf.float32,
                                                 [None, self.output_dim])
            self.img_b_all = tf.placeholder(
                tf.float32, [None, self.subspace_num * self.subcenter_num])

            self.b_img = tf.placeholder(
                tf.float32, [None, self.subspace_num * self.subcenter_num])
            self.ICM_m = tf.placeholder(tf.int32, [])
            self.ICM_b_m = tf.placeholder(tf.float32,
                                          [None, self.subcenter_num])
            self.ICM_b_all = tf.placeholder(
                tf.float32, [None, self.subcenter_num * self.subspace_num])
            self.ICM_X = tf.placeholder(
                tf.float32, [self.code_batch_size, self.output_dim])
            self.ICM_C_m = tf.slice(self.C,
                                    [self.ICM_m * self.subcenter_num, 0],
                                    [self.subcenter_num, self.output_dim])
            self.ICM_X_residual = tf.add(
                tf.sub(self.ICM_X, tf.matmul(self.ICM_b_all, self.C)),
                tf.matmul(self.ICM_b_m, self.ICM_C_m))
            ICM_X_expand = tf.expand_dims(self.ICM_X_residual, 1)
            ICM_C_m_expand = tf.expand_dims(self.ICM_C_m, 0)
            # N*sc*D  *  D*n
            word_dict = tf.constant(np.loadtxt(self.wordvec_dict),
                                    dtype=tf.float32)
            ICM_word_dict = tf.reshape(
                tf.matmul(
                    tf.reshape(tf.sub(ICM_X_expand, ICM_C_m_expand), [
                        self.code_batch_size * self.subcenter_num,
                        self.output_dim
                    ]), tf.transpose(word_dict)),
                [self.code_batch_size, self.subcenter_num, self.n_class])
            ICM_sum_squares = tf.reduce_sum(tf.square(ICM_word_dict),
                                            reduction_indices=2)
            ICM_best_centers = tf.argmin(ICM_sum_squares, 1)
            self.ICM_best_centers_one_hot = tf.one_hot(ICM_best_centers,
                                                       self.subcenter_num,
                                                       dtype=tf.float32)

            self.global_step = tf.Variable(0, trainable=False)
            self.train_op = self.apply_loss_function(self.global_step)
            self.sess.run(tf.initialize_all_variables())
        return
Code example #32
    def apply_loss_function(self, global_step):
        ### loss function
        if self.loss_type == 'cos_margin_multi_label':
            assert self.output_dim == 300
            word_dict = tf.constant(np.loadtxt(self.wordvec_dict),
                                    dtype=tf.float32)
            margin_param = tf.constant(self.margin_param, dtype=tf.float32)

            # N: batchsize, L: label_dim, D: 300
            # img_label: N * L
            # word_dic: L * D
            # v_label: N * L * D
            v_label = tf.mul(tf.expand_dims(self.img_label, 2),
                             tf.expand_dims(word_dict, 0))
            # img_last: N * D
            # ip_1: N * L
            ip_1 = tf.reduce_sum(
                tf.mul(tf.expand_dims(self.img_last_layer, 1), v_label), 2)
            # mod_1: N * L
            v_label_mod = tf.mul(
                tf.expand_dims(tf.ones([self.batch_size, self.n_class]), 2),
                tf.expand_dims(word_dict, 0))
            mod_1 = tf.sqrt(
                tf.mul(
                    tf.expand_dims(
                        tf.reduce_sum(tf.square(self.img_last_layer), 1), 1),
                    tf.reduce_sum(tf.square(v_label_mod), 2)))
            #mod_1 = tf.select(tf.less(mod_1_1, tf.constant(0.0000001)), tf.ones([self.batch_size, self.n_class]), mod_1_1)
            # cos_1: N * L
            cos_1 = tf.div(ip_1, mod_1)

            ip_2 = tf.matmul(self.img_last_layer, word_dict, transpose_b=True)

            # multiply ids to inner product
            #ip_2 = tf.mul(ip_2_1, ids_dict)
            def reduce_shaper(t):
                return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])

            mod_2_2 = tf.sqrt(
                tf.matmul(reduce_shaper(tf.square(self.img_last_layer)),
                          reduce_shaper(tf.square(word_dict)),
                          transpose_b=True))
            mod_2 = tf.select(tf.less(mod_2_2, tf.constant(0.0000001)),
                              tf.ones([self.batch_size, self.n_class]),
                              mod_2_2)
            # cos_2: N * L
            cos_2 = tf.div(ip_2, mod_2)

            # cos - cos: N * L * L
            cos_cos_1 = tf.sub(
                margin_param,
                tf.sub(tf.expand_dims(cos_1, 2), tf.expand_dims(cos_2, 1)))
            # we need to let the wrong place be 0
            cos_cos = tf.mul(cos_cos_1, tf.expand_dims(self.img_label, 2))

            cos_loss = tf.reduce_sum(
                tf.maximum(tf.constant(0, dtype=tf.float32), cos_cos))
            self.cos_loss = tf.div(
                cos_loss,
                tf.mul(tf.constant(self.n_class, dtype=tf.float32),
                       tf.reduce_sum(self.img_label)))

            self.test1 = cos_cos
            self.test2 = cos_1
            self.test3 = cos_2
            self.test00 = tf.reduce_sum(
                tf.cast(
                    tf.not_equal(
                        cos_cos,
                        tf.zeros([self.batch_size, self.n_class,
                                  self.n_class])), tf.int32))
            self.test0 = tf.mul(tf.constant(self.n_class, dtype=tf.float32),
                                tf.reduce_sum(self.img_label))
            self.check0 = tf.check_numerics(cos_cos, "cos_cos")
            self.check1 = tf.check_numerics(cos_1, "cos_1")
            self.check2 = tf.check_numerics(cos_2, "cos_2")
        elif self.loss_type == 'cos_softmargin_multi_label':
            assert self.output_dim == 300
            word_dict = tf.constant(np.loadtxt(self.wordvec_dict),
                                    dtype=tf.float32)
            #margin_param = tf.constant(self.margin_param, dtype=tf.float32)

            # N: batchsize, L: label_dim, D: 300
            # img_label: N * L
            # word_dic: L * D
            # v_label: N * L * D
            v_label = tf.mul(tf.expand_dims(self.img_label, 2),
                             tf.expand_dims(word_dict, 0))
            # img_last: N * D
            # ip_1: N * L
            ip_1 = tf.reduce_sum(
                tf.mul(tf.expand_dims(self.img_last_layer, 1), v_label), 2)
            # mod_1: N * L
            v_label_mod = tf.mul(
                tf.expand_dims(tf.ones([self.batch_size, self.n_class]), 2),
                tf.expand_dims(word_dict, 0))
            mod_1 = tf.sqrt(
                tf.mul(
                    tf.expand_dims(
                        tf.reduce_sum(tf.square(self.img_last_layer), 1), 1),
                    tf.reduce_sum(tf.square(v_label_mod), 2)))
            #mod_1 = tf.select(tf.less(mod_1_1, tf.constant(0.0000001)), tf.ones([self.batch_size, self.n_class]), mod_1_1)
            # cos_1: N * L
            cos_1 = tf.div(ip_1, mod_1)

            ip_2 = tf.matmul(self.img_last_layer, word_dict, transpose_b=True)

            # multiply ids to inner product
            #ip_2 = tf.mul(ip_2_1, ids_dict)
            def reduce_shaper(t):
                return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])

            mod_2_2 = tf.sqrt(
                tf.matmul(reduce_shaper(tf.square(self.img_last_layer)),
                          reduce_shaper(tf.square(word_dict)),
                          transpose_b=True))
            mod_2 = tf.select(tf.less(mod_2_2, tf.constant(0.0000001)),
                              tf.ones([self.batch_size, self.n_class]),
                              mod_2_2)
            # cos_2: N * L
            cos_2 = tf.div(ip_2, mod_2)

            # word_dic: L * D
            # ip_3: L * L
            # compute soft margin
            ip_3 = tf.matmul(word_dict, word_dict, transpose_b=True)
            # use word_dic to avoid 0 in /
            mod_3 = tf.sqrt(
                tf.matmul(reduce_shaper(tf.square(word_dict)),
                          reduce_shaper(tf.square(word_dict)),
                          transpose_b=True))
            margin_param = tf.sub(tf.constant(1.0, dtype=tf.float32),
                                  tf.div(ip_3, mod_3))

            # cos - cos: N * L * L
            cos_cos_1 = tf.sub(
                tf.expand_dims(margin_param, 0),
                tf.sub(tf.expand_dims(cos_1, 2), tf.expand_dims(cos_2, 1)))
            # we need to let the wrong place be 0
            cos_cos = tf.mul(cos_cos_1, tf.expand_dims(self.img_label, 2))

            cos_loss = tf.reduce_sum(
                tf.maximum(tf.constant(0, dtype=tf.float32), cos_cos))
            self.cos_loss = tf.div(
                cos_loss,
                tf.mul(tf.constant(self.n_class, dtype=tf.float32),
                       tf.reduce_sum(self.img_label)))

            self.test1 = cos_cos
            self.test2 = cos_1
            self.test3 = cos_2

        self.precq_loss_img = tf.reduce_mean(
            tf.reduce_sum(
                tf.square(
                    tf.sub(self.img_last_layer, tf.matmul(self.b_img,
                                                          self.C))), 1))
        word_dict = tf.constant(np.loadtxt(self.wordvec_dict),
                                dtype=tf.float32)
        self.cq_loss_img = tf.reduce_mean(
            tf.reduce_sum(
                tf.square(
                    tf.matmul(
                        tf.sub(self.img_last_layer,
                               tf.matmul(self.b_img, self.C)),
                        tf.transpose(word_dict))), 1))
        self.q_lambda = tf.Variable(self.cq_lambda, name='cq_lambda')
        self.cq_loss = tf.mul(self.q_lambda, self.cq_loss_img)
        self.loss = tf.add(self.cos_loss, self.cq_loss)

        ### Last layer has a 10 times learning rate
        self.lr = tf.train.exponential_decay(self.learning_rate,
                                             global_step,
                                             self.decay_step,
                                             self.learning_rate_decay_factor,
                                             staircase=True)
        opt = tf.train.MomentumOptimizer(learning_rate=self.lr, momentum=0.9)
        grads_and_vars = opt.compute_gradients(
            self.loss, self.train_layers + self.train_last_layer)
        fcgrad, _ = grads_and_vars[-2]
        fbgrad, _ = grads_and_vars[-1]

        if self.finetune_all:
            return opt.apply_gradients(
                [(grads_and_vars[0][0], self.train_layers[0]),
                 (grads_and_vars[1][0] * 2, self.train_layers[1]),
                 (grads_and_vars[2][0], self.train_layers[2]),
                 (grads_and_vars[3][0] * 2, self.train_layers[3]),
                 (grads_and_vars[4][0], self.train_layers[4]),
                 (grads_and_vars[5][0] * 2, self.train_layers[5]),
                 (grads_and_vars[6][0], self.train_layers[6]),
                 (grads_and_vars[7][0] * 2, self.train_layers[7]),
                 (grads_and_vars[8][0], self.train_layers[8]),
                 (grads_and_vars[9][0] * 2, self.train_layers[9]),
                 (grads_and_vars[10][0], self.train_layers[10]),
                 (grads_and_vars[11][0] * 2, self.train_layers[11]),
                 (grads_and_vars[12][0], self.train_layers[12]),
                 (grads_and_vars[13][0] * 2, self.train_layers[13]),
                 (fcgrad * 10, self.train_last_layer[0]),
                 (fbgrad * 20, self.train_last_layer[1])],
                global_step=global_step)
        else:
            return opt.apply_gradients(
                [(fcgrad * 10, self.train_last_layer[0]),
                 (fbgrad * 20, self.train_last_layer[1])],
                global_step=global_step)
Code example #33
    def train_model(self, sess, max_iters):
        """Network training loop."""

        data_layer = get_data_layer(self.roidb, self.imdb.num_classes)

        # classification loss
        cls_score = self.net.get_output('cls_score')
        label = tf.placeholder(tf.int32, shape=[None])
        cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(cls_score, label))

        # subcategory classification loss
        if cfg.TRAIN.SUBCLS:
            subcls_score = self.net.get_output('subcls_score')
            sublabel = tf.placeholder(tf.int32, shape=[None])
            subcls_cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(subcls_score, sublabel))

        # bounding box regression L1 loss
        bbox_pred = self.net.get_output('bbox_pred')
        bbox_targets = tf.placeholder(tf.float32, shape=[None, 4 * self.imdb.num_classes])
        bbox_weights = tf.placeholder(tf.float32, shape=[None, 4 * self.imdb.num_classes])
        loss_box = tf.reduce_mean(tf.reduce_sum(tf.mul(bbox_weights, tf.abs(tf.sub(bbox_pred, bbox_targets))), reduction_indices=[1]))

        # multi-task loss
        if cfg.TRAIN.SUBCLS:
            loss = cross_entropy + subcls_cross_entropy + loss_box
        else:
            loss = cross_entropy + loss_box

        # optimizer
        lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)
        momentum = cfg.TRAIN.MOMENTUM
        train_op = tf.train.MomentumOptimizer(lr, momentum).minimize(loss)

        # intialize variables
        sess.run(tf.initialize_all_variables())
        if self.pretrained_model is not None:
            print ('Loading pretrained model '
                   'weights from {:s}').format(self.pretrained_model)
            self.net.load(self.pretrained_model, sess, True)

        last_snapshot_iter = -1
        timer = Timer()
        for iter in range(max_iters):
            # learning rate
            if iter >= cfg.TRAIN.STEPSIZE:
                sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA))
            else:
                sess.run(tf.assign(lr, cfg.TRAIN.LEARNING_RATE))

            # get one batch
            blobs = data_layer.forward()

            # Make one SGD update
            if cfg.TRAIN.SUBCLS:
                feed_dict={self.net.data: blobs['data'], self.net.rois: blobs['rois'], self.net.keep_prob: 0.5, \
                           label: blobs['labels'], sublabel: blobs['sublabels'], bbox_targets: blobs['bbox_targets'], bbox_weights: blobs['bbox_inside_weights']}
            else:
                feed_dict={self.net.data: blobs['data'], self.net.rois: blobs['rois'], self.net.keep_prob: 0.5, \
                           label: blobs['labels'], bbox_targets: blobs['bbox_targets'], bbox_weights: blobs['bbox_inside_weights']}
            
            timer.tic()
            if cfg.TRAIN.SUBCLS:
                loss_cls_value, loss_subcls_value, loss_box_value, _ = sess.run([cross_entropy, subcls_cross_entropy, loss_box, train_op], feed_dict=feed_dict)
            else:
                loss_cls_value, loss_box_value, _ = sess.run([cross_entropy, loss_box, train_op], feed_dict=feed_dict)
            timer.toc()

            if cfg.TRAIN.SUBCLS:
                print 'iter: %d / %d, loss_cls: %.4f, loss_subcls: %.4f, loss_box: %.4f, lr: %f, time: %f' %\
                    (iter+1, max_iters, loss_cls_value, loss_subcls_value, loss_box_value, lr.eval(), timer.diff)
            else:
                print 'iter: %d / %d, loss_cls: %.4f, loss_box: %.4f, lr: %f' %\
                    (iter+1, max_iters, loss_cls_value, loss_box_value, lr.eval())

            if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
                print 'speed: {:.3f}s / iter'.format(timer.average_time)

            if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = iter
                self.snapshot(sess, iter)

        if last_snapshot_iter != iter:
            self.snapshot(sess, iter)
Code example #34
def main(layers, t_hor, ind, nrolls, bts, ler_r, mom, teps, renew, imp, q):
    # Quad Params
    wMax = 3.0
    wMin = -1.0 * wMax
    aMax = 2 * np.pi / 10.0
    aMin = -1.0 * aMax
    max_list = [wMax, aMax]
    min_list = [wMin, aMin]

    print 'Starting worker-' + str(ind)

    Nx = 101
    minn = [-5.0, -5.0, 0.0, 6.0]
    maxx = [5.0, 5.0, 2 * np.pi, 12.0]

    X = np.linspace(minn[0], maxx[0], Nx)
    Y = np.linspace(minn[1], maxx[1], Nx)
    X, Y = np.meshgrid(X, Y)
    XX = np.reshape(X, [-1, 1])
    YY = np.reshape(Y, [-1, 1])
    grid_eval = np.concatenate(
        (XX, YY, 0.0 * np.ones(XX.shape), 6.0 * np.ones(XX.shape)), axis=1)
    grid_eval_ = np.concatenate(
        (XX, YY,
         (2.0 / 3.0) * np.pi * np.ones(XX.shape), 6.0 * np.ones(XX.shape)),
        axis=1)
    grid_eval__ = np.concatenate(
        (XX, YY,
         (4.0 / 3.0) * np.pi * np.ones(XX.shape), 6.0 * np.ones(XX.shape)),
        axis=1)
    grid_evall = np.concatenate(
        (XX, YY, 0.0 * np.ones(XX.shape), 12.0 * np.ones(XX.shape)), axis=1)
    grid_evall_ = np.concatenate(
        (XX, YY,
         (2.0 / 3.0) * np.pi * np.ones(XX.shape), 12.0 * np.ones(XX.shape)),
        axis=1)
    grid_evall__ = np.concatenate(
        (XX, YY,
         (4.0 / 3.0) * np.pi * np.ones(XX.shape), 12.0 * np.ones(XX.shape)),
        axis=1)

    reach100s = sio.loadmat('flat_1s.mat')
    reach100s = reach100s["M"]
    reach100s[:, [1, 2]] = reach100s[:, [2, 1]]
    reach100s[:, 2] = np.mod(reach100s[:, 2], 2.0 * np.pi)
    #mean_data = np.mean(reach100s[:,:-1],axis=0);
    #std_data = np.std(reach100s[:,:-1],axis=0);

    nofparams = 0
    for i in xrange(len(layers) - 1):
        nofparams += layers[i] * layers[i + 1] + layers[i + 1]
    print 'Number of Params is: ' + str(nofparams)

    H_length = t_hor
    #-1.0; #Has to be negative                                 #VAR
    iters = 1000000
    #VAR
    #center = np.array([[0.0,0.0]])
    center = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
    depth = 2.0
    incl = 1.0

    ##################### DEFINITIONS #####################
    #layers = [2 + 1,10,1];                                                    #VAR
    #ssize = layers[0] - 1;
    dt = 0.05
    #VAR
    num_ac = 2
    ##################### INSTANTIATIONS #################
    states, y, Tt, L, l_r, lb, reg, cross_entropy = TransDef(
        "Critic", False, layers, depth, incl, center)
    ola1 = tf.argmax(Tt, dimension=1)
    ola2 = tf.argmax(y, dimension=1)
    ola3 = tf.equal(ola1, ola2)
    accuracy = tf.reduce_mean(tf.cast(ola3, tf.float32))
    #a_layers = layers;
    #a_layers[-1] = 2; #We have two actions
    #states_,y_,Tt_,l_r_,lb_,reg_ = TransDef("Actor",False,a_layers,depth,incl,center,outp=True);

    #theta = tf.get_collection(tf.GraphKeys.VARIABLES, scope='Critic');
    #A_func_vars = tf.get_collection(tf.GraphKeys.VARIABLES, scope='Actor');

    #var_grad = tf.gradients(Tt_,states_)[0]
    var_grad_ = tf.gradients(Tt, states)[0]
    grad_x = tf.slice(var_grad_, [0, 0], [-1, layers[0] - 1])
    #theta = tf.trainable_variables();

    #    set_to_zero = []
    #    for var  in sorted(V_func_vars,        key=lambda v: v.name):
    #        set_to_zero.append(var.assign(tf.zeros(tf.shape(var))))
    #    set_to_zero = tf.group(*set_to_zero)
    #
    #    set_to_not_zero = []
    #    for var  in sorted(V_func_vars,        key=lambda v: v.name):
    #        set_to_not_zero.append(var.assign(tf.random_uniform(tf.shape(var),minval=-0.1,maxval=0.1)));
    #    set_to_not_zero = tf.group(*set_to_not_zero)

    # DEFINE LOSS

    lmbda = 0.0
    #1.0**(-3.5);#0.01;
    beta = 0.00
    L = tf.sqrt(
        tf.reduce_mean(
            tf.reduce_sum(tf.square(tf.sub(y, Tt)), 1, keep_dims=True))
    ) + beta * tf.reduce_mean(
        tf.reduce_max(tf.abs(grad_x), reduction_indices=1, keep_dims=True))
    #L = tf.reduce_mean(tf.mul(tf.exp(imp*t_vec),tf.abs(tf.sub(y,Tt)))) + lmbda*reg;
    #L = tf.reduce_mean(tf.abs(tf.sub(y,Tt))) + lmbda*reg;

    # DEFINE OPTIMIZER

    #nu = 5.01;
    #nunu = ler_r;#0.00005;
    nu = tf.placeholder(tf.float32, shape=[])  #VAR

    #lr_multiplier = ler_r
    lr_schedule = PiecewiseSchedule([
        (0, 0.1),
        (10000, 0.01),
        (20000, 0.001),
        (30000, 0.0001),
    ],
                                    outside_value=0.0001)

    #optimizer = tf.train.GradientDescentOptimizer(nu)
    #optimizer
    #train_step = tf.train.MomentumOptimizer(learning_rate=nu,momentum=mom).minimize(L)
    #optimizer
    #train_step = tf.train.AdamOptimizer(learning_rate=nu).minimize(L);
    train_step = tf.train.RMSPropOptimizer(learning_rate=nu,
                                           momentum=mom).minimize(L)
    #optimizer = tf.train.RMSPropOptimizer(learning_rate=nu,momentum=mom);
    #gvs = optimizer.compute_gradients(L,theta);
    #capped_gvs = [(tf.clip_by_value(grad, -3., 3.), var) for grad, var in gvs];
    #train_step = optimizer.apply_gradients(gvs);
    #train_step = tf.train.AdagradOptimizer(learning_rate=nu,initial_accumulator_value=0.5).minimize(L);

    hot_input = tf.placeholder(tf.int64, shape=(None))
    make_hot = tf.one_hot(hot_input, 4, on_value=1, off_value=0)

    # INITIALIZE GRAPH
    theta = tf.trainable_variables()
    sess = tf.Session()
    init = tf.initialize_all_variables()
    sess.run(init)

    def V_0(x):
        return np.linalg.norm(x, ord=np.inf, axis=1, keepdims=True) - 1.0

    def p_corr(ALL_x):
        ALL_x = np.mod(ALL_x, 2.0 * np.pi)
        return ALL_x

    def F(ALL_x, opt_a, opt_b):
        sin_phi = np.sin(ALL_x[:, 2, None])
        cos_phi = np.cos(ALL_x[:, 2, None])

        col1 = np.multiply(ALL_x[:, 3, None], cos_phi)
        col2 = np.multiply(ALL_x[:, 3, None], sin_phi)
        col3 = opt_a[:, 0, None]
        col4 = opt_a[:, 1, None]

        return np.concatenate((col1, col2, col3, col4), axis=1)

    ####################### RECURSIVE FUNC ####################

    def RK4(ALL_x, dtt, opt_a, opt_b):

        k1 = F(ALL_x, opt_a, opt_b)
        #### !!!
        # ~~~~ Compute optimal input (k2)
        ALL_tmp = ALL_x + np.multiply(dtt / 2.0, k1)
        ALL_tmp[:, 2] = p_corr(ALL_tmp[:, 2])

        k2 = F(ALL_tmp, opt_a, opt_b)
        #### !!!
        # ~~~~ Compute optimal input (k3)
        ALL_tmp = ALL_x + np.multiply(dtt / 2.0, k2)
        ALL_tmp[:, 2] = p_corr(ALL_tmp[:, 2])

        k3 = F(ALL_tmp, opt_a, opt_b)
        #### !!!
        # ~~~~ Compute optimal input (k4)
        ALL_tmp = ALL_x + np.multiply(dtt, k3)
        ALL_tmp[:, 2] = p_corr(ALL_tmp[:, 2])

        k4 = F(ALL_tmp, opt_a, opt_b)
        #### !!!

        Snx = ALL_x + np.multiply((dtt / 6.0), (k1 + 2.0 * k2 + 2.0 * k3 + k4))
        Snx[:, 2] = p_corr(Snx[:, 2])
        return Snx
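
    # A quick shape sanity check for RK4 (hypothetical values, assuming the
    # 4-state model [x, y, phi, v] integrated by F above with a 2-D input):
    #   xs = np.zeros((8, 4)); us = np.ones((8, 2))
    #   RK4(xs, 0.05, us, None).shape  ->  (8, 4)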

    perms = list(itertools.product([-1, 1], repeat=num_ac))

    def Hot_to_Cold(opt_a):
        for k in range(len(max_list)):
            ind_max = (opt_a[:, [k]] > 0.0)
            opt_a[ind_max] = max_list[k]
            opt_a[np.logical_not(ind_max)] = min_list[k]  # Python's `not` is invalid on arrays
        return opt_a

    def getPI(ALL_x, F_PI=[]):
        # Things to keep in mind: you want the returned value to be the
        # minimum across a trajectory.

        current_params = sess.run(theta)

        #perms = list(itertools.product([-1,1], repeat=num_ac))
        next_states = []
        true_ac_list = []
        for i in range(len(perms)):  #2**num_actions
            ac_tuple = perms[i]
            ac_list = [tmp1 * tmp2 for tmp1, tmp2 in zip(ac_tuple, max_list)]
            #ASSUMING: aMax = -aMin
            true_ac_list.append(ac_list)
            opt_a = np.asarray(ac_list) * np.ones([ALL_x.shape[0], 1])
            Snx = RK4(ALL_x, dt, opt_a, None)
            next_states.append(Snx)
        next_states = np.concatenate(next_states, axis=0)
        values = V_0(next_states[:, [0, 1]])

        for params in F_PI:
            for ind in range(len(params)):  #Reload pi*(x,t+dt) parameters
                sess.run(theta[ind].assign(params[ind]))

            opt_a = sess.run(Tt, {states: next_states})
            next_states = RK4(ALL_x, dt, opt_a, None)
            values = np.minimum(values, V_0(next_states[:, [0, 1]]))

        # Candidate values are stacked perm-major, so group them per state by
        # reshaping to (num_perms, N) and transposing.
        compare_vals = values.reshape([-1, ALL_x.shape[0]]).T
        values = np.min(compare_vals, axis=1, keepdims=True)
        index_best_a = compare_vals.argmin(axis=1)  #.reshape([-1,1]);
        best_actions = np.asarray([true_ac_list[i] for i in index_best_a])
        final_values = np.minimum(values, V_0(ALL_x[:, [0, 1]]))

        for ind in range(len(current_params)):  #Reload pi*(x,t+dt) parameters
            sess.run(theta[ind].assign(current_params[ind]))

        #return index_best_a,final_values
        return best_actions, final_values

    # *****************************************************************************
    #
    # ============================= MAIN LOOP ====================================
    #                     ( )
    # *****************************************************************************
    t1 = time.time()
    t = 0.0
    mse = np.inf
    k = 0
    kk = 0
    beta = 3.0
    batch_size = bts
    tau = 1000.0
    steps = teps
    ALL_PI = []
    nunu = lr_schedule.value(k)
    for i in xrange(iters):

        if (np.mod(i, renew) == 0 and i != 0):

            ALL_PI.insert(0, sess.run(theta))

            k = 0
            ALL_x = np.random.uniform(-5.0, 5.0, (nrolls, layers[0]))
            ALL_x[:, 2] = ALL_x[:, 2] * np.pi / 5.0 + np.pi
            ALL_x[:, 3] = ALL_x[:, 3] * 3.0 / 5.0 + 9.0
            PI, _ = getPI(ALL_x, ALL_PI)

            ALL_x_ = np.random.uniform(-5.0, 5.0, (nrolls / 100, layers[0]))
            ALL_x_[:, 2] = ALL_x_[:, 2] * np.pi / 5.0 + np.pi
            ALL_x_[:, 3] = ALL_x_[:, 3] * 3.0 / 5.0 + 9.0
            PI_, _ = getPI(ALL_x_, ALL_PI)

            #ZR = getPI
            #ZR = sess.run(Tt,{states:reach100s[:,:-1]});
            #error1 = ZR - reach100s[:,-1,None];

            #            Z000 = np.reshape(sess.run(Tt,{states:grid_eval}),X.shape);
            #            Z001 = np.reshape(sess.run(Tt,{states:grid_eval_}),X.shape);
            #            Z002 = np.reshape(sess.run(Tt,{states:grid_eval__}),X.shape);
            #            #filter_in = (Z000 <= 0.05) #& (Z000 >= 0.05);
            #            filter_out = (Z000 > 0.00) #| (Z000 < -0.05);
            #            filter_out_ = (Z001 > 0.00) #| (Z000 < -0.05);
            #            filter_out__ = (Z002 > 0.00) #| (Z000 < -0.05);
            #            #Z000[filter_in] = 1.0;
            #            Z000[filter_out] = 0.0;
            #            Z001[filter_out_] = 0.0;
            #            Z002[filter_out__] = 0.0;
            #
            #            Z000l = np.reshape(sess.run(Tt,{states:grid_evall}),X.shape);
            #            Z001l = np.reshape(sess.run(Tt,{states:grid_evall_}),X.shape);
            #            Z002l = np.reshape(sess.run(Tt,{states:grid_evall__}),X.shape);
            #            #filter_in = (Z000 <= 0.05) #& (Z000 >= 0.05);
            #            filter_outl = (Z000l > 0.00) #| (Z000 < -0.05);
            #            filter_out_l = (Z001l > 0.00) #| (Z000 < -0.05);
            #            filter_out__l = (Z002l > 0.00) #| (Z000 < -0.05);
            #            #Z000[filter_in] = 1.0;
            #            Z000l[filter_outl] = 0.0;
            #            Z001l[filter_out_l] = 0.0;
            #            Z002l[filter_out__l] = 0.0;
            #
            #            plt.clf();
            #            #plt.plot(ALL_t_, np.abs(allE), 'ro');
            #            #plt.axis([-1.0, 0.0, 0.0, 10.0])
            #            plt.subplot(2,3,1)
            #            plt.imshow(Z000,cmap='gray');
            #            plt.subplot(2,3,2)
            #            plt.imshow(Z001,cmap='gray');
            #            plt.subplot(2,3,3)
            #            plt.imshow(Z002,cmap='gray');
            #            plt.subplot(2,3,4)
            #            plt.imshow(Z000l,cmap='gray');
            #            plt.subplot(2,3,5)
            #            plt.imshow(Z001l,cmap='gray');
            #            plt.subplot(2,3,6)
            #            plt.imshow(Z002l,cmap='gray');
            #            plt.pause(0.01);
            #
            #
            #            print str(t) + " || " + str(np.max(np.abs(error1))) + " , " + str(np.mean(np.abs(error1))) + " REG = " + str(sess.run(reg)) + ") | MSE = " + str(mse) + "|ITR=" + str(i)                                                #VAR
            t = t - dt

        #elif(i == 0):
        elif (np.mod(i, renew) == 0 and i == 0):

            k = 0
            #            sess.run(set_to_zero);
            ALL_x = np.random.uniform(-5.0, 5.0, (nrolls, layers[0]))
            ALL_x[:, 2] = ALL_x[:, 2] * np.pi / 5.0 + np.pi
            ALL_x[:, 3] = ALL_x[:, 3] * 3.0 / 5.0 + 9.0
            PI, _ = getPI(ALL_x)

            ALL_x_ = np.random.uniform(-5.0, 5.0, (nrolls / 100, layers[0]))
            ALL_x_[:, 2] = ALL_x_[:, 2] * np.pi / 5.0 + np.pi
            ALL_x_[:, 3] = ALL_x_[:, 3] * 3.0 / 5.0 + 9.0
            PI_, _ = getPI(ALL_x_)


            #sess.run(set_to_not_zero);

        # |||||||||||| ----  PRINT ----- ||||||||||||

        if (np.mod(i, 200) == 0):

            #xel = sess.run(L,{states:ALL_x,y:PI});
            #test_e = sess.run(L,{states:ALL_x_,y:PI_});
            train_acc = sess.run(accuracy, {
                states: ALL_x,
                y: PI
            })
            test_acc = sess.run(accuracy, {
                states: ALL_x_,
                y: PI_
            })
            #o = np.random.randint(len(ALL_x));
            print str(i) + ") | TR_ACC = " + str(
                train_acc) + " | TE_ACC = " + str(
                    test_acc) + " | Lerning Rate = " + str(nunu)
            #print str(i) + ") | XEL = " + str(xel) + " | Test_E = " + str(test_e) + " | Lerning Rate = " + str(nunu)
            #print str(PI[[o],:]) + " || " + str(sess.run(l_r[-1],{states:ALL_x[[o],:]})) #+ " || " + str(sess.run(gvs[-1],{states:ALL_x,y:PI}))

        nunu = 0.001  #/np.log(i+2.0)#lr_schedule.value(i);
        #nunu = ler_r/(np.mod(i,renew)+1.0);
        tmp = np.random.randint(len(ALL_x), size=bts)
        sess.run(train_step,
                 feed_dict={
                     states: ALL_x[tmp],
                     y: PI[tmp],
                     nu: nunu
                 })
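
The loop above creates lr_schedule but then overrides the learning rate with a
constant (nunu = 0.001), and PiecewiseSchedule itself is not defined in this
snippet. A minimal sketch with the same interface, in the style of the OpenAI
baselines utility (the actual class used by this script may differ):

class PiecewiseSchedule(object):
    def __init__(self, endpoints, outside_value=None):
        # endpoints: list of (t, value) pairs, sorted by t
        self._endpoints = endpoints
        self._outside_value = outside_value

    def value(self, t):
        # Linearly interpolate between the two endpoints that bracket t.
        for (l_t, l_v), (r_t, r_v) in zip(self._endpoints[:-1],
                                          self._endpoints[1:]):
            if l_t <= t < r_t:
                alpha = float(t - l_t) / (r_t - l_t)
                return l_v + alpha * (r_v - l_v)
        # t is outside the endpoint range.
        return self._outside_value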
コード例 #35
0
 def test_Sub(self):
     t = tf.sub(*self.random((3, 4), (3, 4)))
     self.check(t)
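
All of these snippets use the pre-1.0 TensorFlow API: tf.sub, tf.mul and tf.neg
were renamed to tf.subtract, tf.multiply and tf.negative in TensorFlow 1.0. A
small compatibility shim for running the older snippets on a 1.x release:

import tensorflow as tf

if not hasattr(tf, 'sub'):
    tf.sub = tf.subtract
    tf.mul = tf.multiply
    tf.neg = tf.negative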
コード例 #36
0
                print("Fitting...")
                print("Number of unknowns:{}".format(num_basis))

            x = tf.placeholder("float32", [None])
            y = tf.placeholder("float32", [None])
            #basis_   = tf.placeholder("float32", [None, num_basis] )
            coord_ = tf.placeholder("float32", [None, 3])

            # this is the only trainable variable
            coeff = tf.Variable(init_coeff, name="basis_coeff", trainable=True)

            # normalization field
            # calculate EQ^2 distance from every coord to every knot
            # then sum over them
            dist_sq_element = tf.square(
                tf.sub(tf.expand_dims(coord_, 1), tf.expand_dims(knots, 0)))
            dist_sq = tf.reduce_sum(dist_sq_element, 2)
            kernels = tf.exp((-1.0 / (spatial_bw * spatial_bw)) * dist_sq)

            normalization = tf.reduce_sum(tf.expand_dims(coeff, 0) * kernels,
                                          1,
                                          name='Normalization_Field')

            with tf.name_scope('correct') as scope:
                hist_bins = tf.linspace(rmin,
                                        rmax,
                                        options.bins,
                                        name="hist_bins")
                corr_x = tf.mul(x, normalization, name='Corrected_Image'
                                )  #tf.exp( tf.add( x, normalization ) )
                # calculate standard deviations inside each class
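
The normalization field above is a radial-basis-function expansion: every
coordinate is compared against every knot, and the squared distances become
Gaussian kernel weights. An equivalent NumPy sketch (the helper name and shapes
are illustrative only):

import numpy as np

def normalization_field(coords, knots, coeff, spatial_bw):
    # coords: (n, 3), knots: (k, 3), coeff: (k,)
    diff = coords[:, None, :] - knots[None, :, :]    # (n, k, 3)
    dist_sq = np.sum(np.square(diff), axis=2)        # (n, k)
    kernels = np.exp(-dist_sq / (spatial_bw * spatial_bw))
    return np.sum(coeff[None, :] * kernels, axis=1)  # (n,)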
コード例 #37
0
# Constructing the hidden and output layers
def multilayer_perceptron(x, weights, biases):
    layer = [x]
    for i in range(1, n_layer + 2):
        layer.append(activation[kernel](tf.add(
            tf.matmul(layer[i - 1], weights[i - 1]), biases[i - 1])))
    return layer[-1]


# Defining the output
pred = multilayer_perceptron(x, weights, biases)

# Defining the cost function based on the function select
if cost_func == 0:
    cost = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(y, pred))))
elif cost_func == 1:
    cost = tf.reduce_mean(
        -tf.reduce_sum(y * tf.log(pred), reduction_indices=[1]))
elif cost_func == 2:
    c = np.zeros([nbits, 1]).astype('float32')
    for i in range(nbits):
        c[i, 0] = 2**i
    conv = tf.Variable(c)
    tpred = tf.nn.sigmoid(tf.mul(1000.0, pred))
    cost = tf.sqrt(
        tf.reduce_mean(
            tf.square(tf.sub(tf.matmul(tpred, conv), tf.matmul(y, conv)))))
else:
    c = np.ones([nbits, 1]).astype('float32')
    conv = tf.Variable(c)
コード例 #38
0
ファイル: QC.py プロジェクト: ystallonne/quantum-clustering
def QC(data,
       sigma,
       replicas=None,
       steps=10000,
       step_size=None,
       batch_size=None,
       how_often_to_test_stop=10,
       with_display=False,
       same_batch_for_all=False):
    if replicas is None:
        replicas = data
    number_of_data_points = data.shape[0]
    number_of_replicas = replicas.shape[0]
    dimensions = data.shape[1]

    if step_size is None:
        step_size = sigma / 7

    # batch_size=None is handled below by using the full data set as the
    # batch (defaulting it here would make that branch unreachable).

    inds_to_move_ = tf.placeholder(dtype=tf.int32, shape=(None, ), name="inds")

    data_ = tf.constant(data, name='data', dtype=DATA_TYPE)
    replicas_ = tf.get_variable(name='replicas',
                                shape=(number_of_replicas, dimensions),
                                dtype=DATA_TYPE,
                                initializer=tf.constant_initializer(
                                    replicas, dtype=DATA_TYPE))
    replicas_to_move_ = tf.nn.embedding_lookup(replicas_,
                                               inds_to_move_,
                                               name="replicas_to_move")
    if batch_size is None:
        batch_ = data_
        squared_distances_ = tf.reduce_sum(tf.square(
            tf.sub(tf.expand_dims(replicas_to_move_, 1),
                   tf.expand_dims(batch_, 0))),
                                           axis=2,
                                           name='squared_distances')
    elif same_batch_for_all is True:
        inds_for_batch_ = tf.random_uniform(shape=(1, batch_size),
                                            minval=0,
                                            maxval=number_of_data_points,
                                            dtype=tf.int32,
                                            name="inds_for_batch")
        batch_ = tf.nn.embedding_lookup(data_, inds_for_batch_, name="batch")
        squared_distances_ = tf.reduce_sum(tf.square(
            tf.sub(tf.expand_dims(replicas_to_move_, 1), batch_)),
                                           axis=2,
                                           name='squared_distances')
    else:
        inds_for_batch_ = tf.random_uniform(shape=(tf.shape(inds_to_move_)[0],
                                                   batch_size),
                                            minval=0,
                                            maxval=number_of_data_points,
                                            dtype=tf.int32,
                                            name="inds_for_batch")
        batch_ = tf.nn.embedding_lookup(data_, inds_for_batch_, name="batch")
        squared_distances_ = tf.reduce_sum(tf.square(
            tf.sub(tf.expand_dims(replicas_to_move_, 1), batch_)),
                                           axis=2,
                                           name='squared_distances')
    gaussians_ = tf.exp(-1 * squared_distances_ / (2 * sigma**2),
                        name='gaussian')
    wave_function_ = tf.reduce_sum(gaussians_, name='wave_function', axis=1)
    laplacian_ = tf.reduce_sum(tf.mul(gaussians_, squared_distances_),
                               name='laplacian',
                               axis=1)
    potential_ = tf.div(laplacian_, wave_function_, name='potential')
    loss_ = tf.reduce_sum(potential_)
    # optimizer_ = tf.train.MomentumOptimizer(learning_rate=step_size,momentum=0.9,use_nesterov=True)
    optimizer_ = tf.train.GradientDescentOptimizer(learning_rate=step_size,
                                                   name='optimizer')
    # optimization_step_ = optimizer_.minimize(loss_,name='optimization_step')
    gradients_ = tf.gradients(loss_, replicas_, name='gradients')
    normalized_gradients_ = tf.nn.l2_normalize(gradients_[0], dim=1)
    optimization_step_ = optimizer_.apply_gradients([(normalized_gradients_,
                                                      replicas_)])
    #     tf.reduce_sum(tf.square(gradients_),axis=1,name="gradient_norms")

    if with_display:
        plt.ion()
        plt.figure()
        ax1 = plt.axes()
        sc = plt.scatter(replicas[:, 0], replicas[:, 1])
        plt.axis([-2, 2, -2, 2])
        # plt.figure()
        # ax2 = plt.axes()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        step = 0
        inds = np.arange(0, replicas.shape[0])
        previous_potential = np.zeros((replicas.shape[0]))
        previous_potential.fill(np.inf)
        while step < steps:
            print(step)

            _, potential_value = sess.run([optimization_step_, potential_],
                                          feed_dict={inds_to_move_: inds})
            if ((step % how_often_to_test_stop) == 0):
                prev_inds = inds
                inds = inds[previous_potential[inds] > potential_value]
                previous_potential[prev_inds] = potential_value
                if len(inds) == 0:
                    break

            if with_display:
                x = sess.run(replicas_)
                sc.set_offsets(x[:, :2])
                # plt.sca(ax2)
                # if np.any(prev_inds==0):
                #     plt.scatter(step,np.mean(potential_values_for_moving_average[0,:]))
                #     plt.scatter(step, potential_value[prev_inds==0],c='r')
                plt.pause(0.0001)

            step += 1
        if with_display:
            plt.ioff()
            plt.show()

        x = sess.run(replicas_)
        return x
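
A minimal usage sketch for QC (toy data; assumes the module-level imports and
DATA_TYPE constant from QC.py):

import numpy as np

data = np.random.randn(200, 2)
# Gradient-descend replicas of the data points on the quantum potential; the
# returned array holds the final replica positions (cluster centers).
centers = QC(data, sigma=0.5, steps=2000)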
コード例 #39
0
rpn_cls_soft = tf.nn.softmax(rpn_cls_score_reshape) 
rpn_cls_score_x = tf.reshape(tf.gather(rpn_cls_score_reshape,tf.where(tf.not_equal(rpn_labels_ind,-1))),[-1,2])
rpn_label = tf.reshape(tf.gather(rpn_labels_ind, tf.where(tf.not_equal(rpn_labels_ind,-1))),[-1])
rpn_loss_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(rpn_cls_score_x, rpn_label))

unique_rpn_cls, o_cls, o_cls_ind = tf.py_func(cls_unique, \
    [rpn_cls_soft, rpn_labels_ind], [tf.float32, tf.float32, tf.float32])
unique_rpn_cls = tf.pack(unique_rpn_cls)

rpn_correct_prediction = tf.py_func(rpn_accuracy, [rpn_cls_soft, rpn_labels_ind], [tf.float32])
rpn_correct_prediction = tf.reshape(tf.pack(rpn_correct_prediction), [-1])
rpn_cls_accuracy = tf.reduce_mean(tf.cast(rpn_correct_prediction, tf.float32))

sigma1 = s1 * s1
smoothL1_sign = tf.cast(tf.less(tf.abs(tf.sub(rpn_bbox_pred, rpn_bbox_targets)),1/sigma1),tf.float32)
rpn_loss_bbox = tf.mul(tf.reduce_mean(tf.reduce_sum(tf.mul(rpn_bbox_outside_weights,tf.add( \
    tf.mul(tf.mul(tf.pow(tf.mul(rpn_bbox_inside_weights, \
        tf.sub(rpn_bbox_pred, rpn_bbox_targets)),2),0.5*sigma1), smoothL1_sign), \
    tf.mul(tf.sub(tf.abs(tf.sub(rpn_bbox_pred, rpn_bbox_targets)),0.5/sigma1),\
        tf.abs(smoothL1_sign-1)))), reduction_indices=[1,2])),1)
rpn_loss_bbox_label = rpn_loss_bbox
zero_count, one_count = tf.py_func(bbox_counter, [rpn_labels_ind], [tf.float32, tf.float32])


#ROI PROPOSAL
rpn_cls_prob = rpn_cls_soft
rpn_cls_prob_reshape = tf.reshape(rpn_cls_prob, [1, 18, height, width])

rpn_rois = tf.py_func(proposal, [rpn_cls_prob_reshape, rpn_bbox_pred, im_info], [tf.float32])
rpn_rois = tf.reshape(rpn_rois, [-1, 5])
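
The rpn_loss_bbox expression above spells out the Fast R-CNN smooth-L1 term with
pre-1.0 TensorFlow ops. An elementwise NumPy reference (the helper name is
illustrative; sigma corresponds to s1 above):

import numpy as np

def smooth_l1(x, sigma):
    # 0.5 * (sigma * x)^2   if |x| < 1 / sigma^2
    # |x| - 0.5 / sigma^2   otherwise
    s2 = sigma * sigma
    ax = np.abs(x)
    return np.where(ax < 1.0 / s2, 0.5 * s2 * x * x, ax - 0.5 / s2)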
コード例 #40
0
ファイル: model_v6.py プロジェクト: jtpils/road_classifier
import scipy
import parameters as params

ch = [4, 8, 16, 16, 8, 4]  # Number of channels for each section (except output section)
k_size = 5;

# Dropout variable
with tf.name_scope('dropout'):
  keep_prob = tf.placeholder(tf.float32)

# Make input and output variables
x = tf.placeholder(tf.float32, shape=[None, params.res["height"], params.res["width"], 1])
y_ = tf.placeholder(tf.bool, shape=[None, params.res["height"], params.res["width"]])
prev_y = tf.placeholder(tf.float32, shape=[None, params.res["height"], params.res["width"]])

prev_y_in = tf.sub(tf.expand_dims(prev_y, 3), 0.5)
x_in = tf.concat(3, [x, prev_y_in])
ch_in = 2

##############################
# Section 1

# Convolution
layer_name = "s1_conv1"
with tf.name_scope(layer_name):
  W = utils.weight_variable([k_size, k_size, ch_in, ch[0]])
  b = utils.bias_variable([ch[0]])
  conv = utils.conv2d(x_in, W, b, 1)

  tanh = tf.nn.tanh(conv)
  s1_conv1 = tf.nn.dropout(tanh, keep_prob)
コード例 #41
0
ファイル: model.py プロジェクト: jfsantos/segan
    def build_model_single_gpu(self, gpu_idx):
        if gpu_idx == 0:
            # create the nodes to load for input pipeline
            filename_queue = tf.train.string_input_producer([self.e2e_dataset])
            self.get_wav, self.get_noisy = read_and_decode(
                filename_queue, 2**14)
        # load the data to input pipeline
        wavbatch, \
        noisybatch = tf.train.shuffle_batch([self.get_wav,
                                             self.get_noisy],
                                             batch_size=self.batch_size,
                                             num_threads=2,
                                             capacity=1000 + 3 * self.batch_size,
                                             min_after_dequeue=1000,
                                             name='wav_and_noisy')
        if gpu_idx == 0:
            self.Gs = []
            self.zs = []
            self.gtruth_wavs = []
            self.gtruth_noisy = []

        self.gtruth_wavs.append(wavbatch)
        self.gtruth_noisy.append(noisybatch)

        # add channels dimension to manipulate in D and G
        wavbatch = tf.expand_dims(wavbatch, -1)
        noisybatch = tf.expand_dims(noisybatch, -1)
        if gpu_idx == 0:
            #self.sample_wavs = tf.placeholder(tf.float32, [self.batch_size,
            #                                               self.canvas_size],
            #                                  name='sample_wavs')
            self.reference_G = self.generator(noisybatch,
                                              is_ref=True,
                                              spk=None,
                                              z_on=False)
            # make a dummy copy of discriminator to have variables and then
            # be able to set up the variable reuse for all other devices
            #dummy_joint = tf.concat(0, [wavbatch, wavbatch, self.reference_G])
            #dummy_words_joint = tf.concat(0, [c_vector, fk_c_vector,
            #                                  c_vector])
            # merge along channels and this would be a real batch

        G = self.generator(noisybatch, is_ref=False, spk=None, z_on=False)
        print('GAE shape: ', G.get_shape())
        self.Gs.append(G)

        self.rl_audio_summ = audio_summary('real_audio', wavbatch)
        self.real_w_summ = histogram_summary('real_wav', wavbatch)
        self.noisy_audio_summ = audio_summary('noisy_audio', noisybatch)
        self.noisy_w_summ = histogram_summary('noisy_wav', noisybatch)
        self.gen_audio_summ = audio_summary('G_audio', G)
        self.gen_summ = histogram_summary('G_wav', G)

        if gpu_idx == 0:
            self.g_losses = []

        # Add the L1 loss to G
        g_loss = tf.reduce_mean(tf.abs(tf.sub(G, wavbatch)))

        self.g_losses.append(g_loss)

        self.g_loss_sum = scalar_summary("g_loss", g_loss)

        if gpu_idx == 0:
            self.get_vars()
コード例 #42
0
ファイル: model.py プロジェクト: jfsantos/segan
    def build_model_single_gpu(self, gpu_idx):
        if gpu_idx == 0:
            # create the nodes to load for input pipeline
            filename_queue = tf.train.string_input_producer([self.e2e_dataset])
            self.get_wav, self.get_noisy = read_and_decode(
                filename_queue, 2**14)
        # load the data to input pipeline
        wavbatch, \
        noisybatch = tf.train.shuffle_batch([self.get_wav,
                                             self.get_noisy],
                                             batch_size=self.batch_size,
                                             num_threads=2,
                                             capacity=1000 + 3 * self.batch_size,
                                             min_after_dequeue=1000,
                                             name='wav_and_noisy')
        if gpu_idx == 0:
            self.Gs = []
            self.zs = []
            self.gtruth_wavs = []
            self.gtruth_noisy = []

        self.gtruth_wavs.append(wavbatch)
        self.gtruth_noisy.append(noisybatch)

        # add channels dimension to manipulate in D and G
        wavbatch = tf.expand_dims(wavbatch, -1)
        noisybatch = tf.expand_dims(noisybatch, -1)
        # by default leaky relu is used
        do_prelu = False
        if self.g_nl == 'prelu':
            do_prelu = True
        if gpu_idx == 0:
            #self.sample_wavs = tf.placeholder(tf.float32, [self.batch_size,
            #                                               self.canvas_size],
            #                                  name='sample_wavs')
            ref_Gs = self.generator(noisybatch,
                                    is_ref=True,
                                    spk=None,
                                    do_prelu=do_prelu)
            print('num of G returned: ', len(ref_Gs))
            self.reference_G = ref_Gs[0]
            self.ref_z = ref_Gs[1]
            if do_prelu:
                self.ref_alpha = ref_Gs[2:]
                self.alpha_summ = []
                for m, ref_alpha in enumerate(self.ref_alpha):
                    # add a summary per alpha
                    self.alpha_summ.append(
                        histogram_summary('alpha_{}'.format(m), ref_alpha))
            # make a dummy copy of discriminator to have variables and then
            # be able to set up the variable reuse for all other devices
            #dummy_joint = tf.concat(0, [wavbatch, wavbatch, self.reference_G])
            #dummy_words_joint = tf.concat(0, [c_vector, fk_c_vector,
            #                                  c_vector])
            # merge along channels and this would be a real batch
            dummy_joint = tf.concat(2, [wavbatch, noisybatch])
            dummy = discriminator(self, dummy_joint, None, reuse=False)

        G, z = self.generator(noisybatch,
                              is_ref=False,
                              spk=None,
                              do_prelu=do_prelu)
        print('G shape: ', G.get_shape())
        self.Gs.append(G)
        self.zs.append(z)

        #joint = tf.concat(0, [wavbatch, wavbatch, G])
        #joint_words = tf.concat(0, [c_vector, fk_c_vector, c_vector])
        # add new dimension to merge with other pairs
        D_rl_joint = tf.concat(2, [wavbatch, noisybatch])
        D_fk_joint = tf.concat(2, [G, noisybatch])
        # build rl discriminator
        d_rl_logits = discriminator(self, D_rl_joint, reuse=True)
        # build fk G discriminator
        d_fk_logits = discriminator(self, D_fk_joint, None, reuse=True)

        # build fk noisy discriminator
        #d_nfk_logits = discriminator(self, (wavbatch - noisybatch), None,
        #                             reuse=True)

        # make disc variables summaries
        self.d_rl_sum = histogram_summary("d_real", d_rl_logits)
        self.d_fk_sum = histogram_summary("d_fake", d_fk_logits)
        #self.d_nfk_sum = histogram_summary("d_noisyfake", d_nfk_logits)

        self.rl_audio_summ = audio_summary('real_audio', wavbatch)
        self.real_w_summ = histogram_summary('real_wav', wavbatch)
        self.noisy_audio_summ = audio_summary('noisy_audio', noisybatch)
        self.noisy_w_summ = histogram_summary('noisy_wav', noisybatch)
        self.gen_audio_summ = audio_summary('G_audio', G)
        self.gen_summ = histogram_summary('G_wav', G)

        if gpu_idx == 0:
            self.g_losses = []
            self.g_l1_losses = []
            self.g_adv_losses = []
            self.d_rl_losses = []
            self.d_fk_losses = []
            #self.d_nfk_losses = []
            self.d_losses = []

        d_rl_loss = tf.reduce_mean(tf.squared_difference(d_rl_logits, 1.))
        d_fk_loss = tf.reduce_mean(tf.squared_difference(d_fk_logits, 0.))
        #d_nfk_loss = tf.reduce_mean(tf.squared_difference(d_nfk_logits, 0.))
        g_adv_loss = tf.reduce_mean(tf.squared_difference(d_fk_logits, 1.))

        d_loss = d_rl_loss + d_fk_loss

        # Add the L1 loss to G
        g_l1_loss = self.l1_lambda * tf.reduce_mean(tf.abs(tf.sub(G,
                                                                  wavbatch)))

        g_loss = g_adv_loss + g_l1_loss

        self.g_l1_losses.append(g_l1_loss)
        self.g_adv_losses.append(g_adv_loss)
        self.g_losses.append(g_loss)
        self.d_rl_losses.append(d_rl_loss)
        self.d_fk_losses.append(d_fk_loss)
        #self.d_nfk_losses.append(d_nfk_loss)
        self.d_losses.append(d_loss)

        self.d_rl_loss_sum = scalar_summary("d_rl_loss", d_rl_loss)
        self.d_fk_loss_sum = scalar_summary("d_fk_loss", d_fk_loss)
        #self.d_nfk_loss_sum = scalar_summary("d_nfk_loss",
        #                                     d_nfk_loss)
        self.g_loss_sum = scalar_summary("g_loss", g_loss)
        self.g_loss_l1_sum = scalar_summary("g_l1_loss", g_l1_loss)
        self.g_loss_adv_sum = scalar_summary("g_adv_loss", g_adv_loss)
        self.d_loss_sum = scalar_summary("d_loss", d_loss)

        if gpu_idx == 0:
            self.get_vars()
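
In equation form, the objective assembled above is a least-squares GAN with an
L1 reconstruction term on the generator (x = clean wav, \tilde{x} = noisy wav):

    L_D = \mathbb{E}[(D(x, \tilde{x}) - 1)^2] + \mathbb{E}[D(G(\tilde{x}), \tilde{x})^2]
    L_G = \mathbb{E}[(D(G(\tilde{x}), \tilde{x}) - 1)^2] + \lambda_{L1} \, \mathbb{E}[|G(\tilde{x}) - x|]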
コード例 #43
0
def preprocess_for_train(image,
                         height,
                         width,
                         bbox,
                         fast_mode=True,
                         scope=None):
    """Distort one image for training a network.

  Distorting images provides a useful technique for augmenting the data
  set during training in order to make the network invariant to aspects
  of the image that do not affect the label.

  Additionally, it creates image_summaries to display the different
  transformations applied to the image.

  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1]; otherwise it is converted to tf.float32, assuming that the range
      is [0, MAX], where MAX is the largest positive representable number for
      the int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
    height: integer
    width: integer
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged
      as [ymin, xmin, ymax, xmax].
    fast_mode: Optional boolean, if True avoids slower transformations (i.e.
      bi-cubic resizing, random_hue or random_contrast).
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of distorted image used for training with range [-1, 1].
  """
    with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
        if bbox is None:
            bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                               dtype=tf.float32,
                               shape=[1, 1, 4])
        if image.dtype != tf.float32:
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        # Each bounding box has shape [1, num_boxes, box coords] and
        # the coordinates are ordered [ymin, xmin, ymax, xmax].
        image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                                      bbox)
        tf.image_summary('image_with_bounding_boxes', image_with_box)

        distorted_image, distorted_bbox = distorted_bounding_box_crop(
            image, bbox)
        # Restore the shape since the dynamic slice based upon the bbox_size loses
        # the third dimension.
        distorted_image.set_shape([None, None, 3])
        image_with_distorted_box = tf.image.draw_bounding_boxes(
            tf.expand_dims(image, 0), distorted_bbox)
        tf.image_summary('images_with_distorted_bounding_box',
                         image_with_distorted_box)

        # This resizing operation may distort the images because the aspect
        # ratio is not respected. We select a resize method in a round robin
        # fashion based on the thread number.
        # Note that ResizeMethod contains 4 enumerated resizing methods.

        # We select only 1 case for fast_mode bilinear.
        num_resize_cases = 1 if fast_mode else 4
        distorted_image = apply_with_random_selector(
            distorted_image,
            lambda x, method: tf.image.resize_images(x, [height, width], method
                                                     ),
            num_cases=num_resize_cases)

        tf.image_summary('cropped_resized_image',
                         tf.expand_dims(distorted_image, 0))

        # Randomly flip the image horizontally.
        distorted_image = tf.image.random_flip_left_right(distorted_image)

        # Randomly distort the colors. There are 4 ways to do it.
        distorted_image = apply_with_random_selector(
            distorted_image,
            lambda x, ordering: distort_color(x, ordering, fast_mode),
            num_cases=4)

        tf.image_summary('final_distorted_image',
                         tf.expand_dims(distorted_image, 0))
        distorted_image = tf.sub(distorted_image, 0.5)
        distorted_image = tf.mul(distorted_image, 2.0)
        return distorted_image
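
A minimal usage sketch (the file name is hypothetical; same TF 0.x-era API as
the function itself):

raw = tf.read_file('example.jpg')
image = tf.image.decode_jpeg(raw, channels=3)
# Distortion for training; the result is a float tensor in [-1, 1].
distorted = preprocess_for_train(image, height=299, width=299, bbox=None)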
コード例 #44
0
ファイル: tmb2vec.py プロジェクト: ztypaker/trajectory2vec
def trajectory2Vec():
    def loopf(prev, i):
        return prev

    # Parameters
    learning_rate = 0.0001
    training_epochs = 300
    display_step = 100

    # Network Parameters
    # size of the LSTM hidden state (the LSTM keeps 2x this amount of state
    # internally: cell state plus hidden state)
    size = 100
    # number of sequences per batch
    batch_size = 1
    # maximum number of timesteps per sequence
    max_n_steps = 17
    # dimension of each element/frame of the sequence
    frame_dim = 18

    input_length = tf.placeholder(tf.int32)

    initializer = tf.random_uniform_initializer(-1, 1)

    # the sequences, has n steps of maximum size
    # seq_input = tf.placeholder(tf.float32, [batch_size, max_n_steps, frame_dim])
    seq_input = tf.placeholder(tf.float32, [max_n_steps, batch_size, frame_dim])
    # what timesteps we want to stop at, notice it's different for each batch hence dimension of [batch]

    # inputs for rnn needs to be a list, each item/frame being a timestep.
    # we need to split our input into each timestep, and reshape it because split keeps dims by default

    useful_input = seq_input[0:input_length[0]]
    loss_inputs = [tf.reshape(useful_input, [-1])]
    encoder_inputs = [item for item in tf.unpack(seq_input)]
    # if encoder input is "X, Y, Z", then decoder input is "0, X, Y, Z". Therefore, the decoder size
    # and target size equal encoder size plus 1. For simplicity, here I dropped the last one.
    decoder_inputs = ([tf.zeros_like(encoder_inputs[0], name="GO")] + encoder_inputs[:-1])
    targets = encoder_inputs

    # basic LSTM seq2seq model
    cell = tf.nn.rnn_cell.LSTMCell(size, state_is_tuple=True, use_peepholes=True)
    _, enc_state = tf.nn.rnn(cell, encoder_inputs, sequence_length=input_length[0], dtype=tf.float32)
    cell = tf.nn.rnn_cell.OutputProjectionWrapper(cell, frame_dim)
    dec_outputs, dec_state = tf.nn.seq2seq.rnn_decoder(decoder_inputs, enc_state, cell, loop_function=loopf)


    # flatten the prediction and target to compute squared error loss
    y_true = [tf.reshape(encoder_input, [-1]) for encoder_input in encoder_inputs]
    y_pred = [tf.reshape(dec_output, [-1]) for dec_output in dec_outputs]

    # Define loss and optimizer, minimize the squared error
    loss = 0
    for i in range(len(loss_inputs)):
        loss += tf.reduce_sum(tf.square(tf.sub(y_pred[i], y_true[len(loss_inputs) - i - 1])))
    optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)

    # Initializing the variables
    init = tf.initialize_all_variables()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
        # Training cycle
        input_datas = cPickle.load(open('./simulated_data/sim_normal_behavior_sequences'))
        trajectoryVecs = []
        j = 0
        for input_data in input_datas:
            print 'Sample:'
            print j
            input_len = len(input_data)
            print input_len
            # pad shorter sequences with zero frames up to max_n_steps
            default = [0] * frame_dim
            while len(input_data) < max_n_steps:
                input_data.append(default)
            x = np.array(input_data)
            print np.shape(x[0])
            x = x.reshape((max_n_steps, batch_size, frame_dim))
            embedding = None
            for epoch in range(training_epochs):
                feed = {seq_input: x, input_length: np.array([input_len])}
                # Fit training using batch data
                _, cost_value, embedding, en_int, de_outs, loss_in = sess.run(
                    [optimizer, loss, enc_state, encoder_inputs, dec_outputs, loss_inputs], feed_dict=feed)
                # Display logs per epoch step
                if epoch % display_step == 0:
                    print "logits"
                    a = sess.run(y_pred, feed_dict=feed)
                    print "labels"
                    b = sess.run(y_true, feed_dict=feed)

                    print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(cost_value))
            trajectoryVecs.append(embedding)
            print("Optimization Finished!")
            j = j + 1
        fout = file('./simulated_data/sim_traj_vec_normal_reverse', 'w')
        cPickle.dump(trajectoryVecs, fout)
コード例 #45
0
ファイル: deconv.py プロジェクト: ugnelis/code-samples
def main(argv):
    # Import data
    dataset = utils.read_files_with_skimage(RESOURCE)
    input, output = utils.split_dataset(dataset)
    batch_size = len(input)

    # Make data as Numpy array
    input = np.asarray(input)
    output = np.asarray(output)

    # HEIGHT * WIDTH because y place holder also has HEIGHT * WIDTH
    output = np.reshape(output, (batch_size, HEIGHT, WIDTH))

    # Test value
    test_x = skimage.io.imread("../../dataset/1.jpg", True)
    test_x = np.reshape(test_x, (1, 180, 320))

    # Tensorflow placeholders
    x = tf.placeholder(tf.float32, [None, HEIGHT, WIDTH])
    y = tf.placeholder(tf.float32, [None, HEIGHT, WIDTH], name="ground_truth")

    weights = {
        # 3x3 conv, 1 input channel, 32 outputs
        'wc1': tf.Variable(tf.random_normal([3, 3, 1, 32])),
        # 3x3 conv, 32 input channels, 64 outputs
        'wc2': tf.Variable(tf.random_normal([3, 3, 32, 64])),
        'wc3': tf.Variable(tf.random_normal([3, 3, 64, 128])),
        'wdc1': tf.Variable(tf.random_normal([2, 2, 64, 128])),
        'wdc2': tf.Variable(tf.random_normal([2, 2, 32, 64])),
        'wdc3': tf.Variable(tf.random_normal([2, 2, 1, 32])),
    }

    biases = {
        'bc1': tf.Variable(tf.random_normal([32])),
        'bc2': tf.Variable(tf.random_normal([64])),
        'bc3': tf.Variable(tf.random_normal([128])),
        'bdc1': tf.Variable(tf.random_normal([64])),
        'bdc2': tf.Variable(tf.random_normal([32])),
        'bdc3': tf.Variable(tf.random_normal([1])),
    }

    # Construct model
    pred = multilayer_perceptron(x, weights, biases)
    pred = tf.pack(pred)
    pred = tf.reshape(pred, [-1, HEIGHT, WIDTH])

    with tf.name_scope("opt") as scope:
        cost = tf.reduce_sum(tf.pow((pred - y), 2)) / (2 * batch_size)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)

    # Evaluate model
    with tf.name_scope("acc") as scope:
        # accuracy is the difference between prediction and ground truth matrices
        correct_pred = tf.equal(0, tf.cast(tf.sub(pred, y), tf.int32))  # compare prediction, not the scalar cost
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initializing the variables
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)

        # Initializing summary writer for TensorBoard
        train_writer = tf.summary.FileWriter('/tmp/log_dir/', sess.graph)

        for step in range(10):
            feed_dict = {x: input, y: output}
            _, loss, acc = sess.run([optimizer, cost, accuracy],
                                    feed_dict=feed_dict)

            if step % 10 == 0:
                print("Minibatch loss at step ", step, ": ", cost)
                print("Minibatch accuracy: ", acc)

        print("Done")

        # Make a prediction
        prediction = sess.run(pred, feed_dict={x: test_x})
        prediction = np.reshape(prediction, (HEIGHT, WIDTH))
        prediction = prediction.astype(int)
        print(prediction)

        prediction[prediction > 255] = 255
        prediction[prediction < 0] = 0

        print(prediction)

        skimage.io.imsave("works.jpg", prediction)
コード例 #46
0
ファイル: core.py プロジェクト: zhuohuwu0603/tflearn
def highway(incoming,
            n_units,
            activation='linear',
            transform_dropout=None,
            weights_init='truncated_normal',
            bias_init='zeros',
            regularizer=None,
            weight_decay=0.001,
            trainable=True,
            restore=True,
            reuse=False,
            scope=None,
            name="FullyConnectedHighway"):
    """ Fully Connected Highway.

    A fully connected highway network layer, with some inspiration from
    [https://github.com/fomorians/highway-fcn](https://github.com/fomorians/highway-fcn).

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        transform_dropout: `float`: Keep probability on the highway transform gate.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer's weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnectedHighway'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        W_t: `Tensor`. Variable representing units weights for transform gate.
        b: `Tensor`. Variable representing biases.
        b_t: `Tensor`. Variable representing biases for transform gate.

    Links:
        [https://arxiv.org/abs/1505.00387](https://arxiv.org/abs/1505.00387)

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    # Variable Scope fix for older TF
    try:
        vscope = tf.variable_scope(scope,
                                   default_name=name,
                                   values=[incoming],
                                   reuse=reuse)
    except Exception:
        vscope = tf.variable_op_scope([incoming], scope, name, reuse=reuse)

    with vscope as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable('W',
                        shape=[n_inputs, n_units],
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        if isinstance(bias_init, str):
            bias_init = initializations.get(bias_init)()
        b = va.variable('b',
                        shape=[n_units],
                        initializer=bias_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        # Weight and bias for the transform gate
        W_T = va.variable('W_T',
                          shape=[n_inputs, n_units],
                          regularizer=None,
                          initializer=W_init,
                          trainable=trainable,
                          restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W_T)

        b_T = va.variable('b_T',
                          shape=[n_units],
                          initializer=tf.constant_initializer(-1),
                          trainable=trainable,
                          restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b_T)

        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            incoming = tf.reshape(incoming, [-1, n_inputs])

        if isinstance(activation, str):
            activation = activations.get(activation)
        elif hasattr(activation, '__call__'):
            activation = activation
        else:
            raise ValueError("Invalid Activation.")

        H = activation(tf.matmul(incoming, W) + b)
        T = tf.sigmoid(tf.matmul(incoming, W_T) + b_T)
        if transform_dropout:
            T = dropout(T, transform_dropout)
        C = tf.sub(1.0, T)

        inference = tf.add(tf.mul(H, T), tf.mul(incoming, C))

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.W_t = W_T
    inference.b = b
    inference.b_t = b_T

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
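
A minimal usage sketch in TFLearn style (sizes are illustrative; note that the
carry connection tf.mul(incoming, C) requires n_units to equal the flattened
input dimension):

import tflearn

net = tflearn.input_data(shape=[None, 256])
net = highway(net, 256, activation='relu', name='highway_1')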
コード例 #47
0
with tf.Graph().as_default():

    is_train = tf.placeholder(tf.bool)
    learning_rate = tf.placeholder(tf.float32, [])
    images_tf = tf.placeholder(tf.float32, [batch_size, 33, 33, 1],
                               name="images")
    cs_meas = tf.placeholder(tf.float32, [batch_size, 1, m, 1], name='cs_meas')
    keep_prob = tf.placeholder(tf.float32, [])

    labels_D = tf.concat(0, [tf.ones([batch_size]), tf.zeros([batch_size])])
    labels_G = tf.ones([batch_size])

    bn1, bn2, bn3, bn4, bn5, reconstruction_ori = csgan.build_reconstruction(
        cs_meas, is_train)
    loss_recon = tf.div(
        tf.reduce_sum(tf.square(tf.sub(images_tf, reconstruction_ori))),
        2. * batch_size)

    adversarial_pos, adversarial_pos_sig = csgan.build_adversarial(
        images_tf, is_train)
    adversarial_neg, adversarial_neg_sig = csgan.build_adversarial(
        reconstruction_ori, is_train, reuse=True
    )  # I changed this from reconstruction to reconstruction_ori. No idea which is right
    adversarial_all = tf.concat(0, [adversarial_pos, adversarial_neg])

    loss_adv_D = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(adversarial_all, labels_D))
    loss_adv_G = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(adversarial_neg, labels_G))

    loss_G = loss_recon * lambda_recon + loss_adv_G * lambda_adv
コード例 #48
0
 def b1(x, y):
     nx = tf.sub(x, 1)
     ny = y + gen_data_flow_ops._stack_pop(h, tf.float32)
     return [nx, ny]
コード例 #49
0
 def ms_error(self, y_pre, y_target):
     return tf.square(tf.sub(y_pre, y_target))
コード例 #50
0
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(0, n_steps, x)
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    all_lstm_outputs = tf.reshape(tf.stack(outputs, axis=1),
                                  [-1, n_steps * n_hidden])
    output = tf.matmul(outputs[-1], weights['out']) + biases['out']
    # output = tf.matmul(all_lstm_outputs, weights['all_out']) + biases['out']
    return tf.nn.dropout(output, 0.75)


pred = RNN(x, weights, biases)
n_samples = tf.cast(tf.shape(x)[0], tf.float32)
cost = tf.reduce_sum(tf.pow(pred - y, 2)) / (2 * n_samples)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
accuracy = tf.reduce_mean(tf.abs(tf.sub(pred, y)))
init = tf.initialize_all_variables()
saver = tf.train.Saver()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # saver.restore(sess, "./lstm_models/200/bball_scraping/lstm_models/200/lstm_model_n_steps_175_750_0.773770080762_1.33473893366_0.408350682807.ckpt")
    avg_pred_diff = []
    avg_pred_vals = []
    avg_reg_vals = []
    max_r_values = [0, 0, 0]
    accuracy_data = []
    single_game_pred = []
    for step in range(250):
コード例 #51
0
ファイル: model.py プロジェクト: joconnor-ml/fish-recognition
    def build_inception_graph(self):
        """Builds an inception graph and add the necessary input & output tensors.

      To use other Inception models modify this file. Also preprocessing must be
      modified accordingly.

      See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
      details about InceptionV3.

    Returns:
      input_jpeg: A placeholder for jpeg string batch that allows feeding the
                  Inception layer with image bytes for prediction.
      inception_embeddings: The embeddings tensor.
    """

        # These constants are set by Inception v3's expectations.
        height = 299
        width = 299
        channels = 3

        image_str_tensor = tf.placeholder(tf.string, shape=[None])

        # The CloudML Prediction API always "feeds" the Tensorflow graph with
        # dynamic batch sizes e.g. (?,).  decode_jpeg only processes scalar
        # strings because it cannot guarantee a batch of images would have
        # the same output size.  We use tf.map_fn to give decode_jpeg a scalar
        # string from dynamic batches.
        def decode_and_resize(image_str_tensor):
            """Decodes jpeg string, resizes it and returns a uint8 tensor."""
            image = tf.image.decode_jpeg(image_str_tensor, channels=channels)
            # Note resize expects a batch_size, but tf.map_fn suppresses that index,
            # thus we have to expand then squeeze.  Resize returns float32 in the
            # range [0, uint8_max]
            image = tf.expand_dims(image, 0)
            image = tf.image.resize_bilinear(image, [height, width],
                                             align_corners=False)
            image = tf.squeeze(image, squeeze_dims=[0])
            image = tf.cast(image, dtype=tf.uint8)
            return image

        image = tf.map_fn(decode_and_resize,
                          image_str_tensor,
                          back_prop=False,
                          dtype=tf.uint8)
        # convert_image_dtype, also scales [0, uint8_max] -> [0 ,1).
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)

        # Then shift images to [-1, 1) for Inception.
        # Try-except to make the code compatible across sdk versions
        try:
            image = tf.subtract(image, 0.5)
            image = tf.multiply(image, 2.0)
        except AttributeError:
            image = tf.sub(image, 0.5)
            image = tf.mul(image, 2.0)

        # Build Inception layers, which expect A tensor of type float from [-1, 1)
        # and shape [batch_size, height, width, channels].
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            _, end_points = inception.inception_v3(image, is_training=False)

        inception_embeddings = end_points['PreLogits']
        inception_embeddings = tf.squeeze(inception_embeddings, [1, 2],
                                          name='SpatialSqueeze')
        return image_str_tensor, inception_embeddings
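
A minimal usage sketch (file and checkpoint paths are hypothetical; assumes an
instance `model` of the surrounding class):

input_jpeg, embeddings = model.build_inception_graph()
with tf.Session() as sess:
    # Restore pre-trained Inception v3 weights before running.
    tf.train.Saver().restore(sess, 'inception_v3.ckpt')
    with open('fish.jpg', 'rb') as f:
        emb = sess.run(embeddings, feed_dict={input_jpeg: [f.read()]})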
コード例 #52
0
def tf_voltage(timeSteptf, Nrn_trace, g_L, volt_S, E_L, NrON_1, spineFactor,
               Nr_D, volt_D, Conduct2, E_AMPA, dist_att, E_NMDA, delta_T,
               Nrn_u_lp, uM_Tconst, uP_Tconst, u_delay, Homeostat_tau, C,
               x_reset, x_tau, V_T_tconst, V_Trest, Noise_tau, Noise_avg,
               Noise_std):

    #------------
    # Currents
    #------------

    ### external current ###
    I_ext = Nrn_trace[:, 4]  ## soma
    #     I_ext_D = repmat([prox_Backprop*Nrn_trace(:,5) dist_Backprop*Nrn_trace(:,5)],1,1,Nr_D)  ## dendrites

    ### leak current ###
    I_L = g_L * tf.sub(volt_S, E_L * tf.ones(NrON_1))  ## soma
    I_L_D = tf.mul(
        tf.tile(tf.expand_dims([[spineFactor * g_L, spineFactor * g_L]], 2),
                [NrON_1, 1, Nr_D]),
        tf.sub(volt_D, E_L * tf.ones((NrON_1, 2, Nr_D))))  ## dendrites

    ### Noise ###
    I_noise = Nrn_trace[:, 5]  ## soma
    #     I_noise_D = repmat([prox_Backprop*Nrn_trace(:,6) dist_Backprop*Nrn_trace(:,6)],1,1,Nr_D)  ## dendrites

    ### AMPA current ###
    I_prox_ampa = tf.scalar_mul(
        -1,
        tf.mul(
            Conduct2[:, 0, None, :],
            tf.sub(volt_D[:, 0, None, :], E_AMPA * tf.ones(
                (NrON_1, 1, Nr_D)))))
    I_dist_ampa = tf.mul(
        -1 * (1 / dist_att),
        tf.mul(
            Conduct2[:, 1, None, :],
            tf.sub(volt_D[:, 1, None, :], E_AMPA * tf.ones(
                (NrON_1, 1, Nr_D)))))

    I_AMPA = tf.concat(1, [I_prox_ampa, I_dist_ampa])

    ### NMDA current ###
    corrf1 = tf.constant(100, dtype=tf.float32)
    corrf2 = tf.constant(2, dtype=tf.float32)
    I_prox_nmda = tf.scalar_mul(
        -1,
        tf.mul(
            Conduct2[:, 2, None, :],
            tf.div(
                tf.sub(volt_D[:, 0, None, :],
                       E_NMDA * tf.ones((NrON_1, 1, Nr_D))),
                (tf.add(
                    tf.to_float(1),
                    tf.div(tf.exp(tf.mul(-0.065, volt_D[:, 0, None, :])),
                           3.57))))))
    I_dist_nmda = tf.mul(
        -1 * (1 / dist_att),
        tf.mul(
            Conduct2[:, 3, None, :],
            tf.div(
                tf.sub(volt_D[:, 1, None, :],
                       E_NMDA * tf.ones((NrON_1, 1, Nr_D))),
                (tf.add(
                    tf.to_float(1),
                    tf.div(tf.exp(tf.mul(-0.065, volt_D[:, 1, None, :])),
                           3.57))))))

    I_NMDA = tf.concat(1, [I_prox_nmda, I_dist_nmda])

    ### Exponential term (fast Na activation in soma) ###
    I_exp = tf.scalar_mul(
        g_L * delta_T, tf.exp(tf.div(tf.sub(volt_S, Nrn_trace[:, 2]),
                                     delta_T)))

    ### Synaptic current ###
    I_syn_dend = tf.add(I_AMPA, I_NMDA)

    ### Dendrites to soma current ###

    I_soma_prox = 2500 * tf.mul(
        tf.sub(
            tf.tile(tf.expand_dims(tf.expand_dims(volt_S, 1), 2),
                    [1, 1, Nr_D]), volt_D[:, 0, None, :]),
        tf.tile(
            tf.expand_dims(
                tf.expand_dims(tf.to_float(tf.equal(Nrn_trace[:, 3], 0)), 1),
                2), [1, 1, Nr_D]))

    I_prox_dist = .5 * 1500 * tf.sub(volt_D[:, 0, None, :], volt_D[:, 1,
                                                                   None, :])
    backpr_som_prox = 0.02  #0.04
    backpr_prox_dist = 0.15  #15  #0.09
    V_fact = 25

    ### Total dendritic current ###

    tval21 = tf.add(
        tf.mul(I_soma_prox, tf.to_float(tf.greater(I_soma_prox, 0))),
        tf.mul(V_fact * backpr_som_prox,
               tf.mul(I_soma_prox, tf.to_float(tf.less(I_soma_prox, 0)))))
    tval22 = tf.add(
        tf.mul(backpr_prox_dist,
               tf.mul(I_prox_dist, tf.to_float(tf.less(I_prox_dist, 0)))),
        tf.mul(I_prox_dist, tf.to_float(tf.greater(I_prox_dist, 0))))
    tval2 = tf.sub(tval21, tval22)

    tval31 = tf.mul(backpr_prox_dist,
                    tf.mul(I_prox_dist, tf.to_float(tf.less(I_prox_dist, 0))))
    tval32 = tf.mul(I_prox_dist, tf.to_float(tf.greater(I_prox_dist, 0)))
    tval3 = tf.add(tval31, tval32)

    tval1 = tf.concat(1, [tval2, tval3])
    I_dend = tf.add(I_syn_dend, tval1)  #I_syn_dend

    #     I_dend(1,1,1) = I_dend(1,1,1) + 9000
    #     I_dend(1,2,1) = I_dend(1,2,1) + 15500

    #---------------------
    # Currents
    #------------------------

    ### low-pass filtered membrane potentials ###
    lp1 = tf.add(
        Nrn_u_lp[:, 0:2, :],
        tf.mul((timeSteptf / (uM_Tconst)),
               tf.sub(u_delay[:, :, :, 0], Nrn_u_lp[:, 0:2, :])))
    lp2 = tf.add(
        Nrn_u_lp[:, 2:4, :],
        tf.mul((timeSteptf / (uP_Tconst)),
               tf.sub(u_delay[:, :, :, 0], Nrn_u_lp[:, 2:4, :])))
    Nrn_u_lp2 = tf.concat(1, [lp1, lp2])

    traces_0 = tf.add(
        Nrn_trace[:, 0],
        tf.mul((timeSteptf / Homeostat_tau),
               tf.sub(
                   tf.mul((volt_S - E_L), tf.to_float(tf.greater(volt_S,
                                                                 E_L))),
                   Nrn_trace[:, 0])))  #rectified to avoid the point u = -u_ref

    ### store membrane potential of the previous time step ###
    Nrn_u_delay_shift = u_delay[:, :, :, 1:]
    Nrn_u_delay_end = tf.expand_dims(volt_D, 3)
    Nrn_u_delay2 = tf.concat(3, [Nrn_u_delay_shift, Nrn_u_delay_end])

    ### Somatic Voltage evolution ###

    volt_S2 = tf.add(
        volt_S,
        tf.mul((timeSteptf / C),
               tf.add(
                   tf.sub(
                       I_exp,
                       tf.add(
                           I_L,
                           tf.squeeze(
                               tf.reduce_sum(
                                   tf.mul(
                                       backpr_som_prox,
                                       tf.mul(
                                           I_soma_prox,
                                           tf.to_float(tf.less(I_soma_prox,
                                                               0)))), 2)))),
                   tf.add(I_ext, I_noise))
               ))  #- sum((1/V_fact)*I_soma_prox.*(I_soma_prox>0),3)

    ### Dendritic voltage evolution ###
    C_matr = tf.mul(spineFactor * C, tf.ones((NrON_1, 2, Nr_D)))
    #    C_matr = tf.tile(,[NrON,2,Nr_D])
    volt_D2 = tf.add(volt_D,
                     tf.mul(timeSteptf, tf.div(tf.sub(I_dend, I_L_D), C_matr)))

    ### x (spike trace) ###
    traces_1 = tf.sub(
        tf.add(Nrn_trace[:, 1],
               tf.mul(tf.to_float(tf.equal(Nrn_trace[:, 3], 1)), x_reset)),
        tf.mul((timeSteptf / x_tau), Nrn_trace[:, 1]))

    ### threshold V_T evolution ###
    traces_2 = tf.add(
        Nrn_trace[:, 2],
        tf.mul((timeSteptf / V_T_tconst), tf.sub(V_Trest, Nrn_trace[:, 2])))

    ### Noise ###
    auxn1 = tf.mul(tf.to_float(tf.sqrt(tf.to_float(2 * Noise_tau))),
                   tf.random_normal([NrON_1]))
    auxn2 = tf.mul(tf.to_float(Noise_avg), tf.ones(NrON_1))

    traces_5 = tf.add(
        Nrn_trace[:, 5],
        tf.mul(tf.to_float(timeSteptf / Noise_tau),
               tf.sub(tf.add(tf.mul(auxn1, tf.to_float(Noise_std)), auxn2),
                      Nrn_trace[:, 5])))  # filter the noise

    #
    traces_34 = Nrn_trace[:, 3:5]
    Nrn_trace2 = tf.concat(1, [
        tf.pack([traces_0, traces_1, traces_2], axis=1), traces_34,
        tf.expand_dims(traces_5, 1)
    ])

    return volt_S2, volt_D2, Nrn_trace2, Nrn_u_lp2, Nrn_u_delay2
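
The excerpt above shows only the tail of the graph-building function, so the driver loop below is a hypothetical sketch: it assumes volt_S, volt_D, Nrn_trace, Nrn_u_lp and u_delay are tf.Variables, that the builder is named build_update_step (a made-up name), and it glosses over read/write ordering subtleties when several assigns run in one session call.

# Hypothetical driver loop (build_update_step and n_steps are assumptions)
volt_S2, volt_D2, Nrn_trace2, Nrn_u_lp2, Nrn_u_delay2 = build_update_step()
step = [volt_S.assign(volt_S2), volt_D.assign(volt_D2),
        Nrn_trace.assign(Nrn_trace2), Nrn_u_lp.assign(Nrn_u_lp2),
        u_delay.assign(Nrn_u_delay2)]
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())  # TF 0.x initializer, as in these examples
    for _ in range(n_steps):
        sess.run(step)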
Code example #53
    def __init__(self, l, m, n, dim, n_iterations=100, alpha=None, sigma=None):
        self._m = m
        self._n = n
        self._l = l
        if alpha is None:
            alpha = 0.3
        else:
            alpha = float(alpha)
        if sigma is None:
            sigma = max(l, m, n) / 2.0
        else:
            sigma = float(sigma)
        self._n_iterations = abs(int(n_iterations))
 
        self._graph = tf.Graph()
 
        with self._graph.as_default():
            self._weightage_vects = tf.Variable(tf.random_normal(
                [l*m*n, dim]))
 
            self._location_vects = tf.constant(np.array(
                list(self._neuron_locations(l, m, n))))
 
            self._vect_input = tf.placeholder("float", [dim])
            self._iter_input = tf.placeholder("float")

            bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(
                tf.pow(tf.sub(self._weightage_vects, tf.pack(
                    [self._vect_input for i in range(l*m*n)])), 2), 1)),
                                  0)
 
            slice_input = tf.pad(tf.reshape(bmu_index, [1]),
                                 np.array([[0, 1]]))
            bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,
                                          tf.constant(np.array([1, 3]))),
                                 [3])
 
            learning_rate_op = tf.sub(1.0, tf.div(self._iter_input,
                                                  self._n_iterations))
            _alpha_op = tf.mul(alpha, learning_rate_op)
            _sigma_op = tf.mul(sigma, learning_rate_op)

            bmu_distance_squares = tf.reduce_sum(tf.pow(tf.sub(
                self._location_vects, tf.pack(
                    [bmu_loc for i in range(l*m*n)])), 2), 1)

            neighbourhood_func = tf.exp(tf.neg(tf.div(tf.cast(
                bmu_distance_squares, "float32"), tf.pow(_sigma_op, 2))))
            learning_rate_op = tf.mul(_alpha_op, neighbourhood_func)

            learning_rate_multiplier = tf.pack([tf.tile(tf.slice(
                learning_rate_op, np.array([i]), np.array([1])), [dim])
                                               for i in range(l*m*n)])
            weightage_delta = tf.mul(
                learning_rate_multiplier,
                tf.sub(tf.pack([self._vect_input for i in range(l*m*n)]),
                       self._weightage_vects))                                         
            new_weightages_op = tf.add(self._weightage_vects,
                                       weightage_delta)
            self._training_op = tf.assign(self._weightage_vects,
                                          new_weightages_op)                                       
 
            self._sess = tf.Session()
 
            init_op = tf.global_variables_initializer()
            self._sess.run(init_op)
Code example #54
def l2distance_tf(x, y):
    return tf.sqrt(tf.reduce_sum(tf.square(tf.sub(x, y)), 1))
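
A quick usage check for l2distance_tf, written against the TF 0.x API used throughout these examples (tf.sub was renamed tf.subtract in TF 1.0):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 3])
y = tf.placeholder(tf.float32, [None, 3])
dist = l2distance_tf(x, y)  # row-wise Euclidean distance, shape [None]

with tf.Session() as sess:
    print(sess.run(dist, feed_dict={
        x: np.array([[0., 0., 0.], [1., 1., 1.]], np.float32),
        y: np.array([[3., 4., 0.], [1., 1., 1.]], np.float32)}))
    # -> [5.0, 0.0]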
Code example #55
    def __init__(self,
                 state_size,
                 no_layers=2,
                 hidden_layer_size=128,
                 reg_L2=0.0):

        self.no_layers = no_layers
        self.hidden_layer_size = hidden_layer_size
        self.reg_L2 = reg_L2

        # Containers for the per-layer tensors (missing from the excerpt;
        # they are used in the loop below and must exist beforehand).
        self.in_, self.W, self.B = [], [], []
        self.out, self.out_activated = [], []

        self.l2_loss = tf.constant(0.0, name="l2_loss")
        self.state_placeholder = tf.placeholder(dtype=tf.float32,
                                                shape=[None, state_size],
                                                name="state_placeholder")
        self.reward_placeholder = tf.placeholder(dtype=tf.float32,
                                                 shape=[None],
                                                 name="reward_placeholder")
        self.dropout_input = tf.placeholder(dtype=tf.float32,
                                            name="dropout_input")

        self.in_.append(self.state_placeholder)
        for i in range(self.no_layers):
            if i == 0:
                self.W.append(
                    tf.Variable(tf.truncated_normal(
                        shape=[state_size, self.hidden_layer_size],
                        stddev=0.01),
                                dtype=tf.float32,
                                name="weights" + str(i)))
                self.B.append(
                    tf.Variable(tf.truncated_normal(
                        shape=[self.hidden_layer_size], stddev=0.01),
                                dtype=tf.float32,
                                name="Bias" + str(i)))
            elif i == self.no_layers - 1:
                self.W.append(
                    tf.Variable(tf.truncated_normal(
                        shape=[self.hidden_layer_size, 1], stddev=0.01),
                                dtype=tf.float32,
                                name="weights" + str(i)))
                self.B.append(
                    tf.Variable(tf.truncated_normal(shape=[1], stddev=0.01),
                                dtype=tf.float32,
                                name="Bias" + str(i)))
            else:
                self.W.append(
                    tf.Variable(tf.truncated_normal(
                        shape=[self.hidden_layer_size, self.hidden_layer_size],
                        stddev=0.01),
                                dtype=tf.float32,
                                name="weights" + str(i)))
                self.B.append(
                    tf.Variable(tf.truncated_normal(
                        shape=[self.hidden_layer_size], stddev=0.01),
                                dtype=tf.float32,
                                name="Bias" + str(i)))
            self.l2_loss += tf.nn.l2_loss(self.W[i])
            self.l2_loss += tf.nn.l2_loss(self.B[i])
            self.out.append(
                tf.nn.xw_plus_b(self.in_[i],
                                self.W[i],
                                self.B[i],
                                name="Prediction" + str(i)))
            if i == 0 or i == self.no_layers - 1:
                self.dropout_t = tf.constant(1.0)
            else:
                self.dropout_t = self.dropout_input
            self.out_activated.append(
                tf.nn.relu(tf.nn.dropout(self.out[i], self.dropout_t)))
            self.in_.append(self.out_activated[i])

        # squeeze [batch, 1] -> [batch]; without it, subtracting the
        # prediction from the [batch] rewards broadcasts to [batch, batch]
        self.prediction = tf.squeeze(self.out_activated[self.no_layers - 1],
                                     [1])
        self.loss = tf.reduce_sum(
            tf.square(tf.sub(self.reward_placeholder, self.prediction)) +
            self.reg_L2 * self.l2_loss)
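
The excerpt omits the enclosing class statement; assuming a wrapper class (hypothetically named ValueNetwork) and an Adam optimizer (also an assumption), a minimal training-step sketch:

import numpy as np
import tensorflow as tf

net = ValueNetwork(state_size=4)  # hypothetical class name
train_op = tf.train.AdamOptimizer(1e-3).minimize(net.loss)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    states = np.random.randn(32, 4).astype(np.float32)
    rewards = np.random.randn(32).astype(np.float32)
    _, loss = sess.run([train_op, net.loss],
                       feed_dict={net.state_placeholder: states,
                                  net.reward_placeholder: rewards,
                                  net.dropout_input: 0.8})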
Code example #56
File: image_classify_tf.py Project: 0rC0/pyezminc
                                       0.0))

                # going to iterate over all outputs, because I don't know how to do it easier
                out = []

                mean = tf.Variable(init_means, name="means")
                sigma = tf.Variable(init_sds, name="covariance")

                for i in range(num_classes):

                    M = tf.squeeze(tf.split(0, num_classes, mean)[i])
                    s = tf.squeeze(tf.split(0, num_classes, sigma)[i])

                    if num_features == 1:
                        # special case with single modality
                        d = tf.sub(x, M)
                        m2 = tf.mul(tf.div(d, s), d)
                    else:
                        # calculate probabilities of class membership (multivariate gaussian)
                        d = tf.sub(x, M)
                        m1 = tf.matmul(d, tf.matrix_inverse(s))
                        m2 = tf.expand_dims(
                            tf.reduce_sum(tf.mul(m1, d), 1), -1
                        )  # have to replace matrix multiplication with this

                    out.append(-0.5 * m2)  # should be columns

                # align columns
                _out = tf.concat(1, out)
                y = tf.nn.softmax(_out) + _epsilon  # to avoid taking log of 0
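
A NumPy cross-check of the squared Mahalanobis term computed in the multivariate branch above (illustrative values only; x_np, M_np and s_np are stand-ins for one class):

import numpy as np

x_np = np.array([[1.0, 2.0]])               # one sample, two features
M_np = np.array([0.5, 1.5])                 # class mean
s_np = np.array([[1.0, 0.2], [0.2, 2.0]])   # class covariance
d = x_np - M_np
m1 = d.dot(np.linalg.inv(s_np))             # tf.matmul(d, tf.matrix_inverse(s))
m2 = (m1 * d).sum(axis=1, keepdims=True)    # tf.expand_dims(tf.reduce_sum(tf.mul(m1, d), 1), -1)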
Code example #57
File: lstm.py Project: trevorlindsay/lstm
    def __init__(self, is_training, config):

        self.batch_size = batch_size = np.int32(config.max_group_size)
        self.num_steps = num_steps = config.num_steps
        self.num_features = num_features = config.num_features
        self.dense_units = dense_units = config.dense_units
        self.hidden_size = size = np.int32(config.hidden_size)

        if is_training:
            print('Initializing input tensors of shape: {}'.format(
                (num_steps, batch_size, num_features)))

        self._input_data = inputs = tf.placeholder(
            tf.float32, [num_steps, batch_size, num_features])
        self._targets = tf.placeholder(tf.float32, [num_steps, batch_size])

        # Memory cell to use in model
        rnn_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=size)

        if is_training:
            print('Memory Cell: {}'.format(type(rnn_cell)))

        # Wrap the memory cell in a dropout layer (for outputs)
        if is_training and config.keep_prob < 1:
            rnn_cell = tf.nn.rnn_cell.DropoutWrapper(
                cell=rnn_cell, output_keep_prob=config.keep_prob)

        # Create the RNN with 'num_layers' layers
        stacked_cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell] *
                                                   config.num_layers)

        # Initialize the state -- it will hold the last output, h_t, as well as the memory state, c_t
        # Shape will be [batch_size, num_units x 2] -- splitting on dimension 1 will separate output and memory
        self._initial_state = stacked_cell.zero_state(
            batch_size=tf.constant(batch_size), dtype=tf.float32)

        # Split the inputs (by timestep)
        inputs = [
            tf.squeeze(input, [0]) for input in tf.split(0, num_steps, inputs)
        ]

        # Computes dropout for inputs
        if is_training and config.keep_prob < 1:
            inputs = [tf.nn.dropout(x, config.keep_prob) for x in inputs]

        # Run inputs through the RNN
        outputs, state = tf.nn.rnn(stacked_cell,
                                   inputs,
                                   initial_state=self._initial_state)

        # Re-joins all output tensors (from each timestep)
        output = tf.reshape(tf.concat(1, outputs), shape=[-1, size])

        # Add a fully-connected layer
        self.dense_w = dense_w = tf.get_variable('dense_w',
                                                 shape=[size, dense_units])
        self.dense_b = dense_b = tf.get_variable('dense_b',
                                                 shape=[dense_units])

        # Feed the output from the RNN to the fully-connected layer
        self._predictions = predictions = tf.matmul(output, dense_w) + dense_b
        self._predictions = predictions = tf.reshape(
            self.predictions, shape=[num_steps, batch_size])

        # Compute the R^2
        numerator = tf.reduce_sum(
            tf.square(tf.sub(self.targets, self.predictions)))
        denominator = tf.reduce_sum(
            tf.square(tf.sub(self.targets, tf.reduce_mean(self.targets))))
        self.r2 = r2 = tf.sub(1.0, tf.div(numerator, denominator))
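        # R^2 = 1 - SS_res / SS_tot: 1.0 for perfect predictions, 0.0 for the
        # mean predictor, and negative when the model does worse than the mean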

        # MSE cost function
        self._cost = cost = tf.reduce_mean(
            tf.square(tf.sub(self.targets, self.predictions)))
        self._final_state = state

        # Variable for state (for when saving model)
        self.save_state = tf.Variable(
            tf.zeros([batch_size, size * config.num_layers]))
        # assign() only builds an op; keep the handle and run it in a
        # session, otherwise save_state is never actually updated
        self.save_state_op = self.save_state.assign(state)

        if is_training:

            self._lr = tf.Variable(0.0, trainable=False)

            # Compute the gradients
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(t_list=tf.gradients(cost, tvars),
                                              clip_norm=config.max_grad_norm)

            # Adjust the parameters based on optimizer and gradients
            optimizer = tf.train.AdamOptimizer(self.lr)
            self._train_op = optimizer.apply_gradients(
                grads_and_vars=zip(grads, tvars))

            # Summaries for Tensorboard
            cost_summ = tf.scalar_summary('mean squared error', cost)
            r2_summ = tf.scalar_summary('r-squared', r2)
            state_summ = tf.histogram_summary('states', state)
            pred_summ = tf.histogram_summary('predictions', predictions)
            self.summary = tf.merge_all_summaries()

        else:
            # Ignore this -- put here so errors are prevented when running model not in training mode
            self.summary = predictions

        return
Code example #58
    def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):
        """
        Initializes all necessary components of the TensorFlow
        Graph.
 
        m X n are the dimensions of the SOM. 'n_iterations' should
        be an integer denoting the number of iterations undergone
        while training.
        'dim' is the dimensionality of the training inputs.
        'alpha' is a number denoting the initial time (iteration no.)-based
        learning rate. Default value is 0.3.
        'sigma' is the initial neighbourhood value, denoting
        the radius of influence of the BMU while training. By default, it
        is taken to be half of max(m, n).
        """
 
        #Assign required variables first
        self._m = m
        self._n = n
        if alpha is None:
            alpha = 0.3
        else:
            alpha = float(alpha)
        if sigma is None:
            sigma = max(m, n) / 2.0
        else:
            sigma = float(sigma)
        self._n_iterations = abs(int(n_iterations))
 
        ##INITIALIZE GRAPH
        self._graph = tf.Graph()
 
        ##POPULATE GRAPH WITH NECESSARY COMPONENTS
        with self._graph.as_default():
 
            ##VARIABLES AND CONSTANT OPS FOR DATA STORAGE
 
            #Randomly initialized weightage vectors for all neurons,
            #stored together as a matrix Variable of size [m*n, dim]
            self._weightage_vects = tf.Variable(tf.random_normal(
                [m*n, dim]))
 
            #Matrix of size [m*n, 2] for SOM grid locations
            #of neurons
            self._location_vects = tf.constant(np.array(
                list(self._neuron_locations(m, n))))
 
            ##PLACEHOLDERS FOR TRAINING INPUTS
            #We need to assign them as attributes to self, since they
            #will be fed in during training
 
            #The training vector
            self._vect_input = tf.placeholder("float", [dim])
            #Iteration number
            self._iter_input = tf.placeholder("float")
 
            ##CONSTRUCT TRAINING OP PIECE BY PIECE
            #Only the final, 'root' training op needs to be assigned as
            #an attribute to self, since all the rest will be executed
            #automatically during training
 
            #To compute the Best Matching Unit given a vector
            #Basically calculates the Euclidean distance between every
            #neuron's weightage vector and the input, and returns the
            #index of the neuron which gives the least value
            bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(
                tf.pow(tf.sub(self._weightage_vects, tf.pack(
                    [self._vect_input for i in range(m*n)])), 2), 1)),
                                  0)
 
            #This will extract the location of the BMU based on the BMU's
            #index
            slice_input = tf.pad(tf.reshape(bmu_index, [1]),
                                 np.array([[0, 1]]))
            bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,
                                          tf.constant(np.array([1, 2]))),
                                 [2])
 
            #To compute the alpha and sigma values based on iteration
            #number
            learning_rate_op = tf.sub(1.0, tf.div(self._iter_input,
                                                  self._n_iterations))
            _alpha_op = tf.mul(alpha, learning_rate_op)
            _sigma_op = tf.mul(sigma, learning_rate_op)
 
            #Construct the op that will generate a vector with learning
            #rates for all neurons, based on iteration number and location
            #wrt BMU.
            bmu_distance_squares = tf.reduce_sum(tf.pow(tf.sub(
                self._location_vects, tf.pack(
                    [bmu_loc for i in range(m*n)])), 2), 1)
            neighbourhood_func = tf.exp(tf.neg(tf.div(tf.cast(
                bmu_distance_squares, "float32"), tf.pow(_sigma_op, 2))))
            learning_rate_op = tf.mul(_alpha_op, neighbourhood_func)
 
            #Finally, the op that will use learning_rate_op to update
            #the weightage vectors of all neurons based on a particular
            #input
            learning_rate_multiplier = tf.pack([tf.tile(tf.slice(
                learning_rate_op, np.array([i]), np.array([1])), [dim])
                                               for i in range(m*n)])
            weightage_delta = tf.mul(
                learning_rate_multiplier,
                tf.sub(tf.pack([self._vect_input for i in range(m*n)]),
                       self._weightage_vects))                                         
            new_weightages_op = tf.add(self._weightage_vects,
                                       weightage_delta)
            self._training_op = tf.assign(self._weightage_vects,
                                          new_weightages_op)                                       
 
            ##INITIALIZE SESSION
            self._sess = tf.Session()
 
            ##INITIALIZE VARIABLES
            init_op = tf.initialize_all_variables()
            self._sess.run(init_op)
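
The SOM's training loop is not part of the excerpt; assuming the class above is named SOM (the class statement is not shown), a minimal fitting sketch using the ops it builds:

import numpy as np

som = SOM(m=10, n=10, dim=3, n_iterations=20)   # class name assumed
data = [np.random.rand(3).astype(np.float32) for _ in range(100)]
for it in range(som._n_iterations):
    for vect in data:
        som._sess.run(som._training_op,
                      feed_dict={som._vect_input: vect,
                                 som._iter_input: float(it)})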
Code example #59
    def build_model(self,
                    learning_rate=None,
                    deep_n_nodes=[100, 50],
                    deep_dropout=False,
                    bias=True):
        '''
        Model - wide and deep - built using tflearn
        '''
        if not learning_rate:
            learning_rate = [0.001, 0.01]

        n_cc = len(self.continuous_columns)
        n_categories = 1  # single output logit for two categories: is_idv vs is_not_idv
        input_shape = [None, n_cc]
        if self.verbose:
            print("=" * 77 + " Model %s (type=%s)" %
                  (self.name, self.model_type))
            print("  Input placeholder shape=%s" % str(input_shape))
        wide_inputs = tflearn.input_data(shape=input_shape, name="wide_X")
        if not isinstance(learning_rate, list):
            learning_rate = [learning_rate, learning_rate]  # wide, deep
        if self.verbose:
            print("  Learning rates (wide, deep)=%s" % learning_rate)

        with tf.name_scope(
                "Y"):  # placeholder for target variable (i.e. trainY input)
            Y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")

        if bias:
            with tf.variable_op_scope([wide_inputs],
                                      None,
                                      "cb_unit",
                                      reuse=False) as scope:
                central_bias = tflearn.variables.variable(
                    'central_bias',
                    shape=[1],
                    initializer=tf.constant_initializer(np.random.randn()),
                    trainable=True,
                    restore=True)
                tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/cb_unit',
                                     central_bias)

        if 'wide' in self.model_type:
            wide_network = self.wide_model(wide_inputs, n_cc)
            network = wide_network
            if bias:
                wide_network_with_bias = tf.add(wide_network,
                                                central_bias,
                                                name="wide_with_bias")

        if 'deep' in self.model_type:
            deep_network = self.deep_model(wide_inputs,
                                           n_cc,
                                           n_nodes=deep_n_nodes,
                                           use_dropout=deep_dropout)
            if bias:
                deep_network_with_bias = tf.add(deep_network,
                                                central_bias,
                                                name="deep_with_bias")
            if 'wide' in self.model_type:
                network = tf.add(wide_network, deep_network)
                if self.verbose:
                    print("Wide + deep model network %s" % network)
            else:
                network = deep_network

        if bias:
            network = tf.add(network, central_bias, name="add_central_bias")

        # add validation monitor summaries giving confusion matrix entries
        with tf.name_scope('Monitors'):
            predictions = tf.cast(tf.greater(network, 0), tf.int64)
            print("predictions=%s" % predictions)
            Ybool = tf.cast(Y_in, tf.bool)
            print("Ybool=%s" % Ybool)
            pos = tf.boolean_mask(predictions, Ybool)
            neg = tf.boolean_mask(predictions, ~Ybool)
            psize = tf.cast(tf.shape(pos)[0], tf.int64)
            nsize = tf.cast(tf.shape(neg)[0], tf.int64)
            true_positive = tf.reduce_sum(pos, name="true_positive")
            false_negative = tf.sub(psize,
                                    true_positive,
                                    name="false_negative")
            false_positive = tf.reduce_sum(neg, name="false_positive")
            true_negative = tf.sub(nsize, false_positive, name="true_negative")
            overall_accuracy = tf.truediv(tf.add(true_positive, true_negative),
                                          tf.add(nsize, psize),
                                          name="overall_accuracy")
        vmset = [
            true_positive, true_negative, false_positive, false_negative,
            overall_accuracy
        ]

        trainable_vars = tf.trainable_variables()
        tv_deep = [v for v in trainable_vars if v.name.startswith('deep_')]
        tv_wide = [v for v in trainable_vars if v.name.startswith('wide_')]

        if self.verbose:
            print("DEEP trainable_vars")
            for v in tv_deep:
                print("  Variable %s: %s" % (v.name, v))
            print("WIDE trainable_vars")
            for v in tv_wide:
                print("  Variable %s: %s" % (v.name, v))

        if 'wide' in self.model_type:
            if bias:
                if not 'deep' in self.model_type:
                    tv_wide.append(central_bias)
                target_wide_net = wide_network_with_bias
            else:
                target_wide_net = wide_network
            tflearn.regression(
                target_wide_net,
                placeholder=Y_in,
                optimizer='sgd',
                #loss='roc_auc_score',
                loss='binary_crossentropy',
                metric="accuracy",
                learning_rate=learning_rate[0],
                # validation_monitors=vmset,
                trainable_vars=tv_wide,
                op_name="wide_regression",
                name="Y")

        if 'deep' in self.model_type:
            if bias:
                if not 'wide' in self.model_type:
                    # append to tv_deep, not tv_wide: the deep regression
                    # below trains tv_deep (tv_wide looks like a copy-paste slip)
                    tv_deep.append(central_bias)
                target_deep_net = deep_network_with_bias
            else:
                target_deep_net = deep_network
            tflearn.regression(
                target_deep_net,
                placeholder=Y_in,
                optimizer='adam',
                #loss='roc_auc_score',
                loss='binary_crossentropy',
                metric="accuracy",
                learning_rate=learning_rate[1],
                # validation_monitors=vmset if not 'wide' in self.model_type else None,
                trainable_vars=tv_deep,
                op_name="deep_regression",
                name="Y")

        if bias and self.model_type == 'wide+deep':  # learn central bias separately for wide+deep
            tflearn.regression(
                network,
                placeholder=Y_in,
                optimizer='adam',
                loss='binary_crossentropy',
                metric="accuracy",
                learning_rate=learning_rate[0],  # use wide learning rate
                trainable_vars=[central_bias],
                op_name="central_bias_regression",
                name="Y")

        self.model = tflearn.DNN(
            network,
            tensorboard_verbose=self.tensorboard_verbose,
            max_checkpoints=5,
            checkpoint_path="%s/%s.tfl" % (self.checkpoints_dir, self.name),
        )

        if self.verbose:
            print("Target variables:")
            for v in tf.get_collection(tf.GraphKeys.TARGETS):
                print("  variable %s: %s" % (v.name, v))

            print("=" * 77)
Code example #60
    # Reshape the conv layer output into a fully-connected layer input
    activation_shape = activation.get_shape().as_list()
    reshape = tf.reshape(activation, [
        activation_shape[0],
        activation_shape[1] * activation_shape[2] * activation_shape[3]
    ])
    all_connected = tf.nn.sigmoid(tf.matmul(reshape, fc1_weights) + fc1_biases)

    # if train:
    # 	all_connected = tf.nn.dropout(all_connected, 0.8, seed = SEED)
    return tf.matmul(all_connected, fc2_weights) + fc2_biases


# Be careful with the data structures of the network output & labels_node
train_output = model(train_images_node, True)
train_loss = tf.nn.l2_loss(tf.sub(
    train_output, train_labels_node)) / (2 * N_POINTS * BATCH_SIZE)

# L2 regularization for the fully connected parameters, taken directly from the LeNet example.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
regularized_train_loss = train_loss  # + LAMDA * regularizers

###############################################################
# Optimizers go here.
# Using a simple gradient-descent method with a constant learning rate.

# SGD
# batch = tf.Variable(0, dtype=IMAGE_DATA_TYPE)
# opt = tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE).minimize(regularized_train_loss)
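
For completeness, one way to finish the optimizer block sketched in the comments above (this mirrors the commented-out SGD line and is an assumption, not necessarily the author's final choice; LEARNING_RATE is defined earlier in the original file):

opt = tf.train.GradientDescentOptimizer(
    learning_rate=LEARNING_RATE).minimize(regularized_train_loss)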