Example #1
 def chamfer_distance_single(inputs):
     """
         Squared distance, to match the chamfer distance used in P2M.
     """
     pc1 = inputs[0]
     pc2 = inputs[1]
     num_p1 = tf.shape(pc1)[0]
     num_p2 = tf.shape(pc2)[0]
     num_f1 = tf.shape(pc1)[1]
     num_f2 = tf.shape(pc2)[1]
     # Static-shape alternative for the point/feature counts:
     # num_p1= pc1.get_shape()[0]
     # num_p2= pc2.get_shape()[0]
     # num_f1= pc1.get_shape()[1]
     # num_f2= pc2.get_shape()[1]
     exp_pc1 = tf.tile(pc1, (num_p2, 1))  # [num_p1 * num_p2, num_f1]
     exp_pc1 = tf.reshape(
         exp_pc1, [num_p2, num_p1, num_f1])  # [num_p2, num_p1, num_f1]
     exp_pc2 = tf.reshape(pc2, [num_p2, 1, num_f2])  # [num_p2, 1, num_f1]
     # exp_pc2 = tf.tile(exp_pc2, (1, num_p1, 1))  # [num_p2, num_p1, num_f1]; broadcasting makes the tile unnecessary
     distance_matrix = tf.squared_difference(exp_pc1,
                                             exp_pc2)  # [num_p2, num_p1, num_f1]
     distance_matrix = tf.reduce_sum(distance_matrix,
                                     axis=2)  # [num_p2, num_p1]
     d1_2_all = tf.reduce_min(distance_matrix, axis=0)  # [num_p1]
     d2_1_all = tf.reduce_min(distance_matrix, axis=1)  # [num_p2]
     idx1 = tf.arg_min(distance_matrix, 0, tf.int32)  # [num_p1]
     idx2 = tf.arg_min(distance_matrix, 1, tf.int32)  # [num_p2]
     return [d1_2_all, idx1, d2_1_all, idx2]
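A minimal usage sketch for the function above, assuming TensorFlow 1.x; the point-cloud sizes and random data are made up for illustration:

import numpy as np
import tensorflow as tf

pc1 = tf.constant(np.random.rand(100, 3), dtype=tf.float32)  # 100 points, 3 features
pc2 = tf.constant(np.random.rand(80, 3), dtype=tf.float32)   # 80 points, 3 features
d1_2, idx1, d2_1, idx2 = chamfer_distance_single([pc1, pc2])
chamfer = tf.reduce_mean(d1_2) + tf.reduce_mean(d2_1)        # symmetric (squared) chamfer distance

with tf.Session() as sess:
    print(sess.run(chamfer))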
Example #2
def closest_class_prediction(pairwise_distances, labels):
    """
    Helper function to gather predictions for top-1 accuracy calculation

    :param pairwise_distances: nxn matrix with cosine distances within a batch
    :param labels: nx1 ids of identities
    :return:
    """

    max_values = tf.reduce_max(pairwise_distances)
    diag_replacer = tf.tile(max_values[None, ...],
                            [tf.shape(pairwise_distances)[0]])

    # The distance of embedding to itself will be 0, so we're replacing it with the max value
    replaced_diag = tf.linalg.set_diag(pairwise_distances, diag_replacer)

    indices_min = tf.arg_min(replaced_diag, 1)

    predictions_raw = tf.gather(labels, indices_min)

    # Filter out classes with only one instance
    classes, _, counts = tf.unique_with_counts(labels)
    classes_not_for_accuracy = tf.boolean_mask(classes, tf.equal(counts, 1))

    _, indices_to_keep = tf.setdiff1d(labels, classes_not_for_accuracy)

    labels_selected = tf.gather(labels, indices_to_keep)

    predictions = tf.gather(predictions_raw, indices_to_keep)

    return predictions, labels_selected
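A hedged usage sketch for the helper above, assuming TF 1.x: the toy embeddings, label ids, and the squared-L2 pairwise-distance construction are all illustrative assumptions (the docstring only requires some nxn distance matrix):

import tensorflow as tf

embeddings = tf.random_normal([8, 16])
labels = tf.constant([0, 0, 1, 1, 2, 2, 3, 3])

# Pairwise squared L2 distances within the batch
sq_norms = tf.reduce_sum(tf.square(embeddings), axis=1, keepdims=True)
pairwise_distances = (sq_norms
                      - 2.0 * tf.matmul(embeddings, embeddings, transpose_b=True)
                      + tf.transpose(sq_norms))

predictions, labels_selected = closest_class_prediction(pairwise_distances, labels)

with tf.Session() as sess:
    preds, gt = sess.run([predictions, labels_selected])
    print("top-1 accuracy:", (preds == gt).mean())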
Example #3
def knn(n_dim, Xtr, Ytr, Xte, Yte=None):
    # create placeholder
    xtr = tf.placeholder(tf.float32, [None, n_dim])
    xte = tf.placeholder(tf.float32, [n_dim])

    # Nearest Neighbor calculation using L1 Distance
    # Calculate L1 Distance
    distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                             reduction_indices=1)
    # Prediction: Get min distance index (Nearest neighbor)
    pred = tf.arg_min(distance, 0)

    accuracy = 0.

    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()

    # Start training
    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)

        # loop over test data
        for i in range(len(Xte)):
            # Get nearest neighbor
            nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
            prediction = np.argmax(Ytr[nn_index])

            generate_csv(i, prediction)
Example #4
    def match_idx(self, g):
        """
        for the group g, identify the Ui whose residual is minimal, then return
            label, loss, sreal, u
            where label=i, loss=residual_real - residual_fake, sreal=residual_real, u=Ui
        """
        N_g = tf.shape(g)[0]
        g_fake = self.uniform_recombine(g)

        combined_sreal = []
        Us = []
        for i in range(self.n_class):
            u = self.Us[i]
            u = tf.nn.l2_normalize(u, dim=0)
            uT = tf.transpose(u)
            s_real = tf.reduce_sum((g - tf.matmul(tf.matmul(g, u), uT)) ** 2) / tf.to_float(N_g)

            combined_sreal.append(s_real)
            Us.append(u)
        combined_sreal = tf.convert_to_tensor(combined_sreal)
        Us             = tf.convert_to_tensor(Us)
        label = tf.cast(tf.arg_min(combined_sreal, dimension=0), tf.int32)
        sreal = combined_sreal[label]
        u     = Us[label]
        # returns label, and corresponding s_real and u
        return label, sreal, u
Example #5
def main(_):
    cut = int(FLAGS.n_samples * 0.7)

    start = time.time()

    data, features = make_circles(n_samples=FLAGS.n_samples, shuffle=True, noise=0.12, factor=0.4)
    tr_data, tr_features = data[:cut], features[:cut]
    te_data, te_features = data[cut:], features[cut:]
    test = []

    fig, ax = plt.subplots()
    ax.scatter(tr_data[:, 0], tr_data[:, 1],
               marker='o', s=100, c=tr_features, cmap=plt.cm.coolwarm)
    plt.plot()
    plt.show()

    with tf.Session() as sess:
        for i, j in zip(te_data, te_features):
            distances = tf.reduce_sum(tf.square(tf.subtract(i, tr_data)), axis=1)
            neighbor = tf.arg_min(distances, 0)

            test.append(tr_features[sess.run(neighbor)])

    fig, ax = plt.subplots()
    ax.scatter(te_data[:, 0], te_data[:, 1],
               marker='o', s=100, c=test, cmap=plt.cm.coolwarm)
    plt.plot()
    plt.show()

    end = time.time()
    print("Found in %.2f seconds" % (end-start))
    print("Cluster assignments:", test)
Example #6
    def __init__(self, train_features, train_labels, sequence_length):
        self.train_features = train_features
        self.train_labels = train_labels

        with tf.name_scope("input"):
            xtr = tf.placeholder(dtype=tf.float32,
                                 shape=[None, sequence_length])
            xte = tf.placeholder(dtype=tf.float32, shape=[sequence_length])

        # L1-Norm
        # distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)

        # L2-Norm
        distance = tf.sqrt(
            tf.reduce_sum(tf.square(xtr - xte), reduction_indices=1))

        prediction = tf.arg_min(distance, 0)

        accuracy = 0.0

        self.xtr = xtr
        self.xte = xte
        self.distance = distance
        self.prediction = prediction
        self.accuracy = accuracy
Example #7
    def create_model(self, num_samples, dim, k):
        with self.graph.as_default():
            X = tf.placeholder(tf.float32, [num_samples, dim])
            cluster_membership = tf.Variable(tf.zeros([num_samples]),
                                             dtype=tf.float32)
            centroids = tf.Variable(tf.random_uniform([k, dim]),
                                    dtype=tf.float32)

            # Op to initialize centroids with the first k points of X
            centroids_init = centroids.assign(tf.slice(X, [0, 0], [k, dim]))
            X_temp = tf.reshape(tf.tile(X, [1, k]), [num_samples, k, dim])
            centroids_temp = tf.reshape(tf.tile(centroids, [num_samples, 1]),
                                        [num_samples, k, dim])

            distances_to_centroids = tf.reduce_sum(
                tf.square(tf.subtract(X_temp, centroids_temp)),
                reduction_indices=2)  # N x k
            cluster_membership = tf.arg_min(distances_to_centroids, 1)

            new_means_numerator = tf.unsorted_segment_sum(
                X, cluster_membership, k)
            new_means_denominator = tf.unsorted_segment_sum(
                tf.ones_like(X), cluster_membership, k)
            new_means = new_means_numerator / new_means_denominator
            update_centroids = centroids.assign(new_means)
            return update_centroids, cluster_membership, X, centroids_init
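The ops returned by create_model above are meant to be run in alternation: initialize the centroids once, then repeatedly run the assignment/update op. A sketch of that driver loop, assuming TF 1.x, a kmeans instance of this class with a graph attribute, and toy data (all illustrative):

import numpy as np
import tensorflow as tf

num_samples, dim, k = 300, 2, 3
data = np.random.rand(num_samples, dim).astype(np.float32)

# 'kmeans' is assumed to be an instance of the class that defines create_model above
update_centroids, cluster_membership, X, centroids_init = kmeans.create_model(num_samples, dim, k)
with kmeans.graph.as_default():
    init = tf.global_variables_initializer()

with tf.Session(graph=kmeans.graph) as sess:
    sess.run(init)
    sess.run(centroids_init, feed_dict={X: data})  # seed centroids with the first k points
    for _ in range(20):  # fixed iteration count; empty clusters would yield NaN means
        centers, assignments = sess.run([update_centroids, cluster_membership],
                                        feed_dict={X: data})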
Example #8
def arg_closest_anchor(bboxes, anchors):
    """Find the closest anchor. Box Format [ymin, xmin, ymax, xmax]
  """
    num_anchors = anchors.get_shape().as_list()[0]
    num_bboxes = tf.shape(bboxes)[0]

    _indices = tf.reshape(tf.range(num_bboxes), shape=[-1, 1])
    _indices = tf.reshape(tf.stack([_indices] * num_anchors, axis=1),
                          shape=[-1, 1])
    bboxes_m = tf.gather_nd(bboxes, _indices)
    # bboxes_m = tf.Print(bboxes_m, [bboxes_m], "bboxes_m", summarize=100)

    anchors_m = tf.tile(anchors, [num_bboxes, 1])
    # anchors_m = tf.Print(anchors_m, [anchors_m], "anchors_m", summarize=100)

    square_dist = tf.squared_difference(bboxes_m[:, 0], anchors_m[:, 0]) + \
                  tf.squared_difference(bboxes_m[:, 1], anchors_m[:, 1]) + \
                  tf.squared_difference(bboxes_m[:, 2], anchors_m[:, 2]) + \
                  tf.squared_difference(bboxes_m[:, 3], anchors_m[:, 3])

    square_dist = tf.reshape(square_dist, shape=[num_bboxes, num_anchors])

    # square_dist = tf.Print(square_dist, [square_dist], "square_dist", summarize=100)

    indices = tf.arg_min(square_dist, dimension=1)

    return indices
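A small usage sketch, assuming TF 1.x and hand-made boxes in the [ymin, xmin, ymax, xmax] format the docstring requires (the coordinates are illustrative):

import tensorflow as tf

anchors = tf.constant([[0., 0., 10., 10.],
                       [20., 20., 30., 30.]])
bboxes = tf.constant([[19., 21., 31., 29.],
                      [1., 0., 9., 11.]])

with tf.Session() as sess:
    # Each bbox is matched to its closest anchor by squared corner distance
    print(sess.run(arg_closest_anchor(bboxes, anchors)))  # -> [1 0]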
Example #9
def main():

    x_train, y_train, x_test, y_test = load_data()

    xtr = tf.placeholder("float", [None, 784])
    xte = tf.placeholder("float", [784])

    distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                             reduction_indices=1)
    pred = tf.arg_min(distance, 0)

    accuracy = 0.

    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        # Run the initializer
        sess.run(init)
        # loop over test data
        for i in range(len(x_test)):
            # Get nearest neighbor
            nn_index = sess.run(pred,
                                feed_dict={
                                    xtr: x_train,
                                    xte: x_test[i, :]
                                })
            # Calculate accuracy
            if np.argmax(y_train[nn_index]) == np.argmax(y_test[i]):
                accuracy += 1. / len(x_test)
    return accuracy
Example #10
      def get_loss(idx):
        anchor = h[idx, :]
        anchor_class = targets[idx]
        class_division = tf.cast(tf.equal(targets, anchor_class), tf.int32)
        partitioned_output = tf.dynamic_partition(h, class_division, 2)

        # Positives
        positive_distances = tf.abs(anchor - partitioned_output[1])
        pos_dis_val = tf.norm(positive_distances, axis=1)
        hardest_positive_idx = tf.arg_max(pos_dis_val, 0)
        pos_div_size = smart_shape(partitioned_output[1])[0]
        pos_divider = tf.one_hot(hardest_positive_idx, pos_div_size, dtype=tf.int32)
        hardest_positive = tf.dynamic_partition(positive_distances, pos_divider, 2)[1]
        hardest_positive_class = tf.gather(targets, hardest_positive_idx)
        hardest_positive = tf.norm(hardest_positive, axis=1)

        # Negatives
        negative_distances = tf.abs(anchor - partitioned_output[0])
        neg_dis_val = tf.norm(negative_distances, axis=1)
        hardest_negative_idx = tf.arg_min(neg_dis_val, 0)
        neg_div_size = smart_shape(partitioned_output[0])[0]
        neg_divider = tf.one_hot(hardest_negative_idx, neg_div_size, dtype=tf.int32)
        hardest_negative = tf.dynamic_partition(negative_distances, neg_divider, 2)[1]
        hardest_negative_class = tf.gather(targets, hardest_negative_idx)
        hardest_negative = tf.norm(hardest_negative, axis=1)

        # margin = 1
        # loss = tf.maximum(0., margin + hardest_positive - hardest_negative)
        loss = tf.log1p(tf.exp(hardest_positive - hardest_negative))

        return loss
Example #11
 def initializeKnn(self):        
     if self.qualitative_outputs:            
         n_input = self.input_end_column - self.input_start_column + 1            
         self.tf_in = tf.placeholder("float", [None, n_input])
         self.tf_testing = tf.placeholder("float", [n_input])
         
         # Calculate L1 Distance
         self.distance = tf.reduce_sum(tf.abs(tf.add(self.tf_in, tf.negative(self.tf_testing))), reduction_indices=1)
         # Predict: Get min distance index (Nearest neighbor)
         self.prediction = tf.arg_min(self.distance, 0)
         
         init = tf.global_variables_initializer()
         self.sess = tf.Session()
         self.sess.run(init)
         accuracy = 0
         #output part
         for i in range(len(self.testing_data)):
             # Get nearest neighbor
             nn_index = self.sess.run(self.prediction, feed_dict={self.tf_in: self.training_data, self.tf_testing: self.testing_data[i,:]})
             # Calculate accuracy
             if np.argmax(self.training_outputs[nn_index]) == np.argmax(self.testing_outputs[i]):
                 accuracy += 1./len(self.testing_data)
         self.accuracy = accuracy
         self.epochs_for_accuracy = "N/A"
         self.best_accuracy = "N/A"
         self.epochs_for_best_accuracy = "N/A"
         self.trained = True
     else:
         raise ValueError("NOT IMPLEMENTED")
Example #12
def main():
    mnist = input_data.read_data_sets('./outer/tf/sample_tests/data', one_hot=True)
    # 5000 for training(nn candidates)
    X_train,Y_train = mnist.train.next_batch(5000)
    X_test,Y_test = mnist.test.next_batch(200)

    x_tr = tf.placeholder(tf.float32, shape=[None, 784])
    x_te = tf.placeholder(tf.float32, shape=[784])

    # Calculate L1 distance
    distance = tf.reduce_sum(tf.abs(tf.add(x_tr, tf.negative(x_te))), reduction_indices=1)

    pred = tf.arg_min(distance, 0)

    accuracy = 0

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(len(X_test)):
            nn_index = sess.run(pred, feed_dict={x_tr:X_train, x_te:X_test[i, :]})
            print('test : {0} Prediction : {1} True Class : {2}'.format(i, np.argmax(Y_train[nn_index]), np.argmax(Y_test[i])))
            if np.argmax(Y_train[nn_index]) == np.argmax(Y_test[i]):
                accuracy += 1./len(X_test)
        print('accuracy : {0}'.format(accuracy))
Example #13
def KNN(input_shape=784, output_shape=10):
    # Training set of 5000 samples
    Xtr, Ytr = mnist.train.next_batch(5000)
    # Take 200 of the samples as the test set
    Xte, Yte = mnist.test.next_batch(200)

    # Placeholders, as usual
    xtr = tf.placeholder("float", [None, input_shape])
    xte = tf.placeholder("float", [input_shape])

    # Define the distance computation
    distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                             reduction_indices=1)
    # The prediction is the sample with the minimum distance: this is a 1-NN algorithm
    pred = tf.arg_min(distance, 0)
    accuracy = 0
    # Initialize all variables
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)

        for i in range(len(Xte)):
            # Find the nearest neighbor of the i-th test point
            nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
            # Compare the prediction with the true label
            print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]),
                  "True Class:", np.argmax(Yte[i]))
            # Accumulate accuracy
            if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
                accuracy += 1. / len(Xte)
        print('Done! Accuracy =', accuracy)
Example #14
    def buildModel(self, sess, test_case_tensor, tc_name):
        '''
        This method is called per test case (defined by getTestCases()).

        keyword argument:
        test_case_tensor -- test case tensor metadata
            For example, if a test case is { "mul_1d_1d": [base.Tensor([5]), base.Tensor([5])] }
            test_case_tensor is [base.Tensor([5]), base.Tensor([5])]
        '''

        input_list = []

        # ------ modify below for your model FROM here -------#
        x_tensor = self.createTFInput(test_case_tensor[0], input_list)

        output_node = tf.arg_min(x_tensor, 0, name=tc_name)
        # ------ modify UNTIL here for your model -------#

        # Note: if the graph has no CONST value, creating the checkpoint file fails.
        # The next lines insert such a CONST to prevent that error.
        # Graph.pb/pbtxt therefore contains this garbage info,
        # but it is removed in Graph_frozen.pb/pbtxt.
        garbage = tf.get_variable("garbage", [1],
                                  dtype=tf.float32,
                                  initializer=tf.zeros_initializer())
        init_op = tf.global_variables_initializer()
        garbage_value = [0]
        sess.run(tf.assign(garbage, garbage_value))

        sess.run(init_op)

        # ------ modify appropriate return value  -------#

        # returning (input_node_list, output_node_list)
        return (input_list, [output_node])
Example #15
def graph(K, n):
    # Define variables
    data = tf.placeholder(tf.float64, [None, data_dim], name='input_x')
    centroid = tf.cast(tf.Variable(tf.random_normal([K, data_dim])),
                       tf.float64)

    # Calculate the distances, then assign each point to the nearest centroid
    Dis = Euclidean_dis(data, centroid)
    assignment = tf.arg_min(Dis, 1)
    assignment_onehot = tf.cast(tf.one_hot(assignment, K), tf.float64)

    # Calculate the percentage of data points belonging to each of the K clusters
    point_num = tf.expand_dims(tf.reduce_sum(assignment_onehot, 0), 1)
    percentage = tf.cast(point_num / data_num, tf.float64)

    # Get class 1
    # a = tf.constant([0., 0.], tf.float64)
    # class1pre = tf.not_equal(tf.expand_dims(assignment[:, 0], 1) * data, a)
    # class1 = tf.reshape(tf.gather(data, tf.where(class1pre[:, 0])), [-1, 2])

    # Define the loss function
    U = tf.matmul(assignment_onehot, centroid)
    distance = data - U
    Loss = tf.reduce_sum(tf.square(distance))

    # Define the training algorithm
    optimizer = tf.train.AdamOptimizer(learning_rate=n,
                                       beta1=0.9,
                                       beta2=0.99,
                                       epsilon=1e-5)
    train = optimizer.minimize(loss=Loss)

    return train, Loss, centroid, data, percentage, assignment
Example #16
def model(xtr, xte):
    # Nearest Neighbor calculation using L1 Distance
    # Calculate L1 Distance
    distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), axis=1)
    # Prediction: Get min distance index (Nearest neighbor)
    pred = tf.arg_min(distance, 0)
    return pred
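An end-to-end sketch around this helper; the 784-dim placeholders and random data are assumptions chosen to mirror the MNIST examples elsewhere on this page:

import numpy as np
import tensorflow as tf

xtr = tf.placeholder(tf.float32, [None, 784])
xte = tf.placeholder(tf.float32, [784])
pred = model(xtr, xte)

train_x = np.random.rand(100, 784).astype(np.float32)
test_x = np.random.rand(784).astype(np.float32)

with tf.Session() as sess:
    # Index of the training row closest to test_x under L1 distance
    print(sess.run(pred, feed_dict={xtr: train_x, xte: test_x}))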
Example #18
def pred(src, dst, tz, d, wtr, temp):
    print(src, dst, wtr, temp)
    training_set = []
    training_set_y = []

    with open("dataSets/TRAIN_SET" + str(src) + str(dst) + ".csv",
              "r") as file:
        reader = csv.reader(file)
        for row in reader:
            training_set.append([row[1], row[3], row[4], row[5]])
            training_set_y.append(row[6])

    training_set = training_set[1:]
    training_set_y = training_set_y[1:]

    testing_set = [tz, d, wtr, temp]

    # testing_set.append([src,dst,wtr,temp])
    print(testing_set)

    training_values = tf.placeholder("float", [None, len(training_set[0])])
    test_values = tf.placeholder("float", [len(training_set[0])])

    distance = tf.reduce_sum(tf.abs(
        tf.add(training_values, tf.negative(test_values))),
                             reduction_indices=1)
    prediction = tf.arg_min(distance, 0)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # for i in range (len(testing_set)):

        index_in_trainingset = sess.run(prediction,
                                        feed_dict={
                                            training_values: training_set,
                                            test_values: testing_set
                                        })

        print(" The prediction is %s" % (training_set_y[index_in_trainingset]),
              index_in_trainingset)

        training_set = []
        # training_set_y = []
        # switch to choose training set

        with open("dataSets/TRAIN_SET" + str(src) + str(dst) + ".csv",
                  "r") as file:
            reader = csv.reader(file)
            for row in reader:
                training_set.append([row[1], row[3], row[4], row[5], row[7]])
                # training_set_y.append(row[7])

        training_set = training_set[1:]
        # training_set_y = training_set_y[1:]

        aor = training_set[index_in_trainingset][4]
        return round(float(training_set_y[index_in_trainingset]), 1), aor
Example #19
    def build(self, **inputArgs):
        """
                Builds the network, x1 and x2 are the inputs, enc1 and enc2 the respective encodings
                y_pred is our prediction while y_true is the real value if given
                cost gives the combined costs and optimizer our learning method

                :param inputArgs:
                :return:
        """

        self.lr = tf.placeholder(tf.float32, shape=[])
        self.x1 = tf.placeholder(tf.float32, shape=self.shape, name="x1")
        self.x2 = tf.placeholder(tf.float32, shape=self.shape, name="x2")

        self.y_true = tf.placeholder(tf.float32, shape=[self.shape[0]])

        with tf.variable_scope("image_filters") as scope:
            self.enc1 = self.build_encoding(self.x1)
            scope.reuse_variables()
            self.enc2 = self.build_encoding(self.x2)

        self.params = self.enc_weights
        self.dec_weights = ops.conv_weight_bias([self.encoding_size, 1])

        self.params.extend(self.dec_weights)
        """
        ONLY DIFFERENCE BETWEEN METRIC AND CLASSICAL
        last weights are squared to ensure positivity, and we use tf.nn.tanh insteand of tf.nn.sigmoid
        """
        self.y_pred = tf.reshape(
            tf.nn.tanh(
                tf.matmul(tf.abs(self.enc1 - self.enc2),
                          tf.square(self.dec_weights[0]))), [-1])

        self.class_cost = tf.reduce_mean(-tf.multiply(
            self.y_true, tf.log(tf.clip_by_value(self.y_pred, 0.0001, 0.9999))
        )) - tf.reduce_mean(
            tf.multiply(1 - self.y_true,
                        tf.log(1 - tf.clip_by_value(self.y_pred, 0.0001, 0.9999))))

        self.cost = self.class_cost
        self.reg_cost = tf.reduce_sum(
            tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES)) * self.reg_constant

        if self.regularization:
            self.cost += self.reg_cost

        self.optimizer = tf.train.AdagradOptimizer(self.lr).minimize(
            self.cost, var_list=self.params)

        self.pred_class = tf.arg_min(self.y_pred, dimension=0)
        self.true_class = tf.placeholder(tf.int64, shape=[1], name="class")

        self.corr_class = tf.equal(self.pred_class, self.true_class)
Example #20
    def _setup_action_selection(self, state_ph):
        """
            Computes the best action from the current state by using randomly sampled action sequences
            to predict future states, evaluating these predictions according to a cost function,
            selecting the action sequence with the lowest cost, and returning the first action in that sequence

            returns:
                best_action: the action that minimizes the cost function (tensor with shape [self._action_dim])

            implementation details (in order):
                (a) We will assume state_ph has a batch size of 1 whenever action selection is performed
                (b) Randomly sample uniformly self._num_random_action_selection number of action sequences,
                    each of length self._horizon
                (c) Starting from the input state, unroll each action sequence using your neural network
                    dynamics model
                (d) While unrolling the action sequences, keep track of the cost of each action sequence
                    using self._cost_fn
                (e) Find the action sequence with the lowest cost, and return the first action in that sequence

            Hints:
                (i) self._cost_fn takes three arguments: states, actions, and next states. These arguments are
                    2-dimensional tensors, where the 1st dimension is the batch size and the 2nd dimension is the
                    state or action size
                (ii) You should call self._dynamics_func and self._cost_fn a total of self._horizon times
                (iii) Use tf.random_uniform(...) to generate the random action sequences

        """
        # PROBLEM 2
        # YOUR CODE HERE
        # state_ph has batch size 1; repeat it once per sampled action sequence
        # (tf.tile instead of tf.repeat, which is unavailable in most TF 1.x releases)
        states = tf.tile(state_ph, [self._num_random_action_selection, 1])
        costs = tf.zeros([self._num_random_action_selection], dtype=tf.float32)
        print(costs)
        actions_init = tf.random_uniform(
            [self._num_random_action_selection, self._action_dim],
            minval=self._action_space_low[0],
            maxval=self._action_space_high[0],
            dtype=tf.float32)
        print(actions_init)
        actions = actions_init
        for _ in range(self._horizon):
            next_states = self._dynamics_func(states, actions, reuse=True)
            costs += self._cost_fn(states, actions, next_states)
            states = next_states
            actions = tf.random_uniform(
                [self._num_random_action_selection, self._action_dim],
                minval=self._action_space_low,
                maxval=self._action_space_high,
                dtype=tf.float32)
        best_action = actions_init[tf.arg_min(costs, dimension=0)]

        return best_action
Example #21
    def __init__(self, x_data, centroid_data):
        global X, Centroid, expanded_vectors, expanded_centroids
        global distances, min_distances, total_distances

        X = tf.placeholder(tf.float32)
        Centroid = tf.placeholder(tf.float32)
        expanded_vectors = tf.expand_dims(X, 0)
        expanded_centroids = tf.expand_dims(Centroid, 1)

        distances = tf.reduce_sum(tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2)
        min_distances = tf.arg_min(distances, 0)
        total_distances = tf.reduce_sum(tf.reduce_min(distances, 0))

        self.x_data = x_data
        self.centroid_data = centroid_data
        self.track_centroid = []
Example #22
 def acc(self):
     dis = []
     dis.append(
         self.distance(input1=self.output_vec, input2=self.encode_answer_1))
     dis.append(
         self.distance(input1=self.output_vec, input2=self.encode_answer_2))
     dis.append(
         self.distance(input1=self.output_vec, input2=self.encode_answer_3))
     dis_all = tf.concat(dis, 1)
     predict = tf.arg_min(dis_all, 1)
     correct_prediction = tf.equal(self.input_y_index, predict)
     acc = tf.reduce_mean(tf.cast(correct_prediction, 'float'), name="acc")
     tf.summary.scalar('acc', acc)
     return acc
Example #23
def get_mass_max_logit(cur_logits):
    """Help function"""

    bg_logits = tf.slice(cur_logits, [0, 0], [-1, 1])  # hard coding!!
    idx_min_instance = tf.to_int32(tf.reshape(tf.arg_min(bg_logits, 0),
                                              [1, 1]))
    sel_begin = tf.reshape(
        tf.concat(
            axis=1,
            values=[idx_min_instance,
                    tf.constant([0], tf.int32, [1, 1])]), [2])
    max_logit = tf.slice(cur_logits, sel_begin, [1, -1])

    return max_logit
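A quick check of the helper above with a hand-made logits matrix (values illustrative); column 0 is treated as the background logit, so the returned row is the one whose background score is lowest:

import tensorflow as tf

cur_logits = tf.constant([[0.2, 1.0],
                          [-1.5, 2.0],
                          [0.7, 0.1]])

with tf.Session() as sess:
    print(sess.run(get_mass_max_logit(cur_logits)))  # selects row 1: [[-1.5  2.]]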
Example #24
def main(args):
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

    # In this example, we limit mnist data
    Xtr, Ytr = mnist.train.next_batch(
        args.b)  #5000 for training (nn candidates)
    Xte, Yte = mnist.test.next_batch(200)  #200 for testing

    # save our batch size as part of our metadata
    bandit.metadata.batch = args.b

    # tf Graph Input
    xtr = tf.placeholder("float", [None, 784])
    xte = tf.placeholder("float", [784])

    # Nearest Neighbor calculation using L1 Distance
    # Calculate L1 Distance
    distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                             reduction_indices=1)
    # Prediction: Get min distance index (Nearest neighbor)
    pred = tf.arg_min(distance, 0)

    accuracy = 0.

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)

        # loop over test data
        for i in range(len(Xte)):
            # Get nearest neighbor
            nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
            # Get nearest neighbor class label and compare it to its true label
            print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]), \
                "True Class:", np.argmax(Yte[i]))
            # Calculate accuracy
            if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
                accuracy += 1. / len(Xte)
                print('accuracy:', accuracy)
                bandit.report('Accuracy', accuracy)

        print("Done!")
        print("Accuracy:", accuracy)

        # save our accuracy as metadata
        bandit.metadata.accuracy = float(accuracy)
Example #25
 def build_graph(parameters):
     """Build the arg_min/arg_max op testing graph."""
     input_value = tf.compat.v1.placeholder(dtype=parameters["input_dtype"],
                                            name="input",
                                            shape=parameters["input_shape"])
     axis = random.randint(0, max(len(parameters["input_shape"]) - 1, 0))
     if parameters["is_arg_max"]:
         out = tf.arg_max(input_value,
                          axis,
                          output_type=parameters["output_type"])
     else:
         out = tf.arg_min(input_value,
                          axis,
                          output_type=parameters["output_type"])
     return [input_value], [out]
Example #26
def Kmeans():
    X = tf.placeholder(tf.float32, [None, D], name="X")

    MU = tf.get_variable('mean',
                         dtype=tf.float32,
                         shape=[K, D],
                         initializer=tf.initializers.random_normal())
    belong = tf.arg_min(distanceFunc(X, MU), dimension=1)
    lossfunc = tf.reduce_sum(tf.reduce_min(distanceFunc(X, MU), axis=1))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01,
                                       beta1=0.9,
                                       beta2=0.99,
                                       epsilon=1e-5)
    train = optimizer.minimize(loss=lossfunc)

    return X, train, MU, lossfunc, belong
Example #27
class XXX:
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

    # In this example, we limit mnist data
    Xtr, Ytr = mnist.train.next_batch(500)  # 5000 for training (nn candidates)
    Xte, Yte = mnist.test.next_batch(20)  # 200 for testing

    # tf Graph Input
    xtr = tf.placeholder("float", [None, 784])
    xte = tf.placeholder("float", [784])

    # Nearest Neighbor calculation using L1 Distance
    # Calculate L1 Distance
    distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                             reduction_indices=1)
    # Prediction: Get min distance index (Nearest neighbor)
    pred = tf.arg_min(distance, 0)

    sess = 0

    def __init__(self):
        self.sess = tf.Session()

        # Initializing the variables
        init = tf.global_variables_initializer()
        self.sess.run(init)

    def run(self):
        accuracy = 0.
        # Launch the graph
        with self.sess:
            # loop over test data
            for i in range(len(self.Xte)):
                # Get nearest neighbor
                nn_index = self.sess.run(self.pred,
                                         feed_dict={
                                             self.xtr: self.Xtr,
                                             self.xte: self.Xte[i, :]
                                         })
                # Get nearest neighbor class label and compare it to its true label
                print("Test", i, "Prediction:", np.argmax(self.Ytr[nn_index]), \
                    "True Class:", np.argmax(self.Yte[i]))
                # Calculate accuracy
                if np.argmax(self.Ytr[nn_index]) == np.argmax(self.Yte[i]):
                    accuracy += 1. / len(self.Xte)
            print("Done!")
            print("Accuracy:", accuracy)
Example #28
def knn_tensorflow():
    # kNN in TensorFlow: classify the MNIST digits

    mnist = input_data.read_data_sets("./data/mnist/", one_hot=True)

    # Load the full dataset
    train_x, train_y = mnist.train.next_batch(60000)
    test_x, test_y = mnist.test.next_batch(10000)

    # Placeholders
    train_x_p = tf.placeholder(tf.float32, [None, 784])
    test_x_p = tf.placeholder(tf.float32, [784])

    # L2 distance: dist = sqrt(sum(|X1 - X2|^2))
    dist_l2 = tf.sqrt(
        tf.reduce_sum(tf.square(tf.abs(train_x_p + tf.negative(test_x_p))),
                      reduction_indices=1))

    # Index of the minimum distance
    prediction = tf.arg_min(dist_l2, 0)

    # Accuracy counter
    accuracy = 0

    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)

        for i in range(len(test_x)):
            # Index of the nearest neighbor
            nn_index = sess.run(prediction,
                                feed_dict={
                                    train_x_p: train_x,
                                    test_x_p: test_x[i, :]
                                })
            print("Test sample %d, true: %d, predicted: %d" %
                  (i, np.argmax(test_y[i]), np.argmax(train_y[nn_index])))

            # When the prediction matches the true label, accumulate accuracy
            if np.argmax(test_y[i]) == np.argmax(train_y[nn_index]):
                accuracy += 1. / len(test_x)

        print("Accuracy: %f" % accuracy)

    return None
Example #29
def kNN_rec(im_arr):
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    train_x, train_y = mnist.train.next_batch(60000)

    train_x_p = tf.placeholder(tf.float32, [None, 784])
    test_x_p = tf.placeholder(tf.float32, [784])

    # L2 distance: dist = sqrt(sum(|X1 - X2|^2))
    dist_l2 = tf.sqrt(tf.reduce_sum(tf.square(tf.abs(train_x_p + tf.negative(test_x_p))), reduction_indices=1))

    prediction = tf.arg_min(dist_l2, 0)
    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)
        nn_index = sess.run(prediction, feed_dict={train_x_p: train_x, test_x_p: im_arr[0]})
        knn_predict = np.argmax(train_y[nn_index])
    return knn_predict
Example #30
    def _setup_action_selection(self, state_ph):
        """
            Computes the best action from the current state by using randomly sampled action sequences
            to predict future states, evaluating these predictions according to a cost function,
            selecting the action sequence with the lowest cost, and returning the first action in that sequence

            returns:
                best_action: the action that minimizes the cost function (tensor with shape [self._action_dim])

            implementation details (in order):
                (a) We will assume state_ph has a batch size of 1 whenever action selection is performed
                (b) Randomly sample uniformly self._num_random_action_selection number of action sequences,
                    each of length self._horizon
                (c) Starting from the input state, unroll each action sequence using your neural network
                    dynamics model
                (d) While unrolling the action sequences, keep track of the cost of each action sequence
                    using self._cost_fn
                (e) Find the action sequence with the lowest cost, and return the first action in that sequence

            Hints:
                (i) self._cost_fn takes three arguments: states, actions, and next states. These arguments are
                    2-dimensional tensors, where the 1st dimension is the batch size and the 2nd dimension is the
                    state or action size
                (ii) You should call self._dynamics_func and self._cost_fn a total of self._horizon times
                (iii) Use tf.random_uniform(...) to generate the random action sequences

        """
        ### PROBLEM 2
        state = tf.tile(state_ph, [self._num_random_action_selection, 1])
        actions = tf.random_uniform(
            [self._num_random_action_selection, self._action_dim, self._horizon],
            minval=-1., maxval=1.)
        costs = tf.zeros([self._num_random_action_selection])
        for step in range(self._horizon):
            next_state = self._dynamics_func(state,
                                             actions[:, :, step],
                                             reuse=True)
            costs += self._cost_fn(state, actions[:, :, step], next_state)
            state = next_state
        best_sequence = tf.arg_min(costs, dimension=0)
        return actions[best_sequence, :, 0]
Example #31
    def create_model(self, num_samples, dim, k):
        with self.graph.as_default():
            X = tf.placeholder(tf.float32, [num_samples, dim])
            cluster_membership = tf.Variable(tf.zeros([num_samples]),
                                             dtype=tf.float32,
                                             name='cluster_membership')
            centroids = tf.Variable(tf.random_uniform([k, dim]),
                                    dtype=tf.float32,
                                    name='centroids')

            X_temp = tf.reshape(tf.tile(X, [1, k]), [num_samples, k, dim])
            centroids_temp = tf.reshape(tf.tile(centroids, [num_samples, 1]),
                                        [num_samples, k, dim])

            distances_to_centroids = tf.reduce_sum(
                tf.square(tf.subtract(X_temp, centroids_temp)),
                reduction_indices=2)  # N x k
            cluster_membership = tf.arg_min(distances_to_centroids, 1)

            return cluster_membership, X
Example #32
    def fit(self):
        self.init = tf.global_variables_initializer()

        distance = tf.reduce_sum(tf.abs(tf.add(self.xtr, tf.negative(self.xte))), reduction_indices=1)
        self.pred = tf.arg_min(distance, 0)
        accuracy = 0.

        if self.x_test is None:
            return

        with tf.Session() as sess:
            sess.run(self.init)

            for i in range(len(self.x_test)):
                nn_index = sess.run(self.pred, feed_dict={self.xtr: self.x_train, self.xte: self.x_test[i, :]})
                # print "Test", i, "Prediction:", np.argmax(self.y_train[nn_index]), \
                #     "True Class:", np.argmax(self.y_test[i])
                if np.argmax(self.y_train[nn_index]) == np.argmax(self.y_test[i]):
                    accuracy += 1. / len(self.x_test)
            print "Done!"
            print "Accuracy:", accuracy
            return accuracy
Example #33
    def __init__(self, train_features, train_labels, sequence_length):
        self.train_features = train_features
        self.train_labels = train_labels

        with tf.name_scope('input'):
            xtr = tf.placeholder(dtype=tf.float32, shape=[None, sequence_length])
            xte = tf.placeholder(dtype=tf.float32, shape=[sequence_length])

        # L1-Norm
        # distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)

        # L2-Norm
        distance = tf.sqrt(tf.reduce_sum(tf.square(xtr - xte), reduction_indices=1))

        prediction = tf.arg_min(distance, 0)

        accuracy = 0.

        self.xtr = xtr
        self.xte = xte
        self.distance = distance
        self.prediction = prediction
        self.accuracy = accuracy
Example #34
mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

# In this example, we limit mnist data
X_train, Y_train = mnist.train.next_batch(5000) #5000 for training (nn candidates)
X_test, Y_test = mnist.test.next_batch(200) #200 for testing

# tf Graph Input
x_train = tf.placeholder("float", [None, 784])
x_test = tf.placeholder("float", [784])

# Nearest Neighbor calculation using L1 Distance
# Calculate L1 Distance
distance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))), reduction_indices=1)

# Prediction: Get min distance index (Nearest neighbor)
pred = tf.arg_min(distance, 0)

accuracy = 0.

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:
    sess.run(init)

    for i in range(len(X_test)):
        # Get nearest neighbor
        nn_index = sess.run(pred, feed_dict={x_train: X_train, x_test: X_test[i, :]})

        # Get nearest neighbor class label and compare it to its true label
        if np.argmax(Y_train[nn_index]) == np.argmax(Y_test[i]):
            accuracy += 1. / len(X_test)
Example #35
# In this example, we limit mnist data
x_training_data, y_training_data = mnist.train.next_batch(5000)
x_test_data, y_test_data = mnist.test.next_batch(200)


# Reshape images to 1D
x_flat_training_data = np.reshape(x_training_data, newshape=(-1, 28 * 28))
x_flat_test_data = np.reshape(x_test_data, newshape=(-1, 28 * 28))

x_train_placeholder = tf.placeholder("float", [None, 784])
x_test_placeholder = tf.placeholder("float", [784])

distance = tf.reduce_sum(tf.abs(tf.add(x_train_placeholder, tf.negative(x_test_placeholder))), reduction_indices=1)

prediction = tf.arg_min(distance, 0)

accuracy = 0.

init = tf.global_variables_initializer()

with tf.Session() as session:
    session.run(init)

    for index in range(len(x_flat_test_data)):
        # get nearest neighbour
        nearest_neighbor_index = session.run(
            prediction,
            feed_dict={
                x_train_placeholder: x_flat_training_data,
                x_test_placeholder: x_flat_test_data[index, :]
            })
Example #36
# Initialize population array
population = tf.Variable(np.random.randn(pop_size, features), dtype=tf.float32)

# Initialize placeholders
truth_ph = tf.placeholder(tf.float32, [1, features])
crossover_mat_ph = tf.placeholder(tf.float32, [num_children, features])
mutation_val_ph = tf.placeholder(tf.float32, [num_children, features])

# Calculate fitness (MSE)
fitness = -tf.reduce_mean(tf.square(tf.subtract(population, truth_ph)), 1)
top_vals, top_ind = tf.nn.top_k(fitness, k=pop_size)

# Get best fit individual
best_val = tf.reduce_min(top_vals)
best_ind = tf.arg_min(top_vals, 0)
best_individual = tf.gather(population, best_ind)

# Get parents
population_sorted = tf.gather(population, top_ind)
parents = tf.slice(population_sorted, [0, 0], [num_parents, features])


# Get offspring
# Indices to shuffle-gather parents
rand_parent1_ix = np.random.choice(num_parents, num_children)
rand_parent2_ix = np.random.choice(num_parents, num_children)
# Gather parents by shuffled indices, expand back out to pop_size too
rand_parent1 = tf.gather(parents, rand_parent1_ix)
rand_parent2 = tf.gather(parents, rand_parent2_ix)
rand_parent1_sel = tf.multiply(rand_parent1, crossover_mat_ph)
Example #37
def argmin(input, axis):
    return tf.arg_min(input, dimension=axis)
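tf.arg_min is the long-deprecated TF 1.x spelling; an equivalent wrapper on current TensorFlow would use tf.argmin, as in this sketch:

import tensorflow as tf

def argmin(input, axis):
    # Same behavior with the non-deprecated API; output dtype is int64 by default
    return tf.argmin(input, axis=axis)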
Example #38
fig, ax = plt.subplots()
ax.scatter(tr_data.transpose()[0], tr_data.transpose()[1], marker='o', s=100, c=tr_features, cmap=plt.cm.coolwarm)
plt.plot()

points=tf.Variable(data)
cluster_assignments = tf.Variable(tf.zeros([N], dtype=tf.int64))

sess = tf.Session()
sess.run(tf.global_variables_initializer())

test=[]

for i, j in zip(te_data, te_features):
    distances = tf.reduce_sum(tf.square(tf.subtract(i, tr_data)), reduction_indices=1)
    neighbor = tf.arg_min(distances, 0)
    
    # print(tr_features[sess.run(neighbor)])
    # print(j)
    test.append(tr_features[sess.run(neighbor)])
print(test)
fig, ax = plt.subplots()
ax.scatter(te_data.transpose()[0], te_data.transpose()[1], marker='o', s=100, c=test, cmap=plt.cm.coolwarm)
plt.plot()

#rep_points_v = tf.reshape(points, [1, N, 2])
#rep_points_h = tf.reshape(points, [N, 2])
#sum_squares = tf.reduce_sum(tf.square(rep_points - rep_points), reduction_indices=2)
#print(sess.run(tf.square(rep_points_v - rep_points_h)))

end = time.time()