def train_eval_nn(imgs):
    output = build_nn(x)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output))
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=learning_rate).minimize(loss)
    accuracy = tf.reduce_sum(
        tf.cast(tf.equal(tf.arg_max(y, 1), tf.arg_max(output, 1)), tf.float32))

    saver = tf.train.Saver()

    best_acc = 0

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if not os.path.exists('checkpoint'):
            train_x, train_y, test_x, test_y = get_sym_data()

            for i in range(EPOCH):
                epoch_cost = 0

                for idx in range(int(train_x.shape[0] / BATCH_SIZE)):
                    x_data, y_data = train_x[idx * BATCH_SIZE:(idx + 1) *
                                             BATCH_SIZE], train_y[idx *
                                                                  BATCH_SIZE:
                                                                  (idx + 1) *
                                                                  BATCH_SIZE]

                    cost, _ = sess.run([loss, optimizer],
                                       feed_dict={
                                           x: x_data,
                                           y: y_data,
                                           learning_rate: get_lr(i)
                                       })
                    epoch_cost += cost

                acc_num = sess.run(accuracy, feed_dict={x: test_x, y: test_y})
                epoch_acc = acc_num / test_x.shape[0]

                if epoch_acc > best_acc:
                    best_acc = epoch_acc
                    saver.save(sess, CKPT_FILE)

                print('Epoch {}:\t loss: {:.6}\t acc: {:.4}'.format(
                    i, epoch_cost, epoch_acc))

            print('Best acc: {:.4}'.format(best_acc))
            print('Training done.')
            exit()
        else:
            saver.restore(sess, CKPT_FILE)
            return predict(sess, output, imgs)
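Note: every snippet on this page relies on tf.arg_max, which TF 1.x kept only as a deprecated alias of tf.argmax; the two return identical results. A quick check, assuming a TF 1.x session:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.1]])
old = tf.arg_max(logits, 1)  # deprecated spelling
new = tf.argmax(logits, 1)   # preferred spelling
with tf.Session() as sess:
    print(sess.run([old, new]))  # [array([1, 0]), array([1, 0])]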
Example #2
    def __init__(self,
                 Input_Dim,
                 Output_Dim=1,
                 Hidden=[20, 10],
                 Activation='Sigmoid',
                 Learning_Rate=0.001,
                 Alpha=0):
        self._Activation_Method = self._Activation(Activation)
        regularizer = tf.contrib.layers.l2_regularizer(
            Alpha) if Alpha != 0 else None
        self.X = tf.placeholder(shape=[None, Input_Dim],
                                dtype=tf.float32,
                                name='States')
        Hidden_Layers = self.X
        for layers in Hidden:
            Hidden_Layers = tf.layers.dense(Hidden_Layers,
                                            layers,
                                            activation=self._Activation_Method,
                                            activity_regularizer=regularizer)
        Q_Raw = tf.layers.dense(Hidden_Layers,
                                Output_Dim,
                                activation=None,
                                activity_regularizer=regularizer)
        self.choose = tf.arg_max(Q_Raw, 1)
        self.Q = tf.reshape(tf.reduce_max(Q_Raw, 1), shape=(-1, 1))

        self.Q_In = tf.placeholder(shape=[None, 1],
                                   dtype=tf.float32,
                                   name='Quality')
        self._Optimizer = tf.train.AdamOptimizer(learning_rate=Learning_Rate)
        self.loss = tf.losses.mean_squared_error(self.Q_In, self.Q)

        # self.Weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        self.fit = self._Optimizer.minimize(self.loss)
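The snippet omits the class name, so `QNet` below is hypothetical; a minimal usage sketch for the constructor above:

import numpy as np

net = QNet(Input_Dim=4, Output_Dim=2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    states = np.random.rand(8, 4).astype(np.float32)
    actions = sess.run(net.choose, feed_dict={net.X: states})  # greedy actions
    targets = np.ones((8, 1), dtype=np.float32)                # dummy Q targets
    _, mse = sess.run([net.fit, net.loss],
                      feed_dict={net.X: states, net.Q_In: targets})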
Example #3
def train_with_gradient_and_valuation(agent_list, grad, bi, lr, distr_type):
    f_ini_p = open(os.path.join(os.path.dirname(__file__), "initial_model_parameters.txt"), "r")
    para_lines = f_ini_p.readlines()
    w_paras = para_lines[0].split("\t")
    w_paras = [float(i) for i in w_paras]
    b_paras = para_lines[1].split("\t")
    b_paras = [float(i) for i in b_paras]
    w_initial_g = np.asarray(w_paras, dtype=np.float32).reshape([784, 10])
    b_initial_g = np.asarray(b_paras, dtype=np.float32).reshape([10])
    f_ini_p.close()
    model_g = {
        'weights': w_initial_g,
        'bias': b_initial_g
    }
    for i in range(len(grad[0])):
        # i -> training round index
        gradient_w = np.zeros([784, 10], dtype=np.float32)
        gradient_b = np.zeros([10], dtype=np.float32)
        for j in agent_list:
            gradient_w = np.add(np.multiply(grad[j][i], 1/len(agent_list)), gradient_w)
            gradient_b = np.add(np.multiply(bi[j][i], 1/len(agent_list)), gradient_b)
        model_g['weights'] = np.subtract(model_g['weights'], np.multiply(lr[0][i], gradient_w))
        model_g['bias'] = np.subtract(model_g['bias'], np.multiply(lr[0][i], gradient_b))

    test_images = readTestImagesFromFile(False)
    test_labels_onehot = readTestLabelsFromFile(False)
    m = np.dot(test_images, np.asarray(model_g['weights']))
    test_result = m + np.asarray(model_g['bias'])
    y = tf.nn.softmax(test_result)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.arg_max(test_labels_onehot, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return accuracy.numpy()
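Since `test_result` is already a NumPy array at this point, the same accuracy can be computed without building any graph ops; a sketch using the names from the function above (softmax is monotonic, so the argmax of the logits equals the argmax of the softmax):

import numpy as np

preds = np.argmax(test_result, axis=1)
truth = np.argmax(test_labels_onehot, axis=1)
accuracy = float(np.mean(preds == truth))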
Example #4
def train_with_gradient_and_valuation(agent_list, grad, bi, lr, distr_type,
                                      iter_n, g_m):
    model_g = {'weights': g_m[0], 'bias': g_m[1]}
    for i in range(iter_n - 1, iter_n):
        # i -> training round index
        gradient_w = np.zeros([784, 10], dtype=np.float32)
        gradient_b = np.zeros([10], dtype=np.float32)
        for j in agent_list:
            gradient_w = np.add(np.multiply(grad[j][i], 1 / len(agent_list)),
                                gradient_w)
            gradient_b = np.add(np.multiply(bi[j][i], 1 / len(agent_list)),
                                gradient_b)
        model_g['weights'] = np.subtract(model_g['weights'],
                                         np.multiply(lr[0][i], gradient_w))
        model_g['bias'] = np.subtract(model_g['bias'],
                                      np.multiply(lr[0][i], gradient_b))

    test_images = readTestImagesFromFile(False)
    test_labels_onehot = readTestLabelsFromFile(False)
    m = np.dot(test_images, np.asarray(model_g['weights']))
    test_result = m + np.asarray(model_g['bias'])
    y = tf.nn.softmax(test_result)
    correct_prediction = tf.equal(tf.argmax(y, 1),
                                  tf.arg_max(test_labels_onehot, 1))
    #print(list(tf.argmax(y, 1).numpy()))
    #print(list(tf.arg_max(test_labels_onehot, 1).numpy()))
    #print(model_g['weights'])
    #print(model_g['bias'])
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return accuracy.numpy()
Example #5
def train(params=None):
    num_classes = 10
    input_size = 784
    hidden_units_size = 30
    batch_size = 100
    training_iterations = 10000

    # Load the data
    mnist = input_data.read_data_sets('/storage/emulated/0/tensor-data/', one_hot=True)
    x_train = mnist.train.images
    y_train = mnist.train.labels
    x_test = mnist.test.images
    y_test = mnist.test.labels

    # Define variables
    x = tf.placeholder(tf.float32, shape=[None, input_size])
    y_ = tf.placeholder(tf.float32, shape=[None, num_classes])
    w = tf.Variable(tf.truncated_normal([input_size, num_classes]), name='w1')
    b = tf.Variable(tf.constant(0.01, shape=[num_classes]), name='b1')
    y = tf.nn.softmax(tf.matmul(x, w) + b)

    # Define the loss
    loss = -tf.reduce_mean(y_ * tf.log(y))
    optimizer = tf.train.GradientDescentOptimizer(0.2)
    train = optimizer.minimize(loss)

    # Initialize variables
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Evaluate accuracy
    accuracy_rate = tf.equal(tf.arg_max(y, 1), tf.arg_max(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(accuracy_rate, 'float32'))

    # Train
    for step in range(training_iterations):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        sess.run(train, feed_dict={x: batch_x, y_: batch_y})
        if step % 1000 == 0:
            print(step, sess.run(accuracy, feed_dict={x: x_test, y_: y_test}))

    if not tf.gfile.Exists('/storage/emulated/0/tensor-model/'):
        tf.gfile.MakeDirs('/storage/emulated/0/tensor-model/')
    saver = tf.train.Saver()  # instantiate a Saver to save the model
    saver.save(sess, '/storage/emulated/0/tensor-model/my_model.ckpt')
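The checkpoint saved above can later be restored by rebuilding the same graph and calling Saver.restore (a minimal sketch; same path as the snippet):

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, '/storage/emulated/0/tensor-model/my_model.ckpt')
    print(sess.run(accuracy, feed_dict={x: x_test, y_: y_test}))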
Example #6
def trainGroup(ss,federated_train_data_divide,test_images,test_labels_onehot):
    federated_train_data = []
    for item in ss:
        federated_train_data.append(federated_train_data_divide[item])

    f_ini_p = open(os.path.join(os.path.dirname(__file__), "initial_model_parameters.txt"), "r")
    para_lines = f_ini_p.readlines()
    w_paras = para_lines[0].split("\t")
    w_paras = [float(i) for i in w_paras]
    b_paras = para_lines[1].split("\t")
    b_paras = [float(i) for i in b_paras]
    w_initial = np.asarray(w_paras, dtype=np.float32).reshape([784, 10])
    b_initial = np.asarray(b_paras, dtype=np.float32).reshape([10])
    f_ini_p.close()

    initial_model = {
        'weights': w_initial,
        'bias': b_initial
    }

    model = initial_model
    learning_rate = 0.1
    for round_num in range(ROUND_NUM):
        local_models = federated_train(
            model, learning_rate, federated_train_data)
        # print(len(local_models))
        print("learning rate: ", learning_rate)

        m_w = np.zeros([784, 10], dtype=np.float32)
        m_b = np.zeros([10], dtype=np.float32)

        for local_model_index in range(len(local_models)):
            m_w = np.add(np.multiply(
                local_models[local_model_index][0], 1/len(ss)), m_w)
            m_b = np.add(np.multiply(
                local_models[local_model_index][1], 1/len(ss)), m_b)
            model = {
                'weights': m_w,
                'bias': m_b
            }
        learning_rate = learning_rate * 0.9
        loss = federated_eval(model, federated_train_data)
        print('round {}, loss={}'.format(round_num, loss))
        print(time.time() - start_time)
        '''model = federated_train(model, learning_rate, federated_train_data)
        learning_rate = learning_rate * 0.9
        loss = federated_eval(model, federated_train_data)
        print('round {}, loss={}'.format(round_num, loss))'''

    m = np.dot(test_images, np.asarray(model['weights']))
    test_result = m + np.asarray(model['bias'])
    y = tf.nn.softmax(test_result)
    correct_prediction = tf.equal(
        tf.argmax(y, 1), tf.arg_max(test_labels_onehot, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    
    return accuracy.numpy()
Example #7
    def calc_reward(self, outputs):
        # consider the action at the last time step
        outputs = outputs[-1]  # look at ONLY THE END of the sequence
        outputs = tf.reshape(outputs, (self.batch_size, self.cell_out_size))

        # get the baseline
        b = tf.stack(self.baselines)
        b = tf.concat(axis=2, values=[b, b])
        b = tf.reshape(b, (self.batch_size, (self.nGlimpses) * 2))
        no_grad_b = tf.stop_gradient(b)

        # get the action(classification)
        p_y = tf.nn.softmax(tf.matmul(outputs, self.Wa_h_a) + self.Ba_h_a)
        max_p_y = tf.arg_max(p_y, 1)
        correct_y = tf.cast(self.labels_placeholder, tf.int64)

        # reward for all examples in the batch
        R = tf.cast(tf.equal(max_p_y, correct_y), tf.float32)
        reward = tf.reduce_mean(R)  # mean reward
        R = tf.reshape(R, (self.batch_size, 1))
        R = tf.tile(R, [1, (self.nGlimpses) * 2])

        # get the location

        p_loc = self.gaussian_pdf(self.mean_locs, self.sampled_locs)
        # p_loc = tf.tanh(p_loc)

        p_loc_orig = p_loc
        p_loc = tf.reshape(p_loc, (self.batch_size, (self.nGlimpses) * 2))

        # define the cost function
        J = tf.concat(axis=1,
                      values=[
                          tf.log(p_y + self.SMALL_NUM) *
                          (self.onehot_labels_placeholder),
                          tf.log(p_loc + self.SMALL_NUM) * (R - no_grad_b)
                      ])
        J = tf.reduce_sum(J, 1)
        J = J - tf.reduce_sum(tf.square(R - b), 1)
        J = tf.reduce_mean(J, 0)
        cost = -J
        var_list = tf.trainable_variables()
        grads = tf.gradients(cost, var_list)
        grads, _ = tf.clip_by_global_norm(grads, 0.5)
        # define the optimizer
        # lr_max = tf.maximum(lr, lr_min)
        optimizer = tf.train.AdamOptimizer(self.lr)
        # optimizer = tf.train.MomentumOptimizer(lr, momentumValue)
        # train_op = optimizer.minimize(cost, global_step)
        train_op = optimizer.apply_gradients(zip(grads, var_list),
                                             global_step=self.global_step)

        return cost, reward, max_p_y, correct_y, train_op, b, tf.reduce_mean(
            b), tf.reduce_mean(R - b), self.lr
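In symbols, with class probabilities $p_y$, location densities $p_{loc}$, reward $R$, and baseline $b$ (gradient-stopped inside the REINFORCE term but trained by the squared-error term), the snippet maximizes

$$J = \frac{1}{B}\sum_{n=1}^{B}\Big[\sum_c y_c \log p_y^c \;+\; \sum_g \log p_{loc}^g\,(R - \bar b_g) \;-\; \sum_g (R - b_g)^2\Big], \qquad \text{cost} = -J,$$

where $\bar b$ denotes tf.stop_gradient(b).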
Example #8
  def __init__(self, sess, config, name, is_train):
    self.sess = sess
    self.name = name
    self.is_train = is_train


    self.X_hsd = tf.placeholder(tf.float32, shape=[config.batch_size, config.im_size, config.im_size, 3], name="original_color_image")
    self.D, h_s = tf.split(self.X_hsd,[1,2], axis=3)

    self.E_Step = CNN("E_Step", config, is_train=self.is_train)
    self.Gama = self.E_Step(self.D)
    self.loss, self.Mu, self.Std = GMM_M_Step(self.X_hsd, self.Gama, config.ClusterNo, name='GMM_Statistics')
    
    if self.is_train:

      self.optim = tf.train.AdamOptimizer(config.lr)
      self.train = self.optim.minimize(self.loss, var_list=self.E_Step.Param)

    ClsLbl = tf.arg_max(self.Gama, 3)
    ClsLbl = tf.cast(ClsLbl, tf.float32)
    
    ColorTable = [[255,0,0],[0,255,0],[0,0,255],[255,255,0], [0,255,255], [255,0,255]]
    colors = tf.cast(tf.constant(ColorTable), tf.float32)
    Msk = tf.tile(tf.expand_dims(ClsLbl, axis=3),[1,1,1,3])
    for k in range(0, config.ClusterNo):
        ClrTmpl = tf.einsum('anmd,df->anmf', tf.expand_dims(tf.ones_like(ClsLbl), axis=3), tf.reshape(colors[k,...],[1,3]))
        Msk = tf.where(tf.equal(Msk,k), ClrTmpl, Msk)
    
    
    self.X_rgb = utils.HSD2RGB(self.X_hsd)
    tf.summary.image("1.Input_image", self.X_rgb*255.0, max_outputs=2)
    tf.summary.image("2.Gamma_image",  Msk, max_outputs=2)
    tf.summary.image("3.Density_image", self.D*255.0, max_outputs=2)
    tf.summary.scalar("loss", self.loss)

    self.summary_op = tf.summary.merge_all()

    self.saver = tf.train.Saver()
    self.summary_writer = tf.summary.FileWriter(config.logs_dir, self.sess.graph)

    self.sess.run(tf.global_variables_initializer())
    
    ckpt = tf.train.get_checkpoint_state(config.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        print("Model restored...")
Example #9
    def __tf_call__(self, prev_image, prev_action, prev_reward, state):
        prev_reward = tf.expand_dims(prev_reward, axis=-1)
        if prev_action.get_shape().ndims == 0:
            prev_action = tf.expand_dims(prev_action, axis=-1)
        # Add batch and steps dimensions
        prev_image = tf.reshape(prev_image,
                                [1, 1] + prev_image.get_shape().as_list())
        prev_action = tf.reshape(prev_action,
                                 [1, 1] + prev_action.get_shape().as_list())
        prev_reward = tf.reshape(prev_reward,
                                 [1, 1] + prev_reward.get_shape().as_list())
        # calculate all proposals in a single batch inference
        prev_image = tf.tile(prev_image, [self._proposals, 1, 1, 1, 1])
        prev_action = tf.tile(prev_action, [self._proposals, 1] + [1] *
                              (prev_action.get_shape().ndims - 2))
        prev_reward = tf.tile(prev_reward, [self._proposals, 1] + [1] *
                              (prev_action.get_shape().ndims - 2))
        state = self._observe_fn(prev_image, prev_action, prev_reward, state)
        means, stdevs = self.initialize_distribution()

        @tf.function
        def iteration(means_stdevs, _):
            means, stdevs = means_stdevs
            traj_proposals = self.sample_continuous_actions(means, stdevs)
            rewards, _ = self.generate_rewards(traj_proposals, state)
            means, stdevs = self.fit_gaussian(rewards, traj_proposals)
            return means, stdevs

        means, stdevs = static_scan(iteration, tf.range(self._iterations - 1),
                                    (means, stdevs))
        means = means[-1]
        stdevs = stdevs[-1]
        traj_proposals = self.sample_continuous_actions(means, stdevs)
        rewards, predictions = self.generate_rewards(traj_proposals, state)
        rewards = tf.squeeze(rewards, axis=-1)
        index = tf.arg_max(rewards, dimension=0)
        best_action = traj_proposals[index, 0]
        predictions = {
            key: value[index, 0]
            for key, value in predictions.items()
        }
        return best_action, predictions, state
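The loop in __tf_call__ is the cross-entropy method (CEM): sample candidate action trajectories from a Gaussian, score them with the learned reward model, and refit the Gaussian. A minimal NumPy sketch of the idea, assuming fit_gaussian refits to the top-scoring proposals (`score` is a hypothetical batch scoring function):

import numpy as np

def cem_plan(score, dim, proposals=64, elites=8, iterations=10):
    mean, std = np.zeros(dim), np.ones(dim)
    for _ in range(iterations):
        samples = mean + std * np.random.randn(proposals, dim)
        elite = samples[np.argsort(score(samples))[-elites:]]
        mean, std = elite.mean(axis=0), elite.std(axis=0) + 1e-6
    return mean  # approximate best action sequence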
Example #10
def argmax2d(tensor):
    """Find the indices of the peak values on 2-D maps.

    If there are multiple locations with the same value, return the top-left one.

    Args:
      tensor: a 4-d tensor [BATCH, HEIGHT, WIDTH, N].

    Returns:
      [BATCH, N, 2] locations (row, col) for each channel and batch.
    """
    with tf.name_scope(None, 'argmax2d', [tensor]):
        shape = tf.shape(tensor)
        batch, width, channels = shape[0], shape[2], shape[3]
        flat_tensor = tf.reshape(tensor, (batch, -1, channels))
        index = tf.cast(tf.arg_max(flat_tensor, 1), tf.int32)
        y = index // width
        x = index % width
        locations = tf.stack([y, x], -1)
        return locations
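A quick sanity check for argmax2d (a sketch; assumes the function above is in scope and the TF 1.x graph mode used throughout this page):

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

maps = np.zeros((1, 4, 5, 2), dtype=np.float32)
maps[0, 2, 3, 0] = 1.0  # peak of channel 0 at (row=2, col=3)
maps[0, 1, 4, 1] = 1.0  # peak of channel 1 at (row=1, col=4)
with tf.Session() as sess:
    print(sess.run(argmax2d(tf.constant(maps))))  # [[[2 3] [1 4]]]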
Example #11
def train_with_gradient_and_valuation(agent_list, grad, bi, lr, distr_type,
                                      datanum, iter_n, g_m):
    model_g = {'weights': g_m[0], 'bias': g_m[1]}
    data_sum = 0
    for i in agent_list:
        data_sum += datanum[i]
    agents_w = [0 for _ in range(NUM_AGENT)]
    for i in agent_list:
        agents_w[i] = datanum[i] / data_sum
    for i in range(iter_n - 1, iter_n):
        # i -> training round index
        gradient_w = np.zeros([784, 10], dtype=np.float32)
        gradient_b = np.zeros([10], dtype=np.float32)
        for j in agent_list:
            gradient_w = np.add(np.multiply(grad[j][i], agents_w[j]),
                                gradient_w)
            gradient_b = np.add(np.multiply(bi[j][i], agents_w[j]), gradient_b)
        model_g['weights'] = np.subtract(model_g['weights'],
                                         np.multiply(lr[0][i], gradient_w))
        model_g['bias'] = np.subtract(model_g['bias'],
                                      np.multiply(lr[0][i], gradient_b))

    test_images = None
    test_labels_onehot = None
    if distr_type == "SAME":
        test_images = readTestImagesFromFile(True)
        test_labels_onehot = readTestLabelsFromFile(True)
    else:
        test_images = readTestImagesFromFile(False)
        test_labels_onehot = readTestLabelsFromFile(False)
    # print(numpy.asarray(state.model.trainable.bias).shape)
    # print(numpy.asarray(state.model.trainable.weights).shape)
    m = np.dot(test_images, np.asarray(model_g['weights']))
    # print(m.shape)
    test_result = m + np.asarray(model_g['bias'])
    y = tf.nn.softmax(test_result)
    correct_prediction = tf.equal(tf.argmax(y, 1),
                                  tf.arg_max(test_labels_onehot, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return accuracy.numpy()
Example #12
# Evaluate our model using this test dataset
x_test = [[2, 1, 1], [3, 1, 2], [3, 3, 4]]
y_test = [[0, 0, 1], [0, 0, 1], [0, 0, 1]]

X = tf.placeholder("float", [None, 3])
Y = tf.placeholder("float", [None, 3])
W = tf.Variable(tf.random_normal([3, 3]))
b = tf.Variable(tf.random_normal([3]))

hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

# Test model: correct prediction
prediction = tf.arg_max(hypothesis, 1)
is_correct = tf.equal(prediction, tf.arg_max(Y, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))

# Launch graph
with tf.Session() as sess:
    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())
    for step in range(201):
        cost_val, W_val, _ = sess.run([cost, W, optimizer],
                                      feed_dict={
                                          X: x_data,
                                          Y: y_data
                                      })
        print(step, cost_val, W_val)
Example #13
   - returns the index of the min/max value along each axis
  2. unique/setdiff1d
   - returns the deduplicated values / set-difference result, with indices
'''

import tensorflow.compat.v1 as tf  # ver1.x
tf.disable_v2_behavior()  # do not use TF 2.x behavior

# 1. argmin/argmax
a = tf.constant([5, 2, 1, 4, 3], dtype=tf.int32)
b = tf.constant([4, 5, 1, 3, 2])
c = tf.constant([[5, 4, 2], [3, 2, 4]])  # 2-D

# dimension: the axis to reduce (0 for a vector)
min_index = tf.arg_min(a, dimension=0)  # 1-D input
max_index = tf.arg_max(b, dimension=0)  # 1-D input
max_index2 = tf.arg_max(c, dimension=1)  # 2-D input
#
sess = tf.Session()
print(sess.run(min_index))  # 2
print(sess.run(max_index))  # 1
print(sess.run(max_index2))  # [0 2]

# 2. unique/setdiff1d

c = tf.constant(['a', 'b', 'a', 'c', 'b'])
# unique
cstr, cidx = tf.unique(c)
print(sess.run(cstr))  # [b'a' b'b' b'c']
print(sess.run(cidx))  # [0 1 0 2 1]
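The docstring above also promises setdiff1d; a short sketch completing the demo in the same session (tf.setdiff1d returns the values of x not found in y, plus their indices in x):

x = tf.constant([1, 2, 3, 4, 5])
y = tf.constant([2, 4])
out, idx = tf.setdiff1d(x, y)
print(sess.run(out))  # [1 3 5]
print(sess.run(idx))  # [0 2 4]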
Example #14
                                  one_hot=True)  # downloads the images automatically
nb_classes = 10  # number of classes: the ten digits

# MNIST data image of shape 28 * 28 = 784
X = tf.placeholder(tf.float32, [None, 784])  # 784 inputs
# 0 - 9 digits recognition = 10 classes
Y = tf.placeholder(tf.float32, [None, nb_classes])  # 10 outputs
W = tf.Variable(tf.random_normal([784, nb_classes]))
b = tf.Variable(tf.random_normal([nb_classes]))
# Hypothesis (using softmax)
hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
# Test model
# arg_max(): returns the index of the largest entry (here, the most probable class)
is_correct = tf.equal(tf.arg_max(hypothesis, 1),
                      tf.arg_max(Y, 1))  # pick the largest of the 10 predictions
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(is_correct,
                                  tf.float32))  # cast to float to get the accuracy
# parameters
training_epochs = 15
batch_size = 100  # read the large dataset in chunks of 100 (limited memory)
# 55000 / 100 = 550
# epoch: one full pass over the training data
print('number of examples =', mnist.train.num_examples)  # 55,000

with tf.Session() as sess:
    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())
    # Training cycle
Example #15
x_data = [[1, 2, 1, 1], [2, 1, 3, 2], [3, 1, 3, 4], [4, 1, 5, 5], [1, 7, 5, 5],
          [1, 2, 5, 6], [1, 6, 6, 6], [1, 7, 7, 7]]
y_data = [[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 1, 0], [0, 1, 0], [0, 1, 0],
          [1, 0, 0], [1, 0, 0]]

X = tf.placeholder(tf.float32, shape=[None, 4])
Y = tf.placeholder(tf.float32, shape=[None, 3])

W = tf.Variable(tf.random_normal([4, 3]), name="weight")
b = tf.Variable(tf.random_normal([3]), name="bias")

# Use TensorFlow's softmax
hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)

# Cost function: the mean over examples of -sum(y * log(H(x))).
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(2000):
    sess.run([train], feed_dict={X: x_data, Y: y_data})
    if step % 20 == 0:
        print(step, sess.run([cost], feed_dict={X: x_data, Y: y_data}))

# Check which class has the maximum probability for the given input.
a = sess.run(hypothesis, feed_dict={X: [[1, 100, 7, 8]]})
print(a, sess.run(tf.arg_max(a, 1)))
Example #16
def train(params=None):
    mnist = input_data.read_data_sets('/storage/emulated/0/tensor-data/',
                                      one_hot=True)
    # Load the data
    x_data = mnist.train.images
    y_data = mnist.train.labels
    x_test = mnist.test.images
    y_test = mnist.test.labels

    # Inputs
    xs = tf.placeholder(tf.float32, shape=[None, 784])
    ys = tf.placeholder(tf.float32, shape=[None, 10])
    x_images = tf.reshape(xs, [-1, 28, 28, 1])

    # First convolutional layer
    # conv1
    w_con1 = weights([5, 5, 1, 32], "w1")
    b_con1 = bias([32])
    h_con1 = tf.nn.conv2d(x_images, w_con1, [1, 1, 1, 1], padding='SAME')
    h_relu1 = tf.nn.relu(h_con1 + b_con1)
    #pool1
    h_pool1 = tf.nn.max_pool(h_relu1,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME')

    # Second convolutional layer
    # conv2
    w_con2 = weights([5, 5, 32, 64], "w2")
    b_con2 = bias([64])
    h_con2 = tf.nn.conv2d(h_pool1,
                          w_con2,
                          strides=[1, 1, 1, 1],
                          padding='SAME')
    h_relu2 = tf.nn.relu(h_con2 + b_con2)
    #pool2
    h_pool2 = tf.nn.max_pool(h_relu2,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME')

    # Fully connected layer
    w_fc1 = weights([7 * 7 * 64, 1024], "w3")
    b_fc1 = bias([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)

    #drop_out
    keep_pro = tf.placeholder(dtype=tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_pro)

    # Output layer
    w_fc2 = weights([1024, 10], "w4")
    b_fc2 = bias([10])
    h_fc2 = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2) + b_fc2)

    # Loss function
    loss = -tf.reduce_mean(ys * tf.log(h_fc2))
    train = tf.train.AdamOptimizer(1e-4).minimize(loss)
    # Initialize variables
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Compute accuracy
    accuracy = tf.equal(tf.arg_max(ys, 1), tf.arg_max(h_fc2, 1))
    accuracy = tf.reduce_mean(tf.cast(accuracy, tf.float32))

    # Start training
    for step in range(5000):
        batch_x, batch_y = mnist.train.next_batch(100)
        sess.run(train, feed_dict={xs: batch_x, ys: batch_y, keep_pro: 0.8})
        if step % 100 == 0:
            print(
                step,
                sess.run(accuracy,
                         feed_dict={
                             xs: mnist.test.images,
                             ys: mnist.test.labels,
                             keep_pro: 1
                         }))

    if not tf.gfile.Exists('/storage/emulated/0/tensor-model/'):
        tf.gfile.MakeDirs('/storage/emulated/0/tensor-model/')
    saver = tf.train.Saver()  # instantiate a Saver to save the model
    saver.save(sess, '/storage/emulated/0/tensor-model/my_model.ckpt')
Example #17
# MNIST data image of shape 28 * 28 = 784
X = tf.placeholder(tf.float32, [None, 784])
# 0 - 9 digits recognition = 10 classes
Y = tf.placeholder(tf.float32, [None, nb_classes])

W = tf.Variable(tf.random_normal([784, nb_classes]))
b = tf.Variable(tf.random_normal([nb_classes]))

# Hypothesis (using softmax)
hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)

cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

# Test model
is_correct = tf.equal(tf.arg_max(hypothesis, 1), tf.arg_max(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))

# parameters
training_epochs = 15
batch_size = 100

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(mnist.train.num_examples / batch_size)

        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
Example #18
def train_and_test(training_data, test_data):
    if len(training_data['input']) != len(training_data['output']):
        print("トレーニングデータの入力と出力のデータの数が一致しません")
        return
    if len(test_data['input']) != len(test_data['output']):
        print("テストデータの入力と出力のデータの数が一致しません")
        return

    # Create the network inputs
    with tf.name_scope('Inputs'):
        input = tf.placeholder(tf.float32, shape=[None, 3], name='Input')
    with tf.name_scope('Outputs'):
        true_output = tf.placeholder(tf.float32, shape=[None, 3], name='Output')

    # Helper that builds one network layer
    def hidden_layer(x, layer_size, is_output=False):
        name = 'Hidden-Layer' if not is_output else 'Output-Layer'
        with tf.name_scope(name):
            # Weights
            w = tf.Variable(tf.random_normal([x.get_shape()[1].value, layer_size]), name='Weight')
            # Bias
            b = tf.Variable(tf.zeros([layer_size]), name='Bias')
            # Weighted sum of the inputs (per batch)
            z = tf.matmul(x, w) + b
            a = tf.tanh(z) if not is_output else z
        return a

    # Build the layers
    # a 3-10-10-3 DNN
    layer1 = hidden_layer(input, 10)
    layer2 = hidden_layer(layer1, 10)
    output = hidden_layer(layer2, 3, is_output=True)

    # Define the loss
    with tf.name_scope("Loss"):
        # Cross-entropy
        with tf.name_scope("Cross-Entropy"):
            error = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=true_output, logits=output))
        # How closely the computed output matches the true output
        with tf.name_scope("Accuracy"):
            accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(true_output, 1), tf.argmax(output, 1)), tf.float32)) * 100.0
        with tf.name_scope("Prediction"):
            # Op that normalizes the outputs into probabilities (so the possible outcomes sum to 1)
            prediction = tf.nn.softmax(output)

    # Create the training op
    with tf.name_scope("Train"):
        train_op = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(error)

    # Create the session and initialize variables
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Summaries for TensorBoard
    writer = tf.summary.FileWriter(FLAGS.summary_dir + '/' + dt.datetime.now().strftime('%Y%m%d-%H%M%S'), sess.graph)
    tf.summary.scalar('CrossEntropy', error)
    tf.summary.scalar('Accuracy', accuracy)
    summary = tf.summary.merge_all()

    # Runs the training loop
    def train():
        print('----------------------------------------------Training start----------------------------------------------')
        batch_size = FLAGS.batch_size
        loop_per_epoch = int(len(training_data['input']) / batch_size)
        max_epoch = FLAGS.max_epoch
        print_interval = max_epoch / 10 if max_epoch >= 10 else 1
        step = 0
        start_time = time.time()
        for e in range(max_epoch):
            for i in range(loop_per_epoch):
                batch_input = training_data['input'][i*batch_size:(i+1)*batch_size]
                batch_output = training_data['output'][i*batch_size:(i+1)*batch_size]
                _, loss, acc, report = sess.run([train_op, error, accuracy, summary], feed_dict={input: batch_input, true_output: batch_output})
                step += batch_size

            writer.add_summary(report, step)
            writer.flush()

            if (e+1) % print_interval == 0:
                learning_speed = (e + 1.0) / (time.time() - start_time)
                print('Epoch:{:3}    cross-entropy:{:.6f}    accuracy:{:6.2f}%    speed:{:5.2f} epochs/sec'.format(e+1, loss, acc, learning_speed))

        print('----------------------------------------------Training done----------------------------------------------')
        print('Training {} epochs took {:.2f} seconds'.format(max_epoch, time.time() - start_time))

    # Tests what the network has learned
    def test():
        print('----------------------------------------------Evaluation start----------------------------------------------')
        # Header
        print('{:5}  {:20}      {:20}      {:20}      {:2}'.format('', 'Opponent move', 'Winning move', 'AI guess', 'Result'))
        print('{}  {:3}   {:3}   {:3}      {:3}   {:3}   {:3}      {:3}   {:3}   {:3}'.format('No.  ', 'Rock ', 'Scis.', 'Paper', 'Rock ', 'Scis.', 'Paper', 'Rock ', 'Scis.', 'Paper'))

        # Highlights the most probable move
        def highlight(rock, scissors, paper):
            mx = max(rock, scissors, paper)
            rock_prob_em = '[{:6.4f}]'.format(rock) if rock == mx else '{:^8.4f}'.format(rock)
            scissors_prob_em = '[{:6.4f}]'.format(scissors) if scissors == mx else '{:^8.4f}'.format(scissors)
            paper_prob_em = '[{:6.4f}]'.format(paper) if paper == mx else '{:^8.4f}'.format(paper)
            return [rock_prob_em, scissors_prob_em, paper_prob_em]

        # Play every test round and check whether the AI correctly picks the winning move
        win_count = 0
        for k in range(len(test_data['input'])):
            input_probs = [test_data['input'][k]]
            output_probs = [test_data['output'][k]]

            # Run the evaluation ops
            acc, predict = sess.run([accuracy, prediction], feed_dict={input: input_probs, true_output: output_probs})

            best_bet_label = np.argmax(output_probs, 1)
            best_bet_logit = np.argmax(predict, 1)
            result = 'miss'
            if best_bet_label == best_bet_logit:
                win_count += 1
                result = 'hit'

            print('{:<5} {:8} {:8} {:8}'.format(*(tuple([k+1]+highlight(*input_probs[0])))), end='')
            print('    ', end='')
            print('{:8} {:8} {:8}'.format(*tuple(highlight(*output_probs[0]))), end='')
            print('    ', end='')
            print('{:8} {:8} {:8}'.format(*tuple(highlight(*predict[0]))), end='')
            print('    ', end='')
            print('{:2}'.format(result))

        print('----------------------------------------------Evaluation done----------------------------------------------')
        print('AI record: {} wins / {} losses, win rate {:4.3f}%'.format(win_count, FLAGS.test_data - win_count, (win_count / len(test_data['input']) * 100.0)))

    print('Check whether the untrained AI can win at rock-paper-scissors')
    test()

    if not FLAGS.skip_training:
        train()
        print('After training, how well does the AI play rock-paper-scissors?')
        test()
Example #19
def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, double_q=True, noisy=False, scope="deepq", reuse=None, attack=None):
    """Creates the train function:

    Parameters
    ----------
    make_obs_ph: str -> tf.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions: int
        number of actions
    optimizer: tf.train.Optimizer
        optimizer to use for the Q-learning objective.
    grad_norm_clipping: float or None
        clip gradient norms to this value. If None no clipping is performed.
    gamma: float
        discount rate.
    double_q: bool
        if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
        In general it is a good idea to keep it enabled.
    scope: str or VariableScope
        optional scope for variable_scope.
    reuse: bool or None
        whether or not the variables should be reused. To be able to reuse the scope must be given.

    Returns
    -------
    act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
    train: (object, np.array, np.array, object, np.array, np.array) -> np.array
        optimize the error in Bellman's equation.
        See the top of the file for details.
    update_target: () -> ()
        copy the parameters from optimized Q function to the target Q function.
        See the top of the file for details.
    debug: {str: function}
        a bunch of functions to print debug data like q_values.
    """
    act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, noisy=noisy, reuse=reuse)

    with tf.variable_scope(scope, reuse=reuse):
        # set up placeholders
        obs_t_input = U.ensure_tf_input(make_obs_ph("obs_t"))
        act_t_ph = tf.placeholder(tf.int32, [None], name="action")
        rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
        obs_tp1_input = U.ensure_tf_input(make_obs_ph("obs_tp1"))
        done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
        importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")

        # q network evaluation
        q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", noisy=noisy, reuse=True)  # reuse parameters from act
        q_t = q_t.get_logits(obs_t_input.get())
        q_func_vars = U.scope_vars(U.absolute_scope_name("q_func"))

        # target q network evaluation
        q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func", noisy=noisy)
        q_tp1 = q_tp1.get_logits(obs_tp1_input.get())
        target_q_func_vars = U.scope_vars(U.absolute_scope_name("target_q_func"))

        # q scores for actions which we know were selected in the given state.
        q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)

        # compute estimate of best possible value starting from state at t + 1
        if double_q:
            q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", noisy=noisy, reuse=True)
            q_tp1_using_online_net = q_tp1_using_online_net.get_logits(obs_tp1_input.get())
            q_tp1_best_using_online_net = tf.arg_max(q_tp1_using_online_net, 1)
            q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
        else:
            q_tp1_best = tf.reduce_max(q_tp1, 1)
        q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best

        # compute RHS of bellman equation
        q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked

        # compute the error (potentially clipped)
        td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
        errors = U.huber_loss(td_error)
        weighted_error = tf.reduce_mean(importance_weights_ph * errors)
        # compute optimization op (potentially with gradient clipping)
        if grad_norm_clipping is not None:
            optimize_expr = U.minimize_and_clip(optimizer,
                                                weighted_error,
                                                var_list=q_func_vars,
                                                clip_val=grad_norm_clipping)
        else:
            optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)

        # update_target_fn will be called periodically to copy Q network to target Q network
        update_target_expr = []
        for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
                                   sorted(target_q_func_vars, key=lambda v: v.name)):
            update_target_expr.append(var_target.assign(var))
        update_target_expr = tf.group(*update_target_expr)

        # Create callable functions
        train = U.function(
            inputs=[
                obs_t_input,
                act_t_ph,
                rew_t_ph,
                obs_tp1_input,
                done_mask_ph,
                importance_weights_ph
            ],
            outputs=[td_error, errors],
            updates=[optimize_expr]
        )
        update_target = U.function([], [], updates=[update_target_expr])

        q_values = U.function([obs_t_input], q_t)
        ################## Vahid's Work ###################
        #U.load_state(model_path)

        if attack is not None:
            if attack == 'fgsm':
                #def wrapper(x):
                #    return q_func(x, num_actions, scope="target_q_func", reuse=True, concat_softmax=True, noisy=noisy)
                adversary = FastGradientMethod(q_func(obs_tp1_input.get(), num_actions, scope="target_q_func", reuse=True, concat_softmax=True, noisy=noisy), sess=U.get_session())
                adv_observations = adversary.generate(obs_tp1_input.get(), eps=1.0/255.0,
                                                      clip_min=0, clip_max=1.0) * 255.0
            elif attack == 'iterative':
                def wrapper(x):
                    return q_func(x, num_actions, scope="q_func", reuse=True, concat_softmax=True)
                adversary = BasicIterativeMethod(CallableModelWrapper(wrapper, 'probs'), sess=U.get_session())
                adv_observations = adversary.generate(obs_tp1_input.get(), eps=1.0/255.0,
                                                      clip_min=0, clip_max=1.0) * 255.0
            elif attack == 'cwl2':
                def wrapper(x):
                    return q_func(x, num_actions, scope="q_func", reuse=True)
                adversary = CarliniWagnerL2(CallableModelWrapper(wrapper, 'logits'), sess=U.get_session())
                cw_params = {'binary_search_steps': 1,
                             'max_iterations': 100,
                             'learning_rate': 0.1,
                             'initial_const': 10,
                             'clip_min': 0,
                             'clip_max': 1.0}
                adv_observations = adversary.generate(obs_tp1_input.get(), **cw_params) * 255.0

            craft_adv_obs = U.function(inputs=[obs_tp1_input],
                            outputs=adv_observations,                       
                            updates=[update_target_expr])

        if attack is None:
            craft_adv_obs = None

        return act_f, train, update_target, {'q_values': q_values}, craft_adv_obs
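For context, a minimal call sketch (hypothetical shapes and model; assumes the same-era baselines helpers used above, e.g. U.BatchInput):

act, train, update_target, debug, craft_adv_obs = build_train(
    make_obs_ph=lambda name: U.BatchInput((84, 84, 4), name=name),
    q_func=my_q_network,  # (obs, num_actions, scope, noisy, reuse) -> model
    num_actions=n_actions,
    optimizer=tf.train.AdamOptimizer(learning_rate=1e-4),
    grad_norm_clipping=10,
    gamma=0.99,
    double_q=True,
    attack=None)  # craft_adv_obs comes back as None when no attack is requested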
Example #20
                                           global_step,
                                           Train_Num // batchsize,
                                           0.99,
                                           staircase=False)
trainStep = tf.train.GradientDescentOptimizer(learning_rate)\
                    .minimize(cross_entropy, global_step=global_step)

# Set up exponential moving averages and apply them to all trainable variables
ema_avg = tf.train.ExponentialMovingAverage(0.99, global_step)
emaStep = ema_avg.apply(tf.trainable_variables())
# Predictions computed with the moving-averaged weights, shape n * 10
y_pre_ma = infer(x, ema_avg, tf.nn.sigmoid, layers)
# y_pre_ma = infer(x, ema_avg, tf.nn.relu, layer1W, layer1B, outputW, outputB)
# Accuracy computed from the moving-average predictions
# correct_pre = tf.equal(tf.arg_max(y_pre, 1), y_lab)
correct_pre = tf.equal(tf.arg_max(y_pre_ma, 1), y_lab)
accuracy = tf.reduce_mean(tf.cast(correct_pre, tf.float32))

# Group the train step and the EMA update into a single training op
group_operaion = tf.group(trainStep, emaStep)

train_times = 50000
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    start = time.perf_counter()
    for i in range(train_times):
        randIdx = np.random.randint(0, Train_Num, batchsize)
        sess.run(group_operaion,
                 feed_dict={
                     x: x_train[randIdx, :],
Example #21
# Cross entropy cost/loss
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

# Launch graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(2001):
        sess.run(optimizer, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}))

    a = sess.run(hypothesis, feed_dict={X: [[1, 11, 7, 9]]})
    print(a, sess.run(tf.arg_max(a, 1)))

    print('------------------------')

    b = sess.run(hypothesis, feed_dict={X: [[1, 3, 4, 3]]})
    print(b, sess.run(tf.arg_max(b, 1)))

    print('------------------------')

    c = sess.run(hypothesis, feed_dict={X: [[1, 1, 0, 1]]})
    print(c, sess.run(tf.arg_max(c, 1)))

    print('------------------------')

    all = sess.run(hypothesis,
                   feed_dict={X: [[1, 11, 7, 9], [1, 3, 4, 3], [1, 1, 0, 1]]})
Example #22
    for i in range(5000):
        sess.run(train_step,
                 feed_dict={
                     x_zdy: train_image,
                     y_zdy: train_label
                 })

        if i % 50 == 0:
            loss_run = sess.run(loss,
                                feed_dict={
                                    x_zdy: train_image,
                                    y_zdy: train_label
                                })
            print(loss_run)
            loss_record.append(loss_run)
            find_equal = tf.equal(tf.arg_max(y_zdy, 1),
                                  tf.arg_max(mlp_pred, 1))
            pred = tf.reduce_mean(tf.cast(find_equal, tf.float32))
            accu_pred = sess.run(pred,
                                 feed_dict={
                                     x_zdy: test_image,
                                     y_zdy: test_label
                                 })
            print("Test Accuracy:", accu_pred)
            accuracy_record.append(accu_pred)
    # get predictions on test_image
    ### Your Code Here ###

    find_equal = tf.equal(tf.arg_max(y_zdy, 1), tf.arg_max(mlp_pred, 1))
    pred = tf.reduce_mean(tf.cast(find_equal, tf.float32))
Example #23
sess = tf.Session()
sess.run(init)
#graph_writer = tf.summary.FileWriter("./logs", sess.graph)

mse_history = []
accuracy_history = []

for epoch in range(training_epochs):
    sess.run(training_step, feed_dict={input_x: train_x, output_y: train_y})
    cost = sess.run(cost_function,
                    feed_dict={
                        input_x: train_x,
                        output_y: train_y
                    })
    cost_history = np.append(cost_history, cost)
    correct_prediction = tf.equal(tf.arg_max(model, 1),
                                  tf.arg_max(output_y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    predict_y = sess.run(model, feed_dict={input_x: test_x})
    mse = tf.reduce_mean(tf.square(predict_y - test_y))
    mse_ = sess.run(mse)
    mse_history.append(mse_)
    accuracy = (sess.run(accuracy,
                         feed_dict={
                             input_x: train_x,
                             output_y: train_y
                         }))
    accuracy_history.append(accuracy)

    print("epoch: ", epoch, " - ", "cost: ", cost, "- MSE: ", mse_,
Example #24
                    'bias': m_b
                }
            learning_rate = learning_rate * 0.9
            loss = federated_eval(model, federated_train_data)
            print('round {}, loss={}'.format(round_num, loss))
            print(time.time() - start_time)
            '''model = federated_train(model, learning_rate, federated_train_data)
            learning_rate = learning_rate * 0.9
            loss = federated_eval(model, federated_train_data)
            print('round {}, loss={}'.format(round_num, loss))'''

        m = np.dot(test_images, np.asarray(model['weights']))
        test_result = m + np.asarray(model['bias'])
        y = tf.nn.softmax(test_result)
        correct_prediction = tf.equal(
            tf.argmax(y, 1), tf.arg_max(test_labels_onehot, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        group_shapley_value.append(accuracy.numpy())
        print("combination finished ", time.time() - start_time)
        print(str(ss) + "\t" +
              str(group_shapley_value[len(group_shapley_value) - 1]))

    agent_shapley = []
    for index in range(NUM_AGENT):
        shapley = 0.0
        for j in all_sets:
            if index in j:
                remove_list_index = remove_list_indexed(index, j, all_sets)
                if remove_list_index != -1:
                    shapley += (group_shapley_value[shapley_list_indexed(j, all_sets)] - group_shapley_value[
                        remove_list_index]) / (comb(NUM_AGENT - 1, len(all_sets[remove_list_index])))
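For reference, the inner loop accumulates the marginal terms of the Shapley value

$$\phi_i = \frac{1}{n} \sum_{S \subseteq N \setminus \{i\}} \binom{n-1}{|S|}^{-1} \big( v(S \cup \{i\}) - v(S) \big),$$

with the coalition value $v(S)$ given by test accuracy; the $1/n$ normalization, if applied, lies outside the shown fragment.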
Example #25
    def _build_outputs(self, images, labels, mode):
        is_training = mode == mode_keys.TRAIN
        model_outputs = {}

        if 'anchor_boxes' in labels:
            anchor_boxes = labels['anchor_boxes']
        else:
            anchor_boxes = anchor.Anchor(
                self._params.architecture.min_level,
                self._params.architecture.max_level,
                self._params.anchor.num_scales,
                self._params.anchor.aspect_ratios,
                self._params.anchor.anchor_size,
                images.get_shape().as_list()[1:3]).multilevel_boxes

            batch_size = tf.shape(images)[0]
            for level in anchor_boxes:
                anchor_boxes[level] = tf.tile(
                    tf.expand_dims(anchor_boxes[level], 0), [batch_size, 1, 1])

        backbone_features = self._backbone_fn(images, is_training)
        fpn_features = self._fpn_fn(backbone_features, is_training)

        rpn_score_outputs, rpn_box_outputs = self._rpn_head_fn(
            fpn_features, is_training)
        model_outputs.update({
            'rpn_score_outputs': rpn_score_outputs,
            'rpn_box_outputs': rpn_box_outputs,
        })
        # Run the RPN layer to get bbox coordinates for first frcnn layer.
        current_rois, _ = self._generate_rois_fn(rpn_box_outputs,
                                                 rpn_score_outputs,
                                                 anchor_boxes,
                                                 labels['image_info'][:, 1, :],
                                                 is_training)

        cascade_ious = [-1]
        if self._cascade_iou_thresholds is not None:
            cascade_ious = cascade_ious + self._cascade_iou_thresholds
        next_rois = current_rois
        # Stores the class predictions for each RCNN head.
        all_class_outputs = []
        for cascade_num, iou_threshold in enumerate(cascade_ious):
            # In cascade RCNN we want the higher layers to have different regression
            # weights as the predicted deltas become smaller and smaller.
            regression_weights = self._cascade_layer_to_weights[cascade_num]
            current_rois = next_rois
            (class_outputs, box_outputs, model_outputs, matched_gt_boxes,
             matched_gt_classes, matched_gt_indices,
             current_rois) = self._run_frcnn_head(fpn_features, current_rois,
                                                  labels, is_training,
                                                  model_outputs, cascade_num,
                                                  iou_threshold,
                                                  regression_weights)
            all_class_outputs.append(class_outputs)

            # Generate the next rois if we are running another cascade.
            # Since bboxes are predicted for every class
            # (if `class_agnostic_bbox_pred` is false) this takes the best class
            # bbox and converts it to the correct format to be used for roi
            # operations.
            if is_training:
                correct_class = matched_gt_classes
            else:
                correct_class = tf.arg_max(class_outputs, dimension=-1)

            next_rois = self._box_outputs_to_rois(
                box_outputs, current_rois, correct_class,
                labels['image_info'][:, 1:2, :], regression_weights)

        if not is_training:
            tf.logging.info('(self._class_agnostic_bbox_pred): {}'.format(
                self._class_agnostic_bbox_pred))
            if self._cascade_class_ensemble:
                class_outputs = tf.add_n(all_class_outputs) / len(
                    all_class_outputs)
            # Post processing/NMS is done here for final boxes. Note NMS is done
            # before to generate proposals of the output of the RPN head.
            # The background class is also removed here.
            detection_results = self._generate_detections_fn(
                box_outputs,
                class_outputs,
                current_rois,
                labels['image_info'][:, 1:2, :],
                regression_weights,
                bbox_per_class=(not self._class_agnostic_bbox_pred))
            model_outputs.update(detection_results)

        if not self._include_mask:
            return model_outputs

        if is_training:
            current_rois, classes, mask_targets = self._sample_masks_fn(
                current_rois, matched_gt_boxes, matched_gt_classes,
                matched_gt_indices, labels['gt_masks'])
            mask_targets = tf.stop_gradient(mask_targets)

            classes = tf.cast(classes, dtype=tf.int32)

            model_outputs.update({
                'mask_targets': mask_targets,
                'sampled_class_targets': classes,
            })
        else:
            current_rois = detection_results['detection_boxes']
            classes = tf.cast(detection_results['detection_classes'],
                              dtype=tf.int32)

        mask_roi_features = spatial_transform_ops.multilevel_crop_and_resize(
            fpn_features, current_rois, output_size=14)
        mask_outputs = self._mrcnn_head_fn(mask_roi_features, classes,
                                           is_training)

        if is_training:
            model_outputs.update({
                'mask_outputs': mask_outputs,
            })
        else:
            model_outputs.update(
                {'detection_masks': tf.nn.sigmoid(mask_outputs)})

        return model_outputs
Example #26
    # feed_dict can substitute any tensor.
    batch_xs, batch_ys = mnist.train.next_batch(
        100)  # pick 100 random examples per step, i.e. stochastic gradient descent (SGD)
    sess.run(train_step, feed_dict={
        x: batch_xs,
        y_real: batch_ys
    })  # actually run train_step, replacing the placeholders with the fed data

    # Evaluate the model's performance
    if i % 100 == 0:
        # evaluate the model every 100 training steps
        '''
        First, find the correctly predicted labels. tf.argmax is very useful: it returns the index of the largest entry of a tensor along a given axis. Since the label vectors consist of 0s and 1s, the index of the 1 is the class label; tf.argmax(y, 1) is the label the model predicts for an input x, tf.argmax(y_, 1) is the true label, and tf.equal checks whether the prediction matches the truth (matching indices mean a match).
        '''
        correct_prediction = tf.equal(tf.argmax(y, 1),
                                      tf.arg_max(y_real, 1))  # compare predictions against the true labels
        '''
        This yields a boolean array. To compute the accuracy, cast the booleans to floats (correct = 1, wrong = 0) and take the mean: [True, False, True, True] becomes [1, 0, 1, 1], giving 0.75.
        '''
        accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                          "float"))  # count correct predictions; the mean is the accuracy
        #print(accuracy)
        print(
            sess.run(accuracy,
                     feed_dict={
                         x: mnist.test.images,
                         y_real: mnist.test.labels
                     }))  # (author: not sure why this line is written this way.)
'''
This is the simplest kind of handwritten-digit recognition; next I want to build a much more complex one.
I plan to use a convolutional neural network to raise the training accuracy; the model above only uses tf's softmax classifier, which is why the accuracy is only 91%. A convolutional network can lift the recognition accuracy to about 99%.
Example #27
def main(argv=None):  # pylint: disable=unused-argument
    if FLAGS.self_test:
        print('Running self-test.')
        train_data, train_labels = fake_data(256)
        validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
        test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
        num_epochs = 1
    else:
        # Get the data.
        train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
        train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
        test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
        test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')

        # Extract it into numpy arrays.
        train_data = extract_data(train_data_filename, 60000)
        train_labels = extract_labels(train_labels_filename, 60000)
        test_data = extract_data(test_data_filename, 10000)
        test_labels = extract_labels(test_labels_filename, 10000)

        # Generate a validation set.
        validation_data = train_data[:VALIDATION_SIZE, ...]
        validation_labels = train_labels[:VALIDATION_SIZE]
        train_data = train_data[VALIDATION_SIZE:, ...]
        train_labels = train_labels[VALIDATION_SIZE:]
        nrof_training_examples = train_labels.shape[0]
        nrof_changed_labels = int(nrof_training_examples * NOISE_FACTOR)
        shuf = np.arange(0, nrof_training_examples)
        np.random.shuffle(shuf)
        change_idx = shuf[0:nrof_changed_labels]
        train_labels[change_idx] = (
            train_labels[change_idx] +
            np.random.randint(1, 9, size=(nrof_changed_labels, ))) % NUM_LABELS
        num_epochs = NUM_EPOCHS
    train_size = train_labels.shape[0]

    # This is where training samples and labels are fed to the graph.
    # These placeholder nodes will be fed a batch of training data at each
    # training step using the {feed_dict} argument to the Run() call below.
    train_data_node = tf.placeholder(data_type(),
                                     shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE,
                                            NUM_CHANNELS))
    train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE, ))
    eval_data = tf.placeholder(data_type(),
                               shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE,
                                      NUM_CHANNELS))

    # The variables below hold all the trainable weights. They are passed an
    # initial value which will be assigned when we call:
    # {tf.global_variables_initializer().run()}
    conv1_weights = tf.Variable(
        tf.truncated_normal(
            [5, 5, NUM_CHANNELS, 32],  # 5x5 filter, depth 32.
            stddev=0.1,
            seed=SEED,
            dtype=data_type()))
    conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
    conv2_weights = tf.Variable(
        tf.truncated_normal([5, 5, 32, 64],
                            stddev=0.1,
                            seed=SEED,
                            dtype=data_type()))
    conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
    fc1_weights = tf.Variable(  # fully connected, depth 512.
        tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
                            stddev=0.1,
                            seed=SEED,
                            dtype=data_type()))
    fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
    fc2_weights = tf.Variable(
        tf.truncated_normal([512, NUM_LABELS],
                            stddev=0.1,
                            seed=SEED,
                            dtype=data_type()))
    fc2_biases = tf.Variable(
        tf.constant(0.1, shape=[NUM_LABELS], dtype=data_type()))

    # We will replicate the model structure for the training subgraph, as well
    # as the evaluation subgraphs, while sharing the trainable parameters.
    def model(data, train=False):
        """The Model definition."""
        # 2D convolution, with 'SAME' padding (i.e. the output feature map has
        # the same size as the input). Note that {strides} is a 4D array whose
        # shape matches the data layout: [image index, y, x, depth].
        conv = tf.nn.conv2d(data,
                            conv1_weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME')
        # Bias and rectified linear non-linearity.
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
        # Max pooling. The kernel size spec {ksize} also follows the layout of
        # the data. Here we have a pooling window of 2, and a stride of 2.
        pool = tf.nn.max_pool(relu,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME')
        conv = tf.nn.conv2d(pool,
                            conv2_weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME')
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
        pool = tf.nn.max_pool(relu,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME')
        # Reshape the feature map cuboid into a 2D matrix to feed it to the
        # fully connected layers.
        pool_shape = pool.get_shape().as_list()  #pylint: disable=no-member
        reshape = tf.reshape(
            pool,
            [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
        # Fully connected layer. Note that the '+' operation automatically
        # broadcasts the biases.
        hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)

        # Add a 50% dropout during training only. Dropout also scales
        # activations such that no rescaling is needed at evaluation time.
        if train:
            hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
        return tf.matmul(hidden, fc2_weights) + fc2_biases

    # Training computation: logits + cross-entropy loss.
    logits = model(train_data_node, True)

    # t: observed noisy labels
    # q: estimated class probabilities (output from softmax)
    # z: argmax of q
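    # The modified cross-entropy below mixes the observed labels t with the
    # model's own hard predictions z:
    #   loss_i = -sum_j (BETA * t_ij + (1 - BETA) * z_ij) * log(q_ij)
    # which appears to be the "hard bootstrapping" loss of Reed et al. (2014),
    # "Training Deep Neural Networks on Noisy Labels with Bootstrapping".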

    t = tf.one_hot(train_labels_node, NUM_LABELS)
    q = tf.nn.softmax(logits)
    z_idx = tf.arg_max(q, dimension=1)  # index of the model's most confident class
    z = tf.one_hot(z_idx, NUM_LABELS)
    #cross_entropy = -tf.reduce_sum(t*tf.log(q),reduction_indices=1)
    cross_entropy = -tf.reduce_sum(
        (BETA * t + (1 - BETA) * z) * tf.log(q), reduction_indices=1)

    loss = tf.reduce_mean(cross_entropy)

    #     loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    #         logits, train_labels_node))

    # L2 regularization for the fully connected parameters.
    regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                    tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
    # Add the regularization term to the loss.
    loss += 5e-4 * regularizers

    # Optimizer: set up a variable that's incremented once per batch and
    # controls the learning rate decay.
    batch = tf.Variable(0, dtype=data_type())
    # Decay once per epoch, using an exponential schedule starting at 0.01.
    learning_rate = tf.train.exponential_decay(
        0.01,  # Base learning rate.
        batch * BATCH_SIZE,  # Current index into the dataset.
        train_size,  # Decay step.
        0.95,  # Decay rate.
        staircase=True)
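    # With staircase=True this evaluates to
    #   lr = 0.01 * 0.95 ** floor(batch * BATCH_SIZE / train_size),
    # i.e. the learning rate is multiplied by 0.95 once per epoch.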
    # Use simple momentum for the optimization.
    optimizer = tf.train.MomentumOptimizer(learning_rate,
                                           0.9).minimize(loss,
                                                         global_step=batch)

    # Predictions for the current training minibatch.
    train_prediction = tf.nn.softmax(logits)

    # Predictions for the test and validation, which we'll compute less often.
    eval_prediction = tf.nn.softmax(model(eval_data))

    # Small utility function to evaluate a dataset by feeding batches of data to
    # {eval_data} and pulling the results from {eval_predictions}.
    # Saves memory and enables this to run on smaller GPUs.
    def eval_in_batches(data, sess):
        """Get all predictions for a dataset by running it in small batches."""
        size = data.shape[0]
        if size < EVAL_BATCH_SIZE:
            raise ValueError("batch size for evals larger than dataset: %d" %
                             size)
        predictions = np.ndarray(shape=(size, NUM_LABELS), dtype=np.float32)
        for begin in xrange(0, size, EVAL_BATCH_SIZE):
            end = begin + EVAL_BATCH_SIZE
            if end <= size:
                predictions[begin:end, :] = sess.run(
                    eval_prediction,
                    feed_dict={eval_data: data[begin:end, ...]})
            else:
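                # The final partial batch re-runs the last EVAL_BATCH_SIZE rows
                # and keeps only the not-yet-filled tail; the leading rows
                # overlap predictions that were already written.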
                batch_predictions = sess.run(
                    eval_prediction,
                    feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
                predictions[begin:, :] = batch_predictions[begin - size:, :]
        return predictions

    # Create a local session to run the training.
    start_time = time.time()
    with tf.Session() as sess:
        # Run all the initializers to prepare the trainable parameters.
        tf.global_variables_initializer().run()  #pylint: disable=no-member
        print('Initialized!')
        # Loop through training steps.
        for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
            # Compute the offset of the current minibatch in the data.
            # Note that we could use better randomization across epochs.
            offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
            batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
            batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
            # This dictionary maps the batch data (as a numpy array) to the
            # node in the graph it should be fed to.
            feed_dict = {
                train_data_node: batch_data,
                train_labels_node: batch_labels
            }
            # Run the graph and fetch some of the nodes.
            _, l, lr, predictions = sess.run(
                [optimizer, loss, learning_rate, train_prediction],
                feed_dict=feed_dict)
            if step % EVAL_FREQUENCY == 0:
                elapsed_time = time.time() - start_time
                start_time = time.time()
                print('Step %d (epoch %.2f), %.1f ms' %
                      (step, float(step) * BATCH_SIZE / train_size,
                       1000 * elapsed_time / EVAL_FREQUENCY))
                print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
                print('Minibatch error: %.1f%%' %
                      error_rate(predictions, batch_labels))
                print('Validation error: %.1f%%' % error_rate(
                    eval_in_batches(validation_data, sess), validation_labels))
                sys.stdout.flush()
        # Finally print the result!
        test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
        print('Test error: %.1f%%' % test_error)
        if FLAGS.self_test:
            print('test_error', test_error)
            assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
                test_error, )
def model(X_train,
          Y_train,
          X_test,
          Y_test,
          learning_rate=0.009,
          num_epochs=100,
          minibatch_size=64,
          print_cost=True,
          isPlot=True):

    ops.reset_default_graph()
    tf.set_random_seed(1)
    seed = 3
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []
    # create placeholders matching the current input dimensions
    X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
    # initialize the parameters
    parameters = initialize_parameters()
    # forward propagation
    Z3 = forward_propagation(X, parameters)
    # compute the cost
    cost = compute_cost(Z3, Y)

    # backward propagation: the framework already implements backprop, so we
    # only need to choose an optimizer
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cost)

    # op that initializes all global variables
    init = tf.global_variables_initializer()

    # start the session
    with tf.Session() as sess:

        # run the initializer
        sess.run(init)
        # iterate over the dataset
        for epoch in range(num_epochs):
            minibatch_cost = 0
            num_minibatches = int(m / minibatch_size)
            seed = seed + 1
            minibatches = cnn_utils.random_mini_batches(
                X_train, Y_train, minibatch_size, seed)

            # process each minibatch
            for minibatch in minibatches:
                (minibatch_X, minibatch_Y) = minibatch
                # minimize the cost of this minibatch (one optimizer step)
                _, temp_cost = sess.run([optimizer, cost],
                                        feed_dict={
                                            X: minibatch_X,
                                            Y: minibatch_Y
                                        })
                minibatch_cost += temp_cost / num_minibatches

            if print_cost:
                if epoch % 5 == 0:
                    print("Cost after epoch " + str(epoch) + ": " +
                          str(minibatch_cost))

            if epoch % 5 == 0:
                costs.append(minibatch_cost)

        if isPlot:
            plt.plot(np.squeeze(costs))
            plt.ylabel('cost')
            plt.xlabel('epochs (per 5)')
            plt.title("learning rate = " + str(learning_rate))
            plt.show()

        # make predictions on the data
        # predicted class = index of the largest value in Z3
        predict_op = tf.arg_max(Z3, 1)
        correct_prediction = tf.equal(predict_op, tf.arg_max(Y, 1))

        # compute the accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print("accuracy tensor = " + str(accuracy))  # prints the Tensor, not its value

        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("训练集准确度: " + str(train_accuracy))
        print("测试集准确度: " + str(test_accuracy))

        return (train_accuracy, test_accuracy, parameters)
Exemple #29
0
x = tf.placeholder(tf.float32, [None, 3072])  # flattened CIFAR-10 images
y = tf.placeholder(tf.int64, [None])  # integer class labels
w = tf.get_variable('w', [x.get_shape()[-1], 10],
                    initializer=tf.random_normal_initializer(
                        0, 1))  # 3072x10 weights, randomly drawn from N(0, 1)
b = tf.get_variable('b', [10],
                    initializer=tf.constant_initializer(0.0))  #常量初始化

#compute the output: inner product
y_ = tf.matmul(x, w) + b
#apply softmax for activation; after the matmul the result has shape [None, 10]
p_y = tf.nn.softmax(y_)

#loss option 1: squared error on one-hot labels (multi-class: 5 -> [0,0,0,0,0,1,0,0,0,0])
#y_one_hot = tf.one_hot(y, 10, dtype=tf.float32)
#loss = tf.reduce_mean(tf.square(y_one_hot - p_y))
#loss option 2: cross-entropy
loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
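#note: sparse_softmax_cross_entropy applies softmax internally, so it takes the
#raw logits y_ rather than p_y; feeding already-softmaxed probabilities would
#apply softmax twice and distort the loss.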
#the label with the largest value in each row of y_ is the predicted class
predict = tf.arg_max(y_, 1)
correct_prediction = tf.equal(predict, y)  #per-example correctness
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))  #accuracy

#optimization step (Adam)
with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)


#CIFAR-10 data-handling class
class CifarData:
    def __init__(self, filenames,
                 need_shuffle):  #need_shuffle: shuffle the training-set order to reduce ordering dependence and improve generalization
        # read in the data
        all_data = []
        all_labels = []
# flatten the 3-D feature map into a 1-D vector
flatten = tf.layers.flatten(pool1)
print("my_log: flatten.shape------>", flatten.shape, '\n')

# fully connected layer mapping to a 400-dimensional feature vector
fc = tf.layers.dense(flatten, 400, activation=tf.nn.relu)
print("my_log: fc.shape------>", fc.shape, '\n')

# add dropout to reduce overfitting
dropout_fc = tf.layers.dropout(fc, dropout_placeholdr)

# output layer, not yet activated (raw logits)
logits = tf.layers.dense(dropout_fc, num_classes)
print("my_log: logits.shape------>", logits.shape, '\n')

predicted_labels = tf.arg_max(logits, 1)
# note: with axis 1, arg_max returns, for each row, the column index of the
# largest value; axis 0 would instead return, for each column, the row index.
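# A quick sanity check (hypothetical values):
#   logits_demo = tf.constant([[1., 9., 3.], [7., 2., 5.]])
#   tf.arg_max(logits_demo, 0)  # -> [1, 0, 1]: per column, row index of the max
#   tf.arg_max(logits_demo, 1)  # -> [1, 0]:    per row, column index of the max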

# define the loss via cross-entropy
losses = tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.one_hot(labels_placeholder, num_classes),
    logits=logits
)
# mean loss over the batch
mean_loss = tf.reduce_mean(losses)

# define the optimizer and the scalar loss it should minimize
optimizer = tf.train.AdamOptimizer(learning_rate=1e-2).minimize(mean_loss)
# my_note: key part