def min_goal(x):
    # Return the index of the point in x with the smallest objective value.
    m = []
    for i in range(len(x)):
        m.append(function.func1(x[i]))
    # print(m.index(min(m)))
    return m.index(min(m))
def min_goal(self, x):
    m = []
    for i in range(len(x)):
        m.append(function.func1(x[i]))
    # print(m.index(min(m)))
    return m.index(min(m))
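The loop builds a list of objective values and returns the argmin. An equivalent, more idiomatic sketch uses the built-in min with a key function (this assumes, as the code above does, that function.func1(point) returns a scalar):

def min_goal(self, x):
    # Equivalent argmin written with min() and a key function.
    return min(range(len(x)), key=lambda i: function.func1(x[i]))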
import function  # imports all the functions from the function python file
import random

function.func1()
x = random.randrange(1, 1100)
print(x)
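The function module itself is not included in this snippet. Purely as a hypothetical stand-in (not the author's actual objective), func1 could be a benchmark such as the Sphere function, written here to tolerate the bare func1() smoke-test call above as well as the calls with a point elsewhere:

# function.py -- hypothetical stand-in; the real objective is not shown in this snippet.
def func1(point=None):
    # Sphere benchmark: sum of squared coordinates, works for any dimensionality.
    if point is None:  # allows the bare func1() smoke-test call above
        point = [0.0]
    return sum(v * v for v in point)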
def run(self):
    # Assumes module-level imports: time, random, tensorflow as tf,
    # matplotlib.pyplot as plt, from scipy.optimize import fsolve, and function.
    time_start = time.time()
    sess = tf.Session()
    encoder_op = self.encoder(self.array)  # encode
    decoder_op = self.decoder(encoder_op)  # decode
    y_pred = decoder_op
    y_true = self.X

    ## Define the cost function and the optimizer
    # Least-squares cost and its square root (RMSE)
    cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
    rmse = cost ** 0.5
    # Adam optimizer (learning rate 1)
    optimizer = tf.train.AdamOptimizer(1).minimize(rmse)

    # Pick the variable initializer that matches the installed TensorFlow version.
    if int(tf.__version__.split('.')[1]) < 12 and int(tf.__version__.split('.')[0]) < 1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)

    encoder_result = sess.run(encoder_op, feed_dict={self.X: self.array})

    total_batch = int(50 / self.batch_size)  # number of batches per epoch
    for epoch in range(self.training_epochs):  # number of training epochs
        for i in range(total_batch):  # batch passes over the training set per epoch
            # batch_xs, batch_ys = tf.train.batch([y_pred, y_true], batch_size=batch_size)
            batch = self.get_random_block_from_data(self.array, self.batch_size)
            _, c = sess.run([optimizer, rmse], feed_dict={self.X: batch})
            # print(c)
    print("Optimization Finished!")

    # Read in the coordinates of n sampled points and evaluate the objective at each.
    X = []
    Y = []
    Z = []
    for i in range(6):
        a = random.randint(0, 30)
        X.append(encoder_result[a][0])
        Y.append(encoder_result[a][1])
        Z.append(function.func1(encoder_result[a]))

    def f(x):
        # Gradient of the fitted quadratic surface; fsolve finds its stationary point.
        a, b, c, d, e, f = self.FindFunction(6, X, Y, Z)
        x2 = x[1]
        x1 = x[0]
        return [2 * a * x1 + b * x2 + d, b * x1 + 2 * c * x2 + e]

    result = fsolve(f, [1, 1])
    # print(result)

    decoder_result = sess.run(decoder_op, feed_dict={self.X: self.array})
    # print('output after decoding back to the original dimensionality')
    # print(decoder_result)

    # Replace the last encoded point with the stationary point of the fitted surface.
    encoder_result[-1] = result
    # print(encoder_result)

    a = []
    for i in range(len(encoder_result)):
        a.append(encoder_result[i][0])
    b = []
    for i in range(len(encoder_result)):
        b.append(encoder_result[i][1])
    plt.scatter(a, b)

    # print('position')
    # print(self.min_goal(encoder_result))
    # The final resulting particle
    decoder_result[self.min_goal(encoder_result)]
    # print('30-dimensional result')
    # print(decoder_result[self.min_goal(encoder_result)])
    # a = random.choice(self.array)
    # print(decoder_result[-1])

    # Swap a random particle in the population for the decoded result.
    a = random.randint(0, 30)
    del self.array[a]
    e = decoder_result[-1].tolist()
    print(e)
    m = function.func1(e)
    print(m)
    self.array.append(e)
    # print('final list')
    # print(self.array)
    # print(type(e))
    # plt.show()

    time_end = time.time()
    # print('time cost_autoencoder', time_end - time_start, 's')
    tf.reset_default_graph()
    return self.array
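run() also depends on self.FindFunction, which is not shown. Judging from the gradient system handed to fsolve, it fits a quadratic surface z = a*x1^2 + b*x1*x2 + c*x2^2 + d*x1 + e*x2 + f through the sampled points and returns the six coefficients. A minimal sketch under that assumption (written as a method body, with numpy assumed available):

import numpy as np

def FindFunction(self, n, X, Y, Z):
    # Hypothetical sketch: fit z = a*x1^2 + b*x1*x2 + c*x2^2 + d*x1 + e*x2 + f
    # through n sampled points by solving for the six coefficients in the
    # least-squares sense.
    A = np.array([[X[i] ** 2, X[i] * Y[i], Y[i] ** 2, X[i], Y[i], 1.0]
                  for i in range(n)])
    coeffs, *_ = np.linalg.lstsq(A, np.array(Z, dtype=float), rcond=None)
    return coeffs  # a, b, c, d, e, f

With six distinct points the system is exactly determined in general; least squares also copes with near-degenerate samples where two random indices coincide.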