import numpy as np
import tensorflow as tf
from random import seed

import generator

# `seed_counter`, `radius`, `step` and `dec` are assumed to be module-level
# settings defined elsewhere in this module.


def training_rms(losses, m, iterations, learning_rates, decays):
    tf.reset_default_graph()
    rms_results = {}
    for learning_rate in learning_rates:
        rms_results[learning_rate] = {}
        for decay in decays:
            # Reset the sample buffer per decay so the statistics below
            # describe this (learning_rate, decay) pair only.
            temp = np.empty(0, np.float32)
            for loss in losses.keys():
                for dim in losses[loss][1].keys():
                    seed(seed_counter)
                    function_generator = generator.FunGen(
                        dim, losses[loss][1][dim], loss)
                    functions = function_generator.generate(losses[loss][0])
                    for f in functions:
                        r = radius
                        points = generator.generate_points(dim, m, r, f[2])
                        with tf.variable_scope(loss, reuse=tf.AUTO_REUSE):
                            global_step = tf.Variable(
                                0.0,
                                name='rms_train_gs' + str(dim),
                                trainable=False,
                                dtype=tf.float32)
                            if step > 0:
                                optimizer = tf.train.RMSPropOptimizer(
                                    learning_rate=tf.train.exponential_decay(
                                        learning_rate, global_step, step, dec,
                                        name='train_rms'),
                                    decay=decay).minimize(
                                        f[0], global_step=global_step)
                            else:
                                optimizer = tf.train.RMSPropOptimizer(
                                    learning_rate=learning_rate,
                                    decay=decay).minimize(f[0])
                        for point in points:
                            with tf.Session() as sess:
                                sess.run(tf.global_variables_initializer())
                                sess.run(function_generator.x.assign(point))
                                # Normalization: divide by the value at the
                                # starting point so runs are comparable.
                                starting_value = sess.run(f[0])
                                f_temp = tf.divide(
                                    f[0], tf.constant(starting_value))
                                for i in range(iterations):
                                    _, f_curr = sess.run([optimizer, f_temp])
                                    temp = np.append(temp, f_curr)
            rms_results[learning_rate][decay] = {
                'best': np.nanmin(temp).item(),
                'worst': np.nanmax(temp).item(),
                'mean': np.nanmean(temp).item(),
                'median': np.nanmedian(temp).item()
            }
    return rms_results
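# Usage sketch (not from the original module): the shape of `losses` is
# inferred from how training_rms indexes it, i.e.
# losses[name] = (proto_function, {dimension: function_count}).
# `quadratic_proto` is a hypothetical prototype accepted by FunGen.generate.
#
#     losses = {'quadratic': (quadratic_proto, {2: 5, 10: 5})}
#     stats = training_rms(losses, m=10, iterations=100,
#                          learning_rates=[0.1, 0.01], decays=[0.9, 0.99])
#     print(stats[0.01][0.9]['median'])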
def test_momentum(losses, m, iterations, learning_rate, metric):
    tf.reset_default_graph()
    momentum_results = np.empty(shape=(0, iterations), dtype=np.float32)
    for loss in losses.keys():
        for dim in losses[loss][1].keys():
            seed(seed_counter)
            tf.reset_default_graph()
            function_generator = generator.FunGen(dim, losses[loss][1][dim],
                                                  loss)
            functions = function_generator.generate(losses[loss][0])
            for f in functions:
                r = radius
                points = generator.generate_points(dim, m, r, f[2])
                with tf.variable_scope(loss, reuse=tf.AUTO_REUSE):
                    global_step = tf.Variable(
                        0.0,
                        name='momentum_test_gs' + str(dim),
                        trainable=False,
                        dtype=tf.float32)
                    if step > 0:
                        optimizer = tf.train.MomentumOptimizer(
                            learning_rate=tf.train.exponential_decay(
                                # np.float32() also accepts a plain Python
                                # float, unlike an .astype() call, which
                                # only works on NumPy values.
                                np.float32(learning_rate), global_step,
                                step, dec, name='test_momentum'),
                            momentum=0.999,
                            use_nesterov=True).minimize(
                                f[0], global_step=global_step)
                    else:
                        optimizer = tf.train.MomentumOptimizer(
                            learning_rate=learning_rate,
                            momentum=0.999,
                            use_nesterov=True).minimize(f[0])
                for point in points:
                    with tf.Session() as sess:
                        sess.run(tf.global_variables_initializer())
                        sess.run(function_generator.x.assign(point))
                        # Normalization: divide by the value at the
                        # starting point so runs are comparable.
                        starting_value = sess.run(f[0])
                        f_temp = tf.divide(f[0], tf.constant(starting_value))
                        temp = np.empty(iterations)
                        for i in range(iterations):
                            _, f_curr = sess.run([optimizer, f_temp])
                            temp[i] = f_curr
                        momentum_results = np.append(momentum_results,
                                                     np.array([temp]), axis=0)
    return metric(momentum_results, axis=0)
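# For reference, tf.train.exponential_decay as used above computes
# lr * dec ** (global_step / step). A plain-Python sketch of the schedule,
# with the module-level `step` and `dec` treated as explicit parameters:
def decayed_lr(base_lr, global_step, decay_steps, decay_rate):
    # Mirrors tf.train.exponential_decay with staircase=False.
    return base_lr * decay_rate ** (global_step / decay_steps)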
import time

import matplotlib.pyplot as plt

import estimator
import geometry
# `generate_points`, `create_point_placement_by_clustering` and
# `create_point_placement_by_bsp` are assumed to be defined in (or imported
# into) this module.


def binary_tree_test():
    # TODO: check empty point collection and one-point collection
    point_count = 1000
    print('Point count =', point_count)
    points = generate_points(point_count)

    # Find the best point placement
    errors_counter = estimator.DiscontinuityErrorCounter(
        geometry.euclidean_distance)
    # min_error_count = estimator.find_few_errors_point_permutation(
    #     points, errors_counter)
    # print("Best possible score:", min_error_count)

    plt.subplot(211)
    print("Starting...")
    start = time.time()
    cluster_ordered_points = create_point_placement_by_clustering(points)
    print("Finished. Elapsed time =", time.time() - start)

    plt.subplot(212)
    print("Starting...")
    start = time.time()
    bsp_ordered_points = create_point_placement_by_bsp(points)
    print("Finished. Elapsed time =", time.time() - start)
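# A minimal sketch for the TODO above (empty and one-point collections),
# assuming both placement functions accept any point sequence and return
# an ordering of the same length:
def binary_tree_edge_case_test():
    for points in ([], generate_points(1)):
        assert len(create_point_placement_by_clustering(points)) == len(points)
        assert len(create_point_placement_by_bsp(points)) == len(points)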
#########################################
#     Graph Algorithms 2019/2020        #
#    Automated integration tests        #
#         Stanislaw Denkowski           #
#         Maciej Tratnowiecki           #
#########################################

# Imports of the modules used
import generator
import kdtree
import quadtree
from random import randint

if __name__ == '__main__':
    test = generator.generate_points(100)
    kd = kdtree.Kdtree(test)
    quad = quadtree.Quadtree(test)
    v = 1000
    # Fuzz the two structures against each other with random query
    # rectangles; report any disagreement along with the point set.
    while True:
        xl = randint(-v, v)
        xh = randint(-v, v)
        yl = randint(-v, v)
        yh = randint(-v, v)
        s1 = kd.find(xl, xh, yl, yh)
        s2 = quad.find(xl, xh, yl, yh)
        if set(s1) != set(s2):
            print("ERROR!")
            print(test)
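# A bounded variant of the same randomized comparison (a sketch for runs
# that must terminate; the trial count is arbitrary, and queries where
# xl > xh or yl > yh are kept, matching the loop above):
def compare_bounded(trials=10000, v=1000):
    test = generator.generate_points(100)
    kd = kdtree.Kdtree(test)
    quad = quadtree.Quadtree(test)
    for _ in range(trials):
        xl, xh = randint(-v, v), randint(-v, v)
        yl, yh = randint(-v, v), randint(-v, v)
        if set(kd.find(xl, xh, yl, yh)) != set(quad.find(xl, xh, yl, yh)):
            print("ERROR!")
            print(test)
            return False
    return True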
# Module-level imports used below: os, csv, and the local generator module.
def process(self):
    # Create the test directory if it does not exist
    if not os.path.exists('output/' + self.path + '/'):
        os.makedirs('output/' + self.path + '/')

    # Disable TensorFlow logs
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # Open files for the general per-test results and the function summary
    with open('output/' + self.path + '/' + 'results' + '.csv', 'w',
              newline='') as output, \
            open('output/' + self.path + '/' + 'summary.txt', 'w',
                 newline='') as output_f:
        # Column names for the results file
        fieldnames = ['number', 'start', 'optimal_lr', 'iterations', 'method']
        # Write the header row; see Python's csv module
        thewriter = csv.DictWriter(output, fieldnames=fieldnames)
        thewriter.writeheader()

        dims_amount = len(self.function_amount.keys())
        k = 0
        # For each dimension in the given dictionary do the following
        for dim in self.function_amount.keys():
            # Create a generator object for functions
            pre_generated_functions = generator.FunGen(
                dim, self.function_amount[dim], self.path)
            generated_functions = pre_generated_functions.generate(
                self.proto_function)
            f_n = self.function_amount[dim]
            # Create the required number of functions of this dimension
            for i in range(f_n):
                # Generate a function
                f = generated_functions[i]
                # Generate starting points
                starting_points = generator.generate_points(
                    dim, self.m, self.r, f[2])
                # Counter for points (used to name files)
                l = 0
                # Record the function number in the summary file
                output_f.write('Function ' + str(i + 1) + '/' + str(f_n) +
                               ' of dimension ' + str(dim) + '\n')
                # Write the generated function itself
                output_f.write(str(f[1]) + '\n\n')
                output_f.write('Expected minimum coordinates: \t')
                output_f.write(str(f[2]) + '\n')
                output_f.write('Expected minimum value: \t')
                output_f.write(str(f[3]) + '\n')
                # Write all the generated starting points
                output_f.write('\nStarting points:\n')
                d = 1
                # For each starting point do the following
                for st_p in starting_points:
                    output_f.write('-' * 25 + '\n')
                    output_f.write(str(d) + ': \t' + str(st_p) + '\n\n')
                    # Open a file to record the optimization trends
                    with open('output/' + self.path + '/' + 'test_' +
                              str(dim) + '_' + str(i) + '_' + str(l) +
                              '.csv', 'w', newline='') as output_t:
                        # Column names for the per-point trend files
                        fieldnames_t = ['method', 'learning_rate', 'decay',
                                        'value', 'step_size']
                        thewriter_t = csv.DictWriter(
                            output_t, fieldnames=fieldnames_t)
                        thewriter_t.writeheader()
                        # For each method under test do the following
                        for method in self.methods.keys():
                            # Calculate the optimal learning rate
                            res = self.__optimal_learning_rate(
                                method, f, self.methods[method], st_p,
                                pre_generated_functions, thewriter_t)
                            output_f.write(method.upper() + ': \t')
                            output_f.write(str(res[2]))
                            output_f.write(';\t Value: ')
                            output_f.write(
                                str(res[3]) + ' in ' + str(res[1]) +
                                ' iterations with ' + str(res[0]) +
                                ' learning rate' + '\n')
                            # Append the results to the results file
                            thewriter.writerow({
                                'number': i,
                                'start': st_p,
                                'optimal_lr': res[0],
                                'iterations': res[1],
                                'method': method
                            })
                    output_f.write('-' * 25 + '\n')
                    l += 1
                    d += 1
                output_f.write('\n\n')
                print('Completed',
                      str(i + 1) + '/' + str(self.function_amount[dim]),
                      'of', dim, 'dimension')
            print('--- Completed', dim, 'dimension (total:',
                  str(k + 1) + '/' + str(dims_amount) + ')\n')
            k += 1
        print('All completed')
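# Hypothetical driver, inferred from the attributes process() reads
# (path, function_amount, m, r, proto_function, methods); the class name
# and constructor signature are assumptions, not shown in this file.
#
#     suite = TestSuite(path='quadratic',
#                       function_amount={2: 3, 5: 3},  # {dimension: count}
#                       m=5, r=10.0,
#                       proto_function=quadratic_proto,
#                       methods={'rms': rms_lr_range,
#                                'momentum': momentum_lr_range})
#     suite.process()  # writes output/quadratic/results.csv and summary.txt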