def fetch_results(self):
    """Compute the gender split, per level, across all results.

    For each of the ``self.num_levels`` levels, totals the men and women
    counts over every entry in ``self.results`` and records what percentage
    of the level's combined head-count each gender represents.

    Returns:
        list: ``[men_data, women_data]`` where each element is a list of
        per-level percentages (0-100), one entry per level.

    Note:
        Raises ZeroDivisionError if a level has zero employees in total —
        TODO(review): confirm whether empty levels can occur upstream.
    """
    men_data = []
    women_data = []
    for level in range(self.num_levels):
        men_averager = Averager()
        women_averager = Averager()
        for result in self.results:
            men_averager.add(result.men[level])
            women_averager.add(result.women[level])
        # Percentage of this level's total head-count per gender.
        # (The original also computed get_average() for each gender but
        # discarded the results; those dead calls are removed here.)
        total_employees = men_averager.get_total() + women_averager.get_total()
        men_data.append(100 * men_averager.get_total() / total_employees)
        women_data.append(100 * women_averager.get_total() / total_employees)
    return [men_data, women_data]
def __init__(self, bbox):
    """Start tracking *bbox*, seeding the running average with it.

    Sets up the seen/unseen counters, creates an Averager sized to the
    bounding box (presumably one series per coordinate — confirm against
    Averager's signature), and pushes the initial observation.
    """
    # Counters for how often this track has / hasn't been observed.
    self._seen = 0
    self._unseen = 0
    # One averaging window of AVG_SERIES samples per bbox component.
    self._averager = Averager(AVG_SERIES, len(bbox))
    # Record the very first observation immediately.
    self.push(bbox)
# NOTE(review): this excerpt begins mid-way through the 'test' branch of an
# outer `if train_or_test == ...` statement whose header (and the enclosing
# batch loop) lies before this excerpt. The original paste had all
# indentation stripped; the indentation below is reconstructed and should be
# confirmed against the original file. Python 2 syntax (print statements).

                # Vectorize the batch's content text and run the model to get
                # predictions for this evaluation batch.
                input_content_text_vectors = helper.convert_lines_to_matrix(lines, word_to_index, content_text_length, 'c')
                feed_dict = {input_header_text: input_header_text_vectors, input_content_text: input_content_text_vectors}
                _predicted = session.run(predicted, feed_dict=feed_dict)
                # Compare predictions against labels; p_lines presumably holds
                # the batch's real (non-padding) lines — TODO confirm.
                expected = helper.convert_lines_to_labels(lines)[:len(p_lines)]
                (a_correct, _) = helper.prediction_assessment(expected, _predicted[:len(p_lines)])
                correct += a_correct
                total += len(p_lines)
                # Running accuracy so far.
                print 'correct = ', correct, ' total = ', total, ' percentage = ', float(correct) / float(total)
            # Final accuracy over the whole evaluation set.
            print 'final results:'
            print 'correct = ', correct, ' total = ', total, ' percentage = ', float(correct)/float(total)
elif train_or_test == 'train':
    # Training branch: stream batches from the data file and run the
    # optimizer, reporting a moving average of accuracy every 50 batches.
    with open(data_filename) as data_file:
        with tf.Session() as session:
            session.run(init_op)
            # Averager over the last 50 per-batch accuracy percentages.
            av = Averager(50)
            for batch_index in range(number_of_batches):
                lines = helper.read_file_in_loop(data_file, batch_size)
                # Vectorize header text, content text, and labels for this batch.
                input_header_text_vectors = helper.convert_lines_to_matrix(lines, word_to_index, header_text_length, 'h')
                input_content_text_vectors = helper.convert_lines_to_matrix(lines, word_to_index, content_text_length, 'c')
                input_label_vector = helper.convert_lines_to_labels(lines)
                feed_dict = {input_header_text: input_header_text_vectors, input_content_text: input_content_text_vectors, input_labels: input_label_vector}
                # One optimization step; also fetch loss and predictions.
                (_loss, _, _predicted) = session.run([loss, train_op, predicted_as_vector], feed_dict=feed_dict)
                assessment = helper.prediction_assessment(input_label_vector, _predicted)
                (_, _percent) = assessment
                av.add(_percent)
                # Periodic progress report (every 50 batches and on the last one).
                if (batch_index % 50) == 0 or batch_index == (number_of_batches-1):
                    print 'batch: ', batch_index, ' loss: ', _loss
                    print 'assessment: ', assessment
                    print 'Last 50 iterations average: ', av.average()
# Sweep over the requested temperatures, averaging Ising-model observables
# over several runs per temperature and appending results to the
# magnetization and domain-size output files.
# (Chunk begins mid-script: magn_file, domain_file, options, seed, val_fmt,
# val_line_fmt, hdr_line_fmt, M_hdr, E_hdr, deltaE2_hdr, print_options and
# ising_module are defined earlier in the file.)
magn_file.write(hdr_line_fmt.format(M=M_hdr, E=E_hdr, deltaE2=deltaE2_hdr))
print_options(magn_file, options)
domain_file.write('T domain_sizes\n')
print_options(domain_file, options)
# options.T is a comma-separated list of temperatures, e.g. "1.0,2.0,2.27".
for T in (float(T_str) for T_str in options.T.split(',')):
    if options.verbose > 0:
        sys.stderr.write('# computing T = {0:.4f}\n'.format(T))
    # Fresh system per temperature, seeded identically for comparability.
    ising = ising_module.IsingSystem(options.N, options.J, options.H, T)
    ising.init_random(seed)
    # NOTE(review): ising=None looks deliberate — presumably Averager wires a
    # system into the runner for each run; confirm against Averager.average.
    runner = DomainSizeRunner(ising=None,
                              steps=options.steps,
                              is_verbose=options.verbose - 2,
                              burn_in=options.burn_in,
                              sample_period=options.sample_period,
                              window=options.window)
    averager = Averager(runner, ising, is_verbose=options.verbose - 1)
    averager.average(options.runs)
    # Formatted mean magnetization, mean energy and energy variance.
    M_str = val_fmt.format(**averager.get('M mean'))
    E_str = val_fmt.format(**averager.get('E mean'))
    deltaE2_str = val_fmt.format(**averager.get('deltaE^2'))
    magn_file.write(val_line_fmt.format(T=T, M=M_str, E=E_str,
                                        deltaE2=deltaE2_str))
    magn_file.flush()  # keep output usable while long sweeps run
    # Domain-size histogram as "size:frequency" pairs.
    # (Fixes the original's misspelled local 'distrubtion' and drops the
    # needless intermediate list inside join.)
    domains = averager.get('domains')
    distribution = ','.join('{0:d}:{1:.8e}'.format(k, v)
                            for k, v in domains.items())
    domain_file.write('{0:.4f} {1:s}\n'.format(T, distribution))
    domain_file.flush()