def test_net():
    """Train a `Network` on loader data, holding out the first third as a test set.

    Loads the data via the project-local ``data_loader.Loader``, builds a
    three-layer network sized to the data, and runs SGD2 for 500 epochs.

    NOTE(review): relies on module-level ``Network`` and the ``data_loader``
    package; both are project-local.
    """
    import data_loader
    d = data_loader.Loader()
    d.load()
    sizes = [d.x_dim, 30, d.y_dim]
    net = Network(sizes)
    # Floor division keeps k an int slice index: identical to `/` on
    # Python 2 ints, and avoids a float (TypeError on slicing) on Python 3.
    k = d.size // 3
    training_data, test_data = d.data[k:], d.data[:k]
    net.SGD2(training_data, 500, 3.0, 2, test_data=test_data)
def test_net2():
    """Two-stage training experiment.

    Stage 1 trains a net (SGDR) to predict a scaled slice of the targets;
    stage 2 trains a second net (SGD2) whose inputs are the original inputs
    concatenated with stage-1 targets (for training) or stage-1 predictions
    (for testing).

    NOTE(review): relies on module-level ``Network``, ``np`` (numpy) and the
    project-local ``data_loader`` package.
    """
    import data_loader
    d = data_loader.Loader()
    d.load2()
    np.random.shuffle(d.data)
    # Floor division keeps k an int slice index on both Python 2 and 3
    # (plain `/` yields a float on Python 3, which cannot slice).
    k = d.size // 3
    # NOTE(review): 10e10 == 1e11; confirm this scale factor is intended
    # (1e10 would be the more usual round constant).
    data_r = [(x, y[4:] * 10e10) for x, y in d.data]
    training_data, test_data = data_r[k:], data_r[:k]
    sizes = [d.x_dim, len(training_data[0][1])]
    net = Network(sizes)
    net.SGDR(training_data, 100, 3.0, 10, test_data=test_data)
    results = [net.feedforward(x) for x, y in test_data]
    # Stage 2: augment inputs with the tail of the true targets (train) or
    # with the stage-1 network's outputs (test); predict the first 2 targets.
    training_data_c = [(np.concatenate((x, y[4:])), y[:2]) for x, y in d.data[k:]]
    test_data_c = [(np.concatenate((x[0], y)), x[1][:2])
                   for x, y in zip(d.data[:k], results)]
    x_dim = training_data_c[0][0].shape[0]
    y_dim = training_data_c[0][1].shape[0]
    #sizes_c = [x_dim,10,10,10,10,y_dim]
    sizes_c = [x_dim, y_dim]
    net2 = Network(sizes_c)
    net2.SGD2(training_data_c, 200, 0.1, 5, test_data=test_data_c)
    # Parenthesized single-argument form prints identically on Python 2 and 3
    # (the original Py2 `print` statements are a SyntaxError under Python 3).
    print('sizes %s' % sizes)
    print('sizes_c %s ' % sizes_c)
# 默认从data/train.pkl中读取训练数据 # 保存训练结果到save文件夹 import tensorflow as tf import compo_net import data_loader import os if __name__ == '__main__': config = compo_net.Config(training=True, learning_rate=0) config.restore = os.path.exists(os.path.join(config.save_dir, 'checkpoint')) if config.restore: print(':: restore from saved model') loader = data_loader.Loader(config) model = compo_net.Model(config) merged_summary = tf.summary.merge_all() sess = tf.Session() summary_writer = tf.summary.FileWriter(config.log_dir, sess.graph) sess.run(tf.global_variables_initializer()) if config.restore: model.restore(sess) input('NOTICE: initial learning rate is 0, reset after start') input(':: press enter to start training') times = 1 cost = 0 while True:
#print('predict_for_list_of_models') best_prediction_forest, best_forest = self.predict_for_list_of_models(list_of_forests) #print('plot conf matrix') self.plot_confusion_matrix(best_prediction_forest, 'rforest') classes_dict_pos_svm, classes_dict_neg_svm = self.roc_curve(best_prediction_svm) classes_dict_pos_fr, classes_dict_neg_fr =self.roc_curve(best_prediction_forest) self.plot_roc(classes_dict_pos_svm, classes_dict_neg_svm) self.plot_predicted(best_prediction_svm, 'svm') self.plot_predicted(best_prediction_forest, 'forest') data_load = data_loader.Loader("consensus_county15.txt") data = data_load.estimation_data validation_data = data_load.validation_data[:,1:] header = data_load.header[1:] print(len(header)) pca = Main(data, header, validation_data) #categories should stay the same pca.choose_categories(header, 'Poverty') pca.choose_categories(header, 'Poverty', for_validation=True) #pca.choose_categories(['County', 'Income', 'White', 'TotalPop', 'Unemployment','PublicWork'], 'Poverty') #pca.choose_categories(['County', 'Income', 'White', 'TotalPop', 'Unemployment','PublicWork'], 'Poverty', for_validation=True) #print('classify_poverty')
#!/usr/bin/python3 import value_net import data_loader import config import tensorflow as tf import os.path if __name__ == '__main__': model = value_net.Model() data_loader = data_loader.Loader() input(':: press enter to start training') learning_rate = config.default_lr times = 1 while True: try: inputs, targets = data_loader.get_next_batch() cost = model.train(inputs, targets, learning_rate) # when use cross entroy, this may happen assert cost == cost, 'cost is nan' print('batch: {0}, cost: {1}'.format(times, cost)) times += 1 except KeyboardInterrupt: cmd = input('\noperation(w/q/c/l/t):') if cmd == 'w': model.save()