def good_plan(request):
    try:
        # cnn = CNN({'model_name': 'good_plan_50', 'img_rows': 50, 'img_cols': 50})
        cnn = CNN({'model_name': 'good_plan_50'}, True)
        cnn.train_model(40)
    except Exception as e:
        print e
        return Response({'msg': e.message}, status=status.HTTP_400_BAD_REQUEST)
    print 'finish good plan'
    return Response({}, status=status.HTTP_200_OK)
def add_model(request):
    cnn = CNN(request.POST)
    success, msg = cnn_manager.add_model(cnn)
    if success:
        return Response({'msg': msg}, status=status.HTTP_200_OK)
    else:
        return Response({'msg': msg}, status=status.HTTP_400_BAD_REQUEST)
def compute_logits(inputs, batch_size=-1):
    #
    # logits.append(FullyConnected.build_model(logits[-1], 'fullyconnected'))
    #
    logits = [inputs]
    # add a channel dimension so the convolutional encoder sees a single-channel "image"
    inputs_expanded = tf.expand_dims(inputs, 3)
    logits.append(CNN.build_model(inputs_expanded, config.encoder('cnn')))
    # collapse the conv feature maps into one feature vector per time step
    temp_shape = logits[-1].get_shape().as_list()
    logits.append(
        tf.reshape(
            logits[-1],
            shape=[batch_size, temp_shape[1], temp_shape[2] * temp_shape[3]]))
    # logits.append(BottleNeck.build_model(logits[-1], config.encoder('bottleneck')))
    # temp_shape = logits[-1].get_shape().as_list()
    # logits.append(tf.reshape(logits[-1], shape=[batch_size, temp_shape[1], -1]))
    logits.append(
        BiDirectionLSTM.build_model(logits[-1], config.encoder('bidirection_lstm')))
    # flatten (batch, time) so the fully connected layer is applied per time step
    logits.append(
        tf.reshape(logits[-1], shape=[-1, logits[-1].get_shape()[-1]]))
    logits.append(
        FullyConnected.build_model(logits[-1], config.encoder('fullyconnected')))
    # back to [batch, time, 72] class scores; the second return value repeats the
    # conv encoder's output length once per example in the batch
    logits.append(tf.reshape(logits[-1], shape=[batch_size, -1, 72]))
    return logits[-1], tf.fill([batch_size], temp_shape[1])
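
# A minimal usage sketch for compute_logits (not part of the original module).
# It assumes a TensorFlow 1.x graph and that the input is a [batch, time, features]
# float tensor; the placeholder shape and the CTC-style use of the returned
# sequence lengths are illustrative assumptions.
import tensorflow as tf

batch_size = 32
inputs = tf.placeholder(tf.float32, shape=[batch_size, 100, 40])  # [batch, time, features]
logits, seq_lens = compute_logits(inputs, batch_size=batch_size)
# logits: [batch, time', 72] per-step class scores
# seq_lens: one output length per example, e.g. usable with tf.nn.ctc_loss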
def test_train_model(self):
    # cnn = CNN({'model_name': 'split_cases_50_50_2_layer', 'img_rows': 200, 'img_cols': 200})
    # cnn = CNN({'model_name': 'model_with_gabor2'}, True)
    # cnn = CNN({'model_name': 'split_cases_dropout_50', 'img_rows': 200, 'img_cols': 200})
    # cnn = CNN({'model_name': 'test_split_case'}, True)
    # cnn = CNN({'model_name': 'test_split_case', 'img_rows': 200, 'img_cols': 200})
    # cnn = CNN({'model_name': 'third_model', 'img_rows': 200, 'img_cols': 200})
    cnn = CNN({'model_name': 'kernal_5X5'}, True)
    # cnn = CNN({'model_name': 'second_model'}, True)
    # cnn = CNN({'model_name': 'naor_first_model'}, True)
    # cnn._calculate_confusion_matrix()
    # self.assertEqual(cnn.tp, 396)
    # self.assertEqual(cnn.tn, 970)
    # self.assertEqual(cnn.fp, 6)
    # self.assertEqual(cnn.fn, 61)
    cnn.train_model(n_epoch=1)
def test(self):
    cnn = CNN({'model_name': 'e11'}, True)
    # Gabor kernel parameters
    cnn.sigma = 0.5   # > 0, width of the Gaussian envelope
    cnn.gamma = 0.5   # 0-1, spatial aspect ratio
    cnn.theta = 1     # orientation of the filter
    cnn.lambd = 0.5   # wavelength of the sinusoidal carrier
    cnn.psi = 1.57    # phase offset (~pi/2)
    cg = cnn.get_custom_gabor()
    ker = cg((3, 3, 3, 3))
    print ker.container
def load_models(self):
    models_names = set()
    file_list = os.listdir(os.path.join(ROOT_DIR, 'cnn_models'))
    for _file in file_list:
        model_name = _file.split('.')[0]
        models_names.add(model_name)
    for i in models_names:
        print 'load model %s' % i
        cnn = CNN({'model_name': i}, True)
        self.models[i] = cnn
def test_train_model(self):
    _con_mat = [[25, 25, 25, 25], [30, 20, 30, 20], [50, 0, 0, 50]]
    model_name = 'kernal_6X6'
    cnn = CNN({
        'model_name': model_name,
        'img_rows': 75,
        'img_cols': 75,
        'kernel_size': (8, 8)
    })
    cnn.con_mat_train = _con_mat
    cnn.con_mat_val = _con_mat
    cnn._save_only_best()
    self.assertTrue(
        os.path.exists(os.path.join(cnn.model_path + '.h5(weights)')))
    self.assertTrue(os.path.exists(os.path.join(cnn.model_path + '.json')))
    cnn.train_model(1)
    del cnn
    cnn = CNN({'model_name': model_name}, True)
    self.assertEqual(_con_mat, cnn.con_mat_train)
    self.assertEqual(_con_mat, cnn.con_mat_val)
def test_add_model(self):
    cnn_manager = CNNManager()
    self.expected_file = os.path.join(
        ROOT_DIR, 'cnn_models',
        'cpu_cnn_model_%s' % (cnn_manager.last_index + 1))
    self.test_cnn = CNN(img_rows=300)
    cnn_manager.add_model(self.test_cnn)
    self.assertTrue(os.path.exists(self.expected_file + '.h5'))
    self.assertTrue(os.path.exists(self.expected_file + '.json'))
    cnn_manager.remove_model(self.test_cnn)
    self.assertFalse(os.path.exists(self.expected_file + '.h5'))
    self.assertFalse(os.path.exists(self.expected_file + '.json'))
def alg_train_new(model_name, p_keep_conv=1.0, p_keep_hidden=1.0,
                  batch_size=512, test_size=256, epoch_time=3):
    """
    :param model_name: name under which the trained model is saved
    :param p_keep_conv: dropout keep probability for the convolutional layers
    :param p_keep_hidden: dropout keep probability for the fully connected layers
    :param batch_size: mini-batch size used during training
    :param test_size: number of samples evaluated per test batch
    :param epoch_time: number of training epochs
    :return:
    """
    print('initializing CNN model')
    cnn = CNN(p_keep_conv=p_keep_conv, p_keep_hidden=p_keep_hidden,
              batch_size=batch_size, test_size=test_size, epoch_time=epoch_time)
    print('CNN has been initialized')
    # print('load mnist done')
    print('load training data')
    X, y = get_new_data('F:/num_ocr')
    X = X / 255.0
    X = X.reshape(-1, 48, 48, 1)
    # X = X.reshape(-1, 28, 28, 1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05)
    print('load training data done')
    print('-' * 30, 'training', '-' * 30)
    import time
    tmp_time = time.time()
    cnn.fit_new(X_train, y_train, X_test, y_test)
    print('total time cost:', time.time() - tmp_time)
    cnn.save(model_name)
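
# A hypothetical invocation of alg_train_new; the model name, keep probabilities,
# batch size and epoch count below are illustrative and not taken from the
# original script.
if __name__ == '__main__':
    alg_train_new('num_ocr_cnn_v1',
                  p_keep_conv=0.8,
                  p_keep_hidden=0.5,
                  batch_size=256,
                  epoch_time=10)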
def random_plan(request):
    while True:
        conf = get_random_conf()
        cnn = CNN(conf)
        cnn.train_model(1)
        if 0 in cnn.con_mat_val[-1]:
            continue
        else:
            print 'we find normal model'
            cnn.train_model(10)
def test_load_evaluate(self):
    cnn = CNN({'model_name': 'model1'}, _reload=True)
    cnn.load_data_set()
    cnn._calculate_confusion_matrix()
    plt.imshow(feature.reshape(k, shp[0], shp[1])[0],
               interpolation='None', cmap='binary')
    plt.show()


def dump2file(cnn, filename):
    global datasets_for_abstract
    x = datasets_for_abstract[0][0]
    y = datasets_for_abstract[0][1].eval()
    features = cnn.get_feature(x)
    print features.shape
    print y.shape
    cPickle.dump((features, y), open(filename, 'wb'))


cnn = CNN(dim_in=1, size_in=(28, 28),
          nkerns=[(4, (2, 2), (1, 1)), (2, (3, 3), (2, 2))])
# fit(self, lossf, datasets, batch_size=500, n_epochs=200, learning_rate=0.01):

# examinate(cnn)
# dump2file(cnn, './features/1_random.feature')
examinate(cnn)
cnn.set_lossf(core.loss_functions.TEST_LOSS_F)
cnn.fit(datasets, batch_size=200, n_epochs=100, learning_rate=0.0001)
examinate(cnn)
# dump2file(cnn, './features/1_trained.feature')
def test_create_model_svg(self):
    cnn = CNN({'model_name': 'naor_first_model'}, True)
    cnn.create_model_svg()
def dump2file(cnn0, cnn1, filename):
    global datasets_for_abstract0, datasets_for_abstract1
    x0 = datasets_for_abstract0[0]
    x1 = datasets_for_abstract1[0]
    y = datasets_for_abstract0[1].eval()
    # use the two networks passed in as arguments, not the module-level cnn1/cnn2
    features0 = cnn0.get_feature(x0)
    features1 = cnn1.get_feature(x1)
    # features = np.concatenate([features0, features1], 1)
    features = features0 - features1
    cPickle.dump((features, y), open(filename, 'wb'))


cnn1 = CNN(dim_in=1, size_in=(32, 32),
           nkerns=[(8, (2, 2), (1, 1)), (6, (3, 3), (2, 2))])
cnn2 = CNN(dim_in=1, size_in=(32, 32),
           nkerns=[(8, (2, 2), (1, 1)), (6, (3, 3), (2, 2))])
# fit(self, lossf, datasets, batch_size=500, n_epochs=200, learning_rate=0.01):

# examinate(cnn)
examinate(cnn1, cnn2)
dump2file(cnn1, cnn2, './features/1_random.feature')

for i in range(4):
    print 'The %03dth updating' % i
    loss = core.loss_functions.lossf3(cnn1, cnn2.get_feature(datasets1[0][0]))
    cnn1.set_lossf(loss)
    cnn1.fit(datasets0, batch_size=50, n_epochs=5, learning_rate=0.00001, test_model_on=0)
    loss = core.loss_functions.lossf3(cnn2, cnn1.get_feature(datasets1[0][0]))
def test(self):
    cnn = CNN({'model_name': 'model7(new_aug)'}, True)
    cnn.load_datasets()
    cnn._calculate_confusion_matrix()
def full_plan(request):
    try:
        item = 1
        for split_cases in ['True', 'False']:
            for dropout in [0.25, 0.5]:
                for activation_function in ['softmax', 'sigmoid']:
                    for img_size in [(75, 75), (50, 50)]:
                        for nb_filters in [32, 64]:
                            for kernel_size in [5, 6, 7, 8, 9, 10]:
                                for pool_size in [2, 4, 6, 8]:
                                    for batch_size in [32, 64, 128]:
                                        for sigma in [180, 90, 30]:
                                            for theta in [45, 90, 135]:
                                                for lammbd in [45, 90, 135]:
                                                    for gamma in [0.3, 0.5, 0.7, 0.9]:
                                                        for psi in [0.2, 0.5, 0.8]:
                                                            try:
                                                                item_path = os.path.join(
                                                                    ROOT_DIR, 'cnn_models',
                                                                    'item%s.json' % item)
                                                                if os.path.exists(item_path):
                                                                    item += 1
                                                                    break
                                                                params = {}
                                                                params['model_name'] = "item%s" % item
                                                                item += 1
                                                                params['split_cases'] = split_cases
                                                                params['img_rows'] = img_size[0]
                                                                params['img_cols'] = img_size[1]
                                                                params['batch_size'] = batch_size
                                                                params['nb_filters'] = nb_filters
                                                                params['dropout'] = dropout
                                                                params['activation_function'] = activation_function
                                                                params['pool_size'] = pool_size
                                                                params['kernel_size'] = kernel_size
                                                                params['sigma'] = sigma
                                                                params['theta'] = theta
                                                                params['lammbd'] = lammbd
                                                                params['gamma'] = gamma
                                                                params['psi'] = psi
                                                                cnn = CNN(params)
                                                                cnn.train_model(150)
                                                            except Exception as e:
                                                                print e
    except Exception as e:
        print e
        return Response({'msg': e.message}, status=status.HTTP_400_BAD_REQUEST)
    print 'finish all plan'
    return Response({}, status=status.HTTP_200_OK)
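
# A more compact, hypothetical way to express the same hyperparameter sweep with
# itertools.product; the helper name run_full_plan_grid is an assumption. Note one
# deliberate difference: the original breaks out of the innermost loop when an item
# file already exists, whereas this sketch simply skips that combination.
import itertools


def run_full_plan_grid():
    grid = itertools.product(
        ['True', 'False'],           # split_cases
        [0.25, 0.5],                 # dropout
        ['softmax', 'sigmoid'],      # activation_function
        [(75, 75), (50, 50)],        # img_size
        [32, 64],                    # nb_filters
        [5, 6, 7, 8, 9, 10],         # kernel_size
        [2, 4, 6, 8],                # pool_size
        [32, 64, 128],               # batch_size
        [180, 90, 30],               # sigma
        [45, 90, 135],               # theta
        [45, 90, 135],               # lammbd
        [0.3, 0.5, 0.7, 0.9],        # gamma
        [0.2, 0.5, 0.8])             # psi
    for item, combo in enumerate(grid, start=1):
        (split_cases, dropout, activation_function, img_size, nb_filters,
         kernel_size, pool_size, batch_size, sigma, theta, lammbd, gamma, psi) = combo
        item_path = os.path.join(ROOT_DIR, 'cnn_models', 'item%s.json' % item)
        if os.path.exists(item_path):
            continue  # this configuration was already trained
        params = {
            'model_name': 'item%s' % item,
            'split_cases': split_cases,
            'img_rows': img_size[0], 'img_cols': img_size[1],
            'batch_size': batch_size, 'nb_filters': nb_filters,
            'dropout': dropout, 'activation_function': activation_function,
            'pool_size': pool_size, 'kernel_size': kernel_size,
            'sigma': sigma, 'theta': theta,
            'lammbd': lammbd, 'gamma': gamma, 'psi': psi,
        }
        try:
            CNN(params).train_model(150)
        except Exception as e:
            print e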
    plt.imshow(feature.reshape(k, shp[0], shp[1])[0],
               interpolation='None', cmap='binary')
    plt.show()


def dump2file(cnn, filename):
    global datasets_for_abstract
    x = datasets_for_abstract[0]
    y = datasets_for_abstract[1].eval()
    features = cnn.get_feature(x)
    print features.shape
    print y.shape
    cPickle.dump((features, y), open(filename, 'wb'))


cnn = CNN(dim_in=2, size_in=(32, 32),
          nkerns=[(8, (2, 2), (1, 1)), (6, (3, 3), (2, 2))])
# fit(self, lossf, datasets, batch_size=500, n_epochs=200, learning_rate=0.01):

# examinate(cnn)
dump2file(cnn, './features/1_random.feature')
examinate(cnn)
cnn.fit(core.loss_functions.TEST_LOSS_F, datasets,
        batch_size=50, n_epochs=100, learning_rate=0.00005)
examinate(cnn)
dump2file(cnn, './features/1_trained.feature')
import numpy as np
import cv2
from core.cnn import CNN
from server.server_tools import *
from gevent.pywsgi import WSGIServer
from multiprocessing import cpu_count, Process
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
import time
import base64
from flask import Flask, jsonify, request
from flask_cors import CORS
import traceback

# load the trained CNN session once at startup so every request reuses it
cnn = CNN()
cnn.load_session('../model/Test_CNN_Model.ckpt')
print 'load model done'

app = Flask(__name__)
CORS(app, resources=r'/*')


@app.route('/ai/cv/numreco', methods=['POST'])
def num_reco():
    """
    :return:
    """
    result = []
    data = []