def train():
    model = network.init_model(imggen.LABEL_SIZE, input_shape)
    model.compile(loss='categorical_crossentropy', optimizer='adadelta',
                  metrics=['accuracy'])
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,
                              write_graph=True, write_images=True)
    for epoch in range(iterations):
        # Each iteration draws a fresh batch of augmented training images.
        (x_train, y_train) = imggen.next_batch(TRAINING_SET_SIZE, rotate=True,
                                               ud=True, multi_fonts=True,
                                               multi_sizes=True, blur=True)
        model.fit(x_train, y_train, batch_size=32, epochs=epoch + 1,
                  verbose=1, validation_split=0.1, callbacks=[tensorboard],
                  initial_epoch=epoch)
        if not os.path.exists('results/data'):
            os.makedirs('results/data')
        model.save_weights('results/data/model%d.h5' % epoch)
    (x_test, y_test) = imggen.next_batch(TEST_SET_SIZE)
    score = model.evaluate(x_test, y_test)
    print("\nTest score: {0:.4g}".format(score[0]))
    print("Test accur: {0:.4g}".format(score[1]))
def recognize(array):
    path = os.getcwd()
    array = 1 - array  # invert pixel values (assumes intensities scaled to [0, 1])
    global model
    if model is None:
        model = network.init_model(ig.LABEL_SIZE, train.input_shape)
        model.load_weights(os.path.join(path, 'results/data/model.h5'))
    pred = model.predict(array, batch_size=5, verbose=0)
    return choose_char(pred[0], ig.chars)
def recognize(array):
    array = 1 - array
    global model
    if model is None:
        model = network.init_model(imggen.LABEL_SIZE, learning.input_shape)
        model.load_weights('results/data/model.h5')
    print(model.predict(array, batch_size=1, verbose=0))  # debug: raw class probabilities
    pred = model.predict_classes(array, batch_size=1, verbose=0)
    return imggen.chars[pred[0]]
def train():
    path = os.getcwd()
    model = network.init_model(ig.LABEL_SIZE, input_shape)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    tensorboard = TensorBoard(log_dir=os.path.join(path, 'logs'),
                              histogram_freq=0, write_graph=True,
                              write_images=True)
    for epoch in range(0, iterations, nb_epoch):
        (x_train, y_train) = ig.next_batch(TRAINING_SET_SIZE, rotate=True,
                                           ud=True, lr=True, multi_fonts=True,
                                           multi_sizes=True, blur=False)
        model.fit(x_train, y_train, batch_size=32, epochs=epoch + nb_epoch,
                  verbose=1, validation_split=0.1, callbacks=[tensorboard],
                  initial_epoch=epoch)
        if not os.path.exists(os.path.join(path, 'results/data')):
            os.makedirs(os.path.join(path, 'results/data'))
        model.save_weights(os.path.join(path, 'results/data/model.h5'))
    (x_test, y_test) = ig.next_batch(TEST_SET_SIZE)
    score = model.evaluate(x_test, y_test)
    # Export the TensorFlow graph alongside the Keras weights.
    tf.train.write_graph(K.get_session().graph, 'results/data', 'model.pb', False)
    print("\nTest score: {0:.4g}".format(score[0]))
    print("Test accur: {0:.4g}".format(score[1]))
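# A note on the export above: tf.train.write_graph serializes the session's
# graph *structure* only; the trained weights still live in model.h5. A minimal
# TF1-style sketch for reading the binary protobuf back (assumes TensorFlow 1.x):
import tensorflow as tf

with tf.gfile.GFile('results/data/model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')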
def recognize_chars(chars, image):
    char_images = image_ops.crop_all_char_images(chars, image)
    global model
    path = os.getcwd()
    if model is None:
        model = network.init_model(ig.LABEL_SIZE, train.input_shape)
        model.load_weights(os.path.join(path, 'results/data/model.h5'))
    pred = model.predict(char_images, batch_size=len(chars), verbose=0)
    # One ranked candidate list per cropped character image.
    return [choose_char(p, ig.chars) for p in pred]
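# choose_char is used above but not shown. A minimal sketch of what it could
# look like, assuming it ranks the softmax output and returns candidates as
# {'char', 'prob'} dicts (the indexing choose_char(p, chars)[0]['char'] seen
# above suggests this shape); the `top_n` parameter is illustrative, not from
# the source.
def choose_char(pred, chars, top_n=3):
    # Pair each class probability with its character, highest probability first.
    ranked = sorted(zip(chars, pred), key=lambda cp: cp[1], reverse=True)
    return [{'char': c, 'prob': float(p)} for c, p in ranked[:top_n]]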
    'beta1': 0.533938,
    'beta2': 0.998234
}
baseline_params['batches_per_epoch'] = set_batches_per_epoch(baseline_params['batch_size'])

sherpa_algo = sherpa.algorithms.GPyOpt(max_num_trials=200)
study = sherpa.Study(parameters=params, algorithm=sherpa_algo,
                     lower_is_better=True)

for trial in study:
    trial_id = trial.id
    savename = '{}/model/model.{}.h5'.format(algorithm, trial_id)
    pars = deepcopy(trial.parameters)
    for k, v in baseline_params.items():
        pars[k] = v

    # The model and optimizer are built from the fixed baseline; only the
    # data generator sees the merged trial parameters in this study.
    model = init_model(baseline_params)
    adam = init_adam(baseline_params)
    datagen = init_datagen(pars)
    model.compile(optimizer=adam, loss='categorical_crossentropy')

    # Train but reserve the last little bit of data for a final testing set.
    t_loss, v_loss = train(model, baseline_params, X_train, Y_train,
                           X_test[:6000], Y_test[:6000], patience=10,
                           savename=savename, datagen=datagen)

    # Log the best validation loss achieved in this trial.
    study.add_observation(trial=trial, iteration=0, objective=min(v_loss))

    # Save the model if the training loop has not already done so.
    if not os.path.exists(savename):
        model.save(savename)
    'max_epochs': 200,
}

sherpa_algo = sherpa.algorithms.GPyOpt(max_num_trials=80)
study = sherpa.Study(parameters=params, algorithm=sherpa_algo,
                     lower_is_better=True)

for trial in study:
    trial_id = trial.id
    savename = '{}/model/model.{}.h5'.format(algorithm, trial_id)
    pars = deepcopy(trial.parameters)
    for k, v in baseline_params.items():
        pars[k] = v
    pars['batches_per_epoch'] = set_batches_per_epoch(pars['batch_size'])

    model = init_model(pars)
    adam = init_adam(pars)
    datagen = init_datagen(pars)
    model.compile(optimizer=adam, loss='categorical_crossentropy')

    # Train but reserve the last little bit of data for a final testing set.
    t_loss, v_loss = train(model, pars, X_train, Y_train,
                           X_test[:6000], Y_test[:6000], patience=10,
                           savename=savename, datagen=datagen)
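# set_batches_per_epoch appears throughout but is not shown. A plausible
# sketch, assuming it simply divides the training-set size by the batch size;
# the N_TRAIN constant is an assumption for illustration, not from the source.
import math

N_TRAIN = len(X_train)  # assumed: number of training examples

def set_batches_per_epoch(batch_size):
    # Round up so a partial final batch still counts as one step.
    return int(math.ceil(N_TRAIN / float(batch_size)))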
    'output_shape': Y_train.shape[1],
    'depth': 3,
    'dense_neurons': 512,
    'init_filters': 20,
    'use_batchnorm': True,
    'dropout': 0.4,
    'batch_size': 128,
    'max_epochs': 5,
    'learning_rate': 0.008,
    'beta1': 0.5,
    'beta2': 0.999
}
params['batches_per_epoch'] = set_batches_per_epoch(params['batch_size'])
print(params)

model = init_model(params)
adam = init_adam(params)
model.compile(optimizer=adam, loss='categorical_crossentropy')
print(model.summary())

# Train but reserve the last little bit of data for a final testing set.
t_loss, v_loss = train(model, params, X_train, Y_train,
                       X_test[:6000], Y_test[:6000], patience=10,
                       savename=savename)
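# init_adam is also not shown. A minimal sketch, assuming it just maps the
# 'learning_rate', 'beta1', and 'beta2' entries onto Keras' Adam optimizer:
from keras.optimizers import Adam

def init_adam(params):
    # lr / beta_1 / beta_2 are the Keras 2.x argument names for Adam.
    return Adam(lr=params['learning_rate'],
                beta_1=params['beta1'],
                beta_2=params['beta2'])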