if __name__ == '__main__':
    # Load and compile the model from scratch (no pretrained weights).
    K.clear_session()
    model = Xception(include_top=True, weights=None)
    model.compile(optimizer='Adadelta',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    batch_count = 0
    try:
        for i in range(0, 30000):
            # Reclaim memory from the previous epoch before loading more data.
            gc.collect()
            print('----------- On Epoch: ' + str(i) + ' ----------')
            for x_train, y_train, x_test, y_test in load_batches():
                # Model input requires numpy array
                # K.clear_session()
                x_train = np.array(x_train)
                # Split the first three entries along axis 1 into separate
                # arrays.  NOTE(review): assumes x_train has at least three
                # columns on axis 1 -- confirm the shape load_batches() yields.
                x_train_0 = x_train[:, 0]
                x_train_1 = x_train[:, 1]
                x_train_2 = x_train[:, 2]
                # Dead code from an earlier draft, preserved for reference:
                # x_train1 = np.array([x_train])
                # x_train_0_5S = np.array(x_train_0_5S)
                # x_train1 = append_data(x_train1, x_train_0_5S)
                # x_train_2S = np.array(x_train_2S)
                # x_train1 = append_data(x_train1, x_train_2S)
                # x_train_5S = np.array(x_train_5S)
                # x_train1 = append_data(x_train1, x_train_5S)
                # NOTE(review): this fragment is truncated here -- the rest of
                # the batch loop (the actual fit/evaluate calls) is missing
                # from this chunk of the source.
    except Exception:
        # NOTE(review): the original except clause fell outside this chunk.
        # Re-raise so nothing is silently swallowed; restore the real handler.
        raise
print('We have loaded a previous model!!!!')
# ---------------------------------------------------------------------------
# NOTE(review): in the original source everything below was dead draft code
# wrapped in unbalanced triple-quoted strings, with the final model.fit()
# call truncated mid-argument-list.  It is preserved here as comments so the
# module stays parseable; resurrect and complete it deliberately if needed.
# ---------------------------------------------------------------------------
# txtfile = 'data_rgb.txt'
# print("Starting Training...")
# batch_count = 0
# for i in range(1, 10):
#     count = 0
#     print('----------- On Epoch: ' + str(i) + ' ----------')
#     # Record the current data-dump number for load_batches() to resume from.
#     f = open(txtfile, 'w+')
#     data_dump_no = 1
#     f.write(str(data_dump_no))
#     f.close()
#     for x_train, y_train, x_test, y_test, samples_per_batch, batch_count in load_batches(
#             epoch=i, txtfile=txtfile, samples_per_batch=1000):
#         # Model input requires numpy array
#         x_train = np.array(x_train)
#         x_test = np.array(x_test)
#         # Classification to one-hot vector
#         y_train = np.array(y_train)
#         y_test = np.array(y_test)
#         # Fit model to batch.  Reference signature kept from the original
#         # (tflearn DNN.fit):
#         #   def fit(self, X_inputs, Y_targets, n_epoch=10, validation_set=None,
#         #           show_metric=False, batch_size=None, shuffle=None,
#         #           snapshot_epoch=True, snapshot_step=None, excl_trainops=None,
#         #           validation_batch_size=None, run_id=None, callbacks=[]):
#         model.fit({'input': x_train}, {'targets': y_train}, n_epoch=1, ...)
# Uncomment to inspect layer indices/names when choosing layers to freeze:
# for j, layer in enumerate(model.layers):
#     print(j, layer.name)

print("Starting Training...")
batch_count = 0
# if True:
try:
    for i in range(1, 6):  # 6 epochs done!
        count = 0
        print('----------- On Epoch: ' + str(i) + ' ----------')
        # Record the current data-dump number for load_batches() to resume
        # from.  Use a context manager so the handle is closed even on error
        # (the original open/write/close leaked the handle on exceptions).
        data_dump_no = 6
        with open(txtfile, 'w+') as f:
            f.write(str(data_dump_no))
        for x_train, y_train, x_test, y_test, batch_size, batch_count in load_batches(
                epoch=i, samples_per_batch=4000, txtfile=txtfile):
            # Model input requires numpy array
            x_train = np.array(x_train)
            x_test = np.array(x_test)
            # Classification to one-hot vector
            y_train = np.array(y_train)
            y_test = np.array(y_test)
            # Fresh per-epoch TensorBoard log directory.
            tensorboard = TensorBoard(
                log_dir="logs/{}-{}-added_data-turn-only-v9.2".format(NAME, i))
            # Fit model to batch
            print('Training......')
            # train the model on the new data for a few epochs
            # NOTE(review): the original fit call was truncated after the
            # first two arguments; the remaining kwargs (validation data,
            # callbacks=[tensorboard], epochs, ...) must be restored -- TODO
            # confirm against the untruncated source.
            model.fit(x_train, y_train)
except Exception:
    # NOTE(review): the original except clause fell outside this chunk.
    # Re-raise so nothing is silently swallowed; restore the real handler.
    raise
MODEL_NAME = NAME + '.model'
PREV_MODEL = NAME + '.model'
LOAD_MODEL = True

# Build the network: WIDTH x HEIGHT x 3 input, 35 output classes.
model = googlenet(WIDTH, HEIGHT, 3, LR, output=35, model_name=MODEL_NAME)

if LOAD_MODEL:
    # Resume from the previous checkpoint (same path we save to below).
    model.load(PREV_MODEL)
    print('We have loaded a previous model!!!!')

count = 0
for epoch in range(EPOCHS):
    try:
        for X, Y, test_x, test_y, batch_size, batch_count in load_batches(
                epoch=epoch, txtfile=txtfile):
            model.fit({'input': X}, {'targets': Y}, n_epoch=6,
                      validation_set=({'input': test_x}, {'targets': test_y}),
                      snapshot_step=2500, show_metric=True, run_id=MODEL_NAME)
            # BUG FIX: the original saved the model three times per batch --
            # once unconditionally, once when batch_count was even, and once
            # when it was odd (the two parity tests together match every
            # batch_count).  A single checkpoint per batch is sufficient.
            print('SAVING MODEL!')
            model.save(MODEL_NAME)
            print("Saved model to disk")
    except Exception:
        # NOTE(review): the original except clause fell outside this chunk.
        # Re-raise so nothing is silently swallowed; restore the real handler.
        raise