# Shared imports for the snippets below (standalone Keras, where
# ModelCheckpoint still accepts the period= argument).
import os
import time

from keras.callbacks import EarlyStopping, CSVLogger, ModelCheckpoint
from keras.utils import plot_model

# Variant 1: embodied pre-training, then the full model with both the
# class output and the embedding output.
model1 = emb_cnn_pre(INPUT_SHAPE, NUM_OUTPUTS)
model1.compile(optimizer='rmsprop', loss='mse')

start = time.time()
earlyStopping = EarlyStopping(monitor='loss', patience=3, verbose=0,
                              mode='auto', restore_best_weights=True)
history = model1.fit([x_pre], [y_pre], batch_size=36, epochs=EPOCHS,
                     callbacks=[earlyStopping], shuffle=False, verbose=0)
end = time.time()
pre_epochs[i, k] = len(history.history['loss'])
print('Pre-training stopped at epoch', pre_epochs[i, k], 'after', end - start)

model = emb_cnn_full(model1, INPUT_SHAPE, NUM_CLASSES)
custom_callback = CustomCallback(k, i, x_test, y_test, folder, True)
csv_logger = CSVLogger(folder + str(k) + '/log_' + "{:03d}".format(i) + '.csv')

folder_chkpts = 'checkpoints/' + folder + str(k) + '/' + str(i)
if not os.path.exists(folder_chkpts):
    os.makedirs(folder_chkpts)
filepath = folder_chkpts + "/model-{epoch:02d}" + "{:03d}".format(i) + ".hdf5"
checkpoints = ModelCheckpoint(filepath, verbose=0, save_best_only=False,
                              mode='auto', period=1)
callbacks = [custom_callback, csv_logger, checkpoints]

model.compile(loss={"class_output": 'categorical_crossentropy',
                    "emb_inout": 'binary_crossentropy'},
              loss_weights=[1, 1],
              optimizer='adam',
              metrics={"class_output": ['accuracy', acc_likelihood, acc_threshold],
                       "emb_inout": ['mse']})
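# The custom metrics acc_likelihood and acc_threshold referenced in the compile
# call above are not defined in this listing. A minimal sketch of plausible
# implementations (hypothetical -- the actual definitions may differ): one reads
# off the softmax probability assigned to the true class, the other counts a
# prediction as correct only when it is both right and above a confidence
# threshold.
import keras.backend as K

def acc_likelihood(y_true, y_pred):
    # Mean probability the model assigns to the true class.
    return K.mean(K.sum(y_true * y_pred, axis=-1))

def acc_threshold(y_true, y_pred, thresh=0.5):
    # Standard categorical accuracy, gated on prediction confidence.
    correct = K.cast(K.equal(K.argmax(y_true, axis=-1),
                             K.argmax(y_pred, axis=-1)), K.floatx())
    confident = K.cast(K.greater(K.max(y_pred, axis=-1), thresh), K.floatx())
    return K.mean(correct * confident)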
# Variant 2: one-hot training (embodied=False), enabling batch norm only from
# run 5 onward. The opening of this snippet was truncated; the pre-training
# fit call is reconstructed here from the parallel variants (an assumption).
history = model1.fit([x_pre], [y_pre],
                     batch_size=BATCH, epochs=EPOCHS,
                     callbacks=[earlyStopping], shuffle=True, verbose=0)
pre_epochs[i, k] = len(history.history['loss'])
print('Pre-training stopped at epoch', pre_epochs[i, k])

batch_norm = True
if i < 5:
    batch_norm = False
model = emb_cnn_full(model1, INPUT_SHAPE, NUM_CLASSES,
                     embodied=False, batch_norm=batch_norm)
custom_callback = CustomCallback(k, i, x_test, y_test, folder)
csv_logger = CSVLogger(folder + str(k) + '/log_' + "{:03d}".format(i) + '.csv')

folder_chkpts = 'checkpoints/' + folder + str(k) + '/' + str(i)
if not os.path.exists(folder_chkpts):
    os.makedirs(folder_chkpts)
filepath = folder_chkpts + "/model-{epoch:02d}" + "{:03d}".format(i) + ".hdf5"
# The original line was cut off mid-call; the remaining arguments mirror
# Variant 1.
checkpoints = ModelCheckpoint(filepath, verbose=0, save_best_only=False,
                              mode='auto', period=1)
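# CustomCallback is also external to this listing. Judging from its call sites,
# it takes the fold index k, the run index i, the test set, the output folder,
# and an optional embodied flag. A skeletal sketch (hypothetical; the real
# callback presumably records per-epoch test evaluations under `folder`):
from keras.callbacks import Callback

class CustomCallback(Callback):
    def __init__(self, k, i, x_test, y_test, folder, embodied=False):
        super(CustomCallback, self).__init__()
        self.k, self.i = k, i
        self.x_test, self.y_test = x_test, y_test
        self.folder = folder
        self.embodied = embodied

    def on_epoch_end(self, epoch, logs=None):
        # Evaluate on the held-out test set each epoch; the embodied variant
        # has two outputs, so evaluate() returns per-output losses/metrics.
        results = self.model.evaluate(self.x_test, self.y_test, verbose=0)
        print('fold', self.k, 'run', self.i, 'epoch', epoch, results)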
# Variant 3: embodied pre-training against an interleaved split of the
# embedding targets, with the same batch-norm schedule. The opening of this
# snippet was also truncated; the fit call is reconstructed by analogy with
# the other variants (the input list is an assumption).
history = model1.fit([x_pre],
                     [emb_split[k % div::div]],
                     batch_size=BATCH, epochs=EPOCHS,
                     callbacks=[earlyStopping], shuffle=True, verbose=0)
pre_epochs[i, k] = len(history.history['loss'])
print('Pre-training stopped at epoch', pre_epochs[i, k])

batch_norm = True
if i < 5:
    batch_norm = False
model = emb_cnn_full(model1, INPUT_SHAPE, NUM_CLASSES, batch_norm=batch_norm)
custom_callback = CustomCallback(k, i, x_test, y_test, folder, True)
csv_logger = CSVLogger(folder + str(k) + '/log_' + "{:03d}".format(i) + '.csv')

folder_chkpts = 'checkpoints/' + folder + str(k) + '/' + str(i)
if not os.path.exists(folder_chkpts):
    os.makedirs(folder_chkpts)
filepath = folder_chkpts + "/model-{epoch:02d}" + "{:03d}".format(i) + ".hdf5"
checkpoints = ModelCheckpoint(filepath, verbose=0, save_best_only=False,
                              mode='auto', period=1)  # trailing args restored from Variant 1
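# For reference, emb_split[k % div::div] above selects every div-th row of the
# embedding array starting at offset k % div, i.e. an interleaved fold split.
# A toy illustration with hypothetical sizes:
import numpy as np

emb_split_demo = np.arange(12).reshape(6, 2)  # 6 embedding rows
div, k = 3, 4
print(emb_split_demo[k % div::div])  # rows 1 and 4: offset k % div = 1, stride 3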
# Variant 4: one-hot baseline (embodied=False), saving a diagram of the
# full model.
model1 = emb_cnn_pre(INPUT_SHAPE, NUM_OUTPUTS)
model1.compile(optimizer='rmsprop', loss='mse')
earlyStopping = EarlyStopping(monitor='loss', patience=3, verbose=0,
                              mode='auto', restore_best_weights=True)
history = model1.fit([x_pre], [y_pre], batch_size=36, epochs=EPOCHS,
                     callbacks=[earlyStopping], shuffle=False, verbose=0)
pre_epochs[i, k] = len(history.history['loss'])
print('Pre-training stopped at epoch', pre_epochs[i, k])

model = emb_cnn_full(model1, INPUT_SHAPE, NUM_CLASSES, embodied=False)
plot_model(model, to_file=folder + 'one-hot_pretraining.png')
custom_callback = CustomCallback(k, i, x_test, y_test, folder)
csv_logger = CSVLogger(folder + str(k) + '/log_' + "{:03d}".format(i) + '.csv')

folder_chkpts = 'checkpoints/' + folder + str(k) + '/' + str(i)
if not os.path.exists(folder_chkpts):
    os.makedirs(folder_chkpts)
filepath = folder_chkpts + "/model-{epoch:02d}" + "{:03d}".format(i) + ".hdf5"
checkpoints = ModelCheckpoint(filepath, verbose=0, save_best_only=False,
                              mode='auto', period=1)
callbacks = [custom_callback, csv_logger, checkpoints]

# The compile call was truncated in the source after the loss dict; the
# optimizer and class-output metrics below are assumed to mirror Variant 1.
model.compile(loss={"class_output": 'categorical_crossentropy'},
              optimizer='adam',
              metrics={"class_output": ['accuracy', acc_likelihood, acc_threshold]})
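# Each variant presumably ends with a fit on the full model using the
# callbacks assembled above; a sketch of that step, assuming the training
# arrays are named x_train / y_train (not shown in this listing):
history = model.fit(x_train, y_train,
                    batch_size=BATCH, epochs=EPOCHS,
                    callbacks=callbacks, shuffle=True, verbose=0)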