# Imports assumed by this section (create_classifier, get_CIFAR10_color,
# get_CIFAR100_color, and Phase2Kernels are defined elsewhere in this repo).
# tf.keras on TF 2.x is assumed, where Adam/SGD still accept the legacy
# `lr=` argument used throughout this file.
import datetime
import sys
import time

import numpy as np
import cupy as cp
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.losses import sparse_categorical_crossentropy
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import accuracy_score, log_loss
from sklearn.utils import shuffle


def run_load_finetune_models(filename_model, filename_conv_weights):
    print("Running process to load and finetune a model:")
    batch_size = 16
    nb_epoch = 200

    print('Loading model from {}'.format(filename_model))
    model = create_classifier()
    model.load_weights(filename_model)

    # Swap in the externally computed conv kernel, keeping the stored bias.
    conv_bias = model.layers[0].get_weights()[1]
    print('Loading conv weights from {}'.format(filename_conv_weights))
    conv_kernel = np.load(filename_conv_weights)
    model.layers[0].set_weights([conv_kernel, conv_bias])

    # Freeze the conv front end so only the suffix layers are fine-tuned;
    # the freeze must be set before compile() to take effect.
    model.layers[0].trainable = False
    model.compile(optimizer=Adam(lr=5.0e-4),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_color()
    callbacks = [
        #EarlyStopping(monitor='val_loss', patience=15, verbose=0),
    ]
    print("Fine-tuning model...")
    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              shuffle=True,
              verbose=1,
              validation_data=(X_val, y_val),
              callbacks=callbacks)

    print('Saving weights of model...')
    now = datetime.datetime.now()
    filename_model_step3 = "weights/model_color_" + str(
        now.strftime("%Y-%m-%d-%H-%M")) + ".h5"
    model.save_weights(filename_model_step3)
    print("Done.")

    print('Making predictions for model')
    predictions_valid = model.predict(X_val, batch_size=batch_size, verbose=2)
    score = log_loss(y_val, predictions_valid)
    print('Validation log_loss: ', score)
    info_string = 'loss_' + str(score) + '_ep_' + str(nb_epoch)
    print(info_string)

    print('Start Test:')
    test_prediction = model.predict(X_test, batch_size=batch_size, verbose=2)
    score_test = log_loss(y_test, test_prediction)
    print('Test Score log_loss: ', score_test)
    score_test2 = accuracy_score(y_test, np.argmax(test_prediction, axis=1))
    print('Test Score accuracy: ', score_test2)
    return filename_model_step3, score_test, score_test2
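
# --- Illustrative check (sketch, not part of the original pipeline) ---
# A minimal way to verify that the frozen conv front end really stayed fixed
# during fine-tuning. `check_conv_frozen` is a hypothetical helper added here
# for illustration; its arguments are the conv-weights file passed into
# run_load_finetune_models and the checkpoint it returns.
def check_conv_frozen(filename_model_step3, filename_conv_weights):
    model = create_classifier()
    model.load_weights(filename_model_step3)
    kernel_after = model.layers[0].get_weights()[0]
    kernel_loaded = np.load(filename_conv_weights)
    # With layers[0].trainable = False set before compile(), the kernel
    # should be unchanged up to serialization precision.
    assert np.allclose(kernel_after, kernel_loaded), "conv kernel changed!"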
def run_create_and_test_models():
    print("Running process to create and train a model:")
    batch_size = 16
    nb_epoch = 50

    X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_color()
    model = create_classifier()
    callbacks = [
        #EarlyStopping(monitor='val_loss', patience=8, verbose=0),
    ]
    print("Fitting model...")
    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              shuffle=True,
              verbose=1,
              validation_data=(X_val, y_val),
              callbacks=callbacks)

    print('Saving weights of model...')
    now = datetime.datetime.now()
    filename_model = "weights/model_color_" + str(
        now.strftime("%Y-%m-%d-%H-%M")) + ".h5"
    model.save_weights(filename_model)
    print("Done.")

    print('Making predictions for model')
    predictions_valid = model.predict(X_val, batch_size=batch_size, verbose=2)
    score = log_loss(y_val, predictions_valid)
    print('Validation log_loss: ', score)
    info_string = 'loss_' + str(score) + '_ep_' + str(nb_epoch)
    print(info_string)

    print('Start Test:')
    test_prediction = model.predict(X_test, batch_size=batch_size, verbose=2)
    score_test = log_loss(y_test, test_prediction)
    print('Test Score log_loss: ', score_test)
    score_test2 = accuracy_score(y_test, np.argmax(test_prediction, axis=1))
    print('Test Score accuracy: ', score_test2)

    # Save the first-layer conv kernel separately so later steps can reload it.
    weights_step1 = model.get_weights()
    conv_weights = weights_step1[0]
    print("Saving conv weights into file...")
    now = datetime.datetime.now()
    filename_weights_step1 = "weights/conv_weights_color_" + str(
        now.strftime("%Y-%m-%d-%H-%M")) + ".npy"
    np.save(filename_weights_step1, conv_weights)
    print("Done.")
    return filename_model, filename_weights_step1, score_test, score_test2
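
# --- Assumed helper (sketch) ---
# create_classifier() is defined elsewhere in the repo; the functions in this
# file only rely on layers[0] being a Conv2D whose [kernel, bias] pair can be
# swapped via set_weights(), and on the model being compiled (the function
# above calls fit() without compiling). A minimal stand-in consistent with
# that contract might look like the following; the layer sizes are
# assumptions, not the original architecture.
def create_classifier_sketch(num_classes=10):
    model = keras.models.Sequential([
        keras.layers.Conv2D(32, (3, 3), padding='same',
                            activation='relu', input_shape=(32, 32, 3)),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Flatten(),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(num_classes, activation='softmax'),
    ])
    model.compile(optimizer=Adam(lr=5.0e-4),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model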
def run_test_loading_models(filename_model, filename_conv_weights=None):
    print("Running process to load and test a model:")
    batch_size = 16

    print('Loading weights from {}'.format(filename_model))
    model = create_classifier()
    model.load_weights(filename_model)
    if filename_conv_weights:
        # Optionally swap in an external conv kernel, keeping the stored bias.
        conv_bias = model.layers[0].get_weights()[1]
        print('Loading conv weights from {}'.format(filename_conv_weights))
        conv_kernel = np.load(filename_conv_weights)
        model.layers[0].set_weights([conv_kernel, conv_bias])

    print('Start Test')
    X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_color()
    test_prediction = model.predict(X_test, batch_size=batch_size, verbose=2)
    score = log_loss(y_test, test_prediction)
    print('Test Score log_loss: ', score)
    score_test2 = accuracy_score(y_test, np.argmax(test_prediction, axis=1))
    print('Test Score accuracy: ', score_test2)
    return score, score_test2
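
# Example usage (the timestamped file names are hypothetical):
#   run_test_loading_models("weights/model_color_2020-01-01-12-00.h5")
#   run_test_loading_models("weights/model_color_2020-01-01-12-00.h5",
#                           "weights/conv_weights_color_2020-01-01-12-00.npy")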
def run_end_to_end_models(filename_model, filename_phases):
    X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_color()
    print("Starting end-to-end co-training of phase profile and suffix layers...")
    print("Training dataset shape: ", X_train.shape)
    print("Training label shape:", y_train.shape)

    print('Loading model from {}'.format(filename_model))
    model = create_classifier()
    model.load_weights(filename_model)

    EPOCHS = 20
    BS = 64
    INIT_LR = 5e-4
    opt = Adam(lr=INIT_LR)
    PhaseLR = 5e-6

    print('Loading phase profile from {}'.format(filename_phases))
    phi_in = np.load(filename_phases)
    phase2Kernel = Phase2Kernels(phi_in)

    def step(X, y):
        # Render the current phase profile into a conv kernel and load it
        # into the first layer, keeping the stored bias.
        weights = phase2Kernel.forward()
        conv_bias = model.layers[0].get_weights()[1]
        model.layers[0].set_weights([weights, conv_bias])

        # Record the forward pass so we can take gradients.
        with tf.GradientTape() as tape:
            # Make a prediction using the model and then calculate the loss.
            pred = model(X)
            print("Pred shape:", pred.shape)
            loss = sparse_categorical_crossentropy(y, pred)
        accuracy = accuracy_score(y, np.argmax(pred, axis=1))
        print("loss", tf.math.reduce_mean(loss))
        print("accuracy:", accuracy)

        # Compute gradients outside the tape context so the gradient
        # computation itself is not recorded.
        grads = tape.gradient(loss, model.trainable_variables)
        print("gradients shapes: ", grads[0].shape, grads[1].shape)

        # Chain the conv-kernel gradient back through the phase model and
        # take a gradient step on the phase profile itself.
        final_grad = phase2Kernel.backward(grads[0])
        phase2Kernel.phi -= PhaseLR * cp.asnumpy(final_grad)
        # Update the network parameters.
        opt.apply_gradients(zip(grads, model.trainable_variables))
        # print(PhaseLR * cp.asnumpy(final_grad))
        # time.sleep(1)

    numUpdates = int(X_train.shape[0] / BS)
    # loop over the number of epochs
    for epoch in range(0, EPOCHS):
        # show the current epoch number
        print("[INFO] starting epoch {}/{}...".format(epoch + 1, EPOCHS))
        X_train, y_train = shuffle(X_train, y_train, random_state=0)
        sys.stdout.flush()
        epochStart = time.time()

        # loop over the data in batch-size increments
        for i in range(0, numUpdates):
            # determine starting and ending slice indexes for the current batch
            print("starting batch: {} of epoch {}".format(i, epoch))
            start = i * BS
            end = start + BS
            # take a step
            step(X_train[start:end], y_train[start:end])

        # show timing information for the epoch
        epochEnd = time.time()
        elapsed = (epochEnd - epochStart) / 60.0
        print("took {:.4} minutes".format(elapsed))

    print("Saving co-trained phases and network parameters...")
    now = datetime.datetime.now()
    filename_model_step4 = "weights/model_co-trained_" + str(
        now.strftime("%Y-%m-%d-%H-%M")) + ".h5"
    model.save_weights(filename_model_step4)
    filename_phases_step4 = "phases/phases_co-trained" + str(
        now.strftime("%Y-%m-%d-%H-%M")) + ".npy"
    np.save(filename_phases_step4, np.asarray(phase2Kernel.phi))
    print("Done.")
    print("Co-training finished!")

    print('Start Test:')
    test_prediction = model.predict(X_test, batch_size=BS, verbose=2)
    score_test = log_loss(y_test, test_prediction)
    print('Test Score log_loss: ', score_test)
    score_test2 = accuracy_score(y_test, np.argmax(test_prediction, axis=1))
    print('Test Score accuracy: ', score_test2)
    return filename_model_step4, filename_phases_step4, score_test, score_test2
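
# --- Assumed interface (sketch) ---
# Phase2Kernels is defined elsewhere in the repo. The co-training loop above
# only assumes that forward() renders the phase profile `phi` into a conv
# kernel of the shape expected by model.layers[0], and that
# backward(kernel_grad) chains the kernel gradient back to a gradient w.r.t.
# phi, returned as a CuPy array (hence the cp.asnumpy() above). The stand-in
# below is shape-only and hypothetical; the real class implements the optical
# phase-to-kernel physics.
class Phase2KernelsSketch:
    def __init__(self, phi):
        self.phi = phi  # phase profile, a NumPy array

    def forward(self):
        # Hypothetical: map phi to a conv kernel (fixed dummy shape here).
        return np.zeros((3, 3, 3, 32), dtype=np.float32)

    def backward(self, kernel_grad):
        # Hypothetical: chain rule from kernel space back to phase space.
        return cp.zeros_like(cp.asarray(self.phi))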
def run_create_and_test_models_cifar100():
    # Renamed from run_create_and_test_models: the CIFAR-10 function above
    # already uses that name, and a second definition would silently shadow it.
    print("Running process to create and train a model:")
    batch_size = 16
    nb_epoch = 200
    learning_rate = 5.0e-3
    lr_decay = 1e-6
    lr_drop = 20

    X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR100_color()
    model = create_classifier()

    def lr_scheduler(epoch):
        # Halve the learning rate every lr_drop epochs.
        return learning_rate * (0.5**(epoch // lr_drop))

    reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)

    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by dataset std
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=15,  # randomly rotate images (degrees, 0 to 180)
        width_shift_range=0.1,  # random horizontal shift (fraction of width)
        height_shift_range=0.1,  # random vertical shift (fraction of height)
        horizontal_flip=True,  # randomly flip images horizontally
        vertical_flip=False)  # do not flip images vertically
    # Compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(X_train)

    sgd = SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=False)
    model.compile(optimizer=sgd,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    print("Fitting model...")
    model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                        steps_per_epoch=X_train.shape[0] // batch_size,
                        epochs=nb_epoch,
                        validation_data=(X_val, y_val),
                        callbacks=[reduce_lr],
                        verbose=1)

    print('Saving weights of model...')
    now = datetime.datetime.now()
    filename_model = "weights/model_color_" + str(
        now.strftime("%Y-%m-%d-%H-%M")) + ".h5"
    model.save_weights(filename_model)
    print("Done.")

    #print('Making predictions for model')
    #predictions_valid = model.predict(X_val, batch_size=batch_size, verbose=2)
    #score = log_loss(y_val, predictions_valid)
    #print('Score log_loss: ', score)
    #info_string = 'loss_' + str(score) + '_ep_' + str(nb_epoch)
    #print(info_string)

    print('Start Test:')
    test_prediction = model.predict(X_test, batch_size=batch_size, verbose=2)
    score_test = log_loss(y_test, test_prediction)
    print('Test Score log_loss: ', score_test)
    score_test2 = accuracy_score(y_test, np.argmax(test_prediction, axis=1))
    print('Test Score accuracy: ', score_test2)

    conv_weights = model.layers[0].get_weights()[0]
    print("Saving conv weights into file...")
    now = datetime.datetime.now()
    filename_weights_step1 = "weights/conv_weights_color_" + str(
        now.strftime("%Y-%m-%d-%H-%M")) + ".npy"
    np.save(filename_weights_step1, conv_weights)
    print("Done.")
    return filename_model, filename_weights_step1, score_test, score_test2
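
# --- Example driver (sketch) ---
# One plausible way to chain the steps above, assuming the weights/ and
# phases/ directories exist and an initial phase profile has been saved;
# "phases/phases_init.npy" is a hypothetical path, not from the original code.
if __name__ == "__main__":
    # Step 1: train from scratch and dump the first-layer conv kernel.
    filename_model, filename_weights, _, _ = run_create_and_test_models()
    # Optional: sanity-check the saved checkpoint with the dumped kernel.
    run_test_loading_models(filename_model, filename_weights)
    # Step 3: reload, swap in the conv kernel, freeze it, and fine-tune.
    filename_model_step3, _, _ = run_load_finetune_models(
        filename_model, filename_weights)
    # Step 4: co-train the phase profile and the suffix layers end to end.
    run_end_to_end_models(filename_model_step3, "phases/phases_init.npy")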