def run_shap(start, end):
    import warnings
    warnings.filterwarnings('ignore')
    warnings.filterwarnings('always')

    # load images
    interval = 5
    imgs = v.readImages(False, start, end)
    print('length of imgs = ' + str(len(imgs)))

    imgSize = 64

    # reshape the images into (N, imgSize * imgSize)
    imgs = np.reshape(imgs, (end - start, imgSize * imgSize)).astype(float)
    print('shape of imgs = ' + str(np.shape(imgs)))

    # load pre-trained model
    trainI = [[0] * 4096]
    trainO = [[0] * 2]

    f = open('car_model_config.txt', 'r')
    modelInfo = f.readlines()
    f.close()

    NN = helper.getNN(modelInfo, trainI, trainO)  # Neural Network
    op = helper.getOptimizer(modelInfo)           # optimizer
    loss = helper.getLoss(modelInfo)              # loss

    model = DGPU.deepLearningModel('model', op, loss, True)
    model.load_weights('model.h5')

    # explain the images, (interval) images at a time
    for i in range(int((end - start) / interval)):
        imgs_sub = imgs[i * interval:(i + 1) * interval]

        e = shap.DeepExplainer(model, imgs_sub)
        shap_values = e.shap_values(imgs_sub)

        # reshape shap_values and imgs
        shap_values = np.reshape(shap_values, (2, interval, imgSize, imgSize))
        imgs_sub = np.reshape(imgs_sub, (interval, imgSize, imgSize, 1))

        # plot the feature attributions for each output class
        shap.image_plot(shap_values[0], -imgs_sub, show=False)
        plt.savefig('algo_0_shap_' + str(start + i * interval) + '_0.png')

        shap.image_plot(shap_values[1], -imgs_sub, show=False)
        plt.savefig('algo_0_shap_' + str(start + i * interval) + '_1.png')
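# Example usage (a minimal sketch; it assumes 'model.h5', 'car_model_config.txt'
# and the images read by v.readImages are available in the working directory,
# and the index range 0..10 is purely hypothetical):
#
#     run_shap(0, 10)
#
# For each group of 5 images this writes two SHAP attribution plots,
# e.g. algo_0_shap_0_0.png and algo_0_shap_0_1.png, one per output class.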
def deepLearning(inputFileName, outputFileName, testFileName, testOutputFileName,
                 testOutputReal, test_report, validRate, valid_report, modelConfig,
                 deviceName, epoch, printed, modelName):

    # You can do only 'training' or 'testing' by setting some arguments to None.
    #   inputFileName == None and outputFileName == None -> testing only
    #   testFileName == None                              -> training only
    # for validation, you can set testFileName to None (validation uses training data only)

    ##############################
    ##                          ##
    ##       0. READ DATA       ##
    ##                          ##
    ##############################

    # read files
    print('[00] reading train input / train output / test input files...')

    trainI = None
    trainO = None
    testI = None

    # input train data
    if inputFileName != None:
        trainI = helper.getDataFromFile(inputFileName)

    # output train data (Sigmoid applied)
    if outputFileName != None:
        trainO = helper.getDataFromFile(outputFileName)

    # test input data (set nullValue to 0)
    # set testI (array) as testFileName, if testFileName is an array
    if isinstance(testFileName, list):
        testI = testFileName

    # set testI (array) as test data from the file named as testFileName
    else:
        if testFileName != None:
            testI = helper.getDataFromFile(testFileName)

    # read configuration file (to get normalization info)
    print('[01] reading configuration files...')

    f = open('config.txt', 'r')
    fl = f.readlines()
    f.close()
    for i in range(len(fl)):
        fl[i] = fl[i].split('\n')[0]

    normalizeName = None
    validInterval = 1
    testSizeOnce = 0  # max test data size at once (for both testing and validation)

    # extract configuration
    #   trainInput  : train input data file name
    #   trainOutput : train output data file name
    #   testInput   : test input data file name
    for i in range(len(fl)):
        configSplit = fl[i].split('\n')[0].split(' ')  # split

        # normalize info file name
        if configSplit[0] == 'normalizeName':
            normalizeName = configSplit[1]
            if normalizeName == 'None':
                normalizeName = None

        # validation interval
        elif configSplit[0] == 'validInterval':
            validInterval = int(configSplit[1])

        # test input size at once
        elif configSplit[0] == 'testSize':
            testSizeOnce = int(configSplit[1])

    # read normalization info file
    if normalizeName != None and trainO != None:
        print('[02] calculating and writing average and stddev...')

        trainOutputAvg = np.mean(trainO, axis=0)     # average of train output value
        trainOutputStddev = np.std(trainO, axis=0)   # stddev of train output value

        # normalize training output data and write avg and stddev
        writeNormalizeInfo(trainO, normalizeName)
    else:
        print('[03] Reading average and stddev failed.')
        trainOutputAvg = None
        trainOutputStddev = None

    # apply sigmoid to train output data
    if trainO != None:
        print('[04] applying sigmoid to train output data...')
        for i in range(len(trainO)):
            for j in range(len(trainO[0])):
                trainO[i][j] = helper.sigmoid(trainO[i][j])

    # print input, output, and test data
    if printed != 0:
        if trainI != None:
            print('\n ---- original input data (' + str(len(trainI)) + ') ----\n')
            for i in range(len(trainI)):
                print(helper.roundedArray(trainI[i], 6))

        if trainO != None:
            print('\n ---- original output data (' + str(len(trainO)) + ') ----\n')
            for i in range(len(trainO)):
                print(helper.roundedArray(trainO[i], 6))

        if testI != None:
            print('\n ---- original test data (' + str(len(testI)) + ') ----\n')
            for i in range(len(testI)):
                print(helper.roundedArray(testI[i], 6))

    ##############################
    ##                          ##
    ##   1. READ MODEL CONFIG   ##
    ##                          ##
    ##############################

    # model design using the model configuration file
    # activation function of final layer is always 'sigmoid'
    print('[10] reading model configuration...')

    f = open(modelConfig, 'r')
    modelInfo = f.readlines()
    f.close()

    ##############################
    ##                          ##
    ##   2A. TRAINING / TEST    ##
    ##                          ##
    ##############################

    # if the model already exists, input the test input to the NN and get the result
    # if the model does not exist, newly train the NN using training input and output data,
    # then do the testing procedure
    if validRate == 0:

        # NN and optimizer
        print('[11] obtaining neural network and optimizer info...')

        if trainI != None and trainO != None:
            NN = helper.getNN(modelInfo, trainI, trainO)  # Neural Network
            op = helper.getOptimizer(modelInfo)           # optimizer
            loss = helper.getLoss(modelInfo)              # loss

        try:  # try reading test.h5 and test.json
            print('[20] reading model [ ' + modelName + ' ]...')
            newModel = deepLearning_GPU.deepLearningModel(modelName, op, loss, True)
            testO = getTestResult(newModel, testI, testSizeOnce)

        except:  # do learning if test.h5 and test.json do not exist
            print('[21] learning...')

            # False and True are dataPrint (whether to print training data)
            # and modelPrint (whether to print the model summary), respectively
            print(trainO[0])
            deepLearning_GPU.deepLearning(NN, op, 'mean_squared_error', trainI, trainO,
                                          modelName, epoch, False, True, deviceName)

            print('[22] reading learned model [ ' + modelName + ' ]...')
            newModel = deepLearning_GPU.deepLearningModel(modelName, op, loss, True)

            # get test output if testI is not None
            if testI == None:
                print('test input file name (testInput) is None.')
                return
            else:
                testO = getTestResult(newModel, testI, testSizeOnce)

        # test
        print('[23] testing...')

        # estimate
        # inverse sigmoid
        for i in range(len(testO)):           # for each output data
            for j in range(len(testO[0])):    # for each value of output data
                testO[i][j] = helper.invSigmoid(testO[i][j])

        # check if test output exists, before writing test output file
        try:
            test = open(testOutputFileName, 'r')
            test.close()
            print(' **** Delete test output file (' + testOutputFileName + ') first. ****')
            return
        except:
            pass

        # write to file
        print('[24] writing test result to file [ ' + testOutputFileName + ' ]...')

        # open file
        f = open(testOutputFileName, 'a')

        result = ''
        for i in range(len(testO)):           # for each output data
            if i % 1000 == 0:
                print(str(i) + ' / ' + str(len(testO)))

            for j in range(len(testO[0])):    # for each value of output data
                result += str(testO[i][j]) + '\t'
            result += '\n'

            # flush every 10,000 steps
            if i % 10000 == 0:
                f.write(result)
                result = ''

        # final append
        f.write(result)
        f.close()

        ##############################
        ##                          ##
        ## 2A+. WRITE TEST REPORT   ##
        ##                          ##
        ##############################

        # compare prediction output data with real output data and write report
        if testOutputReal != None:
            try:
                writeTestResult(test_report, testOutputFileName, testOutputReal,
                                normalizeName, trainOutputAvg, trainOutputStddev)
            except:
                pass

    ##############################
    ##                          ##
    ##     2B. VALIDATION       ##
    ##                          ##
    ##############################

    # validation (if validation rate > 0)
    else:

        ##############################
        ##                          ##
        ##   2B-0. DATA TO VALID    ##
        ##                          ##
        ##############################

        # make index-list of validation data
        print('[28] deciding data to validate...')

        inputSize = len(trainI)
        validSize = int(inputSize * validRate)
        trainSize = inputSize - validSize

        validArray = []
        for i in range(inputSize):
            validArray.append(0)

        while sum(validArray) < validSize:
            # start index for validation
            validStartIndex = int(random.randint(0, inputSize - 1) / validInterval) * validInterval

            # set data[validStartIndex : validStartIndex + validInterval] as validation data
            for i in range(validStartIndex, validStartIndex + validInterval):
                validArray[i] = 1

        # make train and validation data
        # _TrainO, _ValidO : sigmoid((originalOutput - meanOriginalOutput)/stdOriginalOutput)
        _TrainI = []  # training input
        _TrainO = []  # training output
        _ValidI = []  # valid input
        _ValidO = []  # valid output

        for i in range(inputSize):
            if validArray[i] == 0:  # training data
                _TrainI.append(trainI[i])
                _TrainO.append(trainO[i])
            else:                   # validation data
                _ValidI.append(trainI[i])
                _ValidO.append(trainO[i])

        ##############################
        ##                          ##
        ## 2B-1. TRAIN (MAKE MODEL) ##
        ##                          ##
        ##############################

        # model name for validation
        newModelName = modelName + 'Valid'
        print('[29] training [ ' + newModelName + ' ]...')

        # NN and optimizer
        NN = helper.getNN(modelInfo, _TrainI, _TrainO)  # Neural Network
        op = helper.getOptimizer(modelInfo)             # optimizer
        loss = helper.getLoss(modelInfo)                # loss

        # output for validation
        try:  # try reading the validation model
            validModel = deepLearning_GPU.deepLearningModel(newModelName, op, loss, True)
            _predValidO = getTestResult(validModel, _ValidI, testSizeOnce)

        except:  # do learning if the validation model does not exist
            deepLearning_GPU.deepLearning(NN, op, loss, _TrainI, _TrainO,
                                          newModelName, epoch, False, True, deviceName)

            validModel = deepLearning_GPU.deepLearningModel(newModelName, op, loss, True)
            _predValidO = getTestResult(validModel, _ValidI, testSizeOnce)

        ##############################
        ##                          ##
        ##    2B-2. VALIDATION      ##
        ##                          ##
        ##############################

        print('[30] validating and writing result [ ' + valid_report + ' ]...')

        MAE = 0       # mean absolute error
        MSE = 0       # mean square error
        accuracy = 0  # accuracy

        # inverse sigmoid for PREDICTED validation output
        for i in range(len(_predValidO)):           # for each output data
            for j in range(len(_predValidO[0])):    # for each value of output data
                _predValidO[i][j] = helper.invSigmoid(_predValidO[i][j])

        # inverse sigmoid for REAL validation output
        for i in range(len(_ValidO)):               # for each output data
            for j in range(len(_ValidO[0])):        # for each value of output data
                _ValidO[i][j] = helper.invSigmoid(_ValidO[i][j])

        # denormalize if normalization info is available (denormalize whole trainO)
        denormalize(normalizeName, len(_predValidO), len(_predValidO[0]), _predValidO,
                    trainOutputAvg, trainOutputStddev)
        denormalize(normalizeName, len(_ValidO), len(_ValidO[0]), _ValidO,
                    trainOutputAvg, trainOutputStddev)

        # compute error
        validCount = 0
        resultToWrite = ''
        outputCols = len(_ValidO[0])

        # set edgeitems and linewidth as (effectively) infinite
        np.set_printoptions(edgeitems=10000, linewidth=1000000)

        # for each data
        for i in range(inputSize):
            if i % 1000 == 0:
                print(str(i) + ' / ' + str(inputSize))

            # validation for data whose value of valid array is 1
            if validArray[i] == 1:

                # compute MAE and MSE
                for j in range(outputCols):
                    MAE += abs(_ValidO[validCount][j] - _predValidO[validCount][j])
                    MSE += pow(_ValidO[validCount][j] - _predValidO[validCount][j], 2)

                # compute accuracy
                if helper.argmax(_ValidO[validCount]) == helper.argmax(_predValidO[validCount]):
                    accuracy += 1

                # print and write result
                newResultToWrite = ('[' + str(i) + '] pred = ' +
                                    str(np.round_(_predValidO[validCount], 6)) + ', real = ' +
                                    str(np.round_(_ValidO[validCount], 6)))
                resultToWrite += newResultToWrite + '\n'

                validCount += 1

        # recover edgeitems and linewidth (numpy defaults)
        np.set_printoptions(edgeitems=3, linewidth=75)

        # get the average of MAE, MSE and accuracy
        MAE /= (validSize * outputCols)
        MSE /= (validSize * outputCols)
        accuracy /= validSize

        # print evaluation result
        resultSummary = '----------------\n'
        resultSummary += 'input size : ' + str(inputSize) + '\n'
        resultSummary += 'train size : ' + str(trainSize) + '\n'
        resultSummary += 'valid size : ' + str(validSize) + '\n'
        resultSummary += 'MAE        : ' + str(round(MAE, 6)) + '\n'
        resultSummary += 'MSE        : ' + str(round(MSE, 6)) + '\n'
        resultSummary += 'accuracy   : ' + str(round(accuracy, 6)) + '\n'
        resultSummary += 'pred avg   : ' + str(np.average(_predValidO, axis=0)) + '\n'
        resultSummary += 'real avg   : ' + str(np.average(_ValidO, axis=0)) + '\n'
        print(resultSummary)
        resultToWrite += resultSummary

        # write result file
        fvalid = open(valid_report, 'w')
        fvalid.write(resultToWrite)
        fvalid.close()

        # return final result
        return (MAE, MSE, accuracy,
                np.average(_predValidO, axis=0), np.average(_ValidO, axis=0))
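# Example call (a sketch only; the file names below are hypothetical and the
# real ones are normally taken from config.txt in this pipeline):
#
#     deepLearning('train_input.txt', 'train_output.txt', 'test_input.txt',
#                  'test_output.txt', None, 'test_report.txt',
#                  0.15, 'valid_report.txt', 'model_config.txt',
#                  '/gpu:0', 5, 0, 'test')
#
# With validRate = 0.15 the function skips testing, trains a 'testValid' model
# on the remaining 85% of the data, and returns
# (MAE, MSE, accuracy, predicted average, real average).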
def deepLearning(inputFileName, outputFileName, testFileName, testOutputFileName,
                 imgHeight, deviceName, epoch, printed, modelName):

    # read files
    trainI = helper.getDataFromFile(inputFileName, imgHeight)  # input train data
    trainO = helper.getDataFromFile(outputFileName, None)      # output train data (Sigmoid applied)
    testI = helper.getDataFromFile(testFileName, imgHeight)    # test input data (set nullValue to 0)

    # apply sigmoid to train output data
    for i in range(len(trainO)):
        for j in range(len(trainO[0])):
            trainO[i][j] = helper.sigmoid(trainO[i][j])

    # flatten trainI: (N, size, size) -> (N, size*size)
    for i in range(len(trainI)):
        trainI[i] = helper.flatten(trainI[i])

    print('')
    print(' ---- number of rows ----')
    print('input size: ' + str(len(trainI)))
    print('output size: ' + str(len(trainO)))
    print('test size: ' + str(len(testI)))
    print('')

    # print input, output, and test data
    if printed != 0:
        print('\n ---- original input data ----\n')
        for i in range(len(trainI)):
            print(helper.roundedArray(trainI[i], 6))

        print('\n ---- original output data ----\n')
        for i in range(len(trainO)):
            print(helper.roundedArray(trainO[i], 6))

        print('\n ---- original test data ----\n')
        for i in range(len(testI)):
            print(helper.roundedArray(testI[i], 6))

    # model design using deepLearning_model.txt
    # activation function of final layer is always 'sigmoid'
    f = open('deepLearning_model.txt', 'r')
    modelInfo = f.readlines()
    f.close()

    # NN and optimizer
    NN = helper.getNN(modelInfo, trainI, trainO)  # Neural Network
    op = helper.getOptimizer(modelInfo)           # optimizer

    try:  # try reading test.h5 and test.json
        newModel = deepLearning_GPU.deepLearningModel(modelName, True)
        testOutput = deepLearning_GPU.modelOutput(newModel, testI)

    except:  # do learning if test.h5 and test.json do not exist
        print('\n <<<< LEARNING >>>>\n')

        # False and True are dataPrint (whether to print training data)
        # and modelPrint (whether to print the model summary), respectively
        print(trainO[0])
        deepLearning_GPU.deepLearning(NN, op, 'mean_squared_error', trainI, trainO,
                                      modelName, epoch, False, True, deviceName)

        newModel = deepLearning_GPU.deepLearningModel(modelName, True)
        testOutput = deepLearning_GPU.modelOutput(newModel, testI)

    # test
    print('\n <<<< TEST >>>>\n')

    # estimate
    outputLayer = testOutput[len(testOutput) - 1]

    # inverse sigmoid
    for i in range(len(outputLayer)):          # for each output data
        for j in range(len(outputLayer[0])):   # for each value of output data
            outputLayer[i][j] = helper.invSigmoid(outputLayer[i][j])

    # write to file
    result = ''
    print('\n<<<< output layer >>>>')

    for i in range(len(outputLayer)):          # for each output data
        for j in range(len(outputLayer[0])):   # for each value of output data
            result += str(outputLayer[i][j]) + '\t'
        result += '\n'
    print(result)

    f = open(testOutputFileName.split('.')[0] + '_prediction.txt', 'w')
    f.write(result)
    f.close()
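# Example call (sketch; the file names and the image height of 64 are hypothetical):
#
#     deepLearning('train_input.txt', 'train_output.txt', 'test_input.txt',
#                  'test_output.txt', 64, '/gpu:0', 10, 0, 'test')
#
# Predictions are written to the test output file's stem with '_prediction.txt'
# appended, e.g. 'test_output_prediction.txt'.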
def run_gradcam(start, end):
    import warnings
    warnings.filterwarnings('ignore')
    warnings.filterwarnings('always')

    # config
    gradcam_write = False
    multiple = 0.15

    # https://stackoverflow.com/questions/66221788/tf-gradients-is-not-supported-when-eager-execution-is-enabled-use-tf-gradientta/66222183
    tf.compat.v1.disable_eager_execution()

    # load images
    imgs = v.readImages(False, start, end)
    print('length of imgs = ' + str(len(imgs)))

    imgSize = 64

    # reshape the images into (N, imgSize * imgSize)
    imgs = np.reshape(imgs, (end - start, imgSize * imgSize)).astype(float)
    print('shape of imgs = ' + str(np.shape(imgs)))

    # load pre-trained model
    trainI = [[0] * 4096]
    trainO = [[0] * 2]

    f = open('car_model_config.txt', 'r')
    modelInfo = f.readlines()
    f.close()

    NN = helper.getNN(modelInfo, trainI, trainO)  # Neural Network
    op = helper.getOptimizer(modelInfo)           # optimizer
    loss = helper.getLoss(modelInfo)              # loss

    model = DGPU.deepLearningModel('model', op, loss, True)
    model.load_weights('model.h5')

    # predict using the model
    predictions = model.predict(imgs)
    print('shape of predictions = ' + str(np.shape(predictions)))

    for i in range(len(predictions)):
        print('image ' + str(start + i) + ' -> ' +
              str(round(helper.invSigmoid(predictions[i][0]) * 100, 4)) + '% / ' +
              str(round(helper.invSigmoid(predictions[i][1]) * 100, 4)) + '%')

    # explanation of each image
    for i in range(len(predictions)):
        print('processing image ' + str(start + i))

        predicted_class = np.argmax(predictions[i])
        layerNos = [2, 4, 6, 8]
        layerNames = ["conv2d", "conv2d_1", "conv2d_2", "conv2d_3"]
        reshaped_img = imgs[i].reshape((1, imgSize, imgSize, 1))

        for j in range(4):
            cam, heatmap = grad_cam(model, reshaped_img, predicted_class,
                                    layerNos[j], layerNames[j])

            if gradcam_write == True:
                cv2.imwrite("gradcam_" + str(start + i) + "_" + layerNames[j] + "_cam.jpg", cam)
                cv2.imwrite("gradcam_" + str(start + i) + "_" + layerNames[j] + "_heatmap.jpg", heatmap)

        # convert (64, 64, 1) into (64, 64, 3)
        reshaped_img = reshaped_img.reshape((1, imgSize, imgSize))
        reshaped_img = gray_to_rgb(reshaped_img)
        reshaped_img = reshaped_img.reshape((1, imgSize, imgSize, 3))

        # guided backpropagation + Grad-CAM
        register_gradient()
        guided_model = modify_backprop(model, 'GuidedBackProp')
        saliency_fn = compile_saliency_function(guided_model)
        saliency = saliency_fn([reshaped_img, 0])

        gradcam = saliency[0] * heatmap[..., np.newaxis]
        cv2.imwrite("guided_gradcam_" + str(start + i) + ".jpg",
                    deprocess_image(gradcam, multiple))
def run_gradcam(start, end, input_class):
    import warnings
    warnings.filterwarnings('ignore')
    warnings.filterwarnings('always')

    # config
    gradcam_write = False
    multiple = 0.15

    # https://stackoverflow.com/questions/66221788/tf-gradients-is-not-supported-when-eager-execution-is-enabled-use-tf-gradientta/66222183
    tf.compat.v1.disable_eager_execution()

    # load images
    imgs = v.readImages(False, start, end)
    print('length of imgs = ' + str(len(imgs)))

    imgSize = 64

    # reshape the images into (N, imgSize * imgSize)
    imgs = np.reshape(imgs, (end - start, imgSize * imgSize)).astype(float)
    print('shape of imgs = ' + str(np.shape(imgs)))

    # load pre-trained model
    trainI = [[0] * imgSize * imgSize]
    trainO = [[0] * 2]

    f = open('car_model_config.txt', 'r')
    modelInfo = f.readlines()
    f.close()

    NN = helper.getNN(modelInfo, trainI, trainO)  # Neural Network
    op = helper.getOptimizer(modelInfo)           # optimizer
    loss = helper.getLoss(modelInfo)              # loss

    model = DGPU.deepLearningModel('model', op, loss, True)
    model.load_weights('model.h5')
    model.summary()

    # modify model
    newModel = modified_model()
    newModel.summary()

    # load weights
    temp_weights = [layer.get_weights() for layer in model.layers]
    for i in range(len(newModel.layers) - 1):
        newModel.layers[i + 1].set_weights(temp_weights[i + 2])

    newModel.build((None, imgSize, imgSize, 1))
    newModel.summary()

    # find the index of the layer to be visualized
    layer_index = utils.find_layer_idx(newModel, 'output_layer')

    # swap the final activation with a linear one
    newModel.layers[layer_index].activation = tf.keras.activations.linear
    newModel = utils.apply_modifications(newModel)
    newModel.summary()

    # print predictions
    print(' < predictions for each image >')

    N = end - start
    imgs_reshaped = imgs.reshape(N, imgSize, imgSize, 1)
    predictions = newModel.predict(imgs_reshaped)
    print('shape of predictions = ' + str(np.shape(predictions)))

    # DO NOT apply invSigmoid to the predictions of newModel
    for i in range(len(predictions)):
        print('image ' + str(start + i) + ' -> ' +
              str(round(predictions[i][0] * 100, 4)) + '% / ' +
              str(round(predictions[i][1] * 100, 4)) + '%')

    # explanation of each image
    print(' < explanations for each image >')

    for i in range(len(imgs)):
        # reshape input image
        img = imgs[i].reshape(imgSize, imgSize, 1)

        # Matplotlib preparations
        fig, axes = plt.subplots(1, 3)

        # generate visualization
        visualization = visualize_cam(newModel, layer_index,
                                      filter_indices=input_class[i], seed_input=img)

        axes[0].imshow(img[..., 0], cmap='gray')
        axes[0].set_title('Input')

        axes[1].imshow(visualization)
        axes[1].set_title('Grad-CAM')

        heatmap = np.uint8(cm.jet(visualization)[..., :3] * 255)
        original = np.uint8(cm.gray(img[..., 0])[..., :3] * 255)
        axes[2].imshow(overlay(heatmap, original))
        axes[2].set_title('Overlay')

        fig.suptitle(f'target = {input_class[i]}')
        plt.savefig('algo_2_gradcam2_' + str(start + i) + '.png')
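# Example usage (sketch; the index range and target classes are hypothetical,
# and input_class must provide one class index per image in [start, end)):
#
#     run_gradcam(350, 355, [0, 0, 1, 1, 0])
#
# This writes one figure per image (algo_2_gradcam2_350.png, ...) showing the
# input, the Grad-CAM heatmap, and the overlay of the two.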
def deepLearning(inputFileName, outputFileName, testFileName, testOutputFileName,
                 valid, deviceName, epoch, printed, modelName, normalizeTarget):

    # read files
    # trainO : (originalOutput - meanOriginalOutput)/stdOriginalOutput
    trainI = helper.getDataFromFile(inputFileName, None)   # input train data
    trainO = helper.getDataFromFile(outputFileName, None)  # output train data (Sigmoid applied)
    testI = helper.getDataFromFile(testFileName, None)     # test input data (set nullValue to 0)

    # apply sigmoid to train output data
    # trainO : sigmoid(normalize(originalOutput))
    #        = sigmoid((originalOutput - meanOriginalOutput)/stdOriginalOutput)
    for i in range(len(trainO)):
        for j in range(len(trainO[0])):
            trainO[i][j] = helper.sigmoid(trainO[i][j])

    # for i in range(15): print(trainO[i])

    print('')
    print(' ---- number of rows ----')
    print('input size: ' + str(len(trainI)))
    print('output size: ' + str(len(trainO)))
    print('test size: ' + str(len(testI)))
    print('')

    # print input, output, and test data
    if printed != 0:
        print('\n ---- original input data ----\n')
        for i in range(len(trainI)):
            print(helper.roundedArray(trainI[i], 6))

        print('\n ---- original output data ----\n')
        for i in range(len(trainO)):
            print(helper.roundedArray(trainO[i], 6))

        print('\n ---- original test data ----\n')
        for i in range(len(testI)):
            print(helper.roundedArray(testI[i], 6))

    # model design using deepLearning_model.txt
    # activation function of final layer is always 'sigmoid'
    f = open('deepLearning_model.txt', 'r')
    modelInfo = f.readlines()
    f.close()

    # read normalization info
    if normalizeTarget == True:
        fnorm = open('data_normalizeInfo.txt', 'r')
        fnormInfo = fnorm.readlines()
        fnormMean = float(fnormInfo[0].split(' ')[0])  # mean of training data
        fnormStd = float(fnormInfo[0].split(' ')[1])   # stddev of training data

    #### TEST when the value of valid is 0 ####
    if valid == 0:

        # NN and optimizer
        NN = helper.getNN(modelInfo, trainI, trainO)  # Neural Network
        op = helper.getOptimizer(modelInfo)           # optimizer

        # print(trainI[:5])
        # print(trainO[:5])

        try:  # try reading test.h5 and test.json
            newModel = deepLearning_GPU.deepLearningModel(modelName, True)
            testOutput = deepLearning_GPU.modelOutput(newModel, testI)

        except:  # do learning if test.h5 and test.json do not exist
            print('\n <<<< LEARNING >>>>\n')

            # False and True are dataPrint (whether to print training data)
            # and modelPrint (whether to print the model summary), respectively
            deepLearning_GPU.deepLearning(NN, op, 'mean_squared_error', trainI, trainO,
                                          modelName, epoch, False, True, deviceName)

            newModel = deepLearning_GPU.deepLearningModel(modelName, True)
            testOutput = deepLearning_GPU.modelOutput(newModel, testI)

        # test
        print('\n <<<< TEST >>>>\n')

        # estimate
        outputLayer = testOutput[len(testOutput) - 1]

        # inverse sigmoid
        # output: denormalize(invSigmoid(sigmoid(normalize(originalOutput))))
        #       = denormalize((originalOutput - meanOriginalOutput)/stdOriginalOutput)
        #       = originalOutput
        for i in range(len(outputLayer)):          # for each output data
            for j in range(len(outputLayer[0])):   # for each value of output data
                outputLayer[i][j] = helper.invSigmoid(outputLayer[i][j])
                if normalizeTarget == True:
                    outputLayer[i][j] = outputLayer[i][j] * fnormStd + fnormMean

        # write to file
        result = ''
        print('\n<<<< output layer >>>>')

        for i in range(len(outputLayer)):          # for each output data
            for j in range(len(outputLayer[0])):   # for each value of output data
                result += str(outputLayer[i][j]) + '\t'
            result += '\n'

        f = open(testOutputFileName.split('.')[0] + '_prediction.txt', 'w')
        f.write(result)
        f.close()

        # return final result
        finalResult = []
        for i in range(len(outputLayer)):  # for each output data
            finalResult.append(outputLayer[i][0])

        return finalResult

    #### VALIDATION when the value of valid is > 0 ####
    else:

        # make index-list of validation data
        inputSize = len(trainI)
        validSize = int(inputSize * valid)
        trainSize = inputSize - validSize

        validArray = []
        for i in range(inputSize):
            validArray.append(0)
        while sum(validArray) < validSize:
            validArray[random.randint(0, inputSize - 1)] = 1

        # make train and validation data
        # _TrainO, _ValidO : sigmoid((originalOutput - meanOriginalOutput)/stdOriginalOutput)
        _TrainI = []  # training input
        _TrainO = []  # training output
        _ValidI = []  # valid input
        _ValidO = []  # valid output

        for i in range(inputSize):
            if validArray[i] == 0:  # training data
                _TrainI.append(trainI[i])
                _TrainO.append(trainO[i])
            else:                   # validation data
                _ValidI.append(trainI[i])
                _ValidO.append(trainO[i])

        # model name for validation
        newModelName = modelName + 'Valid'

        # NN and optimizer
        NN = helper.getNN(modelInfo, _TrainI, _TrainO)  # Neural Network
        op = helper.getOptimizer(modelInfo)             # optimizer

        # output for validation
        try:  # try reading testValid.h5 and test.json
            validModel = deepLearning_GPU.deepLearningModel(newModelName, True)
            predictedValidO = deepLearning_GPU.modelOutput(validModel, _ValidI)

        except:  # do learning if testValid.h5 and test.json do not exist
            print('\n <<<< LEARNING >>>>\n')

            # False and True are dataPrint (whether to print training data)
            # and modelPrint (whether to print the model summary), respectively
            # _TrainO : sigmoid((originalOutput - meanOriginalOutput)/stdOriginalOutput)
            deepLearning_GPU.deepLearning(NN, op, 'mean_squared_error', _TrainI, _TrainO,
                                          newModelName, epoch, False, True, deviceName)

            validModel = deepLearning_GPU.deepLearningModel(newModelName, True)
            predictedValidO = deepLearning_GPU.modelOutput(validModel, _ValidI)

        # evaluation
        print('\n <<<< VALID >>>>\n')

        MAE = 0       # mean absolute error
        MSE = 0       # mean square error
        accuracy = 0  # accuracy

        # predicted validation output
        outputLayer = predictedValidO[len(predictedValidO) - 1]

        # inverse sigmoid
        # output : invSigmoid(sigmoid(normalize(originalOutput)))
        #        = (originalOutput - meanOriginalOutput)/stdOriginalOutput
        for i in range(len(outputLayer)):          # for each output data
            for j in range(len(outputLayer[0])):   # for each value of output data
                outputLayer[i][j] = helper.invSigmoid(outputLayer[i][j])

        # denormalize predicted and real validation output
        # output  : denormalize((originalOutput - meanOriginalOutput)/stdOriginalOutput)
        #         = originalOutput
        # _ValidO : denormalize(invSigmoid(sigmoid((originalOutput - meanOriginalOutput)/stdOriginalOutput)))
        #         = denormalize((originalOutput - meanOriginalOutput)/stdOriginalOutput)
        #         = originalOutput
        for i in range(len(outputLayer)):          # for each output data
            for j in range(len(outputLayer[0])):   # for each value of output data
                _ValidO[i][j] = helper.invSigmoid(_ValidO[i][j])
                if normalizeTarget == True:
                    _ValidO[i][j] = _ValidO[i][j] * fnormStd + fnormMean
                    outputLayer[i][j] = outputLayer[i][j] * fnormStd + fnormMean

        # compute error
        validCount = 0
        resultToWrite = ''

        for i in range(inputSize):
            if validArray[i] == 1:

                # compute errors and accuracy
                thisAE = abs(_ValidO[validCount][0] - outputLayer[validCount][0])
                thisSE = pow(_ValidO[validCount][0] - outputLayer[validCount][0], 2)
                MAE += thisAE
                MSE += thisSE
                if thisSE <= 0.5:
                    accuracy += 1

                # print and write result
                newResultToWrite = ('[' + str(i) + '] pred = ' + str(int(outputLayer[validCount][0])) +
                                    ', real = ' + str(int(_ValidO[validCount][0])) +
                                    ', AE = ' + str(int(thisAE)) +
                                    ', SE = ' + str(int(thisSE)))
                resultToWrite += newResultToWrite + '\n'
                print(newResultToWrite)

                validCount += 1

        MAE /= validSize
        MSE /= validSize
        accuracy /= validSize

        # print evaluation result
        resultSummary = ''
        resultSummary += 'input size : ' + str(inputSize) + '\n'
        resultSummary += 'train size : ' + str(trainSize) + '\n'
        resultSummary += 'valid size : ' + str(validSize) + '\n'
        resultSummary += 'MAE        : ' + str(round(MAE, 6)) + '\n'
        resultSummary += 'MSE        : ' + str(round(MSE, 6)) + '\n'
        resultSummary += 'accuracy   : ' + str(round(accuracy, 6)) + '\n'
        resultSummary += 'pred avg   : ' + str(np.average(outputLayer, axis=0)) + '\n'
        resultSummary += 'real avg   : ' + str(np.average(_ValidO, axis=0)) + '\n'
        print(resultSummary)
        resultToWrite += resultSummary

        # write result file
        fvalid = open('data_valid_result.txt', 'w')
        fvalid.write(resultToWrite)
        fvalid.close()
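# Example calls (sketch; the file names are hypothetical):
#
#     # validation run: 15% of the training data is held out
#     deepLearning('data_input.txt', 'data_output.txt', 'data_test.txt',
#                  'data_test_output.txt', 0.15, '/gpu:0', 20, 0, 'data_model', True)
#
#     # test run: with valid = 0 the first output column of each test row is returned
#     preds = deepLearning('data_input.txt', 'data_output.txt', 'data_test.txt',
#                          'data_test_output.txt', 0, '/gpu:0', 20, 0, 'data_model', True)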
        dict_heatmap = dict(explanation.local_exp[ind])
        heatmap = np.vectorize(dict_heatmap.get)(explanation.segments)

        plt.imshow(heatmap, cmap='RdBu', vmin=-heatmap.max(), vmax=heatmap.max())
        plt.savefig('algo_1_lime_' + str(start + i) + '_1.png',
                    bbox_inches='tight', pad_inches=0)


if __name__ == '__main__':

    # load pre-trained model and choose two images to explain
    trainI = [[0] * 4096]
    trainO = [[0] * 2]

    f = open('car_model_config.txt', 'r')
    modelInfo = f.readlines()
    f.close()

    NN = helper.getNN(modelInfo, trainI, trainO)  # Neural Network
    op = helper.getOptimizer(modelInfo)           # optimizer
    loss = helper.getLoss(modelInfo)              # loss

    model = DGPU.deepLearningModel('model', op, loss, True)
    model.load_weights('model.h5')

    run_lime(350, 355, model)
    run_lime(850, 855, model)