def check_res50_features(model_name, GPU=0):
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(GPU)
    import keras
    input_size_fit = (224, 224)

    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])

    if model_name == 'deeplab':
        input_size = (321, 321)
        overlap = 0
    else:
        input_size = (572, 572)
        overlap = 184
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], cSize=input_size, numPixOverlap=overlap,
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                    pad=overlap // 2)
    patchDir = extrObj.run(blCol)

    file_name = os.path.join(patchDir, 'fileList.txt')
    with open(file_name, 'r') as f:
        files = f.readlines()

    res50 = keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet')
    pred_list = np.zeros(len(files))
    for file_cnt, file_line in enumerate(tqdm(files)):
        img = np.zeros((input_size[0], input_size[1], 3), dtype=np.uint8)
        for cnt, file in enumerate(file_line.strip().split(' ')[:3]):
            img[:, :, cnt] = imageio.imread(os.path.join(patchDir, file)) - img_mean[cnt]
        img = np.expand_dims(crop_center(img, input_size_fit[0], input_size_fit[1]), axis=0)
        fc1000 = res50.predict(img).reshape((-1, )).tolist()
        pred_list[file_cnt] = np.argmax(fc1000)
    return pred_list
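# `crop_center` is called above (and in the next function) but not defined in this section;
# a minimal sketch of the helper it is assumed to be, center-cropping an (H, W, C) array:
def crop_center(img, cropx, cropy):
    # hypothetical helper; assumes img is an (H, W, C) numpy array and the crop fits inside it
    y, x = img.shape[0], img.shape[1]
    start_x = x // 2 - cropx // 2
    start_y = y // 2 - cropy // 2
    return img[start_y:start_y + cropy, start_x:start_x + cropx, :]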
def make_res50_features(model_name, task_dir, GPU=0, force_run=False):
    tf.reset_default_graph()
    feature_file_name = os.path.join(task_dir, 'res50_atlanta_{}.csv'.format(model_name))
    patch_file_name = os.path.join(task_dir, 'res50_atlanta_{}.txt'.format(model_name))

    if model_name == 'deeplab':
        input_size = (321, 321)
        overlap = 0
    else:
        input_size = (572, 572)
        overlap = 184

    blCol = uab_collectionFunctions.uabCollection('atlanta')
    img_mean = blCol.getChannelMeans([0, 1, 2])
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3], cSize=input_size, numPixOverlap=overlap,
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                    pad=overlap // 2)
    patchDir = extrObj.run(blCol)

    if not os.path.exists(feature_file_name) or not os.path.exists(patch_file_name) or force_run:
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(GPU)
        import keras
        input_size_fit = (224, 224)

        file_name = os.path.join(patchDir, 'fileList.txt')
        with open(file_name, 'r') as f:
            files = f.readlines()

        res50 = keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet')
        # use the 2048-d output of the layer before the final fc layer as the feature extractor
        fc2048 = keras.models.Model(inputs=res50.input, outputs=res50.get_layer('flatten_1').output)

        with open(feature_file_name, 'w+') as f:
            with open(patch_file_name, 'w+') as f2:
                for file_line in tqdm(files):
                    patch_name = file_line.split('.')[0][:-5]
                    img = np.zeros((input_size[0], input_size[1], 3), dtype=np.uint8)
                    for cnt, file in enumerate(file_line.strip().split(' ')[:3]):
                        img[:, :, cnt] = imageio.imread(os.path.join(patchDir, file)) - img_mean[cnt]
                    img = np.expand_dims(crop_center(img, input_size_fit[0], input_size_fit[1]), axis=0)
                    fc1000 = fc2048.predict(img).reshape((-1,)).tolist()
                    writer = csv.writer(f, lineterminator='\n')
                    writer.writerow(['{}'.format(x) for x in fc1000])
                    f2.write('{}\n'.format(patch_name))
    return feature_file_name, patch_file_name, input_size[0], patchDir
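# A minimal usage sketch of the two helpers above; the model name and task_dir value are
# illustrative assumptions, and the module-level imports of this file (os, numpy, etc.)
# are assumed to be in place:
if __name__ == '__main__':
    task_dir = r'/tmp/res50_features'          # hypothetical output directory
    os.makedirs(task_dir, exist_ok=True)
    preds = check_res50_features('unet', GPU=0)
    feature_file, patch_file, patch_size, patch_dir = make_res50_features('unet', task_dir, GPU=0, force_run=False)
    print(len(preds), feature_file, patch_file, patch_size, patch_dir)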
opDetObj = bPreproc.uabOperTileDivide(255)  # inria GT has value 0 and 255, we map it back to 0 and 1
# [3] is the channel id of GT
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
rescObj.run(blCol)
img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

# extract patches
if cnn_name == 'deeplab':
    ps = 321
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], cSize=(ps, ps), numPixOverlap=0,
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                    pad=0)
else:
    ps = 572
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], cSize=(ps, ps), numPixOverlap=184,
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                    pad=92)
patchDir = extrObj.run(blCol)

# get validation set
patch_prob = np.load('/media/ei-edl01/user/bh163/tasks/2018.06.01.domain_selection/patch_prob_austin_2048.npy')
city_list = ['austin', 'chicago', 'kitsap', 'tyrol-w', 'vienna']

# create collection
# the original file is in /ei-edl01/data/uab_datasets/inria
blCol = uab_collectionFunctions.uabCollection('inria')
img_mean = blCol.getChannelMeans([0, 1, 2])

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],      # extract all 4 channels
                                                cSize=(321, 321),  # patch size as 321*321
                                                numPixOverlap=0,   # no overlap
                                                extSave=['jpg', 'jpg', 'jpg', 'png'],  # save rgb files as jpg and gt as png
                                                isTrain=True,
                                                gtInd=3,
                                                pad=0)             # no padding around the tiles
patchDir = extrObj.run(blCol)

# make data reader
chipFiles = os.path.join(patchDir, 'fileList.txt')
# use uabCrossValMaker to get fileLists for training and validation
idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
# use first 5 tiles for validation
file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(6, 37)])
file_list_valid = uabCrossValMaker.make_file_list_by_key(
blCol = uab_collectionFunctions.uabCollection('inria')
opDetObj = bPreproc.uabOperTileDivide(255)  # inria GT has value 0 and 255, we map it back to 0 and 1
# [3] is the channel id of GT
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
rescObj.run(blCol)
img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], cSize=(input_size[0], input_size[1]),
                                                numPixOverlap=int(model.get_overlap()),
                                                extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                pad=model.get_overlap() // 2)  # integer padding, half the overlap
patchDir = extrObj.run(blCol)

# get validation set
# use uabCrossValMaker to get fileLists for training and validation
idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
# load patch names
patch_file = os.path.join(task_dir, 'incep_inria_{}.txt'.format(model_name))
with open(patch_file, 'r') as f:
    patch_names = f.readlines()
# make truth
truth_file_building = os.path.join(
def main(flags):
    copyfile(os.path.join(flags.pred_file_dir, '1iter_pred_building_binary_{}.npy'.format(flags.leave_city)),
             os.path.join(flags.pred_file_dir, 'iter_pred_building_binary_{}.npy'.format(flags.leave_city)))
    flags.pred_file_dir = os.path.join(flags.pred_file_dir,
                                       'iter_pred_building_binary_{}.npy'.format(flags.leave_city))

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    y2 = tf.placeholder(tf.float32, shape=[None, 1], name='y2')
    mode = tf.placeholder(tf.bool, name='mode')
    model = UnetModelCrop_Iter({'X': X, 'Y': y, 'Y2': y2},
                               trainable=mode,
                               model_name=flags.model_name,
                               input_size=flags.input_size,
                               batch_size=flags.batch_size,
                               learn_rate=flags.learning_rate,
                               decay_step=flags.decay_step,
                               decay_rate=flags.decay_rate,
                               epochs=flags.epochs,
                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)  # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],  # extract all 4 channels
                                                    cSize=flags.input_size,  # patch size from flags
                                                    numPixOverlap=int(model.get_overlap() / 2),  # half of the model overlap
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'],  # save rgb files as jpg and gt as png
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=model.get_overlap())  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')
    # leave one city out for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list,
                                                             [i for i in range(5) if i != flags.leave_city])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [flags.leave_city])
    dataReader_train = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_train, flags.input_size,
                                                      flags.batch_size, dataAug='flip,rotate',
                                                      block_mean=np.append([0], img_mean), batch_code=0)
    dataReader_train_building = uabDataReader.ImageLabelReaderBuildingCustom(
        [3], [0, 1, 2], patchDir, file_list_valid, flags.input_size, flags.batch_size, dataAug='flip,rotate',
        percent_file=flags.pred_file_dir, block_mean=np.append([0], img_mean), patch_prob=0.1, binary=True)
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid, flags.input_size,
                                                      flags.batch_size, dataAug=' ',
                                                      block_mean=np.append([0], img_mean), batch_code=0)

    # train
    start_time = time.time()
    model.train_config('X', 'Y', 'Y2', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='Inria_Domain_LOO')
    model.run(train_reader=dataReader_train,
              train_reader_building=dataReader_train_building,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.finetune_dir,  # fine-tune from the model in flags.finetune_dir
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=5,  # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)
    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_ASSN.SSAN_UNet({'X': X, 'Y': y},
                                          trainable=mode,
                                          model_name=flags.model_name,
                                          input_size=flags.input_size,
                                          batch_size=flags.batch_size,
                                          learn_rate=flags.learning_rate,
                                          decay_step=flags.decay_step,
                                          decay_rate=flags.decay_rate,
                                          epochs=flags.epochs,
                                          start_filter_num=flags.sfn,
                                          lada=flags.lada,
                                          slow_iter=flags.slow_iter)
    model.create_graph(['X', 'Y'], class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                    pad=model.get_overlap() // 2)
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')
    idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]

    # train on the other cities, fine-tune on the target city, validate on its first 5 tiles
    filter_train = []
    filter_train_target = []
    filter_valid = []
    for i in range(5):
        for j in range(1, 37):
            if i != flags.finetune_city and j > 5:
                filter_train.append(j * 10 + i)
            elif i == flags.finetune_city and j > 5:
                filter_train_target.append(j * 10 + i)
            elif i == flags.finetune_city and j <= 5:
                filter_valid.append(j * 10 + i)
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_train)
    filter_list_train_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_train_target)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_valid)

    dataReader_train = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_train, flags.input_size,
                                                      flags.batch_size, dataAug='flip,rotate',
                                                      block_mean=np.append([0], img_mean), batch_code=0)
    dataReader_train_target = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, filter_list_train_valid,
                                                             flags.input_size, flags.batch_size,
                                                             dataAug='flip,rotate',
                                                             block_mean=np.append([0], img_mean), batch_code=0)
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid, flags.input_size,
                                                      flags.batch_size, dataAug=' ',
                                                      block_mean=np.append([0], img_mean), batch_code=0)

    # train
    start_time = time.time()
    model.load_weights(flags.pred_model_dir.format(flags.finetune_city), layers2load='1,2,3,4,5,6,7,8,9',
                       load_final_layer=True)
    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='Inria_GAN/SSAN')
    model.run(train_reader=dataReader_train,
              train_reader_source=dataReader_train,
              train_reader_target=dataReader_train_target,
              valid_reader=dataReader_valid,
              pretrained_model_dir=None,  # no pre-trained model dir needed; weights are loaded above
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=flags.save_epoch,  # save the model every save_epoch epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)
    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
def main(flags):
    city_list = ['austin', 'chicago', 'kitsap', 'tyrol-w', 'vienna']
    flags.llh_file_dir = flags.llh_file_dir.format(flags.finetune_city)
    weight = np.load(flags.llh_file_dir)

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelCrop({'X': X, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)  # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],  # extract all 4 channels
                                                    cSize=flags.input_size,  # patch size from flags
                                                    numPixOverlap=int(model.get_overlap() / 2),  # half of the model overlap
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'],  # save rgb files as jpg and gt as png
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=model.get_overlap())  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')
    idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]

    # train on the other cities, validate on the first 5 tiles of the fine-tune city
    filter_train = []
    filter_valid = []
    for i in range(5):
        for j in range(1, 37):
            if i != flags.finetune_city and j > 5:
                filter_train.append(j * 10 + i)
            elif i == flags.finetune_city and j <= 5:
                filter_valid.append(j * 10 + i)
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_train)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_valid)

    dataReader_train = uabDataReader.ImageLabelReaderPatchSampleControl(
        [3], [0, 1, 2], patchDir, file_list_train, flags.input_size, flags.batch_size, weight,
        dataAug='flip,rotate', block_mean=np.append([0], img_mean))
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid, flags.input_size,
                                                      flags.batch_size, dataAug=' ',
                                                      block_mean=np.append([0], img_mean), batch_code=0)

    # train
    start_time = time.time()
    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='Inria_Domain_Selection')
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.pred_model_dir.format(flags.finetune_city),
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=5,  # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)
    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
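# Several scripts above and below combine a city index i (0-4) and a tile index j into a
# single key j * 10 + i, then select patches by listing the keys they want. A small
# standalone illustration of that split (the 5-city / 36-tile counts are the Inria ones
# used in these scripts; the finetune_city value here is arbitrary):
finetune_city = 0
filter_train, filter_valid = [], []
for i in range(5):              # 5 cities
    for j in range(1, 37):      # 36 tiles per city
        if i != finetune_city and j > 5:
            filter_train.append(j * 10 + i)   # tiles 6-36 of the other cities for training
        elif i == finetune_city and j <= 5:
            filter_valid.append(j * 10 + i)   # tiles 1-5 of the held-out city for validation
key = 12 * 10 + 3               # tile 12 of city index 3 ('tyrol-w' in city_list above)
print(key in filter_train, key in filter_valid)   # True False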
# create collection
# the original file is in /ei-edl01/data/uab_datasets/inria
blCol = uab_collectionFunctions.uabCollection('inria')
opDetObj = bPreproc.uabOperTileDivide(255)
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
rescObj.run(blCol)
print(blCol.readMetadata())

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],     # extract all 4 channels
                                                cSize=chip_size,  # patch size as 224*224
                                                numPixOverlap=0,  # overlap as 0
                                                extSave=['jpg', 'jpg', 'jpg', 'png'],  # save rgb files as jpg and gt as png
                                                isTrain=True,
                                                gtInd=3)
patchDir = extrObj.run(blCol)

# make data reader
chipFiles = os.path.join(patchDir, 'fileList.txt')
# use uabCrossValMaker to get fileLists for training and validation
idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(6, 37)])
file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 6)])
""" Created on Thu Dec 7 21:03:29 2017 @author: jordan Example script for extracting patches from a collection. The numbers in this file relate to a particular type of U-net """ import uab_collectionFunctions import uab_DataHandlerFunctions blCol = uab_collectionFunctions.uabCollection('inria_orgd') extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2], cSize=(572, 572), numPixOverlap=92, extSave=['png', 'jpg', 'jpg']) extrObj.run(blCol)
def main(flags):
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    if flags.start_layer >= 10:
        pass
    else:
        flags.model_name += '_up{}'.format(flags.start_layer)

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelCrop({'X': X, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([1, 2, 3])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3], cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['png', 'jpg', 'jpg', 'jpg'], isTrain=True, gtInd=0,
                                                    pad=int(model.get_overlap() // 2))
    patchDir = extrObj.run(blCol)

    # make data reader
    # use the last 2 tiles for validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'tile')
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [0, 1, 2, 3])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [4, 5])
    file_list_train = file_list_train[-int(len(file_list_train) * flags.portion):]

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_train, flags.input_size,
                                                          flags.tile_size, flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_valid, flags.input_size,
                                                          flags.tile_size, flags.batch_size, dataAug=' ',
                                                          block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()
    if flags.start_layer >= 10:
        model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                           loss_type='xent', par_dir='aemo/{}'.format(flags.ds_name))
    else:
        model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                           loss_type='xent', par_dir='aemo/{}'.format(flags.ds_name),
                           train_var_filter=['layerup{}'.format(i) for i in range(flags.start_layer, 10)])
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.model_dir,  # fine-tune from the pre-trained model in flags.model_dir
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=5,  # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)
    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
opDetObj = bPreproc.uabOperTileDivide(255)  # inria GT has value 0 and 255, we map it back to 0 and 1
# [0] is the channel id of GT
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [0], opDetObj)
rescObj.run(blCol)
blCol.readMetadata()
img_mean = blCol.getChannelMeans([1, 2, 3])  # get mean of rgb info

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr([1, 2, 3, 4],      # extract all 4 channels
                                                cSize=(572, 572),  # patch size as 572*572
                                                numPixOverlap=46,  # half overlap for this
                                                extSave=['jpg', 'jpg', 'jpg', 'png'],  # save rgb files as jpg and gt as png
                                                isTrain=True,
                                                gtInd=3,
                                                pad=184)           # pad around the tiles
patchDir = extrObj.run(blCol)

patchDir2 = r'/hdd/uab_datasets/Results/PatchExtr/road/chipExtrRegPurge_cSz572x572_pad184'
if not os.path.exists(patchDir2):
    os.makedirs(patchDir2)

files = os.path.join(patchDir, 'fileList.txt')
with open(files, 'r') as f:
    file_list = f.readlines()
file_list_new = []
for file in tqdm(file_list):
def main(flags):
    # ------------------------------------------Network---------------------------------------------#
    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UnetMTL.UnetModelMTL({'X': X, 'Y': y},
                                                trainable=mode,
                                                model_name=flags.model_name,
                                                input_size=flags.input_size,
                                                batch_size=flags.batch_size,
                                                learn_rate=flags.learning_rate,
                                                decay_step=flags.decay_step,
                                                decay_rate=flags.decay_rate,
                                                epochs=flags.epochs,
                                                start_filter_num=flags.sfn,
                                                source_num=flags.s_num,
                                                source_name=flags.s_name,
                                                source_control=flags.s_control)
    model.create_graph('X', class_num=flags.num_classes, start_filter_num=flags.sfn)

    # ------------------------------------------Dataset Inria---------------------------------------------#
    # create collection for inria
    blCol_inria = uab_collectionFunctions.uabCollection('inria')
    opDetObj_inria = bPreproc.uabOperTileDivide(255)
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj_inria)
    rescObj.run(blCol_inria)
    img_mean_inria = blCol_inria.getChannelMeans([0, 1, 2])

    # extract patches
    extrObj_inria = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], cSize=flags.input_size,
                                                          numPixOverlap=int(model.get_overlap()),
                                                          extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                          pad=int(model.get_overlap() / 2))
    patchDir_inria = extrObj_inria.run(blCol_inria)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_inria, file_list_inria = uabCrossValMaker.uabUtilGetFolds(patchDir_inria, 'fileList.txt', 'force_tile')
    # use the first 20 tiles for validation
    file_list_train_inria = uabCrossValMaker.make_file_list_by_key(idx_inria, file_list_inria,
                                                                   [i for i in range(20, 136)])
    file_list_valid_inria = uabCrossValMaker.make_file_list_by_key(idx_inria, file_list_inria,
                                                                   [i for i in range(0, 20)])
    with tf.name_scope('image_loader_inria'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train_inria = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_inria,
                                                                file_list_train_inria, flags.input_size,
                                                                flags.tile_size, flags.batch_size,
                                                                dataAug='flip,rotate',
                                                                block_mean=np.append([0], img_mean_inria))
        # no augmentation needed for validation
        dataReader_valid_inria = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_inria,
                                                                file_list_valid_inria, flags.input_size,
                                                                flags.tile_size, flags.batch_size, dataAug=' ',
                                                                block_mean=np.append([0], img_mean_inria))

    # ------------------------------------------Dataset Road---------------------------------------------#
    # create collection for road
    blCol_road = uab_collectionFunctions.uabCollection('road_5000')
    opDetObj_road = bPreproc.uabOperTileDivide(255)
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj_road)
    rescObj.run(blCol_road)
    img_mean_road = blCol_road.getChannelMeans([0, 1, 2])

    # extract patches
    extrObj_road = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], cSize=flags.input_size,
                                                         numPixOverlap=int(model.get_overlap()),
                                                         extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                         pad=int(model.get_overlap() / 2))
    patchDir_road = extrObj_road.run(blCol_road)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_road, file_list_road = uabCrossValMaker.uabUtilGetFolds(patchDir_road, 'fileList.txt', 'city')
    # train on city 1, validate on cities 0 and 2
    file_list_train_road = uabCrossValMaker.make_file_list_by_key(idx_road, file_list_road, [1])
    file_list_valid_road = uabCrossValMaker.make_file_list_by_key(idx_road, file_list_road, [0, 2])
    with tf.name_scope('image_loader_road'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train_road = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_road, file_list_train_road,
                                                               flags.input_size, flags.tile_size, flags.batch_size,
                                                               dataAug='flip,rotate',
                                                               block_mean=np.append([0], img_mean_road))
        # no augmentation needed for validation
        dataReader_valid_road = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_road, file_list_valid_road,
                                                               flags.input_size, flags.tile_size, flags.batch_size,
                                                               dataAug=' ',
                                                               block_mean=np.append([0], img_mean_road))

    # ------------------------------------------Train---------------------------------------------#
    start_time = time.time()
    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent')
    model.run(train_reader=[dataReader_train_inria, dataReader_train_road],
              valid_reader=[dataReader_valid_inria, dataReader_valid_road],
              pretrained_model_dir=None,  # train from scratch, no need to load pre-trained model
              isTrain=True,
              img_mean=[img_mean_inria, img_mean_road],
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=5,  # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)
    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
def main(flags):
    city_dict = {'austin': 0, 'chicago': 1, 'kitsap': 2, 'tyrol-w': 3, 'vienna': 4}
    city_alpha = [0.2, 0.5, 0.1, 0.1, 0.1]

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelCrop({'X': X, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)  # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                    pad=model.get_overlap() // 2)  # integer padding, half the overlap
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    # use first 5 tiles for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(6, 37)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 6)])
    dataReader_train = uabDataReader.ImageLabelReaderCitySampleControl(
        [3], [0, 1, 2], patchDir, file_list_train, flags.input_size, flags.batch_size, city_dict, city_alpha,
        dataAug='flip,rotate', block_mean=np.append([0], img_mean))
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReaderCitySampleControl(
        [3], [0, 1, 2], patchDir, file_list_valid, flags.input_size, flags.batch_size, city_dict, city_alpha,
        dataAug=' ', block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()
    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='Inria_Domain')
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=PRED_DIR,
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=5,  # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)
    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
def main(flags, weight_dict):
    path_to_save = os.path.join(flags.weight_dir, 'shift_dict.pkl')
    shift_dict = ersa_utils.load_file(path_to_save)

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    Z = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='Z')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelDTDA({'X': X, 'Z': Z, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', 'Z', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)  # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                    pad=model.get_overlap() // 2)
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    # use first 5 tiles for validation
    file_list_source = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(6, 37)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 6)])

    # AIOI dataset
    blCol = uab_collectionFunctions.uabCollection(CITY_LIST[flags.leave_city])
    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3], cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                    pad=model.get_overlap() // 2)
    patchDir_target = extrObj.run(blCol)
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir_target, 'fileList.txt', 'force_tile')
    file_list_target = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(5)])

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_source = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_source,
                                                           flags.input_size, flags.tile_size, flags.batch_size,
                                                           dataAug='flip,rotate', block_mean=np.append([0], img_mean))
        dataReader_target = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_target, file_list_target,
                                                           flags.input_size, flags.tile_size, flags.batch_size,
                                                           dataAug='flip,rotate', block_mean=np.append([0], img_mean))
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid,
                                                          flags.input_size, flags.tile_size, flags.batch_size,
                                                          dataAug='flip,rotate', block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()
    model.train_config('X', 'Y', 'Z', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='domain_baseline/contorl_valid', lam=flags.lam)
    model.load_source_weights(flags.model_dir, shift_dict, gpu=flags.GPU)
    model.run(train_reader_source=dataReader_source,
              train_reader_target=dataReader_target,
              valid_reader=dataReader_valid,
              pretrained_model_dir=None,  # source weights are loaded above instead of a pre-trained model dir
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=5,  # save the model every 5 epochs
              gpu=flags.GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)
    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
model = keras.models.load_model(model_save_dir)
if model_name == 'unet':
    patch_size = (572, 572)
    overlap = 184
    pad = 92
else:
    patch_size = (321, 321)
    overlap = 0
    pad = 0

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], cSize=patch_size, numPixOverlap=overlap,
                                                extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                pad=pad)
patchDir = extrObj.run(blCol)
chipFiles = os.path.join(patchDir, 'fileList.txt')

idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')
idx2, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
idx3 = [j * 10 + i for i, j in zip(idx, idx2)]

filter_train = []
filter_valid = []
for i in range(5):
    for j in range(1, 37):
        if i == city_num and j <= 5:
            filter_valid.append(j * 10 + i)
        elif i != city_num:
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelCrop({'X': X, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    blCol = uab_collectionFunctions.uabCollection('spca')
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([1, 2, 3])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3],  # extract all 4 channels
                                                    cSize=flags.input_size,  # patch size from flags
                                                    numPixOverlap=int(model.get_overlap() / 2),  # half of the model overlap
                                                    extSave=['png', 'jpg', 'jpg', 'jpg'],  # save gt as png and rgb files as jpg
                                                    isTrain=True,
                                                    gtInd=0,
                                                    pad=model.get_overlap())  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    # use the first 250 tiles for training and the next 250 for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 250)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(250, 500)])
    dataReader_train = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_train, flags.input_size,
                                                      flags.batch_size, dataAug='flip,rotate',
                                                      block_mean=np.append([0], img_mean), batch_code=0)
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_valid, flags.input_size,
                                                      flags.batch_size, dataAug=' ',
                                                      block_mean=np.append([0], img_mean), batch_code=0)

    # train
    start_time = time.time()
    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent')
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=None,  # train from scratch, no need to load pre-trained model
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=5,  # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)
    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
opDetObj = bPreproc.uabOperTileDivide(255)  # inria GT has value 0 and 255, we map it back to 0 and 1
# [3] is the channel id of GT
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
rescObj.run(blCol)
img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info
print(blCol.readMetadata())  # now inria collection has 4 channels, the last one is GT with (0, 1)

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],     # extract all 4 channels
                                                cSize=chip_size,  # patch size
                                                numPixOverlap=int(model.get_overlap() / 2),  # half of the model overlap
                                                extSave=['jpg', 'jpg', 'jpg', 'png'],  # save rgb files as jpg and gt as png
                                                isTrain=True,
                                                gtInd=3,
                                                pad=model.get_overlap())  # pad around the tiles
patchDir = extrObj.run(blCol)

# make data reader
chipFiles = os.path.join(patchDir, 'fileList.txt')
# use uabCrossValMaker to get fileLists for training and validation
idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
# use first 5 tiles for validation
file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(6, 37)])
file_list_valid = uabCrossValMaker.make_file_list_by_key(
def main(flags):
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = UnetModelCrop({'X': X, 'Y': y},
                          trainable=mode,
                          model_name=flags.model_name,
                          input_size=flags.input_size,
                          batch_size=flags.batch_size,
                          learn_rate=flags.learning_rate,
                          decay_step=flags.decay_step,
                          decay_rate=flags.decay_rate,
                          epochs=flags.epochs,
                          start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3], cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], isTrain=True, gtInd=3,
                                                    pad=int(model.get_overlap() // 2))
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')
    idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]

    # for the left-out city, use the first few tiles for validation and the rest for training
    filter_train = []
    filter_valid = []
    for i in range(4):
        for j in range(1, 20):
            if i == flags.leave_city and j < 4:
                filter_valid.append(j * 10 + i)
            elif i == flags.leave_city and j >= 4:
                filter_train.append(j * 10 + i)
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_train)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_valid)

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_train, flags.input_size,
                                                          None, flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid, flags.input_size,
                                                          None, flags.batch_size, dataAug=' ',
                                                          block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()
    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='{}'.format(flags.ds_name), pos_weight=flags.pos_weight)
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=None,  # train from scratch, no need to load pre-trained model
              isTrain=True,
              img_mean=img_mean[1:],
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=5,  # save the model every 5 epochs
              gpu=GPU,
              patch_size=flags.input_size)
    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    z = tf.placeholder(tf.float32, shape=[None, flags.z_dim], name='z')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UGAN.UGAN({'X': X, 'Z': z},
                                     trainable=mode,
                                     model_name=flags.model_name,
                                     input_size=flags.input_size,
                                     batch_size=flags.batch_size,
                                     learn_rate=flags.learning_rate,
                                     decay_step=flags.decay_step,
                                     decay_rate=flags.decay_rate,
                                     epochs=flags.epochs,
                                     start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)  # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],  # extract all 4 channels
                                                    cSize=flags.input_size,  # patch size from flags
                                                    numPixOverlap=int(model.get_overlap() / 2),  # half of the model overlap
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'],  # save rgb files as jpg and gt as png
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=model.get_overlap())  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    # use first 5 tiles for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(6, 37)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 6)])

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_train, flags.input_size,
                                                          flags.tile_size, flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid, flags.input_size,
                                                          flags.tile_size, flags.batch_size, dataAug=' ',
                                                          block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()
    model.train_config('X', 'Z', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath)
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=None,
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=5,  # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)
    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
rgb = imageio.imread(rgb_file)
imageio.imsave(os.path.join(new_ds_dir, '{}{}_RGB.png'.format(city_list[city_cnt], img_cnt)), rgb)

blCol = uab_collectionFunctions.uabCollection('inria_unet_retrain')
blCol.readMetadata()
img_mean = blCol.getChannelMeans([1, 2, 3])  # get mean of rgb info

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3], cSize=(572, 572), numPixOverlap=184,
                                                extSave=['png', 'jpg', 'jpg', 'jpg'], isTrain=True, gtInd=3,
                                                pad=92)
patchDir = extrObj.run(blCol)
_, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')

import imageio
import matplotlib.pyplot as plt

img = imageio.imread(os.path.join(patchDir, file_list[0][0]))
plt.imshow(img)
plt.colorbar()
plt.show()