Example 1
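# Note: these snippets assume module-level imports (os, json, numpy as np,
# from datetime import datetime) plus the repository's dataset classes
# (AMAZON_RO, AMAZON_MT, AMAZON_PA, CERRADO_MA), the Models wrapper,
# Metrics_For_Test_M, and a parsed argparse namespace in the global `args`;
# none of those definitions are shown here.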
def main():
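    """Test entry point: evaluate every trained model found under args.checkpoint_dir on the selected dataset."""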
    # histories = Customdash(ModelName = 'SLVC06_evaluation_process',
    #                       email = '*****@*****.**',
    #                       password = '******')
    if args.phase == 'test':
        print(args)
        if not os.path.exists('./results/'):
            os.makedirs('./results/')

        args.results_dir = './results/' + args.results_dir + '/'
        args.checkpoint_dir = './checkpoints/' + args.checkpoint_dir + '/'

        if args.dataset == 'Amazon_RO':
            args.dataset = 'Amazonia_Legal/'
            dataset = AMAZON_RO(args)

        elif args.dataset == 'Amazon_MT':
            args.dataset = 'Amazonia_Legal/'
            dataset = AMAZON_MT(args)

        elif args.dataset == 'Amazon_PA':
            args.dataset = 'Amazonia_Legal/'
            dataset = AMAZON_PA(args)

        elif args.dataset == 'Cerrado_MA':
            args.dataset = 'Cerrado_Biome/'
            dataset = CERRADO_MA(args)

        # Recover the tile split and build the patch coordinates (run index 0).
        dataset.Tiles_Configuration(args, 0)
        dataset.Coordinates_Creator(args, 0)

        # Evaluate every model folder found in the checkpoint directory.
        for model_folder in os.listdir(args.checkpoint_dir):
            args.trained_model_path = args.checkpoint_dir + model_folder + '/'
            model_folder_fields = model_folder.split('_')

            now = datetime.now()
            dt_string = now.strftime("%d_%m_%Y_%H_%M_%S")
            # model_folder[-19:] is the training timestamp (%d_%m_%Y_%H_%M_%S, 19 chars).
            args.save_results_dir = (args.results_dir + args.method_type + '_Model_Results_Trained_'
                                     + model_folder_fields[3] + '_' + model_folder_fields[4] + '_'
                                     + model_folder[-19:] + '_Tested_' + args.data_t1_year + '_'
                                     + args.data_t2_year + '_' + dt_string + '/')
            #args.save_results_dir = args.results_dir + '\\model_' + str(i) + '\\'
            if not os.path.exists(args.save_results_dir):
                os.makedirs(args.save_results_dir)

            print('[*]Initializing the model...')
            model = Models(args, dataset)

            model.Test()
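Example 2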
def main():
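    """Training entry point: build the source and target datasets, then train one model per run."""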
    print(args)
    #histories = Customdash(ModelName = 'SLVC06_train_process', email = '*****@*****.**', password = '******')
    
    if not os.path.exists('./checkpoints/'):
        os.makedirs('./checkpoints/')
    
    args.checkpoint_dir = './checkpoints/' + args.checkpoint_dir 
    
    if args.source_dataset == 'Amazon_RO':
        args.dataset = 'Amazonia_Legal/'
        args.buffer_dimension_in = args.source_buffer_dimension_in
        args.buffer_dimension_out = args.source_buffer_dimension_out
        args.data_t1_name = args.source_data_t1_name
        args.data_t2_name = args.source_data_t2_name
        args.reference_t1_name = args.source_reference_t1_name
        args.reference_t2_name = args.source_reference_t2_name
        dataset_s = AMAZON_RO(args)
        
    elif args.source_dataset == 'Amazon_PA':
        args.dataset = 'Amazonia_Legal/'
        args.buffer_dimension_in = args.source_buffer_dimension_in
        args.buffer_dimension_out = args.source_buffer_dimension_out
        args.data_t1_name = args.source_data_t1_name
        args.data_t2_name = args.source_data_t2_name
        args.reference_t1_name = args.source_reference_t1_name
        args.reference_t2_name = args.source_reference_t2_name
        dataset_s = AMAZON_PA(args)
        
    elif args.source_dataset == 'Cerrado_MA':
        args.dataset = 'Cerrado_Biome/'
        args.buffer_dimension_in = args.source_buffer_dimension_in
        args.buffer_dimension_out = args.source_buffer_dimension_out
        args.data_t1_name = args.source_data_t1_name
        args.data_t2_name = args.source_data_t2_name
        args.reference_t1_name = args.source_reference_t1_name
        args.reference_t2_name = args.source_reference_t2_name
        dataset_s = CERRADO_MA(args)
                            
    if args.target_dataset == 'Amazon_RO':
        args.dataset = 'Amazonia_Legal/'
        args.buffer_dimension_in = args.target_buffer_dimension_in
        args.buffer_dimension_out = args.target_buffer_dimension_out
        args.data_t1_name = args.target_data_t1_name
        args.data_t2_name = args.target_data_t2_name
        args.reference_t1_name = args.target_reference_t1_name
        args.reference_t2_name = args.target_reference_t2_name if args.target_reference_t2_name is not None else 'None'
        dataset_t = AMAZON_RO(args)
        
    elif args.target_dataset == 'Amazon_PA':
        args.dataset = 'Amazonia_Legal/'
        args.buffer_dimension_in = args.target_buffer_dimension_in
        args.buffer_dimension_out = args.target_buffer_dimension_out
        args.data_t1_name = args.target_data_t1_name
        args.data_t2_name = args.target_data_t2_name
        args.reference_t1_name = args.target_reference_t1_name
        args.reference_t2_name = args.target_reference_t2_name if args.target_reference_t2_name is not None else 'None'
        dataset_t = AMAZON_PA(args)
        
    elif args.target_dataset == 'Cerrado_MA':
        args.dataset = 'Cerrado_Biome/'
        args.buffer_dimension_in = args.target_buffer_dimension_in
        args.buffer_dimension_out = args.target_buffer_dimension_out
        args.data_t1_name = args.target_data_t1_name
        args.data_t2_name = args.target_data_t2_name
        args.reference_t1_name = args.target_reference_t1_name
        args.reference_t2_name = args.target_reference_t2_name if args.target_reference_t2_name is not None else 'None'
        dataset_t = CERRADO_MA(args)

    print(np.shape(dataset_s.images_norm))
    print(np.shape(dataset_t.images_norm))
    for i in range(args.runs):
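        # `dataset` collects the [source, target] pair handed to Models below.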
        dataset = []
        print(i)
        now = datetime.now()
        dt_string = now.strftime("%d_%m_%Y_%H_%M_%S")
        print(dt_string)
        if args.training_type == 'classification':
            args.save_checkpoint_path = args.checkpoint_dir + '/' + args.method_type + '_' + dt_string + '/'
        if args.training_type in ('domain_adaptation', 'domain_adaptation_balance'):
            args.save_checkpoint_path = args.checkpoint_dir + '/' + 'Tr_M_' + dt_string + '/'
        if not os.path.exists(args.save_checkpoint_path):
            os.makedirs(args.save_checkpoint_path)
        # Write the command-line args into a file
        with open(args.save_checkpoint_path + 'commandline_args.txt', 'w') as f:
            json.dump(args.__dict__, f, indent=2)
        
        # Tile the source domain using its own grid, overlap, and sampling settings.
        args.vertical_blocks = args.source_vertical_blocks
        args.horizontal_blocks = args.source_horizontal_blocks
        args.overlap = args.overlap_s
        args.porcent_of_positive_pixels_in_actual_reference = args.porcent_of_positive_pixels_in_actual_reference_s
        dataset_s.Tiles_Configuration(args, i)
        dataset_s.Coordinates_Creator(args, i)
        
        # Repeat the tiling for the target domain with its own settings.
        args.vertical_blocks = args.target_vertical_blocks
        args.horizontal_blocks = args.target_horizontal_blocks
        args.overlap = args.overlap_t
        args.porcent_of_positive_pixels_in_actual_reference = args.porcent_of_positive_pixels_in_actual_reference_t
        dataset_t.Tiles_Configuration(args, i)
        dataset_t.Coordinates_Creator(args, i)
        
        dataset.append(dataset_s)
        dataset.append(dataset_t) 
        
        print('[*]Initializing the model...')
        model = Models(args, dataset)
               
        model.Train()
Example 3
def Main():
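    """Average the hit maps of all runs found in args.results_dir, then compute metrics on the averaged map."""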
    
    if args.dataset == 'Amazon_RO':
        args.dataset = 'Amazonia_Legal/'
        dataset = AMAZON_RO(args)
    
    # if args.dataset == 'Amazon_MT':
    #     args.dataset = 'Amazonia_Legal/'
    #     dataset = AMAZON_MT(args)
        
    elif args.dataset == 'Amazon_PA':
        args.dataset = 'Amazonia_Legal/'
        dataset = AMAZON_PA(args)
    
    elif args.dataset == 'Cerrado_MA':
        args.dataset = 'Cerrado_Biome/'
        dataset = CERRADO_MA(args)
    
    if not os.path.exists('./results_avg/'):
        os.makedirs('./results_avg/')
    
    args.average_results_dir = './results_avg/' + args.results_dir + '/'
    if not os.path.exists(args.average_results_dir):
        os.makedirs(args.average_results_dir)
        
    args.results_dir = './results/' + args.results_dir + '/'
    args.checkpoint_dir = './checkpoints/' + args.checkpoint_dir + '/'
    counter = 0
    files = os.listdir(args.results_dir)
    initial_flag = True
    # Accumulate the hit maps of every run so they can be averaged below.
    for file_name in files:
        if file_name == 'Results.txt':
            print('Results file')
        else:
            Hit_map_path = args.results_dir + file_name + '/hit_map.npy'
            if os.path.exists(Hit_map_path):
                hit_map = np.load(Hit_map_path)
                counter += 1
                if initial_flag:
                    HIT_MAP = np.zeros_like(hit_map)
                    initial_flag = False
                HIT_MAP += hit_map
    
    dataset.Tiles_Configuration(args, 0)
    # Guard against an empty results directory before averaging.
    if counter == 0:
        raise RuntimeError('No hit_map.npy files found under ' + args.results_dir)
    Avg_hit_map = HIT_MAP / counter
    args.file = 'Avg_Scores'
    args.results_dir = args.average_results_dir
    if not os.path.exists(args.results_dir + args.file + '/'):
        os.makedirs(args.results_dir + args.file + '/')
    
    if args.save_result_text:
        # Open a file in order to save the metrics history
        f = open(args.results_dir + "Results.txt", "a")
        # counter was already incremented in the accumulation loop above, so
        # initialize the score lists unconditionally (checking counter == 0
        # here would leave them undefined and crash the appends below).
        ACCURACY_ = []
        FSCORE_ = []
        RECALL_ = []
        PRECISION_ = []
        ALERT_AREA_ = []
            
    ACCURACY, FSCORE, RECALL, PRECISION, CONFUSION_MATRIX, ALERT_AREA = Metrics_For_Test_M(
        Avg_hit_map, dataset.references[0], dataset.references[1],
        dataset.Train_tiles, dataset.Valid_tiles, dataset.Undesired_tiles, args)
                
    if args.save_result_text:
        
        ACCURACY_.append(ACCURACY[0,0])
        FSCORE_.append(FSCORE[0,0])
        RECALL_.append(RECALL[0,0])
        PRECISION_.append(PRECISION[0,0])
        ALERT_AREA_.append(ALERT_AREA[0,0])
        #histories.sendLoss(loss = FSCORE[0 , 0], epoch = i, total_epochs = len(files))
        f.write("Run: %d Accuracy: %.2f%% F1-Score: %.2f%% Recall: %.2f%% Precision: %.2f%% Area: %.2f%% File Name: %s\n" %
                (counter, ACCURACY[0, 0], FSCORE[0, 0], RECALL[0, 0], PRECISION[0, 0], ALERT_AREA[0, 0], args.file))
        f.close()
        print(ACCURACY_)
    else:
        print('Coming up!')
        #histories.sendLoss(loss = 0.0, epoch = i, total_epochs = len(files))

    
    if args.save_result_text:
        f = open(args.results_dir + "Results.txt","a")
        ACCURACY_m = np.mean(ACCURACY_)
        FSCORE_m = np.mean(FSCORE_)
        RECALL_m = np.mean(RECALL_)
        PRECISION_m = np.mean(PRECISION_)
        ALERT_AREA_m = np.mean(ALERT_AREA_)
        
        
        ACCURACY_s = np.std(ACCURACY_)
        FSCORE_s = np.std(FSCORE_)
        RECALL_s = np.std(RECALL_)
        PRECISION_s = np.std(PRECISION_) 
        ALERT_AREA_s = np.std(ALERT_AREA_)
        
        #histories.sendLoss(loss = FSCORE_m, epoch = i + 1, total_epochs = len(files) + 1)
        f.write("Mean: %d Accuracy: %.2f%% F1-Score: %.2f%% Recall: %.2f%% Precision: %.2f%% Area: %.2f%%\n" % (0, ACCURACY_m, FSCORE_m, RECALL_m, PRECISION_m, ALERT_AREA_m))
        f.write("Std: %d Accuracy: %.2f%% F1-Score: %.2f%% Recall: %.2f%% Precision: %.2f%% Area: %.2f%%\n" % (0, ACCURACY_s, FSCORE_s, RECALL_s, PRECISION_s, ALERT_AREA_s))
        f.close()
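Example 4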
def Main():
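    """Score each run's hit map in args.results_dir separately, then append per-run and mean/std metrics to Results.txt."""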
    # histories = Customdash(ModelName = 'SLVC06_Metrics_compute',
    #                        email = '*****@*****.**',
    #                        password = '******')
    #Thresholds = np.linspace(1 , 0.01, 50)
    if args.dataset == 'Amazon_RO':
        args.dataset = 'Amazonia_Legal/'
        dataset = AMAZON_RO(args)

    # if args.dataset == 'Amazon_MT':
    #     args.dataset = 'Amazonia_Legal/'
    #     dataset = AMAZON_MT(args)

    elif args.dataset == 'Amazon_PA':
        args.dataset = 'Amazonia_Legal/'
        dataset = AMAZON_PA(args)

    elif args.dataset == 'Cerrado_MA':
        args.dataset = 'Cerrado_Biome/'
        dataset = CERRADO_MA(args)
    args.results_dir = './results/' + args.results_dir + '/'
    args.checkpoint_dir = './checkpoints/' + args.checkpoint_dir + '/'
    counter = 0
    files = os.listdir(args.results_dir)
    for i in range(len(files)):
        if files[i] == 'Results.txt':
            print('Results file')
        else:
            Hit_map_path = args.results_dir + files[i] + '/hit_map.npy'
            args.file = files[i]
            if os.path.exists(Hit_map_path):
                hit_map = np.load(Hit_map_path)
                fields_file = files[i].split('_')
                # Rebuild the matching checkpoint folder name from the results-folder name fields.
                checkpoint_name = '_'.join([fields_file[0], fields_file[3], fields_file[1],
                                            fields_file[4], fields_file[5], fields_file[6],
                                            fields_file[7], fields_file[8], fields_file[9],
                                            fields_file[10], fields_file[11]])
                # The checkpoint path is kept so the original train, validation, and test tiles can be recovered if needed.
                args.save_checkpoint_path = args.checkpoint_dir + checkpoint_name + '/'
                dataset.Tiles_Configuration(args, i)

                if args.save_result_text:
                    # Open a file in order to save the metrics history
                    f = open(args.results_dir + "Results.txt", "a")
                    if counter == 0:
                        ACCURACY_ = []
                        FSCORE_ = []
                        RECALL_ = []
                        PRECISION_ = []
                        ALERT_AREA_ = []

                ACCURACY, FSCORE, RECALL, PRECISION, CONFUSION_MATRIX, ALERT_AREA = Metrics_For_Test_M(
                    hit_map, dataset.references[0], dataset.references[1],
                    dataset.Train_tiles, dataset.Valid_tiles,
                    dataset.Undesired_tiles, args)

                if args.save_result_text:

                    ACCURACY_.append(ACCURACY[0, 0])
                    FSCORE_.append(FSCORE[0, 0])
                    RECALL_.append(RECALL[0, 0])
                    PRECISION_.append(PRECISION[0, 0])
                    ALERT_AREA_.append(ALERT_AREA[0, 0])
                    #histories.sendLoss(loss = FSCORE[0 , 0], epoch = i, total_epochs = len(files))
                    f.write(
                        "Run: %d Accuracy: %.2f%% F1-Score: %.2f%% Recall: %.2f%% Precision: %.2f%% Area: %.2f%% File Name: %s\n"
                        % (counter, ACCURACY[0, 0], FSCORE[0, 0], RECALL[0, 0],
                           PRECISION[0, 0], ALERT_AREA[0, 0], args.file))
                    f.close()
                    print(ACCURACY_)
                else:
                    print('Coming up!')
                    #histories.sendLoss(loss = 0.0, epoch = i, total_epochs = len(files))

                counter += 1

    if args.save_result_text:
        f = open(args.results_dir + "Results.txt", "a")
        ACCURACY_m = np.mean(ACCURACY_)
        FSCORE_m = np.mean(FSCORE_)
        RECALL_m = np.mean(RECALL_)
        PRECISION_m = np.mean(PRECISION_)
        ALERT_AREA_m = np.mean(ALERT_AREA_)

        ACCURACY_s = np.std(ACCURACY_)
        FSCORE_s = np.std(FSCORE_)
        RECALL_s = np.std(RECALL_)
        PRECISION_s = np.std(PRECISION_)
        ALERT_AREA_s = np.std(ALERT_AREA_)

        #histories.sendLoss(loss = FSCORE_m, epoch = i + 1, total_epochs = len(files) + 1)
        f.write(
            "Mean: %d Accuracy: %.2f%% F1-Score: %.2f%% Recall: %.2f%% Precision: %.2f%% Area: %.2f%%\n"
            % (0, ACCURACY_m, FSCORE_m, RECALL_m, PRECISION_m, ALERT_AREA_m))
        f.write(
            "Std: %d Accuracy: %.2f%% F1-Score: %.2f%% Recall: %.2f%% Precision: %.2f%% Area: %.2f%%\n"
            % (0, ACCURACY_s, FSCORE_s, RECALL_s, PRECISION_s, ALERT_AREA_s))
        f.close()