def main(options, args):
    """Entry point: resolve the parameters file, then run test() on the
    two positional paths.

    Parameters
    ----------
    options : optparse Values object with a ``para_file`` attribute
        (path to the parameters file, or None).
    args : sequence of two paths, ``[input_path, output_path]``.
    """
    if options.para_file is None:
        basic.outputlogMessage('warning, no parameters file ')
    else:
        parameters.set_saved_parafile_path(options.para_file)

    # Renamed from input/output to avoid shadowing the ``input`` builtin.
    input_path = args[0]
    output_path = args[1]
    test(input_path, output_path)
Пример #2
0
def main(unused_argv):
    """Cut the images listed in FLAGS.list_file into training patches and
    convert them into the output dataset.

    Patch geometry (width, height, x/y overlap) is read from the
    parameters file named by FLAGS.para_file; results are written under
    FLAGS.output_dir.
    """
    data_root = FLAGS.image_folder
    list_txt = FLAGS.list_file

    ############## dataset processing
    parameters.set_saved_parafile_path(FLAGS.para_file)
    patch_w = parameters.get_digit_parameters("", "train_patch_width", None,
                                              'int')
    patch_h = parameters.get_digit_parameters("", "train_patch_height", None,
                                              'int')
    overlay_x = parameters.get_digit_parameters("", "train_pixel_overlay_x",
                                                None, 'int')
    overlay_y = parameters.get_digit_parameters("", "train_pixel_overlay_y",
                                                None, 'int')

    patches = make_dataset(data_root,
                           list_txt,
                           patch_w,
                           patch_h,
                           overlay_x,
                           overlay_y,
                           train=FLAGS.is_training)

    # os.makedirs is portable and avoids spawning a shell; the previous
    # os.system("mkdir -p " + dir) broke on paths containing spaces.
    os.makedirs(FLAGS.output_dir, exist_ok=True)

    # Flatten the per-image 2D list of patches into a single 1D list.
    patches_1d = [item for alist in patches for item in alist]
    _convert_dataset(patches_1d, train=FLAGS.is_training)
Пример #3
0
    '--edge',
    default=0,
    type=int,
    help=
    'switch to train edge, if this is on batchSize should be 1, 0 is off 1 is on'
)

parser.add_argument('--test',
                    action='store_true',
                    help='switch to test model, without train')

args = parser.parse_args()
print(args)

############## dataset processing
# Patch geometry (size, x/y overlap) and crop size come from the shared
# parameters file given on the command line, not from argparse flags.
parameters.set_saved_parafile_path(args.para)
patch_w = parameters.get_digit_parameters("", "train_patch_width", None, 'int')
patch_h = parameters.get_digit_parameters("", "train_patch_height", None,
                                          'int')
overlay_x = parameters.get_digit_parameters("", "train_pixel_overlay_x", None,
                                            'int')
overlay_y = parameters.get_digit_parameters("", "train_pixel_overlay_y", None,
                                            'int')
# NOTE(review): crop_height/crop_width are read here but not passed to the
# dataset or loader below — presumably used further down the file; confirm.
crop_height = parameters.get_digit_parameters("", "crop_height", None, 'int')
crop_width = parameters.get_digit_parameters("", "crop_width", None, 'int')
dataset = RemoteSensingImg(args.dataroot, args.list, patch_w, patch_h,
                           overlay_x, overlay_y, args.edge)
train_loader = torch.utils.data.DataLoader(dataset,
                                           batch_size=args.batchSize,
                                           num_workers=args.workers,
                                           shuffle=True)
Пример #4
0
    pass

if __name__ == "__main__":
    # Command-line interface for the bundle-adjust driver.
    usage = "usage: %prog [options] ref_image pts_files warp_image"
    parser = OptionParser(usage=usage, version="1.0 2019-2-3")
    parser.description = 'Introduction: bundle adjust for an image'
    parser.add_option("-o", "--output",
                      action="store", dest="output",
                      help="the output file path")
    parser.add_option("-p", "--para",
                      action="store", dest="para_file",
                      help="the parameters file")

    (options, args) = parser.parse_args()

    # Invoked with no arguments at all: show usage and bail out.
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(2)

    # A parameters file is mandatory; register it before running main().
    if options.para_file is None:
        print('error, no parameters file')
        parser.print_help()
        sys.exit(2)
    parameters.set_saved_parafile_path(options.para_file)

    main(options, args)
Пример #5
0
    def __init__(self, args):
        """Build the complete training setup for a binary-segmentation run.

        In order: experiment saver + TensorBoard writer; a 90/10
        train/validation split of a RemoteSensingImg dataset (patch
        geometry read from the parameters file named by args.para); a
        DeepLab network with 1x/10x learning-rate parameter groups; an
        SGD optimizer; a BCE criterion; an Evaluator; an LR scheduler;
        optional DataParallel/CUDA wrapping; optional checkpoint resume.

        args: parsed command-line namespace. Attributes read here include
        workers, para, dataroot, list, batch_size, backbone, out_stride,
        sync_bn, freeze_bn, lr, momentum, weight_decay, nesterov, cuda,
        lr_scheduler, epochs, resume, ft, start_epoch.
        """
        self.args = args

        # Define Saver (records experiment config/checkpoints on disk).
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()

        # Define Dataloader
        # NOTE(review): kwargs is built but never passed to the
        # DataLoader constructors below — confirm whether pin_memory
        # was meant to be used.
        kwargs = {'num_workers': args.workers, 'pin_memory': True}

        # Patch geometry comes from the shared parameters file.
        parameters.set_saved_parafile_path(args.para)
        patch_w = parameters.get_digit_parameters("", "train_patch_width", None, 'int')
        patch_h = parameters.get_digit_parameters("", "train_patch_height", None, 'int')
        overlay_x = parameters.get_digit_parameters("", "train_pixel_overlay_x", None, 'int')
        overlay_y = parameters.get_digit_parameters("", "train_pixel_overlay_y", None, 'int')
        # NOTE(review): crop_height/crop_width are read but unused in
        # this constructor — presumably consumed elsewhere; confirm.
        crop_height = parameters.get_digit_parameters("", "crop_height", None, 'int')
        crop_width = parameters.get_digit_parameters("", "crop_width", None, 'int')

        dataset = RemoteSensingImg(args.dataroot, args.list, patch_w, patch_h, overlay_x, overlay_y)

        # Random 90/10 train/validation split of the full dataset.
        train_length = int(len(dataset) * 0.9)
        validation_length = len(dataset) - train_length
        [self.train_dataset, self.val_dataset] = torch.utils.data.random_split(dataset, (train_length, validation_length))
        print("len of train dataset is %d and val dataset is %d and total datalen is %d"%(len(self.train_dataset),len(self.val_dataset),len(dataset)))
        # drop_last=True keeps every batch full-sized (required e.g. when
        # batch-norm sync assumes a fixed batch size).
        self.train_loader=torch.utils.data.DataLoader(self.train_dataset, batch_size=args.batch_size,num_workers=args.workers, shuffle=True,drop_last=True)
        self.val_loader=torch.utils.data.DataLoader(self.val_dataset, batch_size=args.batch_size,num_workers=args.workers, shuffle=True,drop_last=True)
        print("len of train loader is %d and val loader is %d"%(len(self.train_loader),len(self.val_loader)))

        # Define network (single-channel output: binary segmentation).
        model = DeepLab(num_classes=1,
                        backbone=args.backbone,
                        output_stride=args.out_stride,
                        sync_bn=args.sync_bn,
                        freeze_bn=args.freeze_bn)

        # Backbone parameters train at the base LR, head parameters at 10x.
        train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},
                        {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]

        # Define Optimizer
        optimizer = torch.optim.SGD(train_params, momentum=args.momentum,
                                    weight_decay=args.weight_decay, nesterov=args.nesterov)

        # Define Criterion
        # NOTE(review): nn.BCELoss expects the network output to already
        # be in [0, 1] (e.g. after a sigmoid) — confirm DeepLab's head
        # applies one, otherwise BCEWithLogitsLoss would be needed.
        self.criterion=nn.BCELoss()

        if args.cuda:
            self.criterion=self.criterion.cuda()

        self.model, self.optimizer = model, optimizer

        # Define Evaluator (2 classes: foreground / background).
        self.evaluator = Evaluator(2)
        # Define lr scheduler (steps once per training batch).
        print("lenght of train_loader is %d"%(len(self.train_loader)))
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,
                                            args.epochs, len(self.train_loader))

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'" .format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                # DataParallel wraps the real model in .module.
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                # Fine-tuning restarts the optimizer; otherwise restore it.
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {}) with best mIoU {}"
                  .format(args.resume, checkpoint['epoch'], checkpoint['best_pred']))

        # Clear start epoch if fine-tuning
        if args.ft:
            args.start_epoch = 0
            self.best_pred=0
Пример #6
0
def test_gdalwarp(
        filedir='/Users/huanglingcao/Data/landsat_offset_test/coregistration_test/L7_B8_test_2/',
        file1='LE70080111999288EDC00_B8.TIF',
        file2='LE70080112000083KIS00_B8.TIF'):
    """Smoke-test coregistration_siftGPU on a pair of Landsat-7 band-8 tiles.

    The working directory and the two input files were previously
    hard-coded; they are now defaulted parameters so the test can run on
    other machines/data without editing the function (backward compatible:
    calling with no arguments behaves exactly as before).

    Returns True on success, False when coregistration fails.
    Note: changes the process working directory as a side effect.
    """
    os.chdir(filedir)

    warpresultfile = coregistration_siftGPU(file1, file2, True)
    # coregistration_siftGPU signals failure by returning False.
    return warpresultfile is not False


if __name__ == '__main__':
    # Expect exactly two positional arguments: base image and warp image.
    if len(sys.argv) == 3:
        basefile, warpfile = sys.argv[1], sys.argv[2]
    else:
        print(' Input error, Try to do like this:')
        print('geometryProcess.py basefile warpfile ')
        sys.exit(1)

    # Keep intermediate files for inspection; parameters come from para.ini
    # in the current directory.
    bkeepmidfile = True
    parameters.set_saved_parafile_path('para.ini')

    coregistration_siftGPU(basefile, warpfile, bkeepmidfile)
Пример #7
0
def main(options, args):
    """Dispatch DEM preparation/conversion tasks selected by options.what_to_do.

    Tasks:
      1 - prepare GIMP DEM for Jakobshavn (work_dir).
      2 - write geoid heights for a fixed lat/lon range to options.output.
      3 - geoid heights for a reference image's extent (needs ref_image arg).
      4 - convert orthometric DEM heights to ellipsoidal (needs two DEM args).
      5 - calculate terrain offset (needs dem_file and image_file args).
      6 - prepare GTOPO30 for Jakobshavn.

    Parameters
    ----------
    options : optparse Values with what_to_do, output, para_file attributes.
    args : positional arguments; args[0] is always the working directory,
        further items depend on the task (see above).

    Returns True on success; exits the process on usage errors.

    Note: the former Python 2 ``print`` statements were converted to
    single-argument ``print(...)`` calls, which behave identically under
    Python 2 and are required under Python 3 — consistent with the rest
    of the file.
    """
    length = len(args)
    want_to_do = options.what_to_do
    work_dir = args[0]
    basic.setlogfile('dem_prepare_log.txt')
    parameters.set_saved_parafile_path(options.para_file)
    if want_to_do == 1:
        if prepare_gimpdem_for_Jakobshavn(work_dir):
            basic.outputlogMessage('process sucess')
        else:
            basic.outputlogMessage('process failed')
    elif want_to_do == 2:
        outputfile = options.output
        # Fixed bounding box over the Jakobshavn region (degrees).
        up = 72
        down = 69
        left = -57
        right = -40
        get_range_geoid_height(outputfile, up, down, left, right)
    elif want_to_do == 3:
        if length == 2:
            outputfile = options.output
            ref_image = args[1]
            # ``!= False`` kept deliberately: the helper may return a
            # non-boolean result that must still count as success.
            if get_geoimage_range_geoid_height(outputfile, ref_image) != False:
                return True
        print('Input error, Try to do like this: ')
        print('DemConvert.py  -a 3 -o outputfile work_dir ref_image')
        sys.exit(1)

    elif want_to_do == 4:
        if length == 3:
            orthometric_demfile = args[1]
            geoid_demfile = args[2]
            outputfile = io_function.get_name_by_adding_tail(
                orthometric_demfile, "ellip")
            if convert_orthometricH_to_elliopsoidalH(
                    outputfile, orthometric_demfile, geoid_demfile) != False:
                return True
        print('Input error, Try to do like this: ')
        print('DemConvert.py -a 4  work_dir  orthometric_demfile geoid_demfile')
        sys.exit(1)

    elif want_to_do == 5:
        if length == 3:
            outputfile = options.output
            dem_file = args[1]
            image_file = args[2]
            exec_dir = os.path.expanduser('~') + '/bin/'
            if calculate_terrain_offset(outputfile, dem_file, image_file,
                                        exec_dir) != False:
                return True
        print('Input error, Try to do like this: ')
        print('DemConvert.py  -a 5 -o output work_dir dem_file image_file')
        sys.exit(1)

    elif want_to_do == 6:
        if length != 3:
            print('Input error, Try to do like this: ')
            print('DemConvert.py work_dir what_to_do')
            sys.exit(1)
        print(prepare_GTOPO30_for_Jakobshavn(work_dir))

    else:
        basic.outputlogMessage('nothing to do')

    return True