Example #1
0
def get_datasets(args, test_seed_offset=0):
    """Get training and test datasets for the custom S3DIS-style layout.

    Reads superpoint-graph .h5 files from ``Area_custom``: the area goes
    to the training set unless it matches ``args.cvfold``, and is always
    used as the test set. Returns a (train, test) pair of torchnet
    ListDatasets wrapped with the spg loader.
    """
    trainlist, testlist = [], []

    # Only the "custom" area exists in this layout.
    for area in ["custom"]:
        if area == args.cvfold:
            continue
        area_path = '{}/superpoint_graphs/Area_{}/'.format(args.S3DIS_PATH, area)
        trainlist.extend(
            spg.spg_reader(args, area_path + fname, True)
            for fname in sorted(os.listdir(area_path))
            if fname.endswith(".h5"))

    # The "custom" area also serves as the evaluation set.
    test_path = '{}/superpoint_graphs/Area_{}/'.format(args.S3DIS_PATH, "custom")
    testlist.extend(
        spg.spg_reader(args, test_path + fname, True)
        for fname in sorted(os.listdir(test_path))
        if fname.endswith(".h5"))

    # Rescale edge attributes to [0, 1] when requested.
    if args.spg_attribs01:
        trainlist, testlist = spg.scaler01(trainlist, testlist)

    return tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in trainlist],
                                    functools.partial(spg.loader, train=True, args=args, db_path=args.S3DIS_PATH)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in testlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.S3DIS_PATH, test_seed_offset=test_seed_offset))
Example #2
0
def get_datasets(args, test_seed_offset=0):
    """ Gets training and test datasets.

    Graph files live under 'graph_v' (args.data_mode == 'voxel') or
    'graph_p' (otherwise). A file's area id — assumed to be the digit at
    index 5 of its name, TODO confirm naming convention — selects the
    split: files matching args.cvfold become the test set, all other .h5
    files the training set.
    """
    testlist, trainlist = [], []

    subdir = 'graph_v' if args.data_mode == 'voxel' else 'graph_p'
    path = os.path.join(args.S3DIS_PATH, subdir)

    for fname in sorted(os.listdir(path)):
        # BUG FIX: the area digit was parsed BEFORE the .h5 check, so any
        # stray file (hidden files, short names, non-digit at index 5)
        # crashed with ValueError/IndexError. Filter first, parse after.
        if not fname.endswith(".h5"):
            continue
        area = int(fname[5])
        if area != args.cvfold:
            trainlist.append(
                spg.spg_reader(args, os.path.join(path, fname), True))
        else:  # area == args.cvfold — the held-out fold is the test set
            testlist.append(
                spg.spg_reader(args, os.path.join(path, fname), True))

    # Normalize edge features
    if args.spg_attribs01:
        trainlist, testlist = spg.scaler01(trainlist, testlist)

    return tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in trainlist],
                                    functools.partial(spg.loader, train=True, args=args, db_path=args.S3DIS_PATH)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in testlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.S3DIS_PATH, test_seed_offset=test_seed_offset))
def get_datasets(args, test_seed_offset=0):
    """Build the training and testing sets.

    Only a test set is available for this dataset: the training list
    stays empty, and every file under superpoint_graphs/formatted is
    read into the test list.
    """
    graph_root = args.CUSTOM_SET_PATH + '/superpoint_graphs'

    # Simple test-only organization: everything under 'formatted'.
    testset = ['formatted/' + f for f in os.listdir(graph_root + '/formatted')]

    trainlist = []
    testlist = [spg.spg_reader(args, graph_root + '/' + n, True)
                for n in testset]

    # Rescale edge attributes to [0, 1] when requested.
    if args.spg_attribs01:
        trainlist, testlist = spg.scaler01(trainlist, testlist)

    return tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in trainlist],
                                    functools.partial(spg.loader, train=True, args=args, db_path=args.CUSTOM_SET_PATH)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in testlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.CUSTOM_SET_PATH, test_seed_offset=test_seed_offset))
def get_datasets(args, test_seed_offset=0):
    """Gets Semantic3D training and test datasets.

    The train/validation split is hard-coded by station name. The train
    set is chosen by args.db_train_name ('train' or 'trainval') and the
    test set by args.db_test_name ('val', 'testred' or 'testfull').

    Returns:
        (train_dataset, test_dataset) torchnet ListDatasets.

    Raises:
        ValueError: if db_train_name or db_test_name is not recognized.
    """
    train_names = ['bildstein_station1', 'bildstein_station5', 'domfountain_station1', 'domfountain_station3', 'neugasse_station1', 'sg27_station1', 'sg27_station2', 'sg27_station5', 'sg27_station9', 'sg28_station4', 'untermaederbrunnen_station1']
    valid_names = ['bildstein_station3', 'domfountain_station2', 'sg27_station4', 'untermaederbrunnen_station3']

    if args.db_train_name == 'train':
        trainset = ['train/' + f for f in train_names]
    elif args.db_train_name == 'trainval':
        trainset = ['train/' + f for f in train_names + valid_names]
    else:
        # BUG FIX: an unknown value used to fall through and later raise a
        # confusing NameError on 'trainset'; fail fast with a clear message.
        raise ValueError('unknown db_train_name: {}'.format(args.db_train_name))

    if args.db_test_name == 'val':
        testset = ['train/' + f for f in valid_names]
    elif args.db_test_name == 'testred':
        testset = ['test_reduced/' + os.path.splitext(f)[0] for f in os.listdir(args.SEMA3D_PATH + '/superpoint_graphs/test_reduced')]
    elif args.db_test_name == 'testfull':
        testset = ['test_full/' + os.path.splitext(f)[0] for f in os.listdir(args.SEMA3D_PATH + '/superpoint_graphs/test_full')]
    else:
        # BUG FIX: same unbound-name hazard for 'testset'.
        raise ValueError('unknown db_test_name: {}'.format(args.db_test_name))

    # Load superpoints graphs
    testlist, trainlist = [], []
    for n in trainset:
        trainlist.append(spg.spg_reader(args, args.SEMA3D_PATH + '/superpoint_graphs/' + n + '.h5', True))
    for n in testset:
        testlist.append(spg.spg_reader(args, args.SEMA3D_PATH + '/superpoint_graphs/' + n + '.h5', True))

    # Normalize edge features
    if args.spg_attribs01:
        trainlist, testlist = spg.scaler01(trainlist, testlist)

    return tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in trainlist],
                                    functools.partial(spg.loader, train=True, args=args, db_path=args.SEMA3D_PATH)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in testlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.SEMA3D_PATH, test_seed_offset=test_seed_offset))
Example #5
0
def get_datasets(args, test_seed_offset=0):
    """Get VKITTI training, test and validation datasets.

    Folds 01..06 other than args.cvfold are split into training and
    (when args.use_val_set) validation sets; the args.cvfold fold is the
    evaluation set. Returns (train, test, valid) torchnet ListDatasets.
    """
    testlist, trainlist, validlist = [], [], []

    # Hand-picked frames held out for validation.
    valid_names = ['0001_00000.h5','0001_00085.h5', '0001_00170.h5','0001_00230.h5','0001_00325.h5','0001_00420.h5', \
                   '0002_00000.h5','0002_00111.h5','0002_00223.h5','0018_00030.h5','0018_00184.h5','0018_00338.h5',\
                   '0020_00080.h5','0020_00262.h5','0020_00444.h5','0020_00542.h5','0020_00692.h5', '0020_00800.h5']

    for fold in range(1, 7):
        if fold == args.cvfold:
            continue
        path = '{}/superpoint_graphs/0{:d}/'.format(args.VKITTI_PATH, fold)
        for fname in sorted(os.listdir(path)):
            if not fname.endswith(".h5"):
                continue
            graph = spg.spg_reader(args, path + fname, True)
            if args.use_val_set and fname in valid_names:
                validlist.append(graph)   # validation set
            else:
                trainlist.append(graph)   # training set

    # The held-out fold is the evaluation set.
    path = '{}/superpoint_graphs/0{:d}/'.format(args.VKITTI_PATH, args.cvfold)
    for fname in sorted(os.listdir(path)):
        if fname.endswith(".h5"):
            testlist.append(spg.spg_reader(args, path + fname, True))

    # Rescale edge attributes to [0, 1] when requested.
    if args.spg_attribs01:
        trainlist, testlist, validlist = spg.scaler01(trainlist, testlist, validlist=validlist)

    return tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in trainlist],
                                    functools.partial(spg.loader, train=True, args=args, db_path=args.VKITTI_PATH)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in testlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.VKITTI_PATH, test_seed_offset=test_seed_offset)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in validlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.VKITTI_PATH, test_seed_offset=test_seed_offset))
Example #6
0
def get_datasets(args, test_seed_offset=0):
    """Gets SemanticKITTI training, test and validation datasets.

    Sequence 90 is the train split, 8 the validation split and 10 the
    test split (hard-coded below).

    Returns:
        (train, test, valid) torchnet ListDatasets plus the edge-feature
        scaler (None when args.spg_attribs01 is falsy).
    """

    def _read_sequences(seq_ids):
        # Read every superpoint graph (.h5) for the given sequence ids.
        graphs = []
        for seq in seq_ids:
            path = '{}/superpoint_graphs/{:0>2d}/'.format(args.SKITTI_PATH, seq)
            for fname in sorted(os.listdir(path)):
                if fname.endswith(".h5"):
                    graphs.append(spg.spg_reader(args, path + fname, True))
        return graphs

    trainlist = _read_sequences([90])  # train
    validlist = _read_sequences([8])   # val
    testlist = _read_sequences([10])   # test

    # BUG FIX: 'scaler' was unbound (NameError at the return below) when
    # args.spg_attribs01 was falsy; default it to None.
    scaler = None
    # Normalize edge features
    if args.spg_attribs01:
        trainlist, testlist, validlist, scaler = spg.scaler01(trainlist, testlist, validlist=validlist)

    return tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in trainlist],
                                    functools.partial(spg.loader, train=True, args=args, db_path=args.SKITTI_PATH)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in testlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.SKITTI_PATH, test_seed_offset=test_seed_offset)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in validlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.SKITTI_PATH, test_seed_offset=test_seed_offset)), \
            scaler
def get_datasets(args, test_seed_offset=0):
    """Get S3DIS training, test and validation datasets.

    Areas 1..6 other than args.cvfold are split into training and (when
    args.use_val_set) validation sets; the args.cvfold area is the
    evaluation set. Returns (train, test, valid) torchnet ListDatasets.
    """
    testlist, trainlist, validlist = [], [], []

    # Rooms held out for validation (used to pick the best epoch when
    # the test set serves as the final evaluation set).
    valid_names = ['hallway_1.h5', 'hallway_6.h5', 'hallway_11.h5', 'office_1.h5' \
                 , 'office_6.h5', 'office_11.h5', 'office_16.h5', 'office_21.h5', 'office_26.h5' \
                 , 'office_31.h5', 'office_36.h5'\
                 ,'WC_2.h5', 'storage_1.h5', 'storage_5.h5', 'conferenceRoom_2.h5', 'auditorium_1.h5']

    for fold in range(1, 7):
        if fold == args.cvfold:
            continue
        path = '{}/superpoint_graphs/Area_{:d}/'.format(args.S3DIS_PATH, fold)
        for fname in sorted(os.listdir(path)):
            if not fname.endswith(".h5"):
                continue
            graph = spg.spg_reader(args, path + fname, True)
            if args.use_val_set and fname in valid_names:
                validlist.append(graph)   # validation set
            else:
                trainlist.append(graph)   # training set

    # The held-out area is the evaluation set.
    path = '{}/superpoint_graphs/Area_{:d}/'.format(args.S3DIS_PATH,
                                                    args.cvfold)
    for fname in sorted(os.listdir(path)):
        if fname.endswith(".h5"):
            testlist.append(spg.spg_reader(args, path + fname, True))

    # Rescale edge attributes to [0, 1] when requested.
    if args.spg_attribs01:
        trainlist, testlist, validlist = spg.scaler01(trainlist,
                                                      testlist,
                                                      validlist=validlist)

    return tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in trainlist],
                                    functools.partial(spg.loader, train=True, args=args, db_path=args.S3DIS_PATH)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in testlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.S3DIS_PATH, test_seed_offset=test_seed_offset)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in validlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.S3DIS_PATH, test_seed_offset=test_seed_offset))
Example #8
0
def get_datasets(args, test_seed_offset=0):
    """Build training and testing sets for the TANKER dataset.

    Reads every superpoint graph under
    <TANKER_PATH>/superpoint_graphs<num>/{train,test} and returns a
    (train, test) pair of torchnet ListDatasets.

    NOTE(review): ``num`` is a free variable here — it is not defined in
    this function nor visible in this file, so it must be a module-level
    global (presumably a dataset-split suffix). Confirm it is bound
    before this function runs, otherwise this raises NameError.
    """
    #for a simple train/test organization
    trainset = ['train/' + f for f in os.listdir(args.TANKER_PATH + '/superpoint_graphs'+num+'/train')]
    testset  = ['test/' + f for f in os.listdir(args.TANKER_PATH + '/superpoint_graphs'+num+'/test')]
    
    # Load superpoints graphs
    testlist, trainlist = [], []
    for n in trainset:
        trainlist.append(spg.spg_reader(args, args.TANKER_PATH + '/superpoint_graphs'+num+'/' + n, True))
    for n in testset:
        testlist.append(spg.spg_reader(args, args.TANKER_PATH + '/superpoint_graphs'+num+'/' + n , True))

    # Normalize edge features
    if args.spg_attribs01:
        trainlist, testlist = spg.scaler01(trainlist, testlist)

    return tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in trainlist],
                                    functools.partial(spg.loader, train=True, args=args, db_path=args.TANKER_PATH)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in testlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.TANKER_PATH, test_seed_offset=test_seed_offset))
def get_datasets(args, test_seed_offset=0):
    """Build training and testing sets for the AERIAL7 dataset.

    Uses an S3DIS-style cross-validation layout: areas 1..6 other than
    args.cvfold form the training set, and the args.cvfold area is the
    test set.
    """
    testlist, trainlist = [], []

    # Every area except the held-out fold contributes to training.
    for area in range(1, 7):
        if area == args.cvfold:
            continue
        path = '{}/superpoint_graphs/Area_{:d}/'.format(
            args.AERIAL7_PATH, area)
        trainlist.extend(
            spg.spg_reader(args, path + fname, True)
            for fname in sorted(os.listdir(path))
            if fname.endswith(".h5"))

    # The held-out fold is the test set.
    path = '{}/superpoint_graphs/Area_{:d}/'.format(args.AERIAL7_PATH,
                                                    args.cvfold)
    testlist.extend(
        spg.spg_reader(args, path + fname, True)
        for fname in sorted(os.listdir(path))
        if fname.endswith(".h5"))

    # Rescale edge attributes to [0, 1] when requested.
    if args.spg_attribs01:
        trainlist, testlist = spg.scaler01(trainlist, testlist)

    return tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in trainlist],
                                    functools.partial(spg.loader, train=True, args=args, db_path=args.AERIAL7_PATH)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in testlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.AERIAL7_PATH, test_seed_offset=test_seed_offset))
def get_datasets(args, test_seed_offset=0):
    """Gets Semantic3D training, test and validation datasets.

    Tiles are split by hard-coded name lists. The train set is selected
    by args.db_train_name ('train' or 'trainval'), the validation set by
    args.use_val_set, and the test set by args.db_test_name ('testred'
    or 'testfull'; anything else yields an empty test set).

    Returns:
        (train, test, valid) torchnet ListDatasets plus the edge-feature
        scaler (None when args.spg_attribs01 is falsy).

    Raises:
        ValueError: if db_train_name is not recognized.
    """
    train_names = [
        '6755_66525.h5', '6955_66645.h5', '6875_66585.h5', '6820_66550.h5',
        '6950_66640.h5', '6955_66650.h5', '7055_66455.h5', '7020_66300.h5',
        '7050_66335.h5', '6825_66560.h5', '7015_66550.h5', '7055_66350.h5',
        '7030_66525.h5', '6985_66650.h5', '7005_66575.h5', '6750_66520.h5',
        '6770_66530.h5', '6990_66595.h5', '6760_66530.h5', '6805_66540.h5',
        '7000_66580.h5', '6980_66650.h5', '7060_66385.h5', '7055_66410.h5',
        '7010_66565.h5', '7015_66555.h5', '6810_66545.h5', '7040_66495.h5',
        '6870_66580.h5', '7005_66565.h5', '7045_66485.h5', '7055_66445.h5',
        '7025_66530.h5', '6835_66565.h5', '7050_66340.h5', '7030_66315.h5',
        '7035_66325.h5', '6985_66605.h5', '7020_66305.h5', '7040_66505.h5',
        '6875_66580.h5', '6965_66630.h5', '7015_66560.h5', '7045_66500.h5',
        '7055_66340.h5', '7060_66420.h5', '6995_66575.h5', '7060_66400.h5',
        '6800_66540.h5', '7015_66545.h5', '7040_66325.h5', '7020_66310.h5',
        '7050_66330.h5', '7055_66420.h5', '6900_66605.h5', '6890_66600.h5',
        '6830_66560.h5', '6790_66540.h5', '6840_66565.h5', '6915_66610.h5',
        '7020_66540.h5', '7000_66570.h5', '6855_66570.h5', '7045_66335.h5',
        '6785_66535.h5', '6870_66585.h5', '6950_66645.h5', '6845_66570.h5',
        '6980_66610.h5', '6990_66650.h5', '6905_66610.h5', '6860_66575.h5',
        '7060_66360.h5', '6940_66635.h5', '6765_66530.h5', '6935_66635.h5',
        '6780_66530.h5', '6975_66650.h5', '7055_66345.h5', '7035_66520.h5',
        '6970_66620.h5', '7005_66570.h5', '6905_66605.h5', '6935_66625.h5',
        '7020_66550.h5', '7060_66395.h5', '6930_66625.h5', '7050_66450.h5',
        '7025_66305.h5', '7050_66475.h5', '6900_66600.h5', '6850_66565.h5',
        '6865_66575.h5', '7045_66495.h5', '6990_66590.h5', '6795_66540.h5',
        '7035_66320.h5', '7055_66460.h5', '6975_66615.h5', '6910_66610.h5',
        '7060_66380.h5', '7055_66335.h5', '6845_66565.h5', '6965_66635.h5',
        '6775_66530.h5', '6750_66525.h5', '6815_66545.h5', '6975_66620.h5'
    ]  #'6875_66590.h5',, '6960_66635.h5'
    valid_names = [
        '6835_66560.h5', '6960_66640.h5', '6880_66595.h5', '6860_66580.h5',
        '6955_66640.h5', '6795_66535.h5'
    ]
    if args.db_train_name == 'train':
        trainset = ['train/' + f for f in train_names]
    elif args.db_train_name == 'trainval':
        trainset = ['train/' + f for f in train_names + valid_names]
    else:
        # BUG FIX: an unknown value used to fall through and later raise a
        # confusing NameError on 'trainset'; fail fast with a clear message.
        raise ValueError('unknown db_train_name: {}'.format(args.db_train_name))

    validset = []
    testset = []
    if args.use_val_set:
        validset = ['train/' + f for f in valid_names]
    if args.db_test_name == 'testred':
        testset = [
            'test_reduced/' + os.path.splitext(f)[0]
            for f in os.listdir(args.SEMA3D_PATH +
                                '/superpoint_graphs/test_reduced')
        ]
    elif args.db_test_name == 'testfull':
        testset = [
            'test_full/' + os.path.splitext(f)[0]
            for f in os.listdir(args.SEMA3D_PATH +
                                '/superpoint_graphs/test_full')
        ]

    # Load superpoints graphs
    testlist, trainlist, validlist = [], [], []
    for n in trainset:
        trainlist.append(
            spg.spg_reader(args, args.SEMA3D_PATH + '/superpoint_graphs/' + n,
                           True))
    for n in validset:
        validlist.append(
            spg.spg_reader(args, args.SEMA3D_PATH + '/superpoint_graphs/' + n,
                           True))
    for n in testset:
        testlist.append(
            spg.spg_reader(
                args, args.SEMA3D_PATH + '/superpoint_graphs/' + n + '.h5',
                True))

    # BUG FIX: 'scaler' was unbound (NameError at the return below) when
    # args.spg_attribs01 was falsy; default it to None.
    scaler = None
    # Normalize edge features
    if args.spg_attribs01:
        trainlist, testlist, validlist, scaler = spg.scaler01(
            trainlist, testlist, validlist=validlist)

    return tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in trainlist],
                                    functools.partial(spg.loader, train=True, args=args, db_path=args.SEMA3D_PATH)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in testlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.SEMA3D_PATH, test_seed_offset=test_seed_offset)), \
           tnt.dataset.ListDataset([spg.spg_to_igraph(*tlist) for tlist in validlist],
                                    functools.partial(spg.loader, train=False, args=args, db_path=args.SEMA3D_PATH, test_seed_offset=test_seed_offset)),\
            scaler