Example #1
from torch.utils.data import DataLoader

# `Dataset` below is the project's own dataset class (its import is not shown in the excerpt).


def make_data_loader(batch_size=16):
    """
    Prepare the train/val/test datasets and wrap each in a DataLoader.
    You don't have to change this function.
    """
    train_set = Dataset(split="train")
    val_set = Dataset(split="val")
    test_set = Dataset(split="test")
    num_class = train_set.NUM_CLASSES

    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

    return train_loader, val_loader, test_loader, num_class
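A typical call site for this helper; the (image, label) batch structure is an assumption about what the project's Dataset yields:

train_loader, val_loader, test_loader, num_class = make_data_loader(batch_size=32)

for images, labels in train_loader:  # assumed (image, label) batches
    pass  # training step goes here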
Example #2
def make_data_loader(batch_size=16, is_develop=False):
    """
    Prepare the train/val/test datasets and wrap each in a DataLoader.
    You don't have to change this function.
    """
    lidc = Lidc(is_develop=is_develop)
    img_list = lidc.img_list
    mask_list = lidc.mask_list
    train_set = Dataset(img_list, mask_list, split="train")
    val_set = Dataset(img_list, mask_list, split="val")
    test_set = Dataset(img_list, mask_list, split="test")
    num_class = train_set.NUM_CLASSES

    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

    return train_loader, val_loader, test_loader, num_class
Example #3
from typing import List

from torch.utils import data


def preprocess_data(data_config: object, img_list: List, lbl_list: List,
                    batch_size: int, mode: str):
    """Preprocess the dataset and wrap it in a DataLoader.

    Parameters
    ----------
    data_config : object
        data configuration
    img_list : List
        a list of image paths
    lbl_list : List
        a list of labels
    batch_size : int
        batch size
    mode : str
        'train' or 'eval'

    Returns
    -------
    DataLoader
        a DataLoader over the transformed dataset

    Raises
    ------
    ValueError
        if the mode is not 'train' or 'eval'
    """
    # transform: resize, then normalize with the configured mean/std
    resize = (data_config.img_size[0], data_config.img_size[1])
    color_mean = tuple(data_config.color_mean)
    color_std = tuple(data_config.color_std)
    transform = DataTransform(resize, color_mean, color_std, mode)

    # dataset (Dataset and DataTransform are project classes not shown here)
    dataset = Dataset(img_list, lbl_list, transform)

    # dataloader: shuffle only in training mode
    if mode == 'train':
        return data.DataLoader(dataset,
                               batch_size=batch_size,
                               shuffle=True,
                               num_workers=2)
    elif mode == 'eval':
        return data.DataLoader(dataset,
                               batch_size=batch_size,
                               shuffle=False,
                               num_workers=2)
    else:
        raise ValueError(
            f"mode must be 'train' or 'eval', got '{mode}'")
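A minimal usage sketch for this helper, assuming a configuration object exposing the attributes read above; SimpleNamespace, the concrete values, and the file lists are illustrative stand-ins, not from the original:

from types import SimpleNamespace

# Illustrative configuration and file lists (values are assumptions).
data_config = SimpleNamespace(img_size=(475, 475),
                              color_mean=(0.485, 0.456, 0.406),
                              color_std=(0.229, 0.224, 0.225))
img_list = ['img/0001.jpg', 'img/0002.jpg']
lbl_list = ['lbl/0001.png', 'lbl/0002.png']

# 'train' shuffles; 'eval' keeps order; anything else raises ValueError.
train_loader = preprocess_data(data_config, img_list, lbl_list,
                               batch_size=8, mode='train')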
Example #4

if __name__ == "__main__":
    import functools
    import os

    import torch
    from torchvision import transforms

    from model.fcos import FCOSDetector
    from dataloader.dataset import Dataset
    from tensorboardX import SummaryWriter

    # `device` is defined elsewhere in the source file; a CUDA-if-available
    # default is assumed here.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    images_root = '/home'
    val_path = '/mnt/hdd1/benkebishe01/data/val.txt'
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])
    val_loader = torch.utils.data.DataLoader(Dataset(images_root,
                                                     val_path,
                                                     img_size=512,
                                                     transform=transform,
                                                     train=False),
                                             batch_size=64,
                                             shuffle=False)
    draw = False
    if draw:
        writer = SummaryWriter(comment='voc_mAP')

    model = FCOSDetector(mode="inference")
    # model.load_state_dict(torch.load("/mnt/hdd1/benkebishe01/FCOS/80epoch/fcos_voc_3/voc_epoch80_loss0.8893.pth"))
    model = model.to(device).eval()
    print("===>success loading model")

    # `model_root` is defined elsewhere in the source file (cf. Example #7);
    # checkpoint names are sorted with the custom `compare` function.
    names = os.listdir(model_root)
    names = sorted(names, key=functools.cmp_to_key(compare))
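The sorted checkpoint names are presumably evaluated in order; a minimal sketch of that loop, assuming each entry in `names` is a state-dict file under `model_root` (the mAP step is illustrative, not from the original):

    for name in names:
        model.load_state_dict(
            torch.load(os.path.join(model_root, name), map_location=device))
        model.eval()
        # run inference over val_loader and log/accumulate mAP here (illustrative)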
Example #5
weight_path1 = '../src/logs/multimodal/mixup-lam_2-latest/22184_7_best_1.2906.pth'
weight_path2 = '../src/logs/multimodal/mixup-lam_2-latest-newseed/55460_19_best_1.2704.pth'
weight_path3 = '../src/logs/multimodal/mixup-lam_2-latest-newnewseed/30503_10_best_1.2922.pth'
weight_path4 = '../src/logs/multimodal/mixup-lam_2-seed1234-bigger-models/44368_7_best_1.2946.pth'

test_speech_root_dir = '../features/test/speech'
test_face_root_dir = '../features/test/video'
test_text_root_dir = '../features/test/text'
test_files = [
    file for file in os.listdir(test_speech_root_dir) if '.npy' in file
]
test_files.sort()
test_dataset = Dataset(speech_root_dir=test_speech_root_dir,
                       video_root_dir=test_face_root_dir,
                       text_root_dir=test_text_root_dir,
                       file_list=test_files,
                       label_smoothing=0,
                       is_train=False,
                       is_test=True,
                       n_frames=32)
test_loader = data.DataLoader(dataset=test_dataset,
                              batch_size=32,
                              num_workers=20,
                              shuffle=False)

test3_speech_root_dir = '../features/test3/speech'
test3_face_root_dir = '../features/test3/video'
test3_text_root_dir = '../features/test3/text'
test3_files = [
    file for file in os.listdir(test3_speech_root_dir) if '.npy' in file
]
test3_files.sort()
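The excerpt breaks off here; by analogy with the test split above, test3 would presumably be wrapped the same way (an assumed continuation, not in the original):

# assumed continuation (mirrors the test dataset/loader above)
test3_dataset = Dataset(speech_root_dir=test3_speech_root_dir,
                        video_root_dir=test3_face_root_dir,
                        text_root_dir=test3_text_root_dir,
                        file_list=test3_files,
                        label_smoothing=0,
                        is_train=False,
                        is_test=True,
                        n_frames=32)
test3_loader = data.DataLoader(dataset=test3_dataset,
                               batch_size=32,
                               num_workers=20,
                               shuffle=False)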
Example #6
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)

BATCH_SIZE = 16
EPOCHS = 60
WARMUP_STEPS_RATIO = 0.12

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

# 2012_train 2007_val
cfg = {
    'images_root': '/home',
    'train_path': '/mnt/hdd1/benkebishe01/data/train.txt',
    'test_path': '/mnt/hdd1/benkebishe01/data/val.txt',
    'img_size': 512
}

train_dataset = Dataset(cfg['images_root'], cfg['train_path'], img_size=cfg['img_size'], transform=transform, train=True)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    Dataset(cfg['images_root'], cfg['test_path'], img_size=cfg['img_size'],
            transform=transform, train=False),
    batch_size=4,
    shuffle=False)

steps_per_epoch = len(train_dataset) // BATCH_SIZE
TOTAL_STEPS = steps_per_epoch * EPOCHS
WARMUP_STEPS = TOTAL_STEPS * WARMUP_STEPS_RATIO

# global GLOBAL_STEPS
GLOBAL_STEPS = 1
LR_INIT = 5e-5
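The warmup constants above only make sense together with a step-based schedule applied in the training loop. A minimal sketch of such a schedule, assuming linear warmup from LR_INIT up to the optimizer's base rate of 1e-4 (the helper name `lr_at_step` and the hold-after-warmup behavior are assumptions, not from the original):

def lr_at_step(step, base_lr=1e-4):
    # Linearly ramp from LR_INIT to base_lr over WARMUP_STEPS, then hold.
    if step < WARMUP_STEPS:
        return LR_INIT + (base_lr - LR_INIT) * step / WARMUP_STEPS
    return base_lr

# Inside the training loop (illustrative):
# for param_group in optimizer.param_groups:
#     param_group['lr'] = lr_at_step(GLOBAL_STEPS)
# GLOBAL_STEPS += 1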
Example #7
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

# 2012_train 2007_val
cfg = {
    'images_root': '/home',
    'train_path': '/mnt/hdd1/benkebishe01/data/train.txt',
    'test_path': '/mnt/hdd1/benkebishe01/data/val.txt',
    'img_size': 512
}

test_loader = torch.utils.data.DataLoader(Dataset(cfg['images_root'],
                                                  cfg['test_path'],
                                                  img_size=cfg['img_size'],
                                                  transform=transform,
                                                  train=False),
                                          batch_size=4,
                                          shuffle=False)

draw = False

if draw:
    writer = SummaryWriter(comment='test_voc_one')

# model_root = "/mnt/hdd1/benkebishe01/FCOS/fcos_giou_one_new"
model_root = "/mnt/hdd1/benkebishe01/FCOS/fcos_voc_one"


def compare(x, y):
    # The excerpt is truncated here. Assumed body: order checkpoint files by
    # the epoch number embedded in names like 'voc_epoch80_loss0.8893.pth'
    # (the parsing scheme is a guess, not from the original).
    def epoch_of(name):
        return int(name.split('epoch')[1].split('_')[0])
    return epoch_of(x) - epoch_of(y)
Example #8
    if opt.experiment_name is not None:
        print(f"Experiment name: {opt.experiment_name}")

    print(f"Run directory: {opt.run_dir}")

    # Pick the selected model
    if opt.model == 'DeepNAG':
        model_t = DeepNAG
        print('Selected model: DeepNAG')
    elif opt.model == 'DeepGAN':
        model_t = DeepGAN
        print('Selected model: DeepGAN')
    else:
        raise ValueError(f"Unknown model '{opt.model}'")

    # Determine the computation device to use
    if opt.use_cuda:
        print("Using CUDA")
        device = torch.device('cuda:0')
    else:
        print("Using CPU")
        device = torch.device('cpu')

    # Instantiate the dataset
    dataset = Dataset.instantiate(opt)

    if opt.evaluate is None:
        train(model_t, dataset, device)
    else:
        evaluate(model_t, dataset, device)
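For context, a minimal argparse setup that would produce the `opt` namespace consumed above; the flag names are inferred from the attributes used, and the defaults are assumptions:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--experiment-name', default=None)   # -> opt.experiment_name
parser.add_argument('--run-dir', default='runs')         # -> opt.run_dir
parser.add_argument('--model', choices=['DeepNAG', 'DeepGAN'], required=True)
parser.add_argument('--use-cuda', action='store_true')
parser.add_argument('--evaluate', default=None,
                    help='checkpoint to evaluate; omit to train')
opt = parser.parse_args()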
Example #9
]

if config.mode not in ('speech', 'multimodal'):
    train_speech_root_dir = None
    valid_speech_root_dir = None
if config.mode not in ('text', 'multimodal'):
    train_text_root_dir = None
    valid_text_root_dir = None
if config.mode not in ('face', 'multimodal'):
    train_face_root_dir = None
    valid_face_root_dir = None

train_dataset = Dataset(speech_root_dir=train_speech_root_dir,
                        video_root_dir=train_face_root_dir,
                        text_root_dir=train_text_root_dir,
                        file_list=train_files,
                        label_smoothing=config.label_smoothing,
                        is_train=True,
                        n_frames=16)
valid_dataset = Dataset(speech_root_dir=valid_speech_root_dir,
                        video_root_dir=valid_face_root_dir,
                        text_root_dir=valid_text_root_dir,
                        file_list=valid_files,
                        label_smoothing=0,
                        is_train=False,
                        n_frames=16)
train_loader = data.DataLoader(dataset=train_dataset,
                               batch_size=config.batch_size,
                               num_workers=config.num_workers,
                               shuffle=True)
valid_loader = data.DataLoader(dataset=valid_dataset,
                               batch_size=config.batch_size,
                               num_workers=config.num_workers,
                               # assumed completion: the excerpt is truncated
                               # here; validation is not shuffled (cf. Example #5)
                               shuffle=False)