Example #1
    def init_data_loaders(self, num_of_workers):
        """
        Create torch data loaders for the train and validation data.

        Args:
            num_of_workers (int): number of worker processes for each DataLoader

        Returns:
            train and validation data loaders
        """

        # training set, built with the windowing parameters taken from args
        train_dataset = ClassificationLoader(self.args.train_path,
                                             window_size=self.args.window_size,
                                             window_stride=self.args.window_stride,
                                             window_type=self.args.window_type,
                                             normalize=self.args.normalize,
                                             max_len=self.args.max_len)
        # oversample under-represented classes during training
        sampler_train = Datasets.ImbalancedDatasetSampler(train_dataset)

        # shuffling must be disabled whenever a sampler is supplied
        train_loader = \
            torch.utils.data.DataLoader(train_dataset,
                                        batch_size=self.args.batch_size, shuffle=False,
                                        num_workers=num_of_workers, pin_memory=self.args.cuda,
                                        sampler=sampler_train)

        # validation set, built with the same windowing parameters
        valid_dataset = ClassificationLoader(self.args.valid_path,
                                             window_size=self.args.window_size,
                                             window_stride=self.args.window_stride,
                                             window_type=self.args.window_type,
                                             normalize=self.args.normalize,
                                             max_len=self.args.max_len)

        # validation data is neither shuffled nor resampled
        valid_loader = \
            torch.utils.data.DataLoader(valid_dataset, batch_size=self.args.batch_size,
                                        shuffle=False, num_workers=num_of_workers,
                                        pin_memory=self.args.cuda, sampler=None)

        return train_loader, valid_loader
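
Only the call site of Datasets.ImbalancedDatasetSampler appears in these examples; its implementation is not shown. As a minimal sketch of how such a sampler is typically built (assuming the per-item class labels are available up front; the labels argument and the class name below are illustrative, not the actual Datasets API), one can subclass torch.utils.data.Sampler and draw indices with replacement, weighted by inverse class frequency:

import torch
from torch.utils.data import Sampler


class SimpleImbalancedSampler(Sampler):
    """Draws dataset indices with probability inversely proportional to class frequency."""

    def __init__(self, labels, num_samples=None):
        # labels: one integer class id per dataset item (illustrative input)
        self.labels = torch.as_tensor(labels, dtype=torch.long)
        self.num_samples = num_samples if num_samples is not None else len(self.labels)
        class_counts = torch.bincount(self.labels).float()
        # each item is weighted by the inverse frequency of its class
        self.weights = 1.0 / class_counts[self.labels]

    def __iter__(self):
        # sample with replacement so rare classes show up as often as common ones
        idx = torch.multinomial(self.weights, self.num_samples, replacement=True)
        return iter(idx.tolist())

    def __len__(self):
        return self.num_samples

An equivalent effect can be obtained with the built-in torch.utils.data.WeightedRandomSampler; either way the sampler is passed through the sampler= argument as above, which is why these loaders leave shuffling off.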
Example #2
                          lr=args.lr,
                          momentum=args.momentum)
else:
    optimizer = optim.SGD(speech_net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

# both splits take their class list from the training directory, keeping label indexing consistent
train_dataset = Datasets.SpeechYoloDataSet(classes_root_dir=args.train_data,
                                           this_root_dir=args.train_data,
                                           yolo_config=config_dict,
                                           augment=args.augment_data)
val_dataset = Datasets.SpeechYoloDataSet(classes_root_dir=args.train_data,
                                         this_root_dir=args.val_data,
                                         yolo_config=config_dict)

sampler_train = Datasets.ImbalancedDatasetSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=False,
                                           num_workers=20,
                                           pin_memory=args.cuda,
                                           sampler=sampler_train)

val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=False,
                                         num_workers=20,
                                         pin_memory=args.cuda,
                                         sampler=None)

if os.path.isfile(args.trained_yolo_model):  # model exists
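
The body of this if is cut off in the example. A common continuation at this point would be a sketch like the following, assuming the checkpoint was saved with torch.save as a dict holding a 'state_dict' entry; the key name is an assumption, while speech_net comes from the code above:

    # hypothetical continuation: restore the pretrained weights before training
    checkpoint = torch.load(args.trained_yolo_model, map_location='cpu')
    # fall back to treating the file as a raw state_dict if the key is absent
    speech_net.load_state_dict(checkpoint.get('state_dict', checkpoint))
    print('loaded pretrained weights from', args.trained_yolo_model)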