Example #1
File: main.py Project: codepike/iceberg
def main(_):
    keep_prob = 0.5 if config.mode == 'train' else 1.0
    # train and evaluate share the same reader and model construction
    if config.mode in ('train', 'evaluate'):
        reader = TFReader(config.data_path, config.epoch, config.batch_size,
                          [75 * 75 * 2], [1])
        cnn_model = CNN(reader,
                        config.mode,
                        keep_prob=keep_prob,
                        learning_rate=config.learning_rate)
        # resnet = Resnet(reader, config.mode, keep_prob=keep_prob, learning_rate=config.learning_rate)
        if config.mode == 'train':
            train(cnn_model, config)
        else:
            evaluate(cnn_model, config)
    elif config.mode == 'predict':
        reader = DefaultReader(None)
        cnn_model = Resnet(reader, config.mode, keep_prob=keep_prob)
        predict(cnn_model, config)
    elif config.mode == 'batch_predict':
        reader = TFReader(config.data_path,
                          1,
                          config.batch_size, [75 * 75 * 2], [1],
                          shuffle=False)
        # resnet = Resnet(reader, config.mode, keep_prob=keep_prob)
        cnn_model = CNN(reader, config.mode, keep_prob=keep_prob)
        batch_predict(cnn_model, config)
Example #2
import numpy as np
from tqdm import tqdm
from sklearn.metrics import f1_score
# DataGenerator and Resnet come from the project's own modules

def main():
    print('loading modules ...')
    gen = DataGenerator('protain/all/train/', 'protain/all/test/', 'protain/all/train.csv')
    model = Resnet(resnet_layers=4, channels=[4, 16, 16, 32, 32])
    print('Done')
    epoch = 10

    for i in range(epoch):
        val_x, val_y = gen.get_validation_set()
        bar = tqdm(gen.get_batch(), total=len(gen.train_ids) // 8)
        for x, y in bar:
            loss = model.train(x, y)
            bar.set_description('loss = {:.5f}'.format(loss))
        preds = np.array([[int(y >= 0.5) for y in model.predict([x])[0]] for x in tqdm(val_x)])
        print('[epoch {}]: f1_macro = {}'.format(i, f1_score(val_y, preds, average='macro')))

    preds_test = [(name, [i for i, y in enumerate(model.predict([x])[0]) if y >= 0.5]) for name, x in gen.get_test_set()]
    with open('submission.csv', 'w') as f:
        f.write('Id,Predicted\n')
        for id_, preds in preds_test:
            f.write('{},{}\n'.format(id_, ' '.join(map(str, preds))))
Example #3
    def run(self, args):
        self.load_args(args)
        dataloader_args = {'batch_size': 32, 'num_workers': 0, 'shuffle': True}

        trainDataLoader = DataLoader(self.idir, **dataloader_args)
        self.train_dataset = trainDataLoader.load_data()

        testDataLoader = DataLoader(self.testdir, **dataloader_args)
        self.test_dataset = testDataLoader.load_data()

        # Reuse a saved model when requested; otherwise train from scratch
        # and report the evaluation metrics once.
        self.Model = None
        if self.load:
            try:
                self.Model = torch.load("model.pt")
            except (FileNotFoundError, RuntimeError):
                # Fall through to training if no usable checkpoint exists
                self.Model = None

        if self.Model is None:
            self.Model = Resnet()
            self.train()
            least_match_metric, full_match_metric, match_count_metric, mAP, mAUC, d_prime = self.evaluate()
            print("mAP : ", mAP)
            print("mAUC : ", mAUC)
            print("D prime : ", d_prime)
            print("Least match accuracy : ", least_match_metric)
            print("Full match accuracy : ", full_match_metric)
            print("Match count accuracy : ", match_count_metric)

        if self.save:
            torch.save(self.Model, "model.pt")
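Note that torch.save(self.Model, "model.pt") pickles the whole module, which ties the checkpoint to the exact class definition. A more robust pattern (a sketch, not part of the original project) saves only the weights:

# Save weights only; assumes Resnet() can rebuild the same architecture.
torch.save(self.Model.state_dict(), "model_state.pt")

# Later, to restore:
model = Resnet()
model.load_state_dict(torch.load("model_state.pt"))
model.eval()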
Example #4
def main():
    net = Resnet()
    # NOTE: SGD needs an explicit lr in most PyTorch versions; 0.01 is illustrative
    opt = torch.optim.SGD(net.parameters(), lr=0.01)
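The snippet ends right after constructing the optimizer. A minimal training step, assuming net returns class logits and that (inputs, targets) come from some dataloader (both hypothetical, not in the original), could look like:

import torch.nn.functional as F

def train_step(net, opt, inputs, targets):
    # Forward pass; net is assumed to return raw class logits
    logits = net(inputs)
    loss = F.cross_entropy(logits, targets)
    # Standard update: clear old gradients, backprop, step
    opt.zero_grad()
    loss.backward()
    opt.step()
    return loss.item()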
Example #5
class EnsembleNetwork(nn.Module):
    def __init__(self):
        super(EnsembleNetwork, self).__init__()

        # Init dual class classifier
        self.resnet = Resnet()
        self.crnn = CRNN()
        self.unet = UNet()

        # Init one class classifier
        self.deep_sad_normal = LG_1DCNN()
        self.deep_sad_abnormal = LG_1DCNN()

        # Init models list
        self.models = [self.resnet, self.crnn, self.unet, self.deep_sad_normal, self.deep_sad_abnormal]

        # Load weights for non-anomaly detectors
        self.resnet.load_state_dict(torch.load('/workspace/jinsung/resnet_final-Copy1js.pt'))
        #self.crnn.load_state_dict(torch.load('/workspace/demon/crnn_random700_spectrogram.pt'))
        #self.unet.load_state_dict(torch.load('/workspace/demon/unet_random700_spectrogram.pt'))

        # Load DeepSAD Normal
        model_dict_normal = torch.load('/workspace/demon/deepSAD_1117_7k_10ep_64batch_normal_flip.tar')
        self.c_normal = model_dict_normal["c"]
        self.deep_sad_normal.load_state_dict(model_dict_normal["net_dict"])

        # Load DeepSAD Abnormal
        model_dict_abnormal = torch.load('/workspace/demon/deepSADModel_7k_10ep_64batch_abnormal.tar')
        self.c_abnormal = model_dict_abnormal["c"]
        self.deep_sad_abnormal.load_state_dict(model_dict_abnormal["net_dict"])

        # Load on CUDA and freeze parameter values
        for model in self.models:
            model.to('cuda')
            model.eval()
            for param in model.parameters():
                param.requires_grad_(False)

    def forward(self, x):
        # Only consumed by the DeepSAD branch below, which is commented out
        x_in_vec = torch.tensor(x[0, 3:-1], dtype=torch.float32, device='cuda')

        # Make prediction for DeepSAD models
        #output_sad_normal = self.deep_sad_normal.forward(x_in_vec)
        #distance_sad_normal = torch.sum((output_sad_normal - self.c_normal) ** 2, dim=1)
        #score_sad_normal = round(torch.sqrt(distance_sad_normal).item())

        #output_sad_abnormal = self.deep_sad_abnormal.forward(x_in_vec)
        #distance_sad_abnormal = torch.sum((output_sad_abnormal - self.c_abnormal) ** 2, dim=1)
        #score_sad_abnormal = round(torch.sqrt(distance_sad_abnormal).item())

        #if score_sad_normal == score_sad_abnormal:
        #    return score_sad_normal
        
        # If not in consensus, try dual class classifiers
        x_np = x.cpu().detach().numpy().squeeze()
        x_in_img = preprocess_spectrogram(x_np)
        x_in_img = x_in_img[None, :, :, :]
        x_in_tensor = torch.tensor(x_in_img, dtype=torch.float32, device='cuda')
        result_resnet = self.resnet.forward(x_in_tensor)
        result_resnet = 1 if float(result_resnet) > 0.0001 else 0
        #result_crnn = self.crnn.forward(x_in_tensor)
        #result_unet = self.unet.forward(x_in_tensor)
        #overall_result = 1 if float(result_resnet * 0.8 + result_crnn * 0.1 + result_unet * 0.1) else 0
        #return overall_result
        return result_resnet
Example #6
    #parser.add_argument('--hidden_size', type=int, default=256)
    parser.add_argument('--output_size', type=int, default=350)  # Fixed
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--log_interval', type=int, default=100)
    parser.add_argument('--learning_rate', type=float, default=0.0002)
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--seed', type=int, default=42)
    args = parser.parse_args()

    torch.manual_seed(args.seed)
    device = args.device

    #model = make_model('se_resnext50_32x4d', num_classes=args.output_size, pretrained=True, pool=nn.AdaptiveAvgPool2d(1))
    #model = make_model('inceptionresnetv2', num_classes=args.output_size, pretrained=True, pool=nn.AdaptiveAvgPool2d(1))
    #model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=args.output_size)
    model = Resnet(args.output_size)

    optimizer = optim.Adam(model.parameters(), args.learning_rate)
    criterion = nn.CrossEntropyLoss()  # multi-class classification task

    model = model.to(device)
    #summary(model, (3,args.input_size,args.input_size))
    # DONOTCHANGE: They are reserved for nsml
    bind_model(model)
    if args.pause:
        nsml.paused(scope=locals())
    if args.mode == "train":
        # Warning: Do not load data before this line
        dataloader, valid_dataloader = train_dataloader(
            args.input_size, args.batch_size, args.num_workers)
Example #7
class Trainer:
    def __init__(self, total_cls):
        self.total_cls = total_cls
        self.seen_cls = 0
        self.dataset = Cifar100()
        self.model = Resnet(32, total_cls).cuda()
        print(self.model)
        self.input_transform = Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, padding=4),
            ToTensor(),
            Normalize([0.5071, 0.4866, 0.4409], [0.2673, 0.2564, 0.2762])
        ])

        self.input_transform_eval = Compose([
            ToTensor(),
            Normalize([0.5071, 0.4866, 0.4409], [0.2673, 0.2564, 0.2762])
        ])

        total_params = sum(p.numel() for p in self.model.parameters()
                           if p.requires_grad)
        print("Solver total trainable parameters : ", total_params)
        print("---------------------------------------------")

    def eval(self, valdata):
        self.model.eval()
        correct = 0
        wrong = 0
        with torch.no_grad():  # no gradients needed for evaluation
            for i, (image, label) in enumerate(valdata):
                image = image.cuda()
                label = label.view(-1).cuda()
                p = self.model(image)
                pred = p[:, :self.seen_cls].argmax(dim=-1)
                correct += (pred == label).sum().item()
                wrong += (pred != label).sum().item()
        acc = correct / (wrong + correct)
        print("Val Acc: {}".format(acc * 100))
        self.model.train()
        print("---------------------------------------------")
        return acc

    # Get learning rate
    def get_lr(self, optimizer):
        for param_group in optimizer.param_groups:
            return param_group['lr']

    def train(self, batch_size, epoches, lr, max_size, is_WA):
        total_cls = self.total_cls
        criterion = nn.CrossEntropyLoss()

        # Teacher model for knowledge distillation; stage1_distill reads
        # self.previous_model, so initialise it as an attribute
        self.previous_model = None

        dataset = self.dataset
        val_xs = []
        val_ys = []
        train_xs = []
        train_ys = []

        test_accs = []

        for step_b in range(dataset.batch_num):
            print(f"Incremental step : {step_b + 1}")

            # Get the train and val data for step b,
            # and split them into train_x, train_y, val_x, val_y
            train, val = dataset.getNextClasses(step_b)
            print(
                f'number of trainset: {len(train)}, number of valset: {len(val)}'
            )
            train_x, train_y = zip(*train)
            val_x, val_y = zip(*val)
            val_xs.extend(val_x)
            val_ys.extend(val_y)
            train_xs.extend(train_x)
            train_ys.extend(train_y)

            # Transform data and prepare dataloader
            train_data = DataLoader(BatchData(train_xs, train_ys,
                                              self.input_transform),
                                    batch_size=batch_size,
                                    shuffle=True,
                                    drop_last=True)
            val_data = DataLoader(BatchData(val_xs, val_ys,
                                            self.input_transform_eval),
                                  batch_size=batch_size,
                                  shuffle=False)

            # Set optimizer and scheduler
            optimizer = optim.SGD(self.model.parameters(),
                                  lr=lr,
                                  momentum=0.9,
                                  weight_decay=2e-4)
            scheduler = MultiStepLR(optimizer, [100, 150, 200], gamma=0.1)

            # Update and report the number of classes seen so far
            self.seen_cls += total_cls // dataset.batch_num
            print("seen classes : ", self.seen_cls)
            test_acc = []

            for epoch in range(epoches):
                print("---------------------------------------------")
                print("Epoch", epoch)

                # Print current learning rate
                cur_lr = self.get_lr(optimizer)
                print("Current Learning Rate : ", cur_lr)

                # Train the model (with knowledge distillation after the
                # first incremental step)
                self.model.train()
                if step_b >= 1:
                    self.stage1_distill(train_data, criterion, optimizer)
                else:
                    self.stage1(train_data, criterion, optimizer)

                # Step the LR schedule after the optimizer updates, as
                # current PyTorch versions expect
                scheduler.step()

                # Evaluation
                acc = self.eval(val_data)

            # Weight alignment (WA) to maintain fairness between old and new classes
            if is_WA and step_b >= 1:
                self.model.weight_align(step_b)

            # Snapshot the current model; it serves as the KD teacher in the next step
            self.previous_model = deepcopy(self.model)

            # Evaluate final accuracy at the end of one batch
            acc = self.eval(val_data)
            test_accs.append(acc)
            print(f'Previous accuracies: {test_accs}')

    def stage1(self, train_data, criterion, optimizer):
        print("Training ... ")
        losses = []
        for i, (image, label) in enumerate(train_data):
            image = image.cuda()
            label = label.view(-1).cuda()
            p = self.model(image)
            loss = criterion(p[:, :self.seen_cls], label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
        print("CE loss :", np.mean(losses))

    def stage1_distill(self, train_data, criterion, optimizer):
        print("Training ... ")
        distill_losses = []
        ce_losses = []
        T = 2  # distillation temperature
        # beta weighting assumes 20 new classes per incremental step
        beta = (self.seen_cls - 20) / self.seen_cls
        print("classification proportion 1-beta = ", 1 - beta)
        for i, (image, label) in enumerate(train_data):
            image = image.cuda()
            label = label.view(-1).cuda()
            p = self.model(image)
            with torch.no_grad():
                previous_q = self.previous_model(image)
                previous_q = F.softmax(previous_q[:, :self.seen_cls - 20] / T,
                                       dim=1)
            log_current_p = F.log_softmax(p[:, :self.seen_cls - 20] / T, dim=1)
            loss_distillation = -torch.mean(
                torch.sum(previous_q * log_current_p, dim=1))
            # Use the criterion passed in instead of constructing a new one
            loss_crossEntropy = criterion(p[:, :self.seen_cls], label)
            loss = loss_distillation * T * T + (1 - beta) * loss_crossEntropy
            optimizer.zero_grad()
            # retain_graph is unnecessary: previous_q is computed under no_grad
            loss.backward()
            optimizer.step()
            distill_losses.append(loss_distillation.item())
            ce_losses.append(loss_crossEntropy.item())
        print("KD loss :", np.mean(distill_losses), "; CE loss :",
              np.mean(ce_losses))
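For reference, stage1_distill computes the usual temperature-softened distillation term by hand; an equivalent helper (a sketch over the same tensors, not from the original project) would be:

import torch.nn.functional as F

def distill_loss(student_logits, teacher_logits, T):
    # Soft cross-entropy between the teacher distribution and the
    # student's log-probabilities, both softened by temperature T;
    # matches -mean(sum(previous_q * log_current_p)) above
    q = F.softmax(teacher_logits / T, dim=1)
    log_p = F.log_softmax(student_logits / T, dim=1)
    return -(q * log_p).sum(dim=1).mean()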
Example #8
# In order to create the data_set 'bird_dataset_crop',
# run the python notebook called : 'preprocessing.ipynb'
# this notebook will create a directory where the images are cropped
# around the bounding boxes of the birds
#%%
import argparse
from tqdm import tqdm
import os
import PIL.Image as Image

import torch

from model import Resnet
#from model import inception_model
#from model import Net
model = Resnet()

parser = argparse.ArgumentParser(description='RecVis A3 evaluation script')
parser.add_argument(
    '--data',
    type=str,
    default='bird_dataset_crop',
    metavar='D',
    help="folder where data is located. test_images/ needs to be found in the folder")
parser.add_argument(
    '--model',
    type=str,
    metavar='M_0',
    help="the model file to be evaluated. Usually it is of the form model_X.pth")
Example #9
File: train.py Project: tpvt99/nus-assign3
def train():
    my_model = Resnet(kernel_size=3,
                      filters=64,
                      inChannels=3,
                      input_shape=(3, 240, 320),
                      conv_nonlinearity='relu',
                      num_class=25)
    my_model = my_model.to(device)
    if os.path.exists('my_model.pt'):
        my_model.load_state_dict(torch.load('my_model.pt'))
        print('Load my_model.pt')
    batch_size = 32
    num_epoch = 100
    num_classes = 25
    learning_rate = 8e-4

    train_set = MyDataset(is_train=True, num_cat=num_classes)
    validation_set = MyDataset(is_train=False, num_cat=num_classes)

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               pin_memory=True)
    validation_loader = torch.utils.data.DataLoader(validation_set,
                                                    batch_size=32,
                                                    shuffle=True,
                                                    pin_memory=True)

    optimizer = torch.optim.Adam(my_model.parameters(), lr=learning_rate)
    loss_func = torch.nn.NLLLoss()  # expects log-probabilities, so the model should end in log_softmax
    scheduler = ReduceLROnPlateau(optimizer,
                                  'max',
                                  factor=0.5,
                                  patience=10,
                                  threshold=2e-1,
                                  verbose=True,
                                  min_lr=1e-5)
    bestTestAccuracy = 0

    print('Start training')
    train_size = len(train_loader.dataset)
    test_size = len(validation_loader.dataset)
    for epoch in range(num_epoch):
        total = 0
        correct = 0
        my_model.train()
        for i, data in enumerate(train_loader, 0):
            labels = data['label'].to(device)
            img = data['img'].to(device).float()
            prediction = my_model(img)

            loss = loss_func(prediction, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            _, predicted = torch.max(prediction, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            print(
                f'Train | Epoch {epoch}/{num_epoch}, Batch {i}/{int(train_size/batch_size)} '
                f' Loss: {loss.item():.3f} LR: {get_lr(optimizer):.6f}'
                f' Acc: {(100 * correct / total):.3f}')

        total = 0
        correct = 0
        my_model.eval()
        with torch.no_grad():  # no gradients needed for validation
            for i, data in enumerate(validation_loader, 0):
                labels = data['label'].to(device)
                img = data['img'].to(device).float()
                prediction = my_model(img)

                _, predicted = torch.max(prediction, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

                print(
                    f'Test | Epoch {epoch}/{num_epoch}, Batch {i}/{int(test_size/batch_size)} '
                    f' LR: {get_lr(optimizer):.6f}'
                    f' Acc: {(100 * correct / total):.3f} Best-so-far: {100*bestTestAccuracy:.5f}'
                )

        if (correct / total) > bestTestAccuracy:
            bestTestAccuracy = correct / total
            print(f'Update best test: {100*bestTestAccuracy:.5f}')
            torch.save(
                my_model.state_dict(),
                f"my_model_{str(round(100*bestTestAccuracy,2)).replace('.', '_')}.pt"
            )

        scheduler.step(bestTestAccuracy)
Example #10
import matplotlib.pyplot as plt
import os
import torch

from dataset import TrainDataset
from model import Resnet

path = "pytorch/model_l1_128.pth"

### MODEL ###
model = Resnet().cuda()
model = model.eval()

### DATASET ###
dataset = TrainDataset(max_num_pic=3)

### LOAD ###
if os.path.isfile(path):
    m = torch.load(path)
    model.load_state_dict(m["model"])
    del m

benchmark_img = dataset.transform(
    "DanbooRegion2020/train/0.image.png").unsqueeze(0).cuda()
benchmark_skel = dataset.transform(
    "DanbooRegion2020/train/0.skeleton.png").unsqueeze(0).expand(1, 3, -1,
                                                                 -1).cuda()
with torch.no_grad():  # inference only
    y = model(benchmark_img)
plt.imsave("pytorch/test.png",
           1 - y[0, 0].cpu().numpy(),
           cmap='Greys')
Example #11
File: train.py Project: Ryanrenqian/AI_HW
                        type=float,
                        default=0.001,
                        help='initial learning rate')
    # NOTE: argparse's type=bool is misleading: any non-empty string
    # (including "False") parses as True. See the str2bool sketch below.
    parser.add_argument('-s',
                        type=bool,
                        default=True,
                        help='whether to shuffle the dataset')
    parser.add_argument('-a',
                        type=bool,
                        default=False,
                        help='test the filter, reconstructed')
    parser.add_argument('-b', type=bool, default=False, help='test the acc')
    parser.add_argument('-c', type=bool, default=True, help='train')
    args = parser.parse_args()

    net = Resnet(BasicBlock)
    net = net.cuda()  # both the parameters and the model should live on CUDA

    cifar10_training_loader = get_training_dataloader(
        batch_size=args.batchsize, shuffle=args.s)

    cifar10_test_loader = get_test_dataloader(batch_size=args.batchsize,
                                              shuffle=args.s)

    cifar10_image_loader = get_test_dataloader(batch_size=1, shuffle=args.s)

    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    #TIME_NOW = datetime.now().isoformat()  # problematic file name: Windows folder names may not contain ':'
    TIME_NOW = '20191025'
    checkpoint_path = os.path.join('checkpoint', TIME_NOW)
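As flagged above, argparse's type=bool does not parse "False" as False. A common workaround (a sketch, not part of the original project) is an explicit converter:

import argparse

def str2bool(value):
    # Map common textual spellings onto real booleans
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError(f'boolean value expected, got {value!r}')

# Usage: parser.add_argument('-s', type=str2bool, default=True,
#                            help='whether to shuffle the dataset')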
Example #12
                # Evaluate in inference mode; the original fed training=True,
                # which keeps dropout/batch-norm in training behaviour
                acc_valid, summary = sess.run(
                    [model.accuracy, model.merged],
                    feed_dict={
                        model.image: image_valid,
                        model.label: label_valid,
                        model.training: False
                    })
                writer_valid.add_summary(summary, step)
                print(
                    '[epoch %d, step %d/%d]: train acc %.3f, valid acc %.3f'
                    % (step // (amount // batch_size),
                       step % (amount // batch_size),
                       amount // batch_size, acc_train, acc_valid),
                    'time %.3fs' % (time.time() - time1))
            if step % 100 == 0:
                print("Saving model checkpoint")
                saver.save(sess,
                           "../../model/" + dirId + "/model.ckpt",
                           global_step=step)

    coord.request_stop()
    coord.join(threads)


if __name__ == '__main__':
    deviceId = input("please input device id (0-7): ")
    os.environ["CUDA_VISIBLE_DEVICES"] = deviceId
    dirId = input("please input dir id: ")
    model = Resnet()
    batch_size = model.batch_size
    train(model)
Example #13
import torch
from torch.utils.data import DataLoader
# assumption: the original may use tensorboardX's SummaryWriter instead
from torch.utils.tensorboard import SummaryWriter

from dataset import TrainDataset
from model import Resnet

### VIZ ###
writer = SummaryWriter(filename_suffix="512")
ppepoch = 10  #points per epoch

### HYPERPARAMETERS ###
batch_size = 3
lr = 1e-4
weight_decay = 1e-6
num_epoch = 100
save_path = "pytorch/model_512.pth"

### MODEL ###
model = Resnet().cuda()

### DATASET ###
dataset = TrainDataset()
dataloader = DataLoader(dataset,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=0,
                        pin_memory=True)

datasetVal = TrainDataset(is_val=True)
dataloaderVal = DataLoader(datasetVal,
                           batch_size=batch_size,
                           shuffle=True,
                           num_workers=0,
                           pin_memory=True)
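The snippet is cut off before the training loop itself. Purely as an illustration (the loss function and batch format below are assumptions, not from the original), an epoch loop consistent with the setup above might look like:

import torch.nn.functional as F

opt = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

for epoch in range(num_epoch):
    model.train()
    for step, (img, target) in enumerate(dataloader):
        img, target = img.cuda(), target.cuda()
        pred = model(img)
        loss = F.l1_loss(pred, target)  # hypothetical loss; the real one is not shown
        opt.zero_grad()
        loss.backward()
        opt.step()
        # Log roughly ppepoch points per epoch
        if step % max(1, len(dataloader) // ppepoch) == 0:
            writer.add_scalar('train/loss', loss.item(),
                              epoch * len(dataloader) + step)
    # Checkpoint format matches the {"model": ...} dict loaded in Example #10
    torch.save({"model": model.state_dict()}, save_path)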