Example no. 1
def train():
    use_cuda = True
    epochs = 100
    batch_size = 32
    hidden_dim = 500
    z_dim = 20
    lr = 0.0001

    compressed = transforms.Compose([dataset.ToTensor()])
    variable = dataset.ToVariable(use_cuda=use_cuda)
    kwargs = {'num_workers': 8, 'pin_memory': True}
    loaders = dataset.setup_data_loaders(dataset.LastFMCSVDataset,
                                         use_cuda,
                                         batch_size,
                                         transform=compressed,
                                         **kwargs)

    print('{} steps for all data / 1 epoch'.format(len(loaders['X'])))

    vae = VAE(1, hidden_dim, z_dim, use_cuda=use_cuda)

    adam_param = {'lr': lr}
    optimizer = optim.Adam(vae.parameters(), **adam_param)

    for epoch in range(epochs):
        loss = train_epoch(vae, optimizer, loaders, variable)
        print('Epoch {}: {}'.format(epoch, loss))
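A hypothetical sketch of the train_epoch helper this loop expects (one pass over loaders['X'], returning the average loss); the VAE forward signature and the ELBO objective below are assumptions, not the snippet's actual implementation.

import torch
import torch.nn.functional as F

def train_epoch(vae, optimizer, loaders, variable):
    # Hypothetical helper: one pass over the training loader, returns the mean loss
    vae.train()
    total_loss, n_batches = 0.0, 0
    for batch in loaders['X']:
        x = variable(batch)                 # ToVariable moves the batch to the GPU when use_cuda is set
        optimizer.zero_grad()
        recon, mu, logvar = vae(x)          # assumed forward signature
        # Assumed ELBO objective: reconstruction term plus KL divergence
        recon_loss = F.binary_cross_entropy(recon, x, reduction='sum')
        kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        loss = recon_loss + kld
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        n_batches += 1
    return total_loss / max(n_batches, 1)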
Example no. 2
def main(args):
    trn_dir, trn_data_list = preprocess.get_datalist(args.datadir, args.mode, args.type, args.trn_data_type)
    # print(trn_dir)
    # print(trn_datalist)
    tst_dir, tst_data_list = preprocess.get_datalist(args.datadir, args.mode, args.type, args.tst_data_type)
    trn_twlist, df_total = preprocess.make_twlist(args.tw, trn_dir, trn_data_list, total_columns)
    stat_dict = preprocess.get_statdict(df_total, used_cols=measured_columns + manipulated_columns)
    del df_total
    tst_twlist, _ = preprocess.make_twlist(args.tw, tst_dir, tst_data_list, total_columns)

    transform_op = transforms.Compose([dataset.Preprocessing(used_cols=measured_columns + manipulated_columns,
                                                             stat_dict=stat_dict, normalize_method='minmax'),
                                       dataset.ToTensor()])
    trn_dset = dataset.TWDataset(trn_twlist, transform=transform_op)
    tst_dset = dataset.TWDataset(tst_twlist, transform=transform_op)
    trn_loader = DataLoader(trn_dset, batch_size=args.trn_batch_size, num_workers=args.n_workers, shuffle=True)
    tst_loader = DataLoader(tst_dset, batch_size=args.tst_batch_size, num_workers=args.n_workers, shuffle=False)

    model = ae.ConvAE(args.tw, measured_columns+manipulated_columns,
                      args.k, args.s, args.p, args.n_ch, args.use_fc, 20,
                      args.actfn_name, args.outactfn_name)
    device = torch.device(f'cuda:{args.device_num}')

    if args.multigpu_method == 1:
        trainer = train.AETrainer1(model, args.lr, args.weight_decay, device, args.whole_devices, args.check_every)
    elif args.multigpu_method == 2:
        trainer = train.AETrainer2(model, args.lr, args.weight_decay, device, args.whole_devices, args.check_every)
    else:
        raise ValueError('Unsupported multigpu_method: {}'.format(args.multigpu_method))

    trainer.train(trn_loader, args.n_epoch)
    trainer.save_model(args.save_dir)
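The normalize_method='minmax' setting suggests per-column min-max scaling driven by stat_dict; a minimal sketch of such a transform follows, assuming stat_dict maps each column name to its 'min' and 'max' (the actual dataset.Preprocessing class is not shown in this snippet).

class MinMaxScale:
    # Hypothetical per-column scaler: x -> (x - min) / (max - min)
    def __init__(self, used_cols, stat_dict, eps=1e-8):
        self.used_cols = used_cols
        self.stat_dict = stat_dict   # assumed layout: {col: {'min': ..., 'max': ...}}
        self.eps = eps               # guards against constant columns

    def __call__(self, df):
        out = df.copy()
        for col in self.used_cols:
            lo, hi = self.stat_dict[col]['min'], self.stat_dict[col]['max']
            out[col] = (df[col] - lo) / (hi - lo + self.eps)
        return out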
Example no. 3
def get_live_dataset(live_train='',
                     live_test='',
                     transform=transforms.Compose([
                         dataset.RandomCrop(32),
                         dataset.ToTensor()
                     ])):
    return dataset.LiveDataset(live_train=live_train,
                               live_test=live_test,
                               transform=transform)
Example no. 4
    def __init__(self, hyper_params):
        self.batch_size = hyper_params.batch

        self.n_cls = hyper_params.n_cls
        self.classes = hyper_params.classes
        root = hyper_params.root

        self.cuda = torch.cuda.is_available()

        self.model_name = hyper_params.model_name
        model_cfg = hyper_params.model_cfg
        model_type = model_cfg.pop('type')
        assert model_type in models.__dict__
        self.bg = model_cfg.pop('background')
        model_cfg['weights'] = hyper_params.weights
        model_cfg['head'][
            'num_classes'] = self.n_cls + 1 if self.bg else self.n_cls
        self.net = models.__dict__[model_type](**model_cfg)
        setattr(self.net, 'num_classes', model_cfg['head']['num_classes'])

        if self.cuda:
            self.net.cuda()
        self.net.eval()

        self.input_size = hyper_params.input_size
        setattr(self.net, 'input_size', self.input_size)
        transform = [dt.Resize_Pad(self.input_size), dt.ToTensor()]
        if hyper_params.norm:
            transform += [
                dt.Normalize(hyper_params.norm['mean'],
                             hyper_params.norm['std'])
            ]
        ann_file = root + hyper_params.ann_file
        img_dir = root + hyper_params.img_dir
        if hasattr(hyper_params, 'json'):
            self.dataset = dt.JsonDataset(
                ann_file,
                img_dir,
                transform=transforms.Compose(transform),
                city=hyper_params.city)
        else:
            self.dataset = dt.VOCDataset(
                ann_file, img_dir, transform=transforms.Compose(transform))
        setattr(self.dataset, 'with_bg', self.bg)
        self.dataloader = DataLoader(
            self.dataset,
            batch_size=self.batch_size,
            shuffle=False,
            # num_workers=hyper_params.nworkers if self.cuda else 0,
            # pin_memory=hyper_params.pin_mem if self.cuda else False,
            collate_fn=dt.list_collate,
        )

        self.conf_thresh = hyper_params.conf_th
        self.nms_thresh = hyper_params.nms_th
        self.ignore_thresh = hyper_params.ignore_th
        self.n_det = hyper_params.n_det
Example no. 5
def get_dataset(limited=True,
                train=True,
                image_list='',
                transform=transforms.Compose([
                    dataset.RandomCrop(32),
                    dataset.ToTensor()
                ])):
    if train:
        face_dataset = dataset.FaceScoreDataset(limited=limited,
                                                image_list=image_list,
                                                transform=transform)
    else:
        # note: the test branch does not pass the transform
        face_dataset = dataset.FaceScoreDataset(limited=limited,
                                                image_list=image_list,
                                                train=False)
    return face_dataset
Example no. 6
def train():
    compressed = transforms.Compose([dataset.ToTensor()])
    variable = dataset.ToVariable(use_cuda=True)
    kwargs = {'num_workers': 2, 'pin_memory': True}
    loaders = dataset.setup_data_loaders(dataset.LastFMCSVDataset,
                                         False,
                                         32,
                                         transform=compressed,
                                         **kwargs)
    fmvae = FMVAE(1892, 17632, 11946, 1892, 1, 1000, 20)
    # fmvae = nn.DataParallel(fmvae)
    fmvae.cuda()

    adam_param = {'lr': 0.0001}
    optimizer = optim.Adam(fmvae.parameters(), **adam_param)

    epochs = 100
    for epoch in range(epochs):
        train_epoch(fmvae, optimizer, loaders, variable)
Example no. 7
def run(path_to_net,
        label_dir,
        nii_dir,
        plotter,
        batch_size=32,
        test_split=0.3,
        random_state=666,
        epochs=8,
        learning_rate=0.0001,
        momentum=0.9,
        num_folds=5):
    """
    Applies training and validation on the network 
    """
    print('Setup started', flush=True)
    nii_filenames = np.asarray(glob.glob(nii_dir + '/*.npy'))
    print('Number of files: ', len(nii_filenames), flush=True)
    # Creating data indices
    dataset_size = len(nii_filenames)
    indices = list(range(dataset_size))
    test_indices, trainset_indices = utils.get_test_indices(
        indices, test_split)
    # kfold index generator
    for cv_num, (train_idx, val_idx) in enumerate(
            utils.get_train_cv_indices(trainset_indices, num_folds,
                                       random_state)):
        # take from trainset_indices the kfold generated ones
        train_indices = np.asarray(trainset_indices)[np.asarray(train_idx)]
        val_indices = np.asarray(trainset_indices)[np.asarray(val_idx)]
        print('cv cycle number: ', cv_num, flush=True)
        net = Net()
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        num_GPU = torch.cuda.device_count()
        print('Device: ', device, flush=True)
        if num_GPU > 1:
            print('Let us use', num_GPU, 'GPUs!', flush=True)
            net = nn.DataParallel(net)
        net.to(device)
        # weigh the loss with the size of classes
        # class 0: 3268
        # class 1: 60248
        weight = torch.tensor([1. / 3268., 1. / 60248.]).to(device)
        criterion = nn.CrossEntropyLoss(weight=weight)
        optimizer = optim.Adam(net.parameters(), lr=learning_rate)
        scheduler = ReduceLROnPlateau(optimizer,
                                      threshold=1e-6,
                                      patience=0,
                                      verbose=True)
        fMRI_dataset_train = dataset.fMRIDataset(label_dir,
                                                 nii_dir,
                                                 train_indices,
                                                 transform=dataset.ToTensor())
        fMRI_dataset_val = dataset.fMRIDataset(label_dir,
                                               nii_dir,
                                               val_indices,
                                               transform=dataset.ToTensor())
        datalengths = {
            'train': len(fMRI_dataset_train),
            'val': len(fMRI_dataset_val)
        }
        dataloaders = {
            'train': utils.get_dataloader(fMRI_dataset_train, batch_size,
                                          num_GPU),
            'val': utils.get_dataloader(fMRI_dataset_val, batch_size, num_GPU)
        }
        print('Train set length {}, Val set length {}: '.format(
            datalengths['train'], datalengths['val']))
        # Setup metrics
        running_metrics_val = metrics.BinaryClassificationMeter()
        running_metrics_train = metrics.BinaryClassificationMeter()
        val_loss_meter = metrics.averageLossMeter()
        train_loss_meter = metrics.averageLossMeter()
        # Track iteration number over epochs for plotter
        itr = 0
        # Track lowest validation loss over epochs for saving the network
        lowest_loss = float('inf')
        for epoch in tqdm(range(epochs), desc='Epochs'):
            print('Epoch: ', epoch + 1, flush=True)
            print('Phase: train', flush=True)
            phase = 'train'
            # Set model to training mode
            net.train(True)
            # Iterate over data.
            for i, data in tqdm(enumerate(dataloaders[phase]),
                                desc='Dataiteration_train'):
                train_pred, train_labels, train_loss = train(
                    data, optimizer, net, criterion, device)
                running_metrics_train.update(train_pred, train_labels)
                train_loss_meter.update(train_loss, n=1)
                if (i + 1) % 10 == 0:
                    print('Number of Iteration [{}/{}]'.format(
                        i + 1, int(datalengths[phase] / batch_size)),
                          flush=True)
                    itr += 1
                    score = running_metrics_train.get_scores()
                    for k, v in score.items():
                        plotter.plot(k, 'itr', phase, k, itr, v)
                        print(k, v, flush=True)
                    print('Loss Train', train_loss_meter.avg, flush=True)
                    plotter.plot('Loss', 'itr', phase, 'Loss Train', itr,
                                 train_loss_meter.avg)
                    utils.save_scores(running_metrics_train.get_history(),
                                      phase, cv_num)
                    utils.save_loss(train_loss_meter.get_history(), phase,
                                    cv_num)
            print('Phase: val', flush=True)
            phase = 'val'
            # Set model to validation mode
            net.train(False)
            with torch.no_grad():
                for i, data in tqdm(enumerate(dataloaders[phase]),
                                    desc='Dataiteration_val'):
                    val_pred, val_labels, val_loss = val(
                        data, net, criterion, device)
                    running_metrics_val.update(val_pred, val_labels)
                    val_loss_meter.update(val_loss, n=1)
                    if (i + 1) % 10 == 0:
                        print('Number of Iteration [{}/{}]'.format(
                            i + 1, int(datalengths[phase] / batch_size)),
                              flush=True)
                    utils.save_scores(running_metrics_val.get_history(), phase,
                                      cv_num)
                    utils.save_loss(val_loss_meter.get_history(), phase,
                                    cv_num)
                    if val_loss_meter.avg < lowest_loss:
                        lowest_loss = val_loss_meter.avg
                        utils.save_net(path_to_net,
                                       batch_size,
                                       epoch,
                                       cv_num,
                                       train_indices,
                                       val_indices,
                                       test_indices,
                                       net,
                                       optimizer,
                                       criterion,
                                       iter_num=i)
                # Plot validation metrics and loss at the end of the val phase
                score = running_metrics_val.get_scores()
                for k, v in score.items():
                    plotter.plot(k, 'itr', phase, k, itr, v)
                    print(k, v, flush=True)
                print('Loss Val', val_loss_meter.avg, flush=True)
                plotter.plot('Loss', 'itr', phase, 'Loss Val', itr,
                             val_loss_meter.avg)

            print(
                'Epoch [{}/{}], Train_loss: {:.4f}, Train_bacc: {:.2f}'.format(
                    epoch + 1, epochs, train_loss_meter.avg,
                    running_metrics_train.bacc),
                flush=True)
            print('Epoch [{}/{}], Val_loss: {:.4f}, Val_bacc: {:.2f}'.format(
                epoch + 1, epochs, val_loss_meter.avg,
                running_metrics_val.bacc),
                  flush=True)
            # Adjust the learning rate after every epoch based on the training loss
            scheduler.step(train_loss_meter.avg)
        # Save net after every cross validation cycle
        utils.save_net(path_to_net, batch_size, epochs, cv_num, train_indices,
                       val_indices, test_indices, net, optimizer, criterion)
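The loop above delegates to train(...) and val(...) step functions that are not part of this snippet; the sketch below shows the shape such helpers would need to match the call sites (predictions, labels, loss value), where the 'image' and 'label' keys are an assumption about what fMRIDataset yields.

import torch

def train_step(data, optimizer, net, criterion, device):
    # Hypothetical single training step matching the call site above
    inputs = data['image'].to(device)    # assumed batch dict keys
    labels = data['label'].to(device)
    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    preds = outputs.argmax(dim=1)
    return preds.detach().cpu(), labels.cpu(), loss.item()

def val_step(data, net, criterion, device):
    # Hypothetical single validation step, no gradient update
    with torch.no_grad():
        inputs = data['image'].to(device)
        labels = data['label'].to(device)
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        preds = outputs.argmax(dim=1)
    return preds.cpu(), labels.cpu(), loss.item()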
Example no. 8
start = time.time()

path_to_net = './net.pt'
label_dir = './label/'
nii_dir = './numpy/'

checkpoint = torch.load(path_to_net)
net = checkpoint['net']
test_indices = checkpoint['test_indices']
batch_size = checkpoint['batch_size']
random_state = 666

device = torch.device('cuda:0' 
                      if torch.cuda.is_available() else 'cpu')
num_GPU = torch.cuda.device_count()
fMRI_dataset_test = dataset.fMRIDataset(label_dir, nii_dir, 
                            test_indices, transform=dataset.ToTensor())
test_length = len(fMRI_dataset_test)
test_loader = utils.get_dataloader(fMRI_dataset_test, 
                                     batch_size, num_GPU)

metrics, pred_dict, history = test(net, test_loader, device)
torch.save(history, './history.pt')
torch.save(pred_dict, './pred_dict.pt')
torch.save(metrics.get_scores(), './test_scores.pt')
score = metrics.get_scores()
for k, v in score.items():
    print(k, v, flush=True)
print('Whole run took ', time.time()-start, flush=True)
print('Done!', flush=True)
Example no. 9
    crop_len = args.crop_len

    ### Set paths
    text_path = args.datasetpath
    embedding_path = args.embeddingpath

    ### Load text and embeddings
    text = ds.clean_text(text_path)
    emb = ds.embedding_matrix(embedding_path, text, normalize=True)

    ### Test dataloader
    #alphabet_len = len(dataset.alphabet)
    trans = transforms.Compose(
        [ds.RandomCrop(crop_len),
         ds.OneHotEncoder(),
         ds.ToTensor()])
    dataset = ds.LOTRDataset(text, emb, transform=trans)
    dataloader = DataLoader(dataset,
                            batch_size=len(dataset.chapter_list) //
                            args.batchnumber,
                            shuffle=True)

    ### Set cuda
    device = torch.device('cuda')

    ### Get a validation batch (the loop below keeps only the last batch yielded by the dataloader)
    for batch_sample in dataloader:
        batch_onehot = batch_sample['encoded_onehot']
    validation_batch = batch_onehot.float().to(device)

    ### Initialize Network
Example no. 10
import torch  # needed for torch.utils.data.DataLoader below
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import dataset

transform = transforms.Compose([
    dataset.Rescale((110, 110)),
    dataset.image_rotation((110, 110)),
    dataset.random_affine_transform((110, 110)),
    dataset.ToTensor()
])

trainrootdir = './Input/train'
traincsvfile = './Input/final_tag.csv'

train_datasets = dataset.carsDataset(traincsvfile, trainrootdir, transform)
train_dataloders = torch.utils.data.DataLoader(train_datasets,
                                               batch_size=4,
                                               shuffle=True,
                                               num_workers=0)

train_dataset_size = len(train_datasets)
class_names = train_datasets.classes

validrootdir = './Input/validation'
Example no. 11
    def __init__(self, hyper_params):
        # get config
        self.batch_size = hyper_params.batch
        self.mini_batch_size = hyper_params.mini_batch
        # subdivision: number of mini-batches accumulated per optimizer step (see the sketch after this constructor)
        self.subdivision = self.batch_size // self.mini_batch_size

        self.n_cls = hyper_params.n_cls
        self.classes = hyper_params.classes
        self.root = hyper_params.root

        self.cuda = torch.cuda.is_available()
        self.backup_dir = hyper_params.backup_dir
        self.backup = hyper_params.backup

        # listen for SIGINT (Ctrl+C)
        self.sigint = False
        signal.signal(signal.SIGINT, self.__sigint_handler)

        self.model_name = hyper_params.model_name
        model_cfg = hyper_params.model_cfg
        model_type = model_cfg.pop('type')
        assert model_type in models.__dict__
        self.bg = model_cfg.pop('background')
        model_cfg['weights'] = hyper_params.weights
        model_cfg['head'][
            'num_classes'] = self.n_cls + 1 if self.bg else self.n_cls
        self.net = models.__dict__[model_type](**model_cfg)

        if self.cuda:
            self.net.cuda()
        self.net.train()

        self.input_size = hyper_params.input_size
        setattr(self.net, 'input_size', self.input_size)
        transform = []
        if hasattr(hyper_params, 'crop'):
            # transform.append(dt.RCM(self.input_size,hyper_params.crop))
            transform.append(dt.RC(self.input_size, hyper_params.crop))
            # transform.append(dt.RandomCrop(hyper_params.crop))
            # transform.append(dt.SSDCrop())
        if hasattr(hyper_params, 'flip'):
            transform.append(dt.HFlip(hyper_params.flip))
        if hasattr(hyper_params, 'hue'):
            transform.append(
                dt.ColorJitter(hyper_params.exposure, hyper_params.saturation,
                               hyper_params.hue))
        transform += [dt.Resize_Pad(self.input_size), dt.ToTensor()]
        if hyper_params.norm:
            transform += [
                dt.Normalize(hyper_params.norm['mean'],
                             hyper_params.norm['std'])
            ]
        ann_file = self.root + hyper_params.ann_file
        img_dir = self.root + hyper_params.img_dir
        if hasattr(hyper_params, 'json'):
            dataset = dt.JsonDataset(ann_file,
                                     img_dir,
                                     transform=transforms.Compose(transform),
                                     city=hyper_params.city)
        else:
            dataset = dt.VOCDataset(ann_file,
                                    img_dir,
                                    transform=transforms.Compose(transform))
        setattr(dataset, 'with_bg', self.bg)
        self.dataloader = DataLoader(
            dataset,
            batch_size=self.mini_batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=hyper_params.nworkers if self.cuda else 0,
            # pin_memory=hyper_params.pin_mem if self.cuda else False,
            collate_fn=dt.list_collate,
        )
        if hasattr(hyper_params, 'val'):
            ann_file = self.root + hyper_params.val['ann_file']
            img_dir = self.root + hyper_params.val['img_dir']
            transform = [dt.Resize_Pad(self.input_size), dt.ToTensor()]
            if hyper_params.norm:
                transform += [
                    dt.Normalize(hyper_params.norm['mean'],
                                 hyper_params.norm['std'])
                ]
            if hasattr(hyper_params, 'json'):
                valdata = dt.JsonDataset(
                    ann_file,
                    img_dir,
                    transform=transforms.Compose(transform),
                    city=hyper_params.city)
            else:
                valdata = dt.VOCDataset(
                    ann_file, img_dir, transform=transforms.Compose(transform))
            setattr(valdata, 'with_bg', self.bg)
            self.valloader = DataLoader(
                valdata,
                batch_size=self.mini_batch_size,
                num_workers=hyper_params.nworkers if self.cuda else 0,
                # pin_memory=hyper_params.pin_mem if self.cuda else False,
                collate_fn=dt.list_collate,
            )

        self.batch_e = len(self.dataloader) // self.subdivision
        self.max_batches = hyper_params.max_batches
        self.epoches = self.max_batches // self.batch_e

        self.optim_cfg = hyper_params.optim_cfg
        self.optim = self.make_optimizer(self.net)
        self.lr_cfg = hyper_params.lr_cfg
        self.scheduler = self.make_lr_scheduler(self.optim)

        if hyper_params.ckpt is not None:
            state = torch.load(hyper_params.ckpt)
            self.net.seen = state['seen']
            self.net.load_state_dict(state['net'])
            self.optim.load_state_dict(state['optim'])
            self.scheduler.load_state_dict(state['sche'])

        self.log = {}
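The subdivision computed above implies gradient accumulation: each optimizer step aggregates subdivision mini-batches so the effective batch size stays at hyper_params.batch. A generic sketch of that pattern, with loss_fn as a placeholder rather than this trainer's actual loss:

def accumulation_step(net, mini_batches, loss_fn, optimizer, subdivision):
    # Hypothetical helper: one optimizer update built from `subdivision` mini-batches
    optimizer.zero_grad()
    for imgs, targets in mini_batches:        # `subdivision` mini-batches from the dataloader
        loss = loss_fn(net(imgs), targets)    # placeholder loss computation
        (loss / subdivision).backward()       # scale so gradients average over the full effective batch
    optimizer.step()                          # single update per effective batch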
Example no. 12
import constants as c

torch.backends.cudnn.deterministic = True
# Note: benchmark=True lets cuDNN pick potentially non-deterministic algorithms,
# which can undermine the determinism requested above; set it to False for full reproducibility.
torch.backends.cudnn.benchmark = True

# http://www.bubuko.com/infodetail-2977032.html
# https://discuss.pytorch.org/t/received-0-items-of-ancdata-pytorch-0-4-0/19823/4
# torch.multiprocessing.set_sharing_strategy('file_system')
# but easily cause shared memory leak:
# http://www.sohu.com/a/225270797_491081

# %% dataset
# Pretrain
transform = transforms.Compose(
    [D.TruncateInput(), D.normalize_frames(),
     D.ToTensor()])

trainset = D.TrainDataset(transform=transform)
pretrainset_loader = torch.utils.data.DataLoader(dataset=trainset,
                                                 batch_size=c.batch_size,
                                                 num_workers=c.num_workers,
                                                 pin_memory=True,
                                                 shuffle=True)

# ---------------------------------------
# Triplet loss

split_path = './voxceleb1_veri_dev.txt'
_split = pd.read_table(split_path,
                       sep=' ',
                       header=None,
Example no. 13
from torchvision import transforms
import torch.nn as nn
import torch
import config
import ResNet
import train
import dataset

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device is:", device)

train_dataset = dataset.costum_images_dataset(
    root_dir=config.train_root_dir,
    transform=transforms.Compose(
        [dataset.Rescale(config.resize_param),
         dataset.ToTensor()]))
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=config.batch_size,
                                           shuffle=True)

test_dataset = dataset.costum_images_dataset(
    root_dir=config.test_root_dir,
    transform=transforms.Compose(
        [dataset.Rescale(config.resize_param),
         dataset.ToTensor()]))
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=config.batch_size,
                                          shuffle=False)

models = {
    'resnet':
Example no. 14
batch_size = 32
sigma = 3

print()
print(" + Sequence length:\t{}".format(seq_len))
print(" + Batch size:\t\t{}".format(batch_size))
print(" + Sigma:\t\t{}".format(sigma))
print()

data_transforms = {
    'train':
    transforms.Compose([
        dataset.Rescale(280),
        dataset.RandomRotation(10),
        dataset.RandomCrop(256),
        dataset.ToTensor(sigma)
    ]),
    'val':
    transforms.Compose(
        [dataset.Rescale(280),
         dataset.Crop(256),
         dataset.ToTensor(sigma)])
}
"""
datasets = {
    'train':dataset.UltrasoundData("Project/Data/train/", seq_type, seq_len, transform=data_transforms['train']),
    'val':dataset.UltrasoundData("Project/Data/val/", seq_type, seq_len, transform=data_transforms['val'])}
"""
datasets = {
    'train':
    dataset.UltrasoundData("/floyd/input/us-data/train/",
Example no. 15
from torch.utils.data import Dataset
from torchvision import transforms
import torch.nn as nn
import torch
import config
import ResNet
import train
import dataset

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device is:", device)

train_dataset = dataset.costum_images_dataset(root_dir=config.train_root_dir,
                                              transform=transforms.Compose(
                                                  [dataset.Rescale(config.resize_param), dataset.ToTensor()]))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)

test_dataset = dataset.costum_images_dataset(root_dir=config.test_root_dir,
                                             transform=transforms.Compose(
                                                 [dataset.Rescale(config.resize_param), dataset.ToTensor()]))
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False)

models = {
    'resnet': ResNet.ResNetClassifier(
        in_size=config.in_size, out_classes=1, channels=[32, 64, 128, 256, 512, 1024],
        pool_every=1, hidden_dims=[100] * 2,
        activation_type='relu',
        pooling_type='avg', pooling_params=dict(kernel_size=2),
        batchnorm=True, dropout=0.2, ),

    'cnn': ResNet.ConvClassifier(
Example no. 16
    return picked


if __name__ == '__main__':
    images_dir = './data/images'
    checkpoints_dir = './checkpoints'
    if not os.path.exists(checkpoints_dir):
        os.makedirs(checkpoints_dir)

    data_transforms = {
        'train': torchvision.transforms.Compose([
            dataset.RandomRotate(15),
            dataset.RandomCrop(512),
            dataset.RandomColorShift(),
            dataset.Normalize(config.mean, config.std),
            dataset.ToTensor(dtype=config.dtype)
        ]),
        'eval': torchvision.transforms.Compose([
            dataset.CenterCrop(512),
            dataset.Normalize(config.mean, config.std),
            dataset.ToTensor(dtype=config.dtype),
        ]),
    }

    df = pd.read_csv('./data/all.csv', usecols=['patientid', 'finding', 'filename'])
    # Row-wise string match across the selected columns: rows mentioning COVID-19 vs. all other findings
    covid_df = df[df.apply(lambda x: 'COVID-19' in str(x), axis=1)]
    other_df = df[df.apply(lambda x: 'COVID-19' not in str(x), axis=1)]
    print('Unique COVID-19 images:', len(covid_df))
    print('Unique COVID-19 patients:', len(covid_df['patientid'].unique()))
    print('Unique other images:', len(other_df))
    print('Unique other patients:', len(other_df['patientid'].unique()))
Example no. 17
        if m.bias is not None:
            torch.nn.init.constant_(m.bias.data, 0.2)
    elif isinstance(m, nn.Linear):
        torch.nn.init.normal_(m.weight.data, mean=0., std=0.01)
        torch.nn.init.constant_(m.bias.data, 0.)


if __name__ == '__main__':

    train_data = dataset.Dataset_Load(opt.cover_path,
                                      opt.stego_path,
                                      opt.train_size,
                                      transform=transforms.Compose([
                                          dataset.ToPILImage(),
                                          dataset.RandomRotation(p=0.5),
                                          dataset.ToTensor()
                                      ]))

    val_data = dataset.Dataset_Load(opt.valid_cover_path,
                                    opt.valid_stego_path,
                                    opt.val_size,
                                    transform=dataset.ToTensor())

    train_loader = DataLoader(train_data,
                              batch_size=opt.batch_size,
                              shuffle=True)
    valid_loader = DataLoader(val_data,
                              batch_size=opt.batch_size,
                              shuffle=False)

    # model creation and initialization