Esempio n. 1
0
def get_dataloader(root_path, subset, transform, batch, workers):
    '''
    Returns a dataloader object over one ImageFolder subdirectory
    -----------------------
    Args:
        - root_path (str or Path): dataset root directory
        - subset (str)
            "train", "val", "test" (subdirectory of root_path)
        - transform (torch transform obj): applied to every image
        - batch (int): batch size
        - workers (int): number of worker processes for loading

    Returns: dataloader object (shuffled only for the "train" subset)
    -----------------------
    '''
    # BUG FIX: Path.joinpath is an *instance* method; calling it on the
    # class with a plain str self raised at runtime. Build the path with
    # the / operator, which also accepts a str root_path.
    datafolder = datasets.ImageFolder(
        Path(root_path) / subset,
        transform,
    )
    # The two branches differed only in `shuffle`; collapse them.
    return Dataloader(datafolder,
                      batch_size=batch,
                      shuffle=(subset == "train"),
                      num_workers=workers)
Esempio n. 2
0
def test(**kwargs):
    """Evaluate the latest checkpoint on the test split.

    Writes per-frame ground-truth and predicted class indices to two text
    files (for building a confusion matrix) and prints overall accuracy.

    Args:
        **kwargs: config overrides forwarded to ``opt._parse``.
    """
    opt._parse(kwargs)
    # NOTE(review): eval() on config strings executes arbitrary code if opt
    # values ever come from untrusted input — prefer an explicit registry.
    model = eval("models." + opt.model + "_" + opt.feature + "_" +
                 str(opt.classes) + "class()")
    data = eval(opt.feature + "(\"test\")")
    test_dataloader = Dataloader(data,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=opt.num_workers)

    # NOTE(review): this sets opt.nodes but the filenames below read
    # opt.notes — likely a typo for opt.notes; confirm before changing.
    opt.nodes = ''

    # Common filename prefix for both label dumps.
    prefix = ('./confusion_matrix/' + opt.notes + opt.model + "_" +
              opt.feature + "_" + str(opt.classes) + "class")

    model.load_latest(opt.notes)
    model.to(opt.device)
    total = 0
    cnt = 0
    # Context managers guarantee both files are closed even if evaluation
    # raises mid-loop (the originals were left open on error).
    with open(prefix + '_real_label.txt', mode='w', encoding='utf-8') as f1, \
         open(prefix + '_pred_label.txt', mode='w', encoding='utf-8') as f2:
        for ii, (data, label) in tqdm(enumerate(test_dataloader)):
            if opt.feature == "mel_plus_cqt":
                input = [_data.cuda() for _data in data]
            else:
                input = data.cuda()
            target = label.cuda()
            score = model(input)
            # Hoist the argmax computations: the original re-ran
            # torch.argmax over the full tensors for every (i, j) pair.
            pred_idx = torch.argmax(score, dim=1)
            true_idx = torch.argmax(target, dim=1)
            # calculate precision
            for i in range(score.shape[0]):
                for j in range(opt.duration):
                    cnt += 1
                    if pred_idx[i][j] == true_idx[i][j]:
                        total += 1
                    f1.write(str(true_idx[i][j].item()) + '\n')
                    f2.write(str(pred_idx[i][j].item()) + '\n')
    print(cnt)
    print("***************************************************total is:",
          total / cnt)
Esempio n. 3
0
    # --- training entry-point body (enclosing scope starts above this view) ---
    init_seed(0)  # fix RNG seeds so runs are reproducible
    # resnet = Resnet1D_bottleneck(channels=[128, 256, 512, 512], blocks=(1, 1, 1, 1))
    # 1D ResNet backbone: four stages of one block each, 9 output classes.
    resnet = Resnet1D(channels=[128, 256, 512, 512],
                      blocks=(1, 1, 1, 1),
                      num_class=9)
    model = Model(resnet)

    # Training configuration as an attribute-style dict (easydict).
    conf = edict()
    conf.max_epochs = 1000
    conf.use_cuda = True
    conf.save_model_name = 'model_new_2.pkl'  # checkpoint filename
    conf.resume = None  # no checkpoint to resume from
    conf.optimizer = optim.Adam(model.parameters(),
                                lr=0.001,
                                weight_decay=2e-4)
    # conf.optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=2e-4)
    # Cosine annealing with warm restarts every 100 epochs, floor lr 1e-5.
    conf.lr_scheduler = CosineAnnealingWarmRestarts(conf.optimizer,
                                                    T_0=100,
                                                    T_mult=1,
                                                    eta_min=1e-5)  #
    # conf.lr_scheduler = StepLR(conf.optimizer, 1, 0.95) # 41-- 0.001
    # Dataset_triple presumably yields sample triplets (n_triple per epoch)
    # — TODO confirm against its definition.
    conf.dataloader = Dataloader(Dataset_triple(split='training',
                                                n_triple=1000),
                                 batch_size=512,
                                 shuffle=True)
    conf.test_dataloader = Dataloader(Dataset(split='testing', binary=False),
                                      batch_size=4096,
                                      shuffle=False)
    # conf.lossf = nn.CrossEntropyLoss(weight=torch.tensor([1]+[0.05]*8).cuda())
    conf.lossf = nn.CrossEntropyLoss()
    best_model = model.train_model(conf)
Esempio n. 4
0
            print('--- val ---')
            val_metric = self.eval_model(eval_loader, verbose=True)
            print('==' * 10)

        return best_model


if __name__ == "__main__":
    init_seed(0)  # fix RNG seeds so runs are reproducible
    # Smaller 1D ResNet backbone: three stages of one block each.
    resnet = Resnet1D(channels=[64, 128, 256], blocks=(1, 1, 1))
    model = Model(resnet)

    # Training configuration as an attribute-style dict (easydict).
    conf = edict()
    conf.max_epochs = 200
    conf.use_cuda = True
    conf.save_model_name = 'model_st.pkl'  # checkpoint filename
    conf.resume = None  # no checkpoint to resume from
    conf.optimizer = optim.Adam(model.parameters(),
                                lr=0.001,
                                weight_decay=2e-4)
    # conf.lr_scheduler = CosineAnnealingWarmRestarts(conf.optimizer, T_0=25, T_mult=1, eta_min=1e-5) # model_dr0.5
    # Halve the learning rate every 25 epochs.
    conf.lr_scheduler = StepLR(conf.optimizer, 25, 0.5)  # model_st
    conf.dataloader = Dataloader(Dataset(split='training', oversample=False),
                                 batch_size=512,
                                 shuffle=True)
    conf.test_dataloader = Dataloader(Dataset(split='testing'),
                                      batch_size=4096,
                                      shuffle=False)
    conf.lossf = nn.CrossEntropyLoss()
    best_model = model.train_model(conf)
def main():
    """Distributed MNIST training entry point: one process per GPU via NCCL.

    Parses CLI args into the module-level ``args``, initializes the process
    group, builds sharded train/test loaders, trains a DDP-wrapped Net for
    ``args.epochs`` epochs, and optionally saves the final weights.
    """
    global args
    parser = build_parser()
    args = parser.parse_args()

    ######### set distributed args for multi-gpus ############
    local_rank = args.local_rank

    # BUG FIX: the original called dist.get_rank() before
    # init_process_group(), which raises because the default process group
    # is not initialized yet. Initialize first, then derive the seed.
    dist.init_process_group(
        'nccl',
        init_method='tcp://localhost:9118',
        rank=local_rank,
        world_size=args.gpus,
    )
    torch.cuda.set_device(local_rank)
    cur_device = torch.cuda.current_device()

    rank = dist.get_rank()
    # Offset the seed by rank so each worker gets a distinct random stream.
    set_seed(args.seed + rank)

    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    dataset1 = datasets.MNIST('../data',
                              train=True,
                              download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('../data', train=False, transform=transform)

    def construct_sampler(dataset, shuffle):
        # DistributedSampler shards the dataset across ranks; with a single
        # GPU, fall back to the loader's default sampler.
        sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=shuffle) \
                    if args.gpus > 1 else None
        return sampler

    # Per-GPU batch sizes; shuffling is delegated to the sampler when
    # sharded, so the loaders themselves use shuffle=False.
    train_loader = Dataloader(dataset1,
                              batch_size=args.batch_size // max(args.gpus, 1),
                              shuffle=False,
                              sampler=construct_sampler(dataset1, shuffle=True),
                              num_workers=4,
                              pin_memory=True,
                              drop_last=True)
    test_loader = Dataloader(dataset2,
                             batch_size=args.test_batch_size // max(args.gpus, 1),
                             shuffle=False,
                             sampler=construct_sampler(dataset2, shuffle=False),
                             num_workers=4,
                             pin_memory=True)

    if dist.get_rank() == 0:
        print("Total train examples: {} total test examples: {} \n".format(
            len(train_loader.dataset), len(test_loader.dataset)))
        print("Building model......\n")
    model = Net().cuda(device=cur_device)
    model = DDP(model, device_ids=[cur_device], output_device=cur_device)

    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        # BUG FIX: DistributedSampler requires set_epoch() each epoch;
        # otherwise every epoch replays the exact same shuffling order.
        if args.gpus > 1:
            train_loader.sampler.set_epoch(epoch)
        if dist.get_rank() == 0:
            print("Training epoch ", epoch)
        train(args, model, cur_device, train_loader, optimizer, epoch)
        test(model, cur_device, test_loader)
        scheduler.step()

    if args.save_model:
        # Unwrap DDP before saving so the checkpoint has no 'module.' prefix.
        torch.save(model.module.state_dict(), "mnist_cnn.pt")
Esempio n. 6
0
def train(**kwargs):
    """Train the classifier selected by opt.model/opt.feature/opt.classes.

    Splits the chosen dataset into train/eval subsets, trains with BCE loss
    for ``opt.max_epoch`` epochs, evaluates after each epoch, and saves the
    model every ``opt.print_freq`` epochs.

    Args:
        **kwargs: config overrides forwarded to ``opt._parse``.
    """
    # parse args, load network, set class number as 4/7
    opt._parse(kwargs)
    # NOTE(review): eval() on config strings executes arbitrary code if opt
    # values ever come from untrusted input — prefer an explicit registry.
    model = eval("models." + opt.model + "_" + opt.feature + "_" +
                 str(opt.classes) + "class()")
    print(model.get_model_name())

    if opt.load_latest is True:
        model.load_latest(opt.notes)
    elif opt.load_model_path:
        print("load_model:", opt.load_model_path)
        model.load(opt.load_model_path)
    model.to(opt.device)
    model.save(opt.notes)
    print("Loading data..")
    # load data
    data = eval(opt.feature + "(\"train\")")

    lengths = [opt.train_len, opt.eval_len]
    train_mel_dataset, test_mel_dataset = torch.utils.data.dataset.random_split(
        data, lengths)
    # NOTE(review): shuffle=False on the *training* loader is unusual —
    # random_split assigns samples randomly once, but batches then repeat in
    # the same order every epoch. Confirm this is intentional.
    train_dataloader = Dataloader(train_mel_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=opt.num_workers)
    test_dataloader = Dataloader(test_mel_dataset,
                                 batch_size=opt.batch_size,
                                 shuffle=False,
                                 num_workers=opt.num_workers)
    # set loss and learning parameters
    criterion = torch.nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=opt.lr,
                                 weight_decay=opt.weight_decay)
    loss_meter = torchnet.meter.AverageValueMeter()

    # Baseline evaluation before any training; the return value was never
    # used, so the call is kept only for its reporting side effects.
    eva_test(test_dataloader, model, criterion)
    # begin train
    print("Begin train..")
    for epoch in range(opt.max_epoch):
        print("this is ", epoch, "epoch")
        loss_meter.reset()
        loss_sum = 0.0
        count = 0

        for ii, (data, label) in tqdm(enumerate(train_dataloader)):
            if opt.feature == "mel_plus_cqt":
                input = [_data.cuda() for _data in data]
            else:
                input = data.cuda()
            target = label.cuda()

            optimizer.zero_grad()
            score = model(input)

            loss = criterion(score, target)
            loss.backward()
            loss_sum += loss.item()
            count += 1
            optimizer.step()
            loss_meter.add(loss.item())

        eva_test(test_dataloader, model, criterion)
        print("train_loss:", loss_meter.value()[0], " ", loss_sum / count)

        if epoch % opt.print_freq == opt.print_freq - 1:
            print("train_loss:", loss_meter.value()[0], " ", loss_sum / count)
            model.save(opt.notes)
Esempio n. 7
0
# Move the networks and loss modules to the training device.
discriminator = discriminator.to(device)
features = features.to(device)

criterion_Gan = criterion_Gan.to(device)
criterion_percept = criterion_percept.to(device)

# set optimizers (Adam)
optimizer_G = torch.optim.Adam(generator.parameters(), lr=0.001, betas=(0.5, 0.99))
# BUG FIX: the discriminator optimizer was built over generator.parameters(),
# so discriminator weights were never updated during training. Optimize the
# discriminator's own parameters instead.
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=0.001, betas=(0.5, 0.99))

# NOTE(review): torch device strings are "cuda"/"cpu"; comparing against
# "gpu" is always False, so Tensor is always torch.Tensor here — confirm
# whether this was meant to test for CUDA.
Tensor = torch.cuda.FloatTensor if device == "gpu" else torch.Tensor

# create the dataloader over the 125x125 HR image folder
dataloader = Dataloader(ImagesDataset("data/HRimages", (125, 125)),
                        batch_size=3,
                        shuffle=True)

#feature extractor using the pretrained vgg
def extractor(real, fake):
	"""Return pretrained-VGG feature maps for a (real, fake) image pair.

	Both images are passed through the global ``features`` network; the
	result is the tuple (features(real), features(fake)).
	"""
	return features(real), features(fake)


print("start training")
for epoch in range(opt.epochs):
	print("epoch :",epoch)
	for i,img in enumerat(dataloader):
    #get the hr and lr images
		img_lr=Variable(img["lr"].type(Tensor))
Esempio n. 8
0
from sklearn.manifold import TSNE
from utils import Dataset, evaluation_metrics
import matplotlib.pyplot as plt
from main import Model
import torch
import pickle
import numpy as np
import os
from sklearn.metrics import confusion_matrix

# Run the model/tensors on the GPU.
use_cuda = True

# NOTE(review): unpickling executes arbitrary code if the checkpoint file is
# untrusted — only load files produced by this project.
with open('./exp/model_True_4_0', 'rb') as fp:
    model = pickle.load(fp)

# Test-split loader; default ordering keeps predictions aligned with labels.
loader = Dataloader(Dataset(split='testing', oversample=False),
                    batch_size=4096)

FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
model.eval()  # inference mode: freeze dropout/batch-norm statistics
y_truth = []   # ground-truth labels, one ndarray per batch
y_pred = []    # predicted class indices, one ndarray per batch
fea_list = []  # feature embeddings, one ndarray per batch
for i, (X, y) in enumerate(loader):
    X = X.type(FloatTensor)
    with torch.no_grad():
        # Model returns a pair — presumably (class scores, feature
        # embedding); confirm against main.Model's forward().
        pred, fea = model(X)
    y_pred.append(pred.cpu().numpy().argmax(axis=-1))

    y_truth.append(y.numpy())
    fea_list.append(fea.cpu().numpy())
Esempio n. 9
0
    def __init__(self, csv_file, txt_file, root_dir, other_file):
        """Load a CSV table and a text file's raw lines into memory.

        Args:
            csv_file: path to a CSV file, parsed with pandas.
            txt_file: path to a text file; stored as a list of raw lines.
            root_dir: base directory, stored but unused in the visible code.
            other_file: unused in the visible code — TODO confirm it is needed.
        """
        self.csv_data = pd.read_csv(csv_file)
        with open(txt_file, 'r') as f:
            data_list = f.readlines()
        self.txt_data = data_list
        self.root_dir = root_dir

    def __len__(self):
        # Dataset size is driven by the CSV table (number of rows).
        return len(self.csv_data)

    def __getitem__(self, idx):
        """Return the (csv, txt) pair at position ``idx``."""
        # NOTE(review): DataFrame[...] indexes by *column label*, not row
        # number — row access likely needs self.csv_data.iloc[idx]; confirm.
        data = (self.csv_data[idx], self.txt_data[idx])
        return data


# NOTE(review): myDataset is passed as a *class*, not an instance — a
# dataloader normally expects a constructed dataset object; confirm.
dataiter = Dataloader(myDataset, batch_size=32, shuffle=True)

#%% module
from torch import nn, from_numpy
from torch.autograd import Variable


class net(nn.Module):
    """Minimal example network: a single Conv2d layer.

    NOTE(review): ``in_channels``, ``out_channels`` and ``kernel_size`` are
    not defined in the visible code — instantiating this class as written
    raises NameError unless they exist at module scope; confirm.
    """
    def __init__(self, other_args):
        # other_args is unused in the visible code.
        super(net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size)
        # other network layers

    def forward(self, x):
        # Apply the single convolution; extend here for deeper models.
        x = self.conv1(x)
        return x
Esempio n. 10
0
cudnn.benchmark = True  # let cuDNN autotune the fastest conv algorithms
device = torch.device("cuda")

#Dataset Processing
transform = tf.Compose([
    tf.Resize(256), #Default image_size
    tf.ToTensor(), #Transform it to a torch tensor
    # NOTE(review): cropping after ToTensor works only on torchvision
    # versions whose CenterCrop accepts tensors — confirm the version.
    tf.CenterCrop(256),
    #tf.Lambda(lambda x:x[torch.LongTensor([2, 1,0])]), #Converting from RGB to BGR
    #tf.Normalize(mean=[0.40760392, 0.45795686, 0.48501961], std=[0.225, 0.224, 0.229]), #subtracting imagenet mean
    tf.Lambda(lambda x: x.mul_(255))  # rescale to [0, 255] in place
    ])

data = "./data/"
train_dataset = datas.ImageFolder(data, transform)
train_loader = Dataloader(train_dataset, batch_size=4)

transformer = TransformerNet().to(device)
optimizer = Adam(transformer.parameters(), 1e-3)  # learning rate 1e-3
mse_loss = nn.MSELoss()

vgg_directory = "./vgg_conv.pth" #path to pretrained vgg vgg_directory
vgg = VGG()
vgg.load_state_dict(torch.load(vgg_directory))
# Freeze VGG: it is only a fixed feature extractor for the perceptual loss.
for param in vgg.parameters():
    param.requires_grad = False

vgg.to(device) # Putting model on cuda

def load_img(path):
    img = Image.open(path)
Esempio n. 11
0

train_set = torchvision.datasets.FashionMNIST(
    root='./data/FashionMNIST',
    train=True,
    #download=True,
    transform=transforms.Compose([
        transforms.ToTensor()
    ])
)
#/---------------------Training with a single batch-----------------/
#Step1: Initialize a network.
network = Network()
# NOTE(review): self-assignment is a no-op — possibly a leftover from
# something like `network = network.cuda()`; confirm.
network = network
#Step2: General setup: Load data. Setup optimizer. Initialize total loss and correct.
train_loader = Dataloader(train_set, batch_size=100)
optimizer = optim.Adam(network.parameters(), lr=0.01) #lr: learning rate.

# Running count of batches processed across all epochs.
batch_num = 0

#Step3: Get batch from the train_loader
#batch = next(iter(train_loader)) #Train with single batch.
for epoch in range(5):

    total_loss = 0
    total_correct = 0

    for batch in train_loader:
        batch_num += 1
        images, labels = batch
        images = images