Example No. 1
def main(opt):

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Dataset
    print('Dataset....')
    transform = transforms.Compose([
        transforms.Resize((600, 600)),
        transforms.Grayscale(3),
        transforms.ToTensor()
    ])

    train_set = myDataset(image_path=opt.train_path, transform=transform)
    val_set = myDataset(image_path=opt.val_path, transform=transform)

    train_loader = DataLoader(train_set, batch_size=opt.train_batch_size,
                              shuffle=True)  # shuffle training batches
    val_loader = DataLoader(val_set, batch_size=opt.val_batch_size)

    # Model
    print('Model....')
    model = AutoEncoder()
    model.to(device)

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    loss_func = nn.MSELoss()

    # Train
    print('Training....')

    train_epoch_loss = []
    val_epoch_loss = []
    train_iter_losses = []
    val_iter_losses = []
    best = float('inf')  # best validation loss seen so far
    for e in range(opt.epoch):
        train_iter_loss = train(opt, model, train_loader, optimizer, loss_func,
                                device, e)
        train_iter_losses += train_iter_loss
        train_epoch_loss.append(sum(train_iter_loss))

        val_iter_loss = val(opt, model, val_loader, loss_func, device, e)
        val_iter_losses += val_iter_loss
        val_epoch_loss.append(sum(val_iter_loss))

        # save whenever the validation loss improves (this saves the full
        # module; saving the state_dict is the more portable alternative)
        if val_epoch_loss[-1] < best:
            print('Saving Model....')
            torch.save(model, 'weights/AutoEncoder_try1.pth')
            best = val_epoch_loss[-1]

    print('Saving Result')
    plt.figure(figsize=(10, 10))
    plt.plot(train_iter_losses)
    plt.plot(val_iter_losses)
    plt.legend(['Train_loss', 'Val_loss'])

    plt.savefig('Result.jpg')
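
Example No. 1 calls train and val helpers that are not shown; a minimal sketch consistent with the call sites above (only the signatures come from the example, the bodies and the (image, label) batch format are assumptions):

import torch

def train(opt, model, loader, optimizer, loss_func, device, epoch):
    model.train()
    iter_losses = []
    for images, _ in loader:  # batch format is an assumption
        images = images.to(device)
        optimizer.zero_grad()
        recon = model(images)
        loss = loss_func(recon, images)  # autoencoder reconstructs its input
        loss.backward()
        optimizer.step()
        iter_losses.append(loss.item())
    return iter_losses

def val(opt, model, loader, loss_func, device, epoch):
    model.eval()
    iter_losses = []
    with torch.no_grad():
        for images, _ in loader:
            images = images.to(device)
            iter_losses.append(loss_func(model(images), images).item())
    return iter_losses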
Example No. 2
def train():
    # truncate the loss log at the start of the run
    open('loss.txt', 'w').close()

    start_epoch = 0
    if args.continue_train:
        checkpoint = torch.load(os.path.join(args.ckpt_dir, 'state_final.pth'))
        start_epoch = checkpoint['epoch'] + 1
        if start_epoch < args.epoch:
            enhancenet.load_state_dict(checkpoint['enhance'])
            enhance_optim.load_state_dict(checkpoint['enhance_optim'])
    print('starting at epoch %d' % start_epoch)
    train_set = myDataset(args.train_dir, 'train', args.patch_size)
    # shuffle=True reshuffles every epoch, so one DataLoader suffices
    dataloader = DataLoader(train_set, batch_size=args.batch_size,
                            shuffle=True, pin_memory=True)
    sum_loss_f = 0.0
    for epoch in range(start_epoch, args.epoch):
        step_num = 0
        sum_loss = 0.0
        for _, data in enumerate(dataloader):
            low_im, high_im = data
            low_im, high_im = low_im.cuda(), high_im.cuda()
            enhance_optim.zero_grad()
            S_low = enhancenet(low_im)
            loss = final_loss(S_low, high_im)
            loss.backward()
            enhance_optim.step()
            sum_loss += loss.item()
            step_num += 1
        print('epoch: %d, loss: %f' % (epoch, sum_loss / step_num))
        sum_loss_f += sum_loss / step_num
        w_log.add_scalar('loss', sum_loss / step_num, epoch)
        # if epoch % 200 == 0:
        #     eval()
        if epoch == 2000:
            for pa in enhance_optim.param_groups:
                pa['lr'] = args.start_lr / 10.0
        elif epoch == 3000:
            for pa in enhance_optim.param_groups:
                pa['lr'] = args.start_lr / 50.0
        elif epoch == 4000:
            for pa in enhance_optim.param_groups:
                pa['lr'] = args.start_lr / 100.0
        # every 50 epochs: append the running-average loss and checkpoint
        if epoch % 50 == 1:
            with open('loss.txt', 'a') as f:
                if epoch > 1:
                    f.write(str(sum_loss_f / 50) + '\n')
            sum_loss_f = 0.0
            state = {'enhance': enhancenet.state_dict(),
                     'enhance_optim': enhance_optim.state_dict(),
                     'epoch': epoch}
            if not os.path.isdir(args.ckpt_dir):
                os.makedirs(args.ckpt_dir)
            torch.save(state, os.path.join(args.ckpt_dir, 'state_final.pth'))
    state = {'enhance': enhancenet.state_dict(),
             'enhance_optim': enhance_optim.state_dict(),
             'epoch': epoch}
    torch.save(state, os.path.join(args.ckpt_dir, 'state_final.pth'))
    w_log.close()
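
The manual learning-rate drops above (/10 at epoch 2000, /50 at 3000, /100 at 4000) could equivalently be expressed with a scheduler; a minimal sketch, assuming enhance_optim was created with lr=args.start_lr:

from torch.optim.lr_scheduler import LambdaLR

def lr_factor(epoch):
    # multiplier applied to the initial lr, matching the manual schedule
    if epoch < 2000:
        return 1.0
    if epoch < 3000:
        return 0.1
    if epoch < 4000:
        return 0.02
    return 0.01

scheduler = LambdaLR(enhance_optim, lr_lambda=lr_factor)
# call scheduler.step() once per epoch, after enhance_optim.step()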
Example No. 3
def eval():
    eval_set = myDataset(args.train_dir, 'eval', args.patch_size)
    with torch.no_grad():
        dataLoader = DataLoader(eval_set, batch_size=1)
        for step, data in enumerate(dataLoader):
            low_im, high_im = data
            low_im, high_im = low_im.cuda(), high_im.cuda()
            out = enhancenet(low_im)
            # despite the original name, this is the mean absolute error (L1), not MSE
            mae = torch.abs(high_im - out).mean()
            print(mae.item())
            torch.cuda.empty_cache()
Example No. 4
def main(config):
    # cudnn.benchmark auto-tunes convolution algorithms for fixed-size inputs;
    # note that cudnn.deterministic = True restricts that choice, trading some
    # of the speed back for reproducibility.
    cudnn.deterministic = True
    cudnn.benchmark = True

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485,0.456,0.406], [0.229,0.224,0.225])
    ])

    trainset = myDataset(root=config.trainset_dir, transform=transform, randomflip=True)
    
    # load the valset
    valset = myDataset(root=config.valset_dir, transform=transform)
    # Data loader.
    trainset_loader = DataLoader(trainset, batch_size=config.batch_size, shuffle=True, num_workers=1)
    valset_loader = DataLoader(valset, batch_size=config.batch_size, shuffle=False, num_workers=1)
    
    solver = Solver(trainset_loader, valset_loader, config)

    solver.train()
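
The Solver class is defined elsewhere in the project; a minimal sketch of the interface implied by the call above (the constructor signature comes from the example, the body and the num_epochs field are assumptions):

import torch

class Solver:
    def __init__(self, train_loader, val_loader, config):
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.config = config
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

    def train(self):
        # placeholder loop; the real project builds the model, optimizer
        # and loss inside Solver
        for epoch in range(self.config.num_epochs):  # field name is an assumption
            for batch in self.train_loader:
                pass  # forward / backward / step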
Example No. 5
def main():
	lr = 0.0003
	mod = 'R'
	epochs = 25
	layer_num = 3
	batch_size = 64

	if mod == 'R':
		print("Layer Number: %d, Batch_size: %d, Learning Rate: %.4f, Activation Function: ReLU" % (layer_num, batch_size, lr))
	else:
		print("Layer Number: %d, Batch_size: %d, Learning Rate: %.4f, Activation Function: Tanh" % (layer_num, batch_size, lr))

	use_cuda = torch.cuda.is_available()
	device = torch.device('cuda' if use_cuda else 'cpu')
	torch.manual_seed(1234)

	transforms = T.Compose([
		T.ToTensor(),
		T.Normalize((0.5,), (0.5,)),  # single-channel mean/std as 1-tuples
	])
	train_dataset = myDataset('data', 'train', transforms)
	# the fixed 8000/2000 split assumes the dataset holds 10000 samples
	train_set, valid_set = random_split(train_dataset, [8000, 2000])
	
	train_loader = DataLoader(train_set, 
		batch_size=batch_size, 
		shuffle=True,
		num_workers=4)

	valid_loader = DataLoader(valid_set,
		batch_size=batch_size,
		shuffle=False,  # no need to shuffle for validation
		num_workers=4)
	if layer_num == 2 and mod == 'T':
		model = T2Model()
	elif layer_num == 3 and mod == 'T':
		model = T3Model()
	elif layer_num == 2 and mod == 'R':
		model = R2Model()
	else:
		model = R3Model()


	model = model.to(device)

	optimizer = torch.optim.Adam(model.parameters(), lr=lr)

	train(model, optimizer, train_loader, valid_loader, epochs, device, lr)
Example No. 6
def plot(epochs):
    base_path = os.getcwd()
    net = network.Net()
    ckpt_dir = base_path + "/net_training/"

    # the data loader and device can be set up once, outside the epoch loop
    dataset_val = dataset.myDataset(base_path + "/ds/val", transform=network.ToTensor())
    valloader = torch.utils.data.DataLoader(dataset_val, batch_size=1, shuffle=False, num_workers=1)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    net.to(device)

    for j in range(epochs):
        # load the weights saved after epoch j and switch to evaluation mode
        name = "net_epoch" + str(j)
        net.load_state_dict(torch.load(ckpt_dir + name, map_location=device))
        net.eval()

        L = []
        O = []
        print('epoch ' + str(j))
        for i, data in enumerate(valloader):
            img = data['image'].to(device)
            label = data['label']
            lab = label.numpy().flatten()
            output = net(img)
            out = output[0, :, :, :].detach().cpu().numpy().flatten()
            L.append(lab)
            O.append(out)

        L = np.concatenate(L)
        O = np.concatenate(O)

        # compute the ROC curve and plot it on a log-scaled FPR axis
        fpr, tpr, thresholds = metrics.roc_curve(L, O)
        plt.clf()
        plt.semilogx(fpr, tpr, color='darkorange', lw=2, label='ROC curve')
        plt.xlim([0.000001, 1.0])
        plt.ylim([0.0, 1])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic example')
        plt.legend(loc="lower right")
        plt.savefig(ckpt_dir + name + ".png")
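
If the area under each curve is also wanted, it can be computed from the same fpr/tpr arrays with sklearn's metrics.auc; a small optional addition to the plotting code above:

roc_auc = metrics.auc(fpr, tpr)  # area under the ROC curve for this epoch
plt.semilogx(fpr, tpr, color='darkorange', lw=2,
             label='ROC curve (AUC = %0.3f)' % roc_auc)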
Example No. 7
def test():
    checkpoint = torch.load(os.path.join(args.ckpt_dir, 'state_final.pth'))
    enhancenet.load_state_dict(checkpoint['enhance'])
    print('load weights successfully')
    args.batch_size = 1
    test_set = myDataset(args.train_dir, 'test', args.patch_size)
    print('number of test samples: %d' % test_set.len)
    dataLoader = DataLoader(test_set, batch_size=args.batch_size)
    with torch.no_grad():
        for step, data in enumerate(dataLoader):
            low_im, high_im, idstr, ratio = data
            low_im = low_im.cuda()
            print(low_im.shape)
            out = enhancenet(low_im)
            # clamp the prediction into the valid [0, 1] image range
            out_cpu = out.cpu().clamp(0.0, 1.0)
            save_images(os.path.join(args.save_dir, '%d_00_%d_out.png' % (idstr, ratio)), out_cpu.numpy())
            save_images(os.path.join(args.save_dir, '%d_00_%d_gt.png' % (idstr, ratio)), high_im.numpy())
            torch.cuda.empty_cache()
Example No. 8
def main(config):
    # For fast training.
    cudnn.benchmark = True

    if config.use_wandb:
        import wandb
        wandb.init(project="dlcv-hw3-1", config=config)
        config = wandb.config  # adopt the (possibly sweep-overridden) config
        print(config)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])
    trainset = myDataset(root=config.data_path, transform=transform)
    trainset_loader = DataLoader(trainset,
                                 batch_size=config.batch_size,
                                 shuffle=True,
                                 num_workers=1)

    solver = Solver(trainset_loader, config)
    solver.train()
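
With use_wandb enabled, metrics are typically reported from inside Solver via wandb.log; a minimal sketch (project name reused from above, metric names and values are placeholders):

import wandb

run = wandb.init(project="dlcv-hw3-1")
wandb.log({"train/loss": 0.123, "epoch": 0})  # placeholder values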
Example No. 9

if __name__ == '__main__':

    device = 'cpu'
    batch_size = 1
    num_epochs = 50
    lr = 0.0001
    num_workers = 0

    model = UNet(2, 3).to(device)

    continue_train = True

    train_data_path = './test'
    train_dataset = myDataset(train_data_path)

    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=num_workers)

    start = 0
    if continue_train:
        model.load_state_dict(torch.load('./trained_model/model1.pth', map_location=device))

    model.eval()
    dice_sum = 0
    f1_sum = 0
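
The listing ends before the evaluation loop; the dice_sum and f1_sum accumulators suggest per-batch Dice/F1 scoring. A hedged sketch of a Dice coefficient for binary masks (the 0.5 threshold and tensor layout are assumptions):

import torch

def dice_coefficient(pred, target, eps=1e-7):
    # pred, target: float tensors of identical shape with values in [0, 1]
    pred = (pred > 0.5).float()  # assumed binarization threshold
    inter = (pred * target).sum()
    return (2.0 * inter + eps) / (pred.sum() + target.sum() + eps)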
Example No. 10
class T2Model(nn.Module):
    def __init__(self):
        super(T2Model, self).__init__()
        self.fc1 = nn.Linear(64 * 32, 1024)
        self.fc2 = nn.Linear(1024, 100)

    def forward(self, x):
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = torch.tanh(x)  # F.tanh is deprecated in favor of torch.tanh
        x = self.fc2(x)
        x = torch.log_softmax(x, dim=1)
        return x


if __name__ == "__main__":
    transforms = T.Compose([
        T.ToTensor(),
        T.Normalize((0.5,), (0.5,)),
    ])
    dataset = myDataset('data', 'train', transforms)
    dataloader = DataLoader(dataset,
                            batch_size=64,
                            shuffle=True,
                            num_workers=4)

    model = T2Model()

    # single-batch smoke test: run one forward pass, print it, and quit
    for images, labels in dataloader:
        pred = model(images)
        print(pred)
        exit()
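
T2Model ends in log_softmax, so the matching training criterion is nn.NLLLoss (log_softmax followed by NLLLoss is equivalent to nn.CrossEntropyLoss on raw logits); for example, inside the loop above:

criterion = nn.NLLLoss()                 # pairs with the log-probability output
loss = criterion(model(images), labels)  # images/labels come from the dataloader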
Example No. 11
classnum = 4

if __name__ == '__main__':
    # experiment = Experiment(api_key="YOUR API KEY",
    #                         project_name="YOUR_PROJECT_NAME", workspace="YOUR_WORKSPACE_NAME",
    #                         disabled=True)
    # experiment.set_name("mySERpro")

    # device setting
    cuda = torch.cuda.is_available()
    device = torch.device('cuda:%s' % gpu_idx[0] if cuda else 'cpu')
    torch.cuda.set_device(device)

    # define dataset generators
    devset = myDataset(path=my_path + 'valid',
                       batch_size=my_batch_size,
                       uttr_len=my_uttr_len,
                       fre_size=my_fre_size)
    devset_gen = data.DataLoader(devset,
                                 batch_size=my_batch_size,
                                 shuffle=False,
                                 drop_last=False,
                                 num_workers=0)
    # set up save directories
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(save_dir + 'results/', exist_ok=True)
    os.makedirs(save_dir + 'models4sgd/', exist_ok=True)

    # define model
Example No. 12
num_workers = 8
patch_size = (256, 256)  # alternative: (64, 64)
epoch_start = 0
scale = 2  # downscale factor
seed = 123
use_climatology = True
model_name = 'YNet30_test'  # alternative: 'ESPCN'
save_root_path += 'scale{}/'.format(scale)
torch.manual_seed(seed)
cudnn.benchmark = True  # speeds up cuDNN when input sizes do not vary
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('using device {}'.format(device))
nGPU = torch.cuda.device_count()  # number of GPUs available (0, 1, ...)

#%%
train_dataset = myDataset(datapath=train_datapath, datapath2=datapath2)
valid_dataset = myDataset(datapath=valid_datapath, datapath2=datapath2)

#train_dataset = ImageNetDataset(datapath=train_datapath,is_train=True,scale=scale,patch_size=patch_size)
#if is_debug:
#    train_dataset = torch.utils.data.Subset(train_dataset,indices=list(range(0,nSubSample)))
#valid_dataset = ImageNetDataset(datapath=valid_datapath,is_train=False,scale=scale,patch_size=patch_size)
#if is_debug:
#    valid_dataset = torch.utils.data.Subset(valid_dataset,indices=list(range(0,nSubSample)))

print('len(train_dataset)={}'.format(len(train_dataset)))
print('len(valid_dataset)={}'.format(len(valid_dataset)))

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True,
Example No. 13
def main(config):
    # For fast training.
    cudnn.benchmark = True

    if config.use_wandb:
        import wandb
        wandb.init(project="dlcv-hw3-4", config=config)
        config = wandb.config
        print(config)

    label_path = os.path.join(config.data_path, config.src_domain, 'train.csv')
    src_root = os.path.join(config.data_path, config.src_domain, 'train')

    src_label_data = []
    with open(label_path) as f:
        src_label_data += f.readlines()[1:]

    src_label_train, src_label_val = train_test_split(src_label_data,
                                                      test_size=0.25,
                                                      shuffle=True)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])

    transform_aug = transforms.Compose([
        transforms.ColorJitter(brightness=63.0 / 255.0,
                               saturation=[0.5, 1.5],
                               contrast=[0.2, 1.8]),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])

    if config.data_aug:
        src_trainset = myDataset(root=src_root,
                                 label_data=src_label_train,
                                 transform=transform_aug)

    else:
        src_trainset = myDataset_2(root=src_root,
                                   label_data=src_label_train,
                                   transform=transform)

    src_trainset_loader = DataLoader(src_trainset,
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=4)

    src_valset = myDataset_2(root=src_root,
                             label_data=src_label_val,
                             transform=transform)
    src_valset_loader = DataLoader(src_valset,
                                   batch_size=config.batch_size,
                                   shuffle=False,
                                   num_workers=4)

    if config.src_only:
        solver = Solver(src_trainset_loader, src_valset_loader, None, config)

    else:
        tgt_root = os.path.join(config.data_path, config.tgt_domain, 'train')
        tgt_trainset = myDataset_2(root=tgt_root, transform=transform)

        tgt_trainset_loader = DataLoader(tgt_trainset,
                                         batch_size=config.batch_size,
                                         shuffle=True,
                                         num_workers=4)

        solver = Solver(src_trainset_loader, src_valset_loader,
                        tgt_trainset_loader, config)

    solver.train()
Example No. 14
    # set export dir and make copy of current training script inside
    export_path = 'net_training'

    if not os.path.exists(export_path):
        os.makedirs(export_path)
    copyfile(cwd + '/train.py', export_path + '/train_script.py')

    # training parameters
    batch_size = 50
    epochs = 100
    learning_rate = 0.0001
    freeze_layers_for_epoch = 0  # freeze the pretrained part of the network for this many epochs
    flip_images = False

    # load datasets
    dataset_trn = dataset.myDataset(
        cwd + "/ds/train", transform=network.ToTensor(flip=flip_images))
    dataset_val = dataset.myDataset(cwd + "/ds/val",
                                    transform=network.ToTensor())
    trainloader = torch.utils.data.DataLoader(dataset_trn,
                                              batch_size=1,
                                              shuffle=True,
                                              num_workers=2)
    valloader = torch.utils.data.DataLoader(dataset_val,
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=2)
    print("Training gets started!")
    # loop over the dataset multiple times
    for epoch in range(epochs):
        batch = 0
        # create optimizer and set up learning rate
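
The snippet is cut off where the optimizer is built; a hedged sketch of the freezing behaviour implied by freeze_layers_for_epoch (the net variable and its pretrained submodule are assumptions):

# freeze the pretrained backbone for the first few epochs, then train it too
freeze = epoch < freeze_layers_for_epoch
for p in net.pretrained.parameters():  # 'pretrained' attribute name is assumed
    p.requires_grad = not freeze
optimizer = torch.optim.Adam((p for p in net.parameters() if p.requires_grad),
                             lr=learning_rate)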
Example No. 15
gpu_idx = [1]
my_path = '/data3/mahaoxin/emotion/data/wav/'
# save_dir = '/data3/mahaoxin/emotion/exp/models7sgd/best20-0.32692.pt'
save_dir = '/data3/mahaoxin/emotion/exp/models4sgd/best60-0.72603.pt'
my_batch_size = 64
my_uttr_len = 300
my_fre_size = 200

if __name__ == '__main__':
    # device setting
    cuda = torch.cuda.is_available()
    device = torch.device('cuda:%s' % gpu_idx[0] if cuda else 'cpu')
    torch.cuda.set_device(device)

    evalset = myDataset(path=my_path + 'test',
                        batch_size=my_batch_size,
                        uttr_len=my_uttr_len,
                        fre_size=my_fre_size)
    evalset_gen = data.DataLoader(evalset,
                                  batch_size=my_batch_size,
                                  shuffle=False,
                                  drop_last=False,
                                  num_workers=0)

    # load model ('save_dir' actually points at a checkpoint file)
    model = myNet().to(device)
    model.load_state_dict(torch.load(save_dir, map_location=device))
    if len(gpu_idx) > 1:
        model = nn.DataParallel(model, device_ids=gpu_idx)

    model.eval()
    correct = [0.0] * 7  # per-class correct counts for the 7 emotion classes
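
The listing stops after initializing the per-class counters; a hedged sketch of the evaluation loop they imply (the batch format and the total counter are assumptions):

total = [0.0] * 7  # assumed per-class sample counts
with torch.no_grad():
    for feats, labels in evalset_gen:  # batch format is an assumption
        feats, labels = feats.to(device), labels.to(device)
        preds = model(feats).argmax(dim=1)
        for p, t in zip(preds, labels):
            total[t.item()] += 1
            if p.item() == t.item():
                correct[t.item()] += 1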
Example No. 16
os.makedirs(opt.experiment, exist_ok=True)  # portable replacement for os.system('mkdir ...')

opt.manualSeed = random.randint(1, 10000)  # draw a seed for this run and apply it below
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

cudnn.benchmark = True

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

train_dataset = dataset.myDataset(10240)
assert train_dataset
if not opt.random_sample:
    sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)
else:
    sampler = None
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=opt.batchSize,
                                           shuffle=False,
                                           sampler=sampler,
                                           num_workers=int(opt.workers),
                                           collate_fn=dataset.alignCollate(
                                               imgH=opt.imgH,
                                               imgW=opt.imgW,
                                               keep_ratio=opt.keep_ratio))
test_dataset = dataset.myDataset(256,