Example #1
def main():
    import torch
    import configs
    from pytorch_utils import BaseOptions

    options = BaseOptions()
    options.parser = configs.get_args(options.parser)
    args = options.parse_args()

    # Build the KITTI object dataset and wrap it in a batching DataLoader.
    sequence = KITTIObjectSequence(args, "/NAS/data/khantk/training/image_2")
    dataloader = torch.utils.data.DataLoader(sequence,
                                             batch_size=8,
                                             shuffle=True,
                                             num_workers=0)

    # Smoke test: pull a single batch to check that the pipeline runs.
    for i_batch, sample_batched in enumerate(dataloader):
        # print(sequence[2]['next_image'].shape)
        break
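KITTIObjectSequence is not defined in this snippet; for the loop above to run it must be a map-style torch.utils.data.Dataset. A minimal stand-in consistent with the commented-out print (a hypothetical sketch, not the repo's actual class):

import os
from torch.utils.data import Dataset
from torchvision.io import read_image

class KITTIObjectSequence(Dataset):
    """Hypothetical stand-in: serves consecutive image pairs from a directory."""

    def __init__(self, args, image_dir):
        self.paths = sorted(
            os.path.join(image_dir, f)
            for f in os.listdir(image_dir) if f.endswith('.png'))

    def __len__(self):
        # One sample per consecutive (image, next_image) pair.
        return len(self.paths) - 1

    def __getitem__(self, idx):
        return {'image': read_image(self.paths[idx]).float() / 255.0,
                'next_image': read_image(self.paths[idx + 1]).float() / 255.0}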
Example #2
import os
import json

import matplotlib.pyplot as plt
import torchaudio

# get_args, load_data, plot_features and plot_history are project-local helpers.


def main():
    args = get_args()

    class_name = 'rotate'
    file_name = '00000020_NO_07.wav'
    signal_path = os.path.join(args.data_root, 'dataset', class_name, file_name)

    # # Read a sample from the dataset for testing
    # signal = load_data(class_name, file_name, signal_samples, args.data_root, args.signal_sr)

    # torchaudio.load returns (waveform, sample_rate); the rate is unused here.
    signal, _ = torchaudio.load(signal_path)

    # Mel filterbank settings, shared with the MFCC transform below.
    melkwargs = {
        'n_mels': args.nfilt,
        'n_fft': args.nfft,
        'win_length': int(args.winlen * args.signal_sr),
        'hop_length': int(args.winstep * args.signal_sr)
    }

    spectrogram = torchaudio.transforms.Spectrogram(n_fft=args.nfft,
                                                    win_length=int(args.winlen * args.signal_sr),
                                                    hop_length=int(args.winstep * args.signal_sr))(signal)

    mel_spectrogram = torchaudio.transforms.MelSpectrogram(sample_rate=args.signal_sr,
                                                           n_fft=args.nfft,
                                                           n_mels=args.nfilt,
                                                           win_length=int(args.winlen * args.signal_sr),
                                                           hop_length=int(args.winstep * args.signal_sr))(signal)

    mfcc = torchaudio.transforms.MFCC(sample_rate=args.signal_sr,
                                      n_mfcc=args.numcep,
                                      log_mels=True,
                                      melkwargs=melkwargs)(signal)

    num_frames = spectrogram.size(2)

    plot_features(args, spectrogram, mel_spectrogram, mfcc, num_frames)

    history_path = os.path.join('..', 'assets', 'history.json')

    # Load the training history file
    with open(history_path, mode='r') as history_file:
        history = json.load(history_file)

    plot_history(args, history)

    plt.show()
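All three transforms above use the same hop length, so their outputs share the same number of time frames. A quick sanity check along these lines could be added at the end of main() (a sketch; it assumes torchaudio's default center=True STFT padding):

num_samples = signal.size(-1)
hop_length = int(args.winstep * args.signal_sr)
expected_frames = num_samples // hop_length + 1  # center-padded STFT

assert spectrogram.size(-1) == expected_frames
assert mel_spectrogram.size(-1) == expected_frames
assert mfcc.size(-1) == expected_frames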
Example #3
        # Evaluate: run the frozen backbone + linear classifier over the split.
        acc_meter.reset()
        for idx, (images, labels) in enumerate(local_progress):
            with torch.no_grad():
                feature = model(images.to(args.device))
                preds = classifier(feature).argmax(dim=1)
                correct = (preds == labels.to(args.device)).sum().item()
                acc_meter.update(correct / preds.shape[0])
                local_progress.set_postfix({'accuracy': acc_meter.avg})

        global_progress.set_postfix({'epoch': epoch, 'accuracy': acc_meter.avg * 100})


if __name__ == "__main__":
    main(args=get_args())
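The acc_meter used above is constructed elsewhere in the file; its reset/update/avg interface matches the common AverageMeter pattern. A minimal compatible version (a sketch, not necessarily the repo's class):

class AverageMeter:
    """Tracks the running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count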
Example #4
import os

import torch

from pytorch_utils import BaseOptions
import configs

options = BaseOptions()
options.parser = configs.get_args(options.parser)
args = options.parse_args()

# Dispatch to the trainer that matches the requested model.
if args.model.lower() == 'eventgan':
    from models.eventgan_trainer import EventGANTrainer
    trainer = EventGANTrainer(args)
    trainer.train()
elif args.model.lower() in ('flow', 'recons'):
    from models.flow_reconstruct_trainer import FlowReconstructTrainer
    trainer = FlowReconstructTrainer(args)
    trainer.train()

    # Persist the trained network next to the other checkpoints.
    checkpoint_filename = os.path.abspath(
        os.path.join(args.checkpoint_dir,
                     '{}.pickle'.format(args.model.lower())))
    torch.save(trainer.cycle_unet, checkpoint_filename)
else:
    raise ValueError(
        'Model {} not supported, please select from '
        'EventGAN, flow, recons'.format(args.model))
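The if/elif dispatch above can also be written as a table lookup, which keeps the supported-model list in one place and derives the error message from it. A sketch reusing the same trainer classes, with the lazy imports preserved via importlib:

import importlib

TRAINERS = {
    'eventgan': ('models.eventgan_trainer', 'EventGANTrainer'),
    'flow': ('models.flow_reconstruct_trainer', 'FlowReconstructTrainer'),
    'recons': ('models.flow_reconstruct_trainer', 'FlowReconstructTrainer'),
}

def build_trainer(args):
    try:
        module_path, class_name = TRAINERS[args.model.lower()]
    except KeyError:
        raise ValueError('Model {} not supported, please select from {}'.format(
            args.model, ', '.join(sorted(TRAINERS))))
    trainer_cls = getattr(importlib.import_module(module_path), class_name)
    return trainer_cls(args)
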
Example #5

                # Keep only confident pseudo-labels: mask out samples whose max
                # softmax probability falls below the confidence threshold.
                max_probs, targets_u = torch.max(pseudo_label1, dim=-1)
                mask = max_probs.ge(args.threshold_pl).float()
                # Unsupervised loss, with low-confidence samples zeroed out.
                Lu = (F.cross_entropy(ema_logit, targets_u, reduction='none') *
                      mask).mean()
                loss = Lu
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            # Collect this client's updated weights for server-side aggregation.
            w_locals.append(copy.deepcopy(model_local.state_dict()))

            # Free the per-client model and data loader before the next client.
            del model_local
            del train_loader_unlabeled
            gc.collect()
            torch.cuda.empty_cache()

        # Federated averaging: aggregate the client weights into the global model.
        w_glob = FedAvg(w_locals)
        model_glob.load_state_dict(w_glob)

        # `accuracy` and the round counter `iter` come from earlier in the full
        # script; this fragment only shows the aggregation loop.
        if iter % 5 == 0:
            print('Round {:3d}, Acc {:.3f}'.format(iter, accuracy))

if __name__ == "__main__":
    args = get_args()
    main(device=args.device, args=args)
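FedAvg is called above but not defined in this fragment. The standard implementation averages the clients' state_dicts elementwise with uniform weights; a common version (an assumption, since the repo's helper isn't shown):

import copy
import torch

def FedAvg(w_locals):
    """Uniform elementwise average of a list of model state_dicts."""
    w_avg = copy.deepcopy(w_locals[0])
    for key in w_avg.keys():
        for w in w_locals[1:]:
            w_avg[key] += w[key]
        w_avg[key] = torch.div(w_avg[key], len(w_locals))
    return w_avg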
Example #6
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

import configs
import libs.dataset
import models
# Checkpoint and NoamOpt are project-local helpers imported elsewhere in the file.


def train():
    args = configs.get_args()
    use_cuda = args.use_cuda and torch.cuda.is_available()

    # prepare dataset
    dataset = libs.dataset.MyDataset(min_length=args.min_length)
    voc_size = dataset.get_voc_size()
    dataloader = DataLoader(dataset, batch_size=1, shuffle=True, drop_last=False)

    # prepare model
    model = models.TopModuleCNN(voc_size, output_channel=args.output_channel)
    if use_cuda:
        model = model.cuda()

    # load pretrained if asked
    if args.resume:
        checkpoint_path = Checkpoint.get_certain_checkpoint(
            "./experiment/cnn_net", "best")
        resume_checkpoint = Checkpoint.load(checkpoint_path)
        model = resume_checkpoint.model
        optimizer = resume_checkpoint.optimizer

        resume_optim = optimizer.optimizer
        defaults = resume_optim.param_groups[0]
        defaults.pop('params', None)
        optimizer.optimizer = resume_optim.__class__(model.parameters(),
                                                     **defaults)

        start_epoch = resume_checkpoint.epoch
        max_ans_acc = resume_checkpoint.max_ans_acc
    else:
        start_epoch = 1
        max_ans_acc = 0
        optimizer = NoamOpt(
            512, 1, 2000,
            optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))

    # define loss
    loss = nn.CrossEntropyLoss(weight=torch.tensor([1., 4.]))
    if use_cuda:
        loss = loss.cuda()

    # training
    for i in range(start_epoch, args.epochs):
        # test the model
        if args.resume:
            test_ans_acc = max_ans_acc
        else:
            test_ans_acc = test(
                DataLoader(dataset, batch_size=1, shuffle=True, drop_last=False),
                model, i)
        print('For EPOCH {}, total f1: {:.2f}'.format(i, test_ans_acc))

        # calculate loss
        j = 0
        los1 = []
        for _, data in enumerate(dataloader):
            j += 1
            x = data['que'].long()
            y = data['ans'].long()
            res = data['res'].long()
            if use_cuda:
                x, y, res = x.cuda(), y.cuda(), res.cuda()
            res_pred = model(x, y)

            los1.append(loss(res_pred, res).unsqueeze(0))

            # Gradient accumulation: the loader yields single samples, so step
            # the optimizer once every args.batch_size samples.
            if j % args.batch_size == 0:
                los1 = torch.cat(los1)
                los = los1.sum()
                model.zero_grad()
                los.backward()
                optimizer.step()
                los1 = []
                print('EPOCH: {}, {} / {}====> LOSS: {:.2f}'.format(
                    i, j // args.batch_size,
                    len(dataloader) // args.batch_size,
                    los.item() / args.batch_size))

        # save checkpoint
        if test_ans_acc > max_ans_acc:
            max_ans_acc = test_ans_acc
            th_checkpoint = Checkpoint(model=model,
                                       optimizer=optimizer,
                                       epoch=i,
                                       max_ans_acc=max_ans_acc)
            th_checkpoint.save_according_name("./experiment/cnn_net", 'best')
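The NoamOpt constructed above (model size 512, factor 1, 2000 warmup steps) is the learning-rate wrapper popularized by the Annotated Transformer. The repo's version isn't shown; a sketch consistent with the calls made here (.step() and the .optimizer attribute used in the resume path):

class NoamOpt:
    """Noam schedule: lr = factor * size^-0.5 * min(step^-0.5, step * warmup^-1.5)."""

    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self.model_size = model_size
        self.factor = factor
        self.warmup = warmup
        self._step = 0

    def rate(self, step=None):
        step = self._step if step is None else step
        return (self.factor * self.model_size ** (-0.5) *
                min(step ** (-0.5), step * self.warmup ** (-1.5)))

    def step(self):
        # Set the scheduled learning rate, then defer to the inner optimizer.
        self._step += 1
        for group in self.optimizer.param_groups:
            group['lr'] = self.rate()
        self.optimizer.step()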
Example #7
#
from tqdm import tqdm
import logging
import sys
#
from torch import optim
import torch.nn as nn
import torch
#
from networks import unet_vgg
import configs
import utils_ai
import datasets

if __name__ == '__main__':
    args = configs.get_args()
    logging.basicConfig(level=logging.INFO)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')
    ###
    net = unet_vgg.UNet_VGG11()
    device_ids = list(map(int, args.device_ids.split(',')))
    # Parallelize the network across the requested GPUs.
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    # Loss is a project-local criterion defined outside this snippet.
    loss = Loss()

    if args.load:
        net.load_state_dict(torch.load(args.load, map_location=device))
        logging.info(f'Model loaded from {args.load}')

    net.to(device=device)
    # faster convolutions, but more memory
    # cudnn.benchmark = True
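The commented-out flag at the end enables cuDNN's autotuner, which benchmarks convolution algorithms on the first batches and then reuses the fastest one; it generally pays off when input sizes are fixed:

import torch.backends.cudnn as cudnn
cudnn.benchmark = True  # autotune conv algorithms; best with fixed input shapes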