Example #1
def evaluate(encoder_model_path, decoder_model_path):
    transformation = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    VAL_LOADER_UNIQUE = {
        'root': config.VAL_IMG_PATH,
        'json': config.VAL_JSON_PATH,
        'batch_size': 16,
        'shuffle': False,
        'transform': transformation,
        'num_workers': 4
    }
    val_loader_unique = get_loader_unique(**VAL_LOADER_UNIQUE)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    encoder = CNNfull(2048)
    encoder.to(device)
    decoder = DecoderRNN(2048, 300, 512, vocab_size)
    decoder.to(device)
    # map_location keeps the load working on CPU-only machines as well
    encoder.load_state_dict(torch.load(encoder_model_path, map_location=device))
    decoder.load_state_dict(torch.load(decoder_model_path, map_location=device))
    encoder.eval()
    decoder.eval()

    bleu2, bleu3, bleu4, meteor = val_epoch(val_loader_unique,
                                            device,
                                            encoder,
                                            decoder,
                                            vocab,
                                            0,
                                            enc_scheduler=None,
                                            dec_scheduler=None,
                                            view_val_captions=False)
    print(f'BLEU-2 score: {bleu2}')
    print(f'BLEU-3 score: {bleu3}')
    print(f'BLEU-4 score: {bleu4}')
    print(f'METEOR score: {meteor}')
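
A minimal invocation sketch for evaluate(); the checkpoint paths below are hypothetical placeholders, not files from the original project:

if __name__ == '__main__':
    # hypothetical checkpoint locations -- point these at the trained weights
    evaluate(encoder_model_path='checkpoints/encoder_best.pth',
             decoder_model_path='checkpoints/decoder_best.pth')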
Example #2
def objective(dropout):
    global count
    count += 1
    print(
        '-------------------------------------------------------------------')
    print('%d' % count)
    print(dropout)

    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    torch.backends.cudnn.benchmark = True

    root_path = '/data/eaxfjord/deep_LFP'
    matrix = 'shuffled_LR.npy'
    batch_size = 20

    training_dataset = LFP_data(root_path=root_path,
                                data_file=matrix,
                                split='train')
    training_loader = DataLoader(training_dataset,
                                 shuffle=True,
                                 batch_size=batch_size,
                                 pin_memory=True,
                                 num_workers=1)

    validation_set = LFP_data(root_path=root_path,
                              data_file=matrix,
                              split='valid')
    validation_loader = DataLoader(validation_set,
                                   shuffle=False,
                                   batch_size=batch_size,
                                   pin_memory=True,
                                   num_workers=1)

    input_shape = (2, 2110)  # this is a hack to figure out shape of fc layer
    net = conv1d_nn.Net(input_shape=input_shape, dropout=dropout)
    net.cuda()

    criterion = nn.CrossEntropyLoss()
    criterion.cuda()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'min',
                                                     patience=100,
                                                     threshold=1e-3)
    num_epochs = 200

    for epoch in range(1, num_epochs + 1):
        train_loss, train_acc = train_epoch(training_loader, net, criterion,
                                            optimizer)
        validation_loss, validation_accuracy = val_epoch(
            validation_loader, net, criterion)

        scheduler.step(validation_loss)
        print(
            'EPOCH:: %i, train_loss: %.3f, test_loss: %.3f, train_acc: %.3f, test_acc: %.3f'
            % (epoch, train_loss, validation_loss, train_acc,
               validation_accuracy))

    return {
        'loss': -validation_accuracy,
        'status': STATUS_OK,
        'val_loss': validation_loss
    }
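
The objective() above follows the hyperopt convention of returning a dict with 'loss' and STATUS_OK; a sketch of how such an objective is typically driven, with a uniform dropout search space chosen here purely for illustration:

from hyperopt import Trials, fmin, hp, tpe

trials = Trials()
best = fmin(fn=objective,
            space=hp.uniform('dropout', 0.0, 0.5),
            algo=tpe.suggest,
            max_evals=25,
            trials=trials)
print('Best dropout found:', best)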
Example #3
def main():
    best_prec1 = 0
    test = True
    log = True
    save_best = True
    sample_length = 0.5
    num_samples = int(np.round(
        5000 / sample_length))  # aim for roughly 5000 seconds in total from each subject
    batch_size = 100
    num_epochs = 200
    dropout = 0.4
    task = 'subject_prediction'

    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    torch.backends.cudnn.benchmark = True

    root_path = pathlib.Path.cwd()
    matrix = root_path.joinpath(
        'data', f'cleaned_{sample_length}sec_{num_samples}.npy')

    training_dataset = LFPData(data_file=matrix,
                               split='train',
                               standardize=True)
    training_loader = DataLoader(training_dataset,
                                 shuffle=True,
                                 batch_size=batch_size,
                                 pin_memory=True,
                                 num_workers=1)

    validation_set = LFPData(data_file=matrix, split='valid', standardize=True)
    validation_loader = DataLoader(validation_set,
                                   shuffle=False,
                                   batch_size=batch_size,
                                   pin_memory=True,
                                   num_workers=1)
    # input_shape = (2, np.int(422 * sample_length))  # this is a hack to figure out shape of fc layer
    # net = conv1d_nn.Net(input_shape=input_shape, dropout=dropout)
    net = conv1d_nn.FCN(in_channels=2, num_classes=9)
    net.apply(init_weights)
    net.cuda()

    criterion = nn.CrossEntropyLoss()
    criterion.cuda()
    # optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    optimizer = optim.Adam(net.parameters(),
                           lr=1e-3,
                           betas=(0.9, 0.999),
                           eps=1e-8)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'min',
                                                     patience=5,
                                                     threshold=1e-2)
    stop_criterion = EarlyStopping()

    title = f'FCN2_cleaned_{sample_length}sec_{num_samples}'
    if log:
        log_dir = root_path.joinpath('logs', title)
        log_dir.mkdir(parents=True, exist_ok=True)
        training_log = log_dir.joinpath('log')
        if not training_log.exists():
            open(str(training_log), 'w').close()
        result_writer = ResultsWriter(str(training_log), overwrite=True)

    mlog = MeterLogger(server='localhost',
                       port=8097,
                       nclass=9,
                       title=title,
                       env=title)

    for epoch in range(1, num_epochs + 1):
        mlog.timer.reset()

        train_epoch(training_loader, net, criterion, optimizer, mlog)

        if log:
            result_writer.update(title, {'Train': mlog.peek_meter()})
        mlog.print_meter(mode="Train", iepoch=epoch)
        mlog.reset_meter(mode="Train", iepoch=epoch)
        validation_loss = val_epoch(validation_loader, net, criterion, mlog)

        prec1 = mlog.meter['accuracy'].value()[0]

        if save_best:
            # remember best prec@1 and save checkpoint
            is_best = prec1 > best_prec1
            if is_best:
                best_prec1 = max(prec1, best_prec1)
                save_checkpoint(
                    root_path.joinpath('checkpoints', title), {
                        'epoch': epoch + 1,
                        'state_dict': net.state_dict(),
                        'best_prec1': best_prec1,
                        'optimizer': optimizer.state_dict(),
                    }, is_best)

        if log:
            result_writer.update(title, {'Validation': mlog.peek_meter()})
        mlog.print_meter(mode="Test", iepoch=epoch)
        mlog.reset_meter(mode="Test", iepoch=epoch)

        stop_criterion.eval_loss(validation_loss)
        if stop_criterion.get_nsteps() >= 30:
            print('Early stopping')
            break
        print(optimizer.param_groups[0]['lr'])
        scheduler.step(validation_loss)

    print('Training finished', best_prec1)

    if test:
        test_set = LFPData(data_file=matrix, split='test', standardize=True)
        test_loader = DataLoader(test_set,
                                 shuffle=False,
                                 batch_size=batch_size,
                                 pin_memory=True,
                                 num_workers=1)
        test_loss, test_acc = test_epoch(test_loader, net, criterion, mlog)

        if log:
            result_writer.update(title, {'Test': {
                'loss': test_loss,
                'accuracy': test_acc
            }})

        print(test_loss, test_acc)

    if log:
        # save pngs of the visdom plots into the log path
        plot_visdom(mlog, log_dir)
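
The EarlyStopping helper used above (and in Example #6) is not part of the excerpt; a minimal sketch consistent with the eval_loss()/get_nsteps() calls, offered as an assumption about its interface rather than the project's actual class:

class EarlyStopping:
    """Counts consecutive epochs without an improvement in validation loss."""

    def __init__(self, min_delta=0.0):
        self.best_loss = float('inf')
        self.min_delta = min_delta
        self.nsteps = 0

    def eval_loss(self, loss):
        # reset the counter on improvement, otherwise keep counting
        if loss < self.best_loss - self.min_delta:
            self.best_loss = loss
            self.nsteps = 0
        else:
            self.nsteps += 1

    def get_nsteps(self):
        return self.nsteps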
Example #4
def main():
    global args, best_prec1, best_epoch
    args = parse_args()

    if args.root_path != '':
        args.result_path = os.path.join(args.root_path, args.result_path)
        args.checkpoint_path = os.path.join(args.root_path,
                                            args.checkpoint_path)
        os.makedirs(args.result_path, exist_ok=True)
        os.makedirs(args.checkpoint_path, exist_ok=True)
        if args.resume_path:
            args.resume_path = os.path.join(args.checkpoint_path,
                                            args.resume_path)

    args.arch = '{}{}'.format(args.model, args.model_depth)

    torch.manual_seed(args.manual_seed)

    args.use_cuda = args.use_cuda and torch.cuda.is_available()

    device = torch.device("cuda" if args.use_cuda else "cpu")

    # create model
    model, parameters = get_model_param(args)
    print(model)
    model = model.to(device)

    with open(os.path.join(args.result_path, 'args.json'), 'w') as args_file:
        json.dump(vars(args), args_file)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.SGD(parameters,
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, threshold=0.001, patience=args.lr_patience)

    lr_mult = []
    for param_group in optimizer.param_groups:
        lr_mult.append(param_group['lr'])

    # optionally resume from a checkpoint
    if args.resume_path:
        if os.path.isfile(args.resume_path):
            print("=> loading checkpoint '{}'...".format(args.resume_path))
            checkpoint = torch.load(args.resume_path, map_location=device)
            args.begin_epoch = checkpoint['epoch'] + 1
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume_path))

    if args.train:
        train_dataset = Driver(root=args.data_path, train=True, test=False)
        train_loader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers)
        train_logger = Logger(os.path.join(args.result_path, 'train.log'),
                              ['epoch', 'loss', 'top1', 'top3', 'lr'])
        train_batch_logger = Logger(
            os.path.join(args.result_path, 'train_batch.log'),
            ['epoch', 'batch', 'iter', 'loss', 'top1', 'top3', 'lr'])

    if args.val:
        val_dataset = Driver(root=args.data_path, train=False, test=True)
        val_loader = DataLoader(dataset=val_dataset,
                                batch_size=args.test_batch_size,
                                shuffle=False,
                                num_workers=args.num_workers)
        val_logger = Logger(os.path.join(args.result_path, 'val.log'),
                            ['epoch', 'loss', 'top1', 'top3'])

    print('=> Start running...')
    vis = Visualizer(env=args.env)
    for epoch in range(args.begin_epoch, args.epochs + 1):
        if args.train:
            adjust_learning_rate(optimizer, epoch, lr_mult, args)
            train_epoch(epoch, train_loader, model, criterion, optimizer, args,
                        device, train_logger, train_batch_logger, vis)
            print('\n')

        if args.val:
            val_loss, val_prec1 = val_epoch(epoch, val_loader, model,
                                            criterion, args, device,
                                            val_logger, vis)
            print('\n')
            # remember best prec@1 and save checkpoint
            if val_prec1 > best_prec1:
                best_prec1 = val_prec1
                best_epoch = epoch
                print('=> Saving current best model...\n')
                save_file_path = os.path.join(
                    args.result_path,
                    'save_best_{}_{}.pth'.format(args.arch, epoch))
                checkpoint = {
                    'arch': args.arch,
                    'epoch': best_epoch,
                    'best_prec1': best_prec1,
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                }
                torch.save(checkpoint, save_file_path)

        # if args.train and args.val:
        # scheduler.step(val_loss)

    if args.test:
        test_dataset = Driver(root=args.data_path, train=False, test=True)
        test_loader = DataLoader(dataset=test_dataset,
                                 batch_size=args.test_batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers)
        # # if you only test the model, you need to set the "best_epoch" manually
        # best_epoch = 10  # set manually
        saved_model_path = os.path.join(
            args.result_path,
            'save_best_{}_{}.pth'.format(args.arch, best_epoch))
        print("Using '{}' for test...".format(saved_model_path))
        checkpoint = torch.load(saved_model_path, map_location=device)
        model.load_state_dict(checkpoint['model'])
        test.test(test_loader, model, args, device)
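
adjust_learning_rate() is defined elsewhere in that project; a plausible step-decay sketch that reuses the per-group base rates collected in lr_mult (args.lr_step is a hypothetical attribute, not one confirmed by the excerpt):

def adjust_learning_rate(optimizer, epoch, lr_mult, args):
    # scale every param group's base lr by 0.1 every args.lr_step epochs
    decay = 0.1 ** ((epoch - 1) // args.lr_step)
    for base_lr, param_group in zip(lr_mult, optimizer.param_groups):
        param_group['lr'] = base_lr * decay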
Example #5
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']

        opt.begin_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if opt.is_train:
            optimizer.load_state_dict(checkpoint['optimizer'])

    for epoch in range(opt.begin_epoch, opt.n_epochs + 1):

        if opt.is_train:
            train_epoch(epoch, train_logger, train_batch_logger, trainloader,
                        optimizer, criterion, model, opt, device)

        if opt.is_val:
            validation_loss = val_epoch(epoch, val_logger, valloader,
                                        optimizer, criterion, model, device)

        if opt.is_train and opt.is_val:
            scheduler.step(validation_loss)

        if opt.is_test:

            testset = FrankaDataset(opt,
                                    'test',
                                    data_length,
                                    transform_input=transform_i,
                                    transform_target=transform_t)
            testloader = DataLoader(testset,
                                    batch_size=opt.batch_size,
                                    shuffle=False,
                                    num_workers=opt.n_threads,
Example #6
def main():
    best_prec1 = 0
    test = True
    transfer_learning = True
    batch_size = 50
    sample_length = 3
    num_epochs = 50
    task = 'state_prediction'

    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    torch.backends.cudnn.benchmark = True

    root_path = pathlib.Path.home().joinpath('deep_LFP')
    matrix = root_path.joinpath(
        'data', f'cleaned_state_matrix_{sample_length}sec.npz')

    training_dataset = LFPDataStates(data_file=matrix,
                                     split='train',
                                     standardize=True)
    training_loader = DataLoader(training_dataset,
                                 shuffle=True,
                                 batch_size=batch_size,
                                 pin_memory=True,
                                 num_workers=1)

    validation_set = LFPDataStates(data_file=matrix,
                                   split='valid',
                                   standardize=True)
    validation_loader = DataLoader(validation_set,
                                   shuffle=False,
                                   batch_size=batch_size,
                                   pin_memory=True,
                                   num_workers=1)
    input_shape = (2, int(422 * sample_length))  # hack to figure out the shape of the fc layer
    net = conv1d_nn.Net(input_shape=input_shape, dropout=0)
    if transfer_learning:
        num_samples_prev_model = int(np.round(5000 / sample_length))
        previous_model = f'cleaned_{sample_length}sec_{num_samples_prev_model}_model_best.pth.tar'
        previous_model_weights = os.path.join(root_path, 'checkpoints',
                                              previous_model)
        net.load_state_dict(torch.load(previous_model_weights)['state_dict'])
        for param in net.parameters():
            param.requires_grad = False

        num_features = net.fc1.in_features
        net.fc1 = nn.Linear(num_features, 2040)
    net.fc2 = nn.Linear(2040, 4)
    net.cuda()

    criterion = nn.CrossEntropyLoss()
    criterion.cuda()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'min',
                                                     patience=100,
                                                     threshold=1e-3)
    stop_criterion = EarlyStopping()

    title = f'cleaned_state_prediction_{sample_length}sec_transfer_learning'
    training_log_path = '/data/eaxfjord/deep_LFP/logs/' + title + '/log'
    log_dir = os.path.dirname(training_log_path)
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    if not os.path.exists(training_log_path):
        open(training_log_path, 'w').close()

    result_writer = ResultsWriter(training_log_path, overwrite=True)

    mlog = MeterLogger(server='localhost',
                       port=8097,
                       nclass=4,
                       title=title,
                       env=f'state_prediction_{sample_length}sec')

    for epoch in range(1, num_epochs + 1):
        mlog.timer.reset()

        train_epoch(training_loader, net, criterion, optimizer, mlog)

        result_writer.update(task, {'Train': mlog.peek_meter()})
        mlog.print_meter(mode="Train", iepoch=epoch)
        mlog.reset_meter(mode="Train", iepoch=epoch)
        validation_loss = val_epoch(validation_loader, net, criterion, mlog)

        prec1 = mlog.meter['accuracy'].value()[0]

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        if is_best:
            best_prec1 = max(prec1, best_prec1)
            save_checkpoint(
                os.path.join(root_path, 'checkpoints', title), {
                    'epoch': epoch + 1,
                    'state_dict': net.state_dict(),
                    'best_prec1': best_prec1,
                    'optimizer': optimizer.state_dict(),
                }, is_best)

        result_writer.update(task, {'Validation': mlog.peek_meter()})

        mlog.print_meter(mode="Test", iepoch=epoch)
        mlog.reset_meter(mode="Test", iepoch=epoch)

        # stop_criterion.eval_loss(validation_loss)
        # if stop_criterion.get_nsteps() >= 30:
        #     print('Early stopping')
        #     break
        print(optimizer.param_groups[0]['lr'])
        scheduler.step(validation_loss)

    print('Training finished', best_prec1)

    if test:
        test_set = LFPDataStates(data_file=matrix,
                                 split='test',
                                 standardize=True)
        test_loader = DataLoader(test_set,
                                 shuffle=False,
                                 batch_size=batch_size,
                                 pin_memory=True,
                                 num_workers=1)
        test_loss, test_acc = test_epoch(test_loader, net, criterion, mlog)

        result_writer.update(task, {'Test': test_acc})
        print(test_loss, test_acc)

    # when finished get data from visdom plot, and save to png
    plot_visdom(mlog, log_dir)
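
Because the backbone parameters are frozen before the new fc layers are attached, an optional refinement (not in the original) is to hand the optimizer only the parameters that still require gradients:

trainable_params = [p for p in net.parameters() if p.requires_grad]
optimizer = optim.SGD(trainable_params, lr=0.001, momentum=0.9)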
Example #7
def main():
    opts = parse_opts()
    opts.cuda = torch.cuda.is_available()
    opts.device = torch.device(opts.device if opts.cuda else 'cpu')
    print(opts)

    opts.arch = '{}-{}'.format(opts.model_name, opts.model_depth)
    torch.manual_seed(opts.manual_seed)

    print("Preprocessing train data ...")
    train_data = globals()['{}'.format(opts.dataset)](data_type='train',
                                                      opts=opts,
                                                      split=opts.split)
    print("Length of train data = ", len(train_data))

    print("Preprocessing validation data ...")
    val_data = globals()['{}'.format(opts.dataset)](data_type='val',
                                                    opts=opts,
                                                    split=opts.split)
    print("Length of validation data = ", len(val_data))

    if opts.modality == 'RGB':
        opts.input_channels = 3
    elif opts.modality == 'Flow':
        opts.input_channels = 2

    print("Preparing dataloaders ...")
    train_dataloader = DataLoader(train_data,
                                  batch_size=opts.batch_size,
                                  shuffle=True,
                                  num_workers=opts.n_workers,
                                  pin_memory=True,
                                  drop_last=True)
    val_dataloader = DataLoader(val_data,
                                batch_size=opts.batch_size,
                                shuffle=True,
                                num_workers=opts.n_workers,
                                pin_memory=True,
                                drop_last=True)
    print("Length of train dataloader = ", len(train_dataloader))
    print("Length of validation dataloader = ", len(val_dataloader))

    # define the model
    print("Loading model... ", opts.model_name, opts.model_depth)
    model, parameters = generate_model(opts)
    if not opts.pretrained_path:
        opts.learning_rate = 0.1
        opts.weight_decay = 5e-4
    criterion = nn.CrossEntropyLoss().cuda()

    if opts.resume_md_path:
        print('loading checkpoint {}'.format(opts.resume_path1))
        checkpoint = torch.load(opts.resume_path1)

        assert opts.arch == checkpoint['arch']
        opts.begin_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])

    log_path = os.path.join(opts.result_path, opts.dataset)
    if not os.path.exists(log_path):
        os.makedirs(log_path)

    if opts.log == 1:
        if opts.resume_md_path:
            begin_epoch = int(opts.resume_path1.split('/')[-1].split('_')[1])
            train_logger = Logger(os.path.join(
                log_path,
                '{}_train_clip{}model{}{}.log'.format(opts.dataset,
                                                      opts.sample_duration,
                                                      opts.model_name,
                                                      opts.model_depth)),
                                  ['epoch', 'loss', 'acc', 'lr'],
                                  overlay=False)
            val_logger = Logger(os.path.join(
                log_path,
                '{}_val_clip{}model{}{}.log'.format(opts.dataset,
                                                    opts.sample_duration,
                                                    opts.model_name,
                                                    opts.model_depth)),
                                ['epoch', 'loss', 'acc'],
                                overlay=False)
        else:
            begin_epoch = 0
            train_logger = Logger(os.path.join(
                log_path,
                '{}_train_clip{}model{}{}.log'.format(opts.dataset,
                                                      opts.sample_duration,
                                                      opts.model_name,
                                                      opts.model_depth)),
                                  ['epoch', 'loss', 'acc', 'lr'],
                                  overlay=True)
            val_logger = Logger(os.path.join(
                log_path,
                '{}_val_clip{}model{}{}.log'.format(opts.dataset,
                                                    opts.sample_duration,
                                                    opts.model_name,
                                                    opts.model_depth)),
                                ['epoch', 'loss', 'acc'],
                                overlay=True)

    print("Initializing the optimizer ...")

    dampening = 0 if opts.nesterov else opts.dampening

    print(
        "lr = {} \t momentum = {} \t dampening = {} \t weight_decay = {}, \t nesterov = {}"
        .format(opts.learning_rate, opts.momentum, dampening,
                opts.weight_decay, opts.nesterov))
    print("LR patience = ", opts.lr_patience)

    optimizer = optim.SGD(parameters,
                          lr=opts.learning_rate,
                          momentum=opts.momentum,
                          dampening=dampening,
                          weight_decay=opts.weight_decay,
                          nesterov=opts.nesterov)

    if opts.resume_md_path:
        # restore the optimizer state from the same checkpoint as the model
        optimizer.load_state_dict(checkpoint['optimizer'])

    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               patience=opts.lr_patience)

    for epoch in range(begin_epoch, opts.n_epochs + 1):
        print('Start training epoch {}'.format(epoch))
        train_epoch(epoch, train_dataloader, model, criterion, optimizer, opts,
                    train_logger)

        print('Start validating epoch {}'.format(epoch))
        with torch.no_grad():
            val_epoch(epoch, val_dataloader, model, criterion, optimizer, opts,
                      val_logger, scheduler)