コード例 #1
0
def eval(model, split, seq_length, n_cpu, disp):
    """Evaluate `model` on the GolfDB validation split and return the PCE.

    Relies on module-level globals — TODO confirm they are defined by the
    calling script: `step` (temporal subsampling stride), `device`
    (torch device) and `version_name` (tag for the results file).

    Args:
        model: event detector; called on (1, T, C, H, W) image batches.
        split: validation split index used in the pickle filename.
        seq_length: nominal frames per forward pass (before subsampling).
        n_cpu: number of DataLoader worker processes.
        disp: if True, print the per-sample correctness array.

    Returns:
        PCE: mean of the per-sample correctness values from `correct_preds`.
    """
    dataset = GolfDB(data_file='data/val_split_{}.pkl'.format(split),
                     vid_dir='data/videos_160/',
                     seq_length=seq_length,
                     transform=transforms.Compose([ToTensor(),
                                                   Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
                     train=False)

    # batch_size=1: one full video per sample; it is re-windowed manually below.
    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=n_cpu,
                             drop_last=False)


    correct = []
    delta = []
    tolerance = []
    predictions = []
    predictions_original = []
    ground_truth = []
    for i, sample in enumerate(data_loader):
        # Keep every `step`-th frame along the time axis.
        images, labels = sample['images'][:,::step,:,:,:], sample['labels']

        # full samples do not fit into GPU memory so evaluate sample in 'seq_length' batches
        seq_length_new = int(np.ceil(seq_length/step))
        batch = 0
        # Consecutive windows advance by seq_length_new-1 frames, i.e. they
        # overlap by one frame; the duplicate row is dropped via probs[:-1]
        # when the next window's probabilities are appended.
        while batch * (seq_length_new-1) < images.shape[1]-1:
            if (batch + 1) * (seq_length_new-1) + 1 > images.shape[1]:
                image_batch = images[:, batch * (seq_length_new-1):, :, :, :]
            else:
                image_batch = images[:, batch * (seq_length_new-1):(batch + 1) * (seq_length_new-1)+1, :, :, :]
            logits = model(image_batch.to(device))
            if batch == 0:
                probs = F.softmax(logits.data, dim=1).cpu().numpy()
            else:
                probs = np.append(probs[:-1], F.softmax(logits.data, dim=1).cpu().numpy(), 0)
            batch += 1
        gt, pp, deltas, tol, c, original = correct_preds(probs, labels.squeeze())
        if disp:
            print(i, c)

        correct.append(c)
        tolerance.append(tol)
        delta.append(deltas)
        predictions.append(pp)
        ground_truth.append(gt)
        predictions_original.append(original)

    # Persist raw per-sample results for offline analysis.
    np.savez_compressed('results/' + version_name + '.npz', array1=np.array(delta), array2=np.array(predictions),
                                                    array3=np.array(tolerance), array4=np.array(ground_truth),
                                                    array5=np.array(predictions_original))

    # Mean correctness, then RMS and std of tolerance-normalised deltas.
    print(np.round(np.mean(np.array(correct),0),3))
    print(np.round(np.sqrt(np.mean(np.square(np.array(delta)/np.array(tolerance)[:,np.newaxis]),0)),3))
    print(np.round(np.std(np.array(delta)/np.array(tolerance)[:,np.newaxis],0),3))
    PCE = np.mean(correct)
    return PCE
コード例 #2
0
 def get_test_dataset(modeltype, input_size, debug=DEBUG_MODE):
     """Build the test-split PoseDataset for the given model type.

     :param modeltype: resnet / mobilenet
     :param input_size: input resolution handed to get_transform
     :return: type: PoseDataset
     Example:
     DataFactory.get_test_dataset("resnet", 224)
     In debug mode a small (500-row) dataset is returned instead.
     """
     # Debug runs read the truncated CSV; normal runs read the full one.
     csv_name = "test_joints-500.csv" if debug else "test_joints.csv"
     pipeline = transforms.Compose([
         get_transform(modeltype, input_size),
         Expansion(),
         ToTensor(),
     ])
     return PoseDataset(csv_file=os.path.join(ROOT_DIR, csv_name),
                        transform=pipeline)
コード例 #3
0
def eval(model, split, seq_length, n_cpu, disp):
    """Evaluate `model` on the StsqDB validation split and return the PCE.

    Relies on the module-level global `device` — TODO confirm it is set by
    the calling script.

    Fix: the original called ``F.softmax(...).to(device).numpy()`` —
    ``Tensor.numpy()`` raises TypeError on a CUDA tensor, so the
    probabilities must be moved to the CPU first (``.cpu().numpy()``).

    Args:
        model: event detector; called on (1, T, C, H, W) image batches.
        split: validation split index used in the pickle filename.
        seq_length: frames per forward pass through the model.
        n_cpu: number of DataLoader worker processes.
        disp: if True, print the per-sample correctness value.

    Returns:
        PCE: mean of the per-sample correctness values from `correct_preds`.
    """
    dataset = StsqDB(data_file='val_split_{}.pkl'.format(split),
                     vid_dir='data/videos_40/',
                     seq_length=seq_length,
                     transform=transforms.Compose([
                         ToTensor(),
                         Normalize([0.485, 0.456, 0.406],
                                   [0.229, 0.224, 0.225])
                     ]),
                     train=False)

    # batch_size=1: one full video per sample; re-batched manually below.
    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=n_cpu,
                             drop_last=False)

    correct = []

    for i, sample in enumerate(data_loader):
        images, labels = sample['images'], sample['labels']

        # full samples do not fit into GPU memory so evaluate sample in 'seq_length' batches
        batch = 0
        while batch * seq_length < images.shape[1]:
            if (batch + 1) * seq_length > images.shape[1]:
                image_batch = images[:, batch * seq_length:, :, :, :]
            else:
                image_batch = images[:, batch * seq_length:(batch + 1) *
                                     seq_length, :, :, :]
            logits = model(image_batch.to(device))

            # .cpu() before .numpy(): numpy conversion requires a CPU tensor.
            if batch == 0:
                probs = F.softmax(logits.data, dim=1).cpu().numpy()
            else:
                probs = np.append(
                    probs,
                    F.softmax(logits.data, dim=1).cpu().numpy(), 0)
            batch += 1
        _, _, _, _, c = correct_preds(probs, labels.squeeze())
        if disp:
            print(i, c)
        correct.append(c)
    PCE = np.mean(correct)
    return PCE
コード例 #4
0
ファイル: train.py プロジェクト: zebrajack/crownconv360depth
def main():
    """Train IcoSweepNet on an omnidirectional stereo dataset.

    Parses CLI args, builds train/val loaders, optionally resumes from a
    checkpoint, trains for `args.epochs` epochs, and writes TensorBoard
    logs plus a per-epoch checkpoint under ./checkpoints/<arch>_<time>.

    Side effects: rebinds module-level globals `args` and `writer`; adds a
    FileHandler to the module logger; copies ./models into the log folder.
    """
    global args, writer

    args = parser.parse_args()
    logger.info('Arguments:')
    logger.info(json.dumps(vars(args), indent=1))
    # Dataset
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # device = torch.device('cpu')
    if device.type != 'cpu':
        cudnn.benchmark = True
    logger.info(f"device:{device}")

    logger.info('=> setting data loader')
    # Icosahedron resolution is reduced from the input level to the depth level.
    reduction = args.level - args.depth_level
    # Photometric/geometric augmentation only when training on real imagery.
    fisheye_transform = transforms.Compose([ColorJitter(), RandomShift()]) if args.aug_real else None

    # Dataset
    root_train_dataset = OmniStereoDataset(args.root_dir, args.train_list, fisheye_transform, fov=args.fov)
    ocam_dict = root_train_dataset.ocams
    # camera poses world <- T cam
    pose_dict = root_train_dataset.pose_dict

    transform = transforms.Compose([ToTensor(), Normalize()])
    trainset = FisheyeToIcoDataset(root_train_dataset, ocam_dict, pose_dict, level=args.level, reduction=reduction,
                                   transform=transform)
    logger.info(f'trainset:{len(trainset)} samples were found.')
    train_loader = DataLoader(trainset, args.batch_size, shuffle=True, num_workers=args.workers)

    # Validation set: no augmentation, no shuffling.
    root_val_dataset = OmniStereoDataset(args.root_dir, args.val_list, fov=args.fov)
    val_dataset = FisheyeToIcoDataset(root_val_dataset, root_val_dataset.ocams, root_val_dataset.pose_dict,
                                      level=args.level, reduction=reduction, transform=transform)
    logger.info(f'{len(val_dataset)} samples were found.')
    val_loader = DataLoader(val_dataset, args.batch_size, shuffle=False, num_workers=args.workers)

    logger.info('=> setting model')
    model = IcoSweepNet(args.root_dir, args.ndisp, args.level, fov=args.fov)
    total_params = 0
    for param in model.parameters():
        total_params += np.prod(param.shape)
    logger.info(f"Total model parameters: {total_params:,}.")
    model = model.to(device)

    # Converter maps between inverse-depth values and disparity indices.
    invd_0 = model.inv_depths[0]
    invd_max = model.inv_depths[-1]
    converter = InvDepthConverter(model.ndisp, invd_0, invd_max)

    # setup solver scheduler
    logger.info('=> setting optimizer')
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    logger.info('=> setting scheduler')
    # LR drops by 10x after two thirds of the scheduled epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(2 * args.epochs / 3), gamma=0.1)

    start_epoch = 0
    # Load pretrained model
    if args.pretrained:
        checkpoint = torch.load(args.pretrained)
        # Sanity-check that the checkpoint matches the configured model;
        # mismatches are logged as errors but do not abort the run.
        param_check = {
            'ndisp': model.ndisp,
            'min_depth': model.min_depth,
            'level': model.level,
        }
        for key, val in param_check.items():
            if not checkpoint[key] == val:
                logger.error(f'Error! Key:{key} is not the same as the checkpoints')

        logger.info("=> using pre-trained weights")
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        logger.info("=> Resume training from epoch {}".format(start_epoch))
    #
    # Wrap after (optional) state loading so keys are not 'module.'-prefixed.
    model = nn.DataParallel(model)

    # setup writer
    timestamp = datetime.now().strftime("%m%d-%H%M")
    log_folder = join('checkpoints', f'{args.arch}_{timestamp}')
    logger.info(f'=> create log folder: {log_folder}')
    os.makedirs(log_folder, exist_ok=True)
    with open(join(log_folder, 'args.json'), 'w') as f:
        json.dump(vars(args), f, indent=1)
    writer = SummaryWriter(log_dir=log_folder)
    writer.add_text('args', json.dumps(vars(args), indent=1).replace('\n', '  \n'))
    logger.info('=> copy models folder to log folder')
    # Snapshot the model source alongside the checkpoints for reproducibility.
    shutil.copytree('./models', join(log_folder, 'models'))
    # setup logger file handler
    handler = FileHandler(join(log_folder, 'train.log'))
    handler.setLevel(INFO)
    logger.addHandler(handler)

    logger.info('Start training')

    for epoch in range(start_epoch, args.epochs):
        # ----------------------------
        # training
        mode = 'train'
        ave_loss = run(epoch, mode, model, train_loader, converter, device, optimizer)
        # ----------------------------
        # evaluation
        # NOTE: ave_loss is overwritten here, so the saved value below is the
        # validation loss, not the training loss.
        mode = 'val'
        ave_loss = run(epoch, mode, model, val_loader, converter, device, optimizer=None, show_metrics=True)

        scheduler.step()
        # save
        save_data = {
            'epoch': epoch + 1,
            'state_dict': model.module.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict(),
            'ave_loss': ave_loss,
            'ndisp': model.module.ndisp,
            'min_depth': model.module.min_depth,
            'level': model.module.level,
        }
        torch.save(save_data, join(log_folder, f'checkpoints_{epoch}.pth'))

    writer.close()
    logger.info('Finish training.')
コード例 #5
0
                          )
    #print('model.py, class EventDetector()')

    freeze_layers(k, model)
    #print('utils.py, func freeze_laters()')
    model.train()
    model.to(device)
    print('Loading Data')


    # TODO: vid_dirのpathをかえる。stsqの動画を切り出したimage全部が含まれているdirにする
    if use_no_element == False:
        dataset = StsqDB(data_file='data/no_ele/seq_length_{}/train_split_{}.pkl'.format(args.seq_length, args.split),
                        vid_dir='/home/akiho/projects/golfdb/data/videos_40/',
                        seq_length=int(seq_length),
                        transform=transforms.Compose([ToTensor(),
                                                    Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
                        train=True)
    else:
        dataset = StsqDB(data_file='data/seq_length_{}/train_split_{}.pkl'.format(args.seq_length, args.split),
                    vid_dir='/home/akiho/projects/golfdb/data/videos_40/',
                    seq_length=int(seq_length),
                    transform=transforms.Compose([ToTensor(),
                                                Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
                    train=True)
    print('dataloader.py, class StsqDB()')
    # dataset.__len__() : 1050


    data_loader = DataLoader(dataset,
                             batch_size=int(bs),
コード例 #6
0
ファイル: main.py プロジェクト: radsn23/recyclicat
                    default=1,
                    metavar='S',
                    help='random seed (default: 1)')
parser.add_argument(
    '--log-interval',
    type=int,
    default=10,
    metavar='N',
    help='how many batches to wait before logging training status')
args = parser.parse_args()

torch.manual_seed(args.seed)

from dataloader import TrashData, ToTensor

data_used = TrashData(args.data, args.csv, transform=ToTensor())

# Index-based 80/20 split: the first 80% of samples become the training
# list, the remainder the validation list.
split_idx = int(0.8 * len(data_used))
train_ = [data_used[idx] for idx in range(split_idx)]
val_ = [data_used[idx] for idx in range(split_idx, len(data_used))]
コード例 #7
0
    model = EventDetector(pretrain=True,
                          width_mult=1.,
                          lstm_layers=1,
                          lstm_hidden=256,
                          bidirectional=True,
                          dropout=False)
    freeze_layers(k, model)
    model.train()
    model.cuda()

    dataset = GolfDB(data_file='data/train_split_{}.pkl'.format(split),
                     vid_dir='data/videos_160/',
                     seq_length=seq_length,
                     transform=transforms.Compose([
                         ToTensor(),
                         Normalize([0.485, 0.456, 0.406],
                                   [0.229, 0.224, 0.225])
                     ]),
                     train=True,
                     noise_level=noise_level)

    data_loader = DataLoader(dataset,
                             batch_size=bs,
                             shuffle=True,
                             num_workers=n_cpu,
                             drop_last=True)

    # the 8 golf swing events are classes 0 through 7, no-event is class 8
    # the ratio of events to no-events is approximately 1:35 so weight classes accordingly:
    weights = torch.FloatTensor(
コード例 #8
0
def main():
    """Evaluate a pretrained IcoSweepNet checkpoint on the validation list.

    Loads the checkpoint given by ``--pretrained``, rebuilds the model with
    the checkpoint's `ndisp`/`level`, runs one evaluation pass, and logs to
    a file next to the checkpoint.

    Fix: the final log message said 'Finish training.' although this script
    only evaluates; it now says 'Finish evaluation.'.

    Side effects: rebinds module-level globals `args`, `writer`, `logger`;
    adds a FileHandler to the module logger.
    """
    global args, writer, logger
    args = parser.parse_args()
    logger.info('Arguments:')
    logger.info(json.dumps(vars(args), indent=1))
    # Dataset
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # device = torch.device('cpu')
    if device.type != 'cpu':
        cudnn.benchmark = True
    logger.info(f"device:{device}")

    # Load pretrained model
    logger.info("=> loading checkpoints")
    checkpoint = torch.load(args.pretrained)
    # Model hyper-parameters come from the checkpoint, not the CLI.
    ndisp = checkpoint['ndisp']
    # min_depth = checkpoint['min_depth']
    level = checkpoint['level']
    logger.info(f'ndisp:{ndisp}')
    logger.info(f'level:{level}')

    logger.info('=> setting model')
    model = IcoSweepNet(args.root_dir, ndisp, level, fov=args.fov)
    model = model.to(device)
    invd_0 = model.inv_depths[0]
    invd_max = model.inv_depths[-1]
    converter = InvDepthConverter(model.ndisp, invd_0, invd_max)
    # Load weights before DataParallel wrapping so keys match.
    model.load_state_dict(checkpoint['state_dict'])
    epoch = checkpoint['epoch'] - 1
    logger.info("=> Pretrained model epoch {}".format(epoch))

    logger.info('=> setting data loader')
    transform = transforms.Compose([ToTensor(), Normalize()])
    reduction = model.level - model.idepth_level
    root_val_dataset = OmniStereoDataset(args.root_dir,
                                         args.val_list,
                                         fov=args.fov)
    val_dataset = FisheyeToIcoDataset(root_val_dataset,
                                      root_val_dataset.ocams,
                                      root_val_dataset.pose_dict,
                                      level=model.level,
                                      reduction=reduction,
                                      transform=transform)
    logger.info(f'{len(val_dataset)} samples were found.')
    val_loader = DataLoader(val_dataset,
                            args.batch_size,
                            shuffle=False,
                            num_workers=args.workers)

    model = nn.DataParallel(model)

    # setup writer
    # Results and logs are written next to the checkpoint file.
    log_folder = os.path.dirname(args.pretrained)
    logger.info(f'=> save in checkpoint folder: {log_folder}')
    base = os.path.splitext(os.path.basename(args.pretrained))[0]
    root_dirname = args.root_dir.strip('/').split('/')[-1]
    # setup logger file handler
    handler = FileHandler(join(log_folder, f'eval_{root_dirname}_{base}.log'))
    handler.setLevel(INFO)
    logger.addHandler(handler)

    logger.info('Start evaluation')

    # ----------------------------
    # evaluation
    mode = 'eval'
    # Depth maps are dumped only when --save_depth is given.
    depth_folder = join(
        log_folder,
        f'depth_{root_dirname}_{base}') if args.save_depth else None
    ave_loss = run(epoch,
                   mode,
                   model,
                   val_loader,
                   converter,
                   device,
                   optimizer=None,
                   show_metrics=True,
                   depth_folder=depth_folder)
    logger.info('Finish evaluation.')
コード例 #9
0
ファイル: eval.py プロジェクト: akiho-ally/StSqDB
def eval(model, split, seq_length, bs, n_cpu, disp):
    """Evaluate `model` on StsqDB and return overall / per-element PCE.

    Relies on module-level globals `use_no_element` and `device` — TODO
    confirm they are set by the calling script.

    Fix: the else-branch formatted its data-file path from the global
    ``args`` instead of the function parameters the if-branch uses; it now
    uses ``seq_length``/``split`` consistently.

    Args:
        model: network mapping (B, T, C, H, W) images to per-frame logits.
        split: dataset split index used in the pickle filename.
        seq_length: frames per sample; labels are flattened to bs*seq_length.
        bs: batch size.
        n_cpu: number of DataLoader worker processes.
        disp: if True, print the per-batch correctness value.

    Returns:
        Tuple of (PCE, element_PCE, all_element_correct, all_element_sum,
        confusion_matrix).
    """
    if not use_no_element:
        dataset = StsqDB(
            data_file='data/no_ele/seq_length_{}/val_split_{}.pkl'.format(
                int(seq_length), split),
            vid_dir='data/videos_40/',
            seq_length=int(seq_length),
            transform=transforms.Compose([
                ToTensor(),
                Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
            train=False)
    else:
        # NOTE(review): this branch loads the *train* split with train=True
        # inside an eval() helper — looks like a copy-paste; confirm intended.
        dataset = StsqDB(
            data_file='data/seq_length_{}/train_split_{}.pkl'.format(
                int(seq_length), split),
            vid_dir='data/videos_40/',
            seq_length=int(seq_length),
            transform=transforms.Compose([
                ToTensor(),
                Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
            train=True)

    data_loader = DataLoader(dataset,
                             batch_size=int(bs),
                             shuffle=False,
                             num_workers=n_cpu,
                             drop_last=True)

    correct = []

    # 12 element classes without the no-element class, 13 with it.
    if not use_no_element:
        element_correct = [[] for i in range(12)]
        element_sum = [[] for i in range(12)]
        confusion_matrix = np.zeros([12, 12], int)
    else:
        element_correct = [[] for i in range(13)]
        element_sum = [[] for i in range(13)]
        confusion_matrix = np.zeros([13, 13], int)

    for i, sample in enumerate(data_loader):
        images, labels = sample['images'].to(device), sample['labels'].to(
            device)
        logits = model(images)
        probs = F.softmax(logits.data, dim=1)  # per-frame class probabilities
        labels = labels.view(int(bs) * int(seq_length))
        _, c, element_c, element_s, conf = correct_preds(
            probs, labels.squeeze())
        if disp:
            print(i, c)
        correct.append(c)
        for j in range(len(element_c)):
            element_correct[j].append(element_c[j])
        for j in range(len(element_s)):
            element_sum[j].append(element_s[j])
        confusion_matrix = confusion_matrix + conf

    PCE = np.mean(correct)
    all_element_correct = np.sum(element_correct, axis=1)
    all_element_sum = np.sum(element_sum, axis=1)
    # NOTE(review): divides by zero if an element never occurs — confirm
    # upstream guarantees every element appears in the split.
    element_PCE = all_element_correct / all_element_sum
    return PCE, element_PCE, all_element_correct, all_element_sum, confusion_matrix
コード例 #10
0
def main(args=None):
    """Train a RetinaNet detector on a COCO or CSV dataset.

    Parses its own CLI arguments (--dataset, --coco_path, --csv_train,
    --csv_classes, --csv_val, --depth, --epochs), trains for the requested
    number of epochs, evaluates after each epoch, saves a per-epoch
    checkpoint and a final 'model_final.pt'.

    Fix: the final save used ``'model_final.pt'.format(epoch_num)`` — a
    no-op .format on a placeholder-less string — now a plain literal.

    Args:
        args: optional argv list forwarded to argparse (None = sys.argv).
    """

    parser = argparse.ArgumentParser(
        description='Simple training script for training a RetinaNet network.')

    parser.add_argument('--dataset',
                        help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument(
        '--csv_train',
        help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes',
                        help='Path to file containing class list (see readme)')
    parser.add_argument(
        '--csv_val',
        help=
        'Path to file containing validation annotations (optional, see readme)'
    )

    parser.add_argument(
        '--depth',
        help='Resnet depth, must be one of 18, 34, 50, 101, 152',
        type=int,
        default=50)
    parser.add_argument('--epochs',
                        help='Number of epochs',
                        type=int,
                        default=100)

    parser = parser.parse_args(args)

    # Create the data loaders
    if parser.dataset == 'coco':

        if parser.coco_path is None:
            raise ValueError('Must provide --coco_path when training on COCO,')

        dataset_train = CocoDataset(parser.coco_path,
                                    set_name='train2017',
                                    transform=transforms.Compose([
                                        ToTensor(),
                                        Normalizer(),
                                        Augmenter(),
                                        Resizer(1344, 1696)
                                    ]))
        dataset_val = CocoDataset(parser.coco_path,
                                  set_name='val2017',
                                  transform=transforms.Compose([
                                      ToTensor(),
                                      Normalizer(),
                                      Resizer(1344, 1696)
                                  ]))

    elif parser.dataset == 'csv':

        if parser.csv_train is None:
            raise ValueError('Must provide --csv_train when training on COCO,')

        if parser.csv_classes is None:
            raise ValueError(
                'Must provide --csv_classes when training on COCO,')

        dataset_train = CSVDataset(train_file=parser.csv_train,
                                   class_list=parser.csv_classes,
                                   transform=transforms.Compose([
                                       ToTensor(),
                                       Normalizer(),
                                       Augmenter(),
                                       Resizer(1344, 1696)
                                   ]))

        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            dataset_val = CSVDataset(train_file=parser.csv_val,
                                     class_list=parser.csv_classes,
                                     transform=transforms.Compose([
                                         ToTensor(),
                                         Normalizer(),
                                         Resizer(1344, 1696)
                                     ]))

    else:
        raise ValueError(
            'Dataset type not understood (must be csv or coco), exiting.')

    # Batches group images of similar aspect ratio to minimise padding.
    sampler = AspectRatioBasedSampler(dataset_train,
                                      batch_size=4,
                                      drop_last=False)
    dataloader_train = DataLoader(dataset_train,
                                  num_workers=3,
                                  collate_fn=collater,
                                  batch_sampler=sampler)

    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val,
                                              batch_size=4,
                                              drop_last=False)
        dataloader_val = DataLoader(dataset_val,
                                    num_workers=3,
                                    collate_fn=collater,
                                    batch_sampler=sampler_val)

    # Create the model
    if parser.depth == 18:
        retinanet_base = model.resnet18(
            num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 34:
        retinanet_base = model.resnet34(
            num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 50:
        retinanet_base = model.resnet50(
            num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 101:
        retinanet_base = model.resnet101(
            num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 152:
        retinanet_base = model.resnet152(
            num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError(
            'Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True

    if use_gpu:
        retinanet_base = retinanet_base.cuda()
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        retinanet_base = torch.nn.DataParallel(retinanet_base).cuda()
        # Keep a handle to the unwrapped module for attribute access below.
        retinanet = retinanet_base.module
    else:
        retinanet = retinanet_base

    retinanet.training = True

    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)

    # Drop the LR when the epoch loss plateaus for 3 epochs.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     patience=3,
                                                     verbose=True)

    # Running loss window for the per-iteration log line.
    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    retinanet.freeze_bn()

    print('Num training images: {}'.format(len(dataset_train)))

    for epoch_num in range(parser.epochs):

        retinanet.train()
        retinanet.freeze_bn()

        epoch_loss = []

        for iter_num, data in enumerate(dataloader_train):
            try:
                optimizer.zero_grad()
                retinanet.train()
                classification_loss, regression_loss = retinanet(
                    [data['img'].cuda().float(), data['annot'].cuda()])

                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()

                loss = classification_loss + regression_loss

                # Nothing to backpropagate for an all-zero loss.
                if bool(loss == 0):
                    continue

                loss.backward()

                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)

                optimizer.step()

                loss_hist.append(float(loss))

                epoch_loss.append(float(loss))

                print(
                    'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'
                    .format(epoch_num, iter_num, float(classification_loss),
                            float(regression_loss), np.mean(loss_hist)))
                #coco_eval.evaluate_coco(dataset_val, retinanet,num_images=5)
                del classification_loss
                del regression_loss
            except Exception as e:
                # Deliberate best-effort: a failing batch (e.g. CUDA OOM) is
                # logged and skipped rather than aborting the whole run.
                print(e)
                torch.cuda.empty_cache()
                continue

        if parser.dataset == 'coco':

            print('Evaluating dataset')

            coco_eval.evaluate_coco(dataset_val, retinanet)

        elif parser.dataset == 'csv' and parser.csv_val is not None:

            print('Evaluating dataset')

            # NOTE(review): mAP is computed but unused — confirm whether it
            # should be logged or drive checkpoint selection.
            mAP = csv_eval.evaluate(dataset_val, retinanet, num_images=59)

        scheduler.step(np.mean(epoch_loss))

        torch.save(retinanet,
                   '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num))

    retinanet.eval()

    torch.save(retinanet, 'model_final.pt')
def eval(model, split, seq_length, n_cpu, disp, steps=1):
    """Evaluate `model` on the GolfDB validation split and return the PCE.

    When `steps` > 1 the video is temporally downsampled by repeating kept
    frames over the erased positions; when the module-level flag
    `_video_interpolation` is set, pre-downsampled videos are loaded from
    disk instead and `steps` is reset to 1.

    Fix: removed two debug leftovers — a `print("------in function")` on
    entry and an `if i == 176: print('hello')` marker in the loop.

    Args:
        model: event detector; called on (1, T, C, H, W) CUDA batches.
        split: validation split index used in the pickle filename.
        seq_length: frames per forward pass through the model.
        n_cpu: number of DataLoader worker processes.
        disp: if True, print the per-sample correctness value.
        steps: temporal downsampling factor (default 1 = no downsampling).

    Returns:
        PCE: mean of the per-sample correctness values from `correct_preds`.
    """
    if not _video_interpolation:
        dataset = GolfDB(data_file='data/val_split_{}.pkl'.format(split),
                         vid_dir='data/videos_160/',
                         seq_length=seq_length,
                         transform=transforms.Compose([
                             ToTensor(),
                             Normalize([0.485, 0.456, 0.406],
                                       [0.229, 0.224, 0.225])
                         ]),
                         train=False)

    else:
        dataset = GolfDB(data_file='data/val_split_{}.pkl'.format(split),
                         vid_dir='data/videos_160/'.replace(
                             'videos_160',
                             'videos_downsampled_' + str(steps) + 'x'),
                         seq_length=seq_length,
                         transform=transforms.Compose([
                             ToTensor(),
                             Normalize([0.485, 0.456, 0.406],
                                       [0.229, 0.224, 0.225])
                         ]),
                         train=False)
        # Frames are already downsampled on disk: disable in-memory repeat.
        steps = 1

    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=n_cpu,
                             drop_last=False)

    # Indices of frames kept vs. overwritten by the temporal downsampling.
    idx_keep = np.arange(0, seq_length, steps)
    idx_erase = np.delete(np.arange(0, seq_length), idx_keep)
    correct = []
    for i, sample in enumerate(data_loader):
        images, labels = sample['images'], sample['labels']

        if steps > 1:
            #### Downsample video (temporally)
            images[:,
                   idx_erase, :, :, :] = images[:,
                                                np.repeat(idx_keep, steps -
                                                          1)[:len(idx_erase
                                                                  )], :, :, :]

        # full samples do not fit into GPU memory so evaluate sample in 'seq_length' batches
        batch = 0
        while batch * seq_length < images.shape[1]:
            if (batch + 1) * seq_length > images.shape[1]:
                image_batch = images[:, batch * seq_length:, :, :, :]
            else:
                image_batch = images[:, batch * seq_length:(batch + 1) *
                                     seq_length, :, :, :]
            logits = model(image_batch.cuda())
            if batch == 0:
                probs = F.softmax(logits.data, dim=1).cpu().numpy()
            else:
                probs = np.append(probs,
                                  F.softmax(logits.data, dim=1).cpu().numpy(),
                                  0)
            batch += 1
        _, _, _, _, c = correct_preds(probs, labels.squeeze())
        if disp:
            print(i, c)
        correct.append(c)
    PCE = np.mean(correct)

    return PCE
コード例 #12
0
ファイル: GCGAN.py プロジェクト: bam6o0/GCGAN
    def __init__(self, args, device):
        """Build the GCGAN trainer: datasets, loaders, G/D networks, losses.

        Args:
            args: parsed CLI namespace (epochs, batch_size, save_dir,
                result_dir, dataset, log_dir, Glayer, Dlayer, Ghidden,
                z_dim, num_worker, lrG, lrD).
            device: torch device the networks and losses are moved to.
        """
        # parameters
        self.epoch = args.epochs
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.model_name = "GCGAN"
        self.Glayer_num = args.Glayer
        self.Dlayer_num = args.Dlayer
        self.Ghidden_num = args.Ghidden
        self.z_dim = args.z_dim
        self.num_worker = args.num_worker
        self.device = device

        dataset = MovieLensDataset(dataset=self.dataset,
                                   transform=transforms.Compose([ToTensor()]))
        # Random 80/20 train/test split.
        dataset_num = len(dataset)
        train_num = int(dataset_num * 0.8)
        train_dataset, test_dataset = random_split(
            dataset, [train_num, dataset_num - train_num])

        # load dataset
        self.train_loader = DataLoader(train_dataset,
                                       batch_size=self.batch_size,
                                       shuffle=True,
                                       num_workers=self.num_worker)
        # NOTE(review): batch_size=len(dataset) loads the whole test split
        # (actually the full dataset size) in one batch — confirm intended.
        self.test_loader = DataLoader(test_dataset,
                                      batch_size=len(dataset),
                                      shuffle=True,
                                      num_workers=self.num_worker)

        # Feature dimensions probed from the first sample; 'u_perchase' is
        # the key as spelled by the dataset (sic — presumably "purchase").
        data = dataset[0]['u_perchase']
        self.u_feature_num = dataset[0]['u_feature'].shape[0]
        self.v_feature_num = dataset[0]['v_feature'].shape[1]

        # networks init

        self.G = generator(input_dim=self.z_dim,
                           feature_num=self.u_feature_num,
                           output_dim=data.shape[0],
                           layer_num=self.Glayer_num,
                           hidden_num=self.Ghidden_num).to(self.device)

        self.D = discriminator(in_features_u=self.u_feature_num,
                               num_item=data.shape[0],
                               in_features_v=self.v_feature_num,
                               rating=5,
                               output_dim=1,
                               layer_num=self.Dlayer_num).to(self.device)

        self.G_optimizer = optim.SGD(self.G.parameters(), lr=args.lrG)
        self.D_optimizer = optim.SGD(self.D.parameters(), lr=args.lrD)
        self.BCE_loss = nn.BCELoss().to(self.device)
        self.MSE_loss = nn.MSELoss().to(self.device)

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')
コード例 #13
0
def main() -> None:
    """Train and evaluate HexRUNet_C on MNIST unfolded onto an icosahedron.

    Parses command-line arguments, builds the train/test loaders, optionally
    resumes from a checkpoint, then runs one training pass and one evaluation
    pass per epoch, logging loss/accuracy to TensorBoard and saving a
    checkpoint after every training epoch.
    """
    args = parser.parse_args()
    print('Arguments:')
    print(json.dumps(vars(args), indent=1))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # device = torch.device('cpu')
    if device.type != 'cpu':
        # Let cuDNN autotune conv algorithms for the fixed input size.
        cudnn.benchmark = True
    print("device:", device)

    print('=> setting data loader')
    # NOTE(review): presumably the (height, width) of the equirectangular
    # projection used by UnfoldIcoDataset — confirm against its definition.
    erp_shape = (60, 120)
    transform = transforms.Compose(
        [ToTensor(), Normalize((0.0645, ), (0.2116, ))])
    trainset = UnfoldIcoDataset(datasets.MNIST(root='raw_data',
                                               train=True,
                                               download=True),
                                erp_shape,
                                args.level,
                                rotate=args.train_rot,
                                transform=transform)
    testset = UnfoldIcoDataset(datasets.MNIST(root='raw_data',
                                              train=False,
                                              download=True),
                               erp_shape,
                               args.level,
                               rotate=args.test_rot,
                               transform=transform)
    train_loader = DataLoader(trainset,
                              args.batch_size,
                              shuffle=True,
                              num_workers=args.workers)
    test_loader = DataLoader(testset,
                             args.batch_size,
                             shuffle=False,
                             num_workers=args.workers)

    print('=> setting model')
    start_epoch = 0
    # HexRUNet_C(1): 1 presumably means a single (grayscale) input channel
    # — TODO confirm against the HexRUNet_C definition.
    model = HexRUNet_C(1)
    total_params = 0
    for param in model.parameters():
        total_params += np.prod(param.shape)
    print(f"Total model parameters: {total_params:,}.")
    model = model.to(device)

    # Loss function
    print('=> setting loss function')
    criterion = nn.CrossEntropyLoss()

    # setup solver scheduler
    print('=> setting optimizer')
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    print('=> setting scheduler')
    # Decay the learning rate by 10x every 100 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=100,
                                                gamma=0.1)

    if args.pretrained:
        # Resume model/optimizer/scheduler state from a saved checkpoint
        # (the dict layout matches `save_data` written below).
        checkpoint = torch.load(args.pretrained)
        print("=> using pre-trained weights")
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        print("=> Resume training from epoch {}".format(start_epoch))

    # One log folder per run, e.g. checkpoints/<arch>_0412-1530, holding the
    # serialized args, TensorBoard events, and per-epoch checkpoints.
    timestamp = datetime.now().strftime("%m%d-%H%M")
    log_folder = join('checkpoints', f'{args.arch}_{timestamp}')
    print(f'=> create log folder: {log_folder}')
    os.makedirs(log_folder, exist_ok=True)
    with open(join(log_folder, 'args.json'), 'w') as f:
        json.dump(vars(args), f, indent=1)
    writer = SummaryWriter(log_dir=log_folder)
    writer.add_text('args', json.dumps(vars(args), indent=1))

    # Training
    for epoch in range(start_epoch, args.epochs):

        # --------------------------
        # training
        # --------------------------
        model.train()
        losses = []
        pbar = tqdm(train_loader)
        total = 0
        correct = 0
        mode = 'train'
        for idx, batch in enumerate(pbar):
            # to cuda
            for key in batch.keys():
                batch[key] = batch[key].to(device)
            # NOTE(review): the model receives the whole batch dict, not just
            # an image tensor — the dataset apparently supplies every input
            # the network needs under its own keys.
            outputs = model(batch)
            labels = batch['label']

            # Loss function
            loss = criterion(outputs, labels)
            losses.append(loss.item())

            # update parameters
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # accuracy: predicted class = argmax over the class dimension
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

            # update progress bar
            display = OrderedDict(mode=f'{mode}',
                                  epoch=f"{epoch:>2}",
                                  loss=f"{losses[-1]:.4f}")
            pbar.set_postfix(display)

            # tensorboard log (per-iteration loss, thinned by log_interval)
            if idx % args.log_interval == 0:
                niter = epoch * len(train_loader) + idx
                writer.add_scalar(f'{mode}/loss', loss.item(), niter)

        # End of one epoch: step the LR schedule and log epoch averages.
        scheduler.step()
        ave_loss = sum(losses) / len(losses)
        ave_acc = 100 * correct / total
        writer.add_scalar(f'{mode}/loss_ave', ave_loss, epoch)
        writer.add_scalar(f'{mode}/acc_ave', ave_acc, epoch)

        print(
            f"Epoch:{epoch}, Train Loss average:{ave_loss:.4f}, Accuracy average:{ave_acc:.2f}"
        )

        # 'epoch' + 1 so a resumed run restarts at the *next* epoch.
        save_data = {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict(),
            'ave_loss': ave_loss,
        }
        torch.save(save_data, join(log_folder, f'checkpoints_{epoch}.pth'))

        # --------------------------
        # evaluation
        # --------------------------
        model.eval()
        losses = []
        pbar = tqdm(test_loader)
        total = 0
        correct = 0
        mode = 'test'
        for idx, batch in enumerate(pbar):
            with torch.no_grad():
                # to cuda
                for key in batch.keys():
                    batch[key] = batch[key].to(device)
                outputs = model(batch)
                labels = batch['label']

                # Loss function
                loss = criterion(outputs, labels)
                losses.append(loss.item())

            # accuracy
            # NOTE(review): computed outside the no_grad block; harmless since
            # `outputs` was already produced without gradient tracking.
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

            # update progress bar
            display = OrderedDict(mode=f'{mode}',
                                  epoch=f"{epoch:>2}",
                                  loss=f"{losses[-1]:.4f}")
            pbar.set_postfix(display)

            # tensorboard log
            if idx % args.log_interval == 0:
                niter = epoch * len(test_loader) + idx
                writer.add_scalar(f'{mode}/loss', loss.item(), niter)

        # End of one epoch
        ave_loss = sum(losses) / len(losses)
        ave_acc = 100 * correct / total
        writer.add_scalar(f'{mode}/loss_ave', ave_loss, epoch)
        writer.add_scalar(f'{mode}/acc_ave', ave_acc, epoch)

        print(
            f"Epoch:{epoch}, Test Loss average:{ave_loss:.4f}, Accuracy average:{ave_acc:.2f}"
        )

    writer.close()
    print("Finish")
コード例 #14
0
if __name__ == '__main__':
    args = get_parser().parse_args()

    # Output directories for model snapshots and TensorBoard logs.
    for out_dir in ('./checkpoints', './tensorboards'):
        os.makedirs(out_dir, exist_ok=True)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Fix RNG state (CPU and CUDA) for reproducible runs.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True

    dataset = SentenceMatchingDataset(args.dataset,
                                      args.max_len,
                                      transform=ToTensor())
    embeddings_matrix = dataset.get_embedding()

    # 80/10/10 train/valid/test split; the final share absorbs any
    # rounding remainder so the lengths always sum to len(dataset).
    total = len(dataset)
    train_length = int(total * 0.8)
    valid_length = int(total * 0.1)
    lengths = [train_length, valid_length, total - train_length - valid_length]

    train_dataset, valid_dataset, test_dataset = random_split(dataset, lengths)
コード例 #15
0
ファイル: main.py プロジェクト: ataata107/dystab
# Resume from the newest checkpoint in each directory, if one exists.
# `toInt3` (defined elsewhere in this file) keys the sort so the listing
# is ordered newest-first — presumably it parses an epoch number out of
# the checkpoint filename (TODO confirm).
if not os.path.exists(checkpoint_inpainter_path):
    os.makedirs(checkpoint_inpainter_path)
else:
    a1 = sorted(os.listdir(checkpoint_inpainter_path), key=toInt3, reverse=True)
    if a1:  # truthiness instead of len(...) > 0
        pretrained_inpainter = a1[0]

if not os.path.exists(checkpoint_dynamic_path):
    os.makedirs(checkpoint_dynamic_path)
else:
    a1 = sorted(os.listdir(checkpoint_dynamic_path), key=toInt3, reverse=True)
    if a1:
        pretrained_dynamic = a1[0]

# Optical-flow dataset (a Rescale transform was deliberately disabled upstream).
flow_dataset = FlowDataset(transform=transforms.Compose([ToTensor()]))

dataloader = DataLoader(flow_dataset, batch_size=batch_size, shuffle=True, num_workers=workers)

# Dynamic-region network (DeepLabv3-based) with custom weight init.
net_dynamic = createDeepLabv3().to(device)
net_dynamic.apply(weights_init)

net_impainter = Inpainter(ngpu=1).to(device)
# net_impainter.apply(weights_init)  # NOTE(review): init intentionally skipped? confirm

# Separate Adam optimizers for the dynamic net (D) and the inpainter (I).
optimizerD = optim.Adam(net_dynamic.parameters(), lr=lr, betas=(beta1, beta2))
optimizerI = optim.Adam(net_impainter.parameters(), lr=lr, betas=(beta1, beta2))

if pretrained_dynamic is not None:  # identity check, not != None
    net_dynamic, optimizerD, start_epoch = load_ckp(checkpoint_dynamic_path + pretrained_dynamic, net_dynamic, optimizerD)
    print("Loaded pretrained: " + pretrained_dynamic)