Example #1
import torch.nn.functional as F  # needed by cross_entropy2d below

from opts import get_args  # Get all the input arguments
from test import Test
from train import Train
from confusion_matrix import ConfusionMatrix
from dataset.segmented_data import SegmentedData
import transforms

print('\033[0;0f\033[0J')  # ANSI escape: move the cursor home and clear the screen
# Color Palette
CP_R = '\033[31m'  # red
CP_G = '\033[32m'  # green
CP_B = '\033[34m'  # blue
CP_Y = '\033[33m'  # yellow
CP_C = '\033[0m'   # reset

args = get_args()  # Holds all the input arguments


def cross_entropy2d(x, target, weight=None, size_average=True):
    # Taken from https://github.com/meetshah1995/pytorch-semseg/blob/master/ptsemseg/loss.py
    n, c, h, w = x.size()
    log_p = F.log_softmax(x, dim=1)
    log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
    log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]
    log_p = log_p.view(-1, c)

    mask = target >= 0
    target = target[mask]
    loss = F.nll_loss(log_p,
                      target,
                      ignore_index=250,
                      weight=weight,
                      size_average=False)
    if size_average:
        loss /= mask.data.sum()
    return loss
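
Every example on this page imports get_args from a project-local opts module whose contents the excerpts never show. As a point of reference, here is a minimal argparse-based sketch of what such a module might look like; every flag name in it is a hypothetical placeholder, not taken from any of the projects above.

# Hypothetical sketch of an argparse-backed opts.get_args(); the real
# projects' flag names are not shown anywhere on this page.
import argparse


def get_args():
    parser = argparse.ArgumentParser(description='Training options')
    parser.add_argument('--batch-size', type=int, default=8)  # placeholder flag
    parser.add_argument('--lr', type=float, default=1e-3)     # placeholder flag
    parser.add_argument('--restore', type=str, default='')    # placeholder flag
    return parser.parse_args()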
Example #2
import sys
sys.path.insert(0, '..')  # make the project's src/ package importable

import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from tensorboardX import SummaryWriter

import opts
from src.models.hg import HourglassNet
from src.models.dis import HourglassDisNet
from src.datasets.lsp_mpii import LSPMPII_Dataset
from src.utils.misc import getValue, getLogDir, makeCkptDir
from src.utils.evals import accuracy

# Parse arguments
FLAGS = opts.get_args()

epoch_init = FLAGS.epoch_init
iter_init = FLAGS.iter_init
global_step = FLAGS.step_init  # for summary writer (will start on 1)

# Prepare dataset
dataset = LSPMPII_Dataset(
    FLAGS.dataDir, split='train',
    inp_res=FLAGS.inputRes, out_res=FLAGS.outputRes,
    scale_factor=FLAGS.scale, rot_factor=FLAGS.rotate, sigma=FLAGS.hmSigma)
dataloader = torch.utils.data.DataLoader(
    dataset, batch_size=FLAGS.batchSize, shuffle=True,
    num_workers=FLAGS.nThreads, pin_memory=True)

print('Number of training samples: %d' % len(dataset))
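
The excerpt stops once the data pipeline is ready. Below is a rough sketch of how the pieces prepared above (dataloader, global_step, SummaryWriter) are typically consumed; the HourglassNet constructor arguments, the batch layout, and the loss are assumptions, not taken from the source.

# Hedged sketch of a training loop over the pieces above; the model's
# constructor arguments, the batch layout, and the loss are assumptions.
writer = SummaryWriter()                    # logs to the default ./runs dir
model = HourglassNet()                      # real constructor args not shown
optimizer = torch.optim.RMSprop(model.parameters(), lr=2.5e-4)

for epoch in range(epoch_init, 100):        # epoch budget assumed
    for inputs, heatmaps in dataloader:     # batch layout assumed
        outputs = model(inputs)
        loss = nn.functional.mse_loss(outputs, heatmaps)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        global_step += 1
        writer.add_scalar('train/loss', loss.item(), global_step)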
Example #3
from pprint import pprint

import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger

# Custom Files
import opts
import data
import utils
import models


def experiment(args):
    utils.seed_everything(seed=args.seed)
    qa_model = models.QAModel(hparams=args)
    train_dl, valid_dl, test_dl = data.prepare_data(args)

    wandb_logger = WandbLogger(project='qa',
                               entity='nlp',
                               tags=args.tags,
                               offline=args.fast_dev_run)
    wandb_logger.watch(qa_model, log='all')
    args.logger = wandb_logger

    trainer = pl.Trainer.from_argparse_args(args)
    trainer.fit(qa_model, train_dataloader=train_dl, val_dataloaders=valid_dl)
    trainer.test(qa_model, test_dataloaders=test_dl)


if __name__ == '__main__':
    args = opts.get_args()
    pprint(vars(args))
    experiment(args)
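
pl.Trainer.from_argparse_args(args) only works when the parser has first been extended with the Trainer's own flags via pl.Trainer.add_argparse_args, the argparse integration of PyTorch Lightning before 2.0 (consistent with the older train_dataloader/test_dataloaders keywords used above). Here is a sketch of an opts.get_args compatible with this example; the --seed and --tags flags are assumptions.

# Hedged sketch of an opts.get_args() for this example; --seed and --tags
# are assumed, everything else comes from Lightning's own argparse hook.
import argparse
import pytorch_lightning as pl


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--tags', nargs='*', default=[])
    parser = pl.Trainer.add_argparse_args(parser)  # adds --max_epochs, --gpus, ...
    return parser.parse_args()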
Example #4
import os
import torch

from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from tqdm import tqdm

from dataset import SingleClassDataset
from get_coco_images import extract_class_annotations
from loss import CenterNetLoss
from models.dla import get_pose_net
from opts import get_args
from utils.lrupdater import LrUpdater
from utils.result import ResultTracker

args = get_args()

# Network model preparation: DLA-34 backbone with heatmap ('hm') and
# box size ('wh') output heads
model = get_pose_net(34, heads={'hm': 1, 'wh': 2}, head_conv=-1).cuda()
if args.restore:  # optionally resume from a checkpoint
    print(f"Loading model from {args.restore}")
    state_dict = torch.load(args.restore)
    model.load_state_dict(state_dict)
model.eval()
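# Note (an assumption, not from this snippet): checkpoints saved from an
# nn.DataParallel-wrapped model prefix every key with 'module.'; such a file
# can be loaded into the bare model by stripping that prefix first, e.g.
#   state_dict = {k.replace('module.', '', 1): v for k, v in state_dict.items()}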

# Datasets
annot_train, annot_val = extract_class_annotations(args.input, args.class_name)
train_dataset = SingleClassDataset(annot_train,
                                   args.input,
                                   args.input_size,
                                   args.input_size,
Example #5
# Issue fixes
from models.utils.fixes import init_keras
init_keras()

# Imports
from datasets.coco.dataset import CocoDataset
from models.model_factory import get_model
from opts import get_args
from utils.logger import log

# Parameters/options
opts = get_args()
coco_supercategories = opts.supercategories.split(',')
num_classes = len(coco_supercategories)

# Get dataset
log("Loading dataset", title=True)
dataset = CocoDataset(opts.train_ds_name, opts.train_ds_path,
                      coco_supercategories, opts)
val_dataset = CocoDataset(opts.val_ds_name, opts.val_ds_path,
                          coco_supercategories, opts)

# Model
log("Creating model", title=True)
model, train, loss = get_model(opts)
if opts.summary:
    model.summary()

# Training
log("Training", title=True)
if opts.epochs > 0: