Example #1
import os
import random
import shutil
import sys

from semantic_segmentation.arguments import get_args

random.seed(1)

args = get_args('args for datasplit (camvid)', mode='data')

current_path = os.path.abspath('')  # resolves to the current working directory

perc = args.percentage

if not 0 < perc < 100:
    print('Invalid value for -p: percentage must be strictly between 0 and 100')
    sys.exit(1)

NUM = int(367 * perc / 100)  # the CamVid train split contains 367 images

image_dir = os.path.join(current_path, 'data/CamVid/train')
mask_dir = os.path.join(current_path, 'data/CamVid/trainannot')

new_dir = os.path.join(current_path, 'data/CamVid/trainsmall' + str(perc))
new_mask_dir = os.path.join(current_path, 'data/CamVid/trainsmallannot' + str(perc))

os.makedirs(new_dir, exist_ok=True)
os.makedirs(new_mask_dir, exist_ok=True)
ls = os.listdir(image_dir)
sample = random.sample(ls, NUM)
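# The listing is truncated after sampling; a minimal sketch of the likely
# continuation, copying each sampled image and its matching annotation into
# the new directories (this assumes mask filenames mirror the image
# filenames, which holds for the CamVid layout used above):
for name in sample:
    shutil.copy(os.path.join(image_dir, name), os.path.join(new_dir, name))
    shutil.copy(os.path.join(mask_dir, name), os.path.join(new_mask_dir, name))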
Example #2
import comet_ml
import torch
from torch.utils.data import DataLoader

import semantic_segmentation.models as models
from semantic_segmentation.arguments import get_args
from semantic_segmentation.datasets.dataset import get_dataset
from semantic_segmentation.experiments.trainer import unfreeze, train_stagewise
from semantic_segmentation.utils.utils import *

args = get_args(desc='Stagewise training using less data of UNet based on ResNet encoder')
hyper_params = {
    "dataset": args.dataset,
    "model": args.model,
    "seed": args.seed,
    "num_classes": 12,
    "batch_size": 8,
    "num_epochs": args.epoch,
    "learning_rate": 1e-4,
    "stage": 0,
    "perc": str(args.percentage)
}

torch.manual_seed(hyper_params['seed'])
if args.gpu != 'cpu':
    torch.cuda.set_device(args.gpu)
    torch.cuda.manual_seed(hyper_params['seed'])

train_dataset, valid_dataset, num_classes = get_dataset(args.dataset, args.percentage)
hyper_params['num_classes'] = num_classes

trainloader = DataLoader(train_dataset, batch_size=hyper_params['batch_size'], shuffle=True, drop_last=True)
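# The snippet ends at the train loader; a plausible matching validation
# loader, following the same pattern without shuffling:
valloader = DataLoader(valid_dataset, batch_size=hyper_params['batch_size'], shuffle=False)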
Example #3
from comet_ml import Experiment

import torch
from torch.utils.data import DataLoader

import semantic_segmentation.models as models
from semantic_segmentation.arguments import get_args
from semantic_segmentation.datasets.dataset import get_dataset
from semantic_segmentation.experiments.trainer import pretrain

args = get_args(desc="standalone training for small dataset")
hyper_params = {
    "dataset": args.dataset,
    "model": args.model,
    "seed": args.seed,
    "perc": args.percentage,
    "num_classes": 12,
    "batch_size": 8,
    "num_epochs": args.epoch,
    "learning_rate": 1e-4,
}

torch.manual_seed(hyper_params['seed'])
if args.gpu != 'cpu':
    torch.cuda.set_device(args.gpu)
    torch.cuda.manual_seed(hyper_params['seed'])

train_dataset, valid_dataset, num_classes = get_dataset(
    args.dataset, args.percentage)
hyper_params['num_classes'] = num_classes
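# DataLoader is imported above but the listing stops before it is used; a
# plausible continuation, mirroring the loaders in the other examples:
trainloader = DataLoader(train_dataset, batch_size=hyper_params['batch_size'], shuffle=True, drop_last=True)
valloader = DataLoader(valid_dataset, batch_size=hyper_params['batch_size'], shuffle=False)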
Example #4
import comet_ml
import torch
from torch.utils.data import DataLoader

import semantic_segmentation.models as models
from semantic_segmentation.arguments import get_args
from semantic_segmentation.datasets.dataset import get_dataset
from semantic_segmentation.experiments.trainer import unfreeze, train_traditional
from semantic_segmentation.utils.utils import get_features_trad

args = get_args(desc='traditional kd training of UNet based on ResNet encoder')

hyper_params = {
    "model": args.model,
    "dataset": args.dataset,
    "seed": args.seed,
    "num_classes": 12,
    "batch_size": 8,
    "num_epochs": args.epoch,
    "learning_rate": 1e-4,
    "stage": 0
}

torch.manual_seed(hyper_params['seed'])
if args.gpu != 'cpu':
    torch.cuda.set_device(args.gpu)
    torch.cuda.manual_seed(hyper_params['seed'])

train_dataset, valid_dataset, num_classes = get_dataset(
    args.dataset, args.percentage)
hyper_params['num_classes'] = num_classes
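# The listing is truncated here; the data loaders would plausibly follow
# the same pattern as in the other training scripts:
trainloader = DataLoader(train_dataset, batch_size=hyper_params['batch_size'], shuffle=True, drop_last=True)
valloader = DataLoader(valid_dataset, batch_size=hyper_params['batch_size'], shuffle=False)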
Example #5
import torch
from torch.utils.data import DataLoader

from semantic_segmentation.arguments import get_args
from semantic_segmentation.datasets.dataset import get_dataset
from semantic_segmentation.experiments.trainer import evaluate

args = get_args(desc="args for evaluation", mode='eval')

if args.gpu != 'cpu':
    torch.cuda.set_device(args.gpu)

_, valid_dataset, test_dataset, num_classes = get_dataset(
    args.dataset, None, True)

valloader = DataLoader(valid_dataset, batch_size=1, shuffle=False)
# testloader = DataLoader(test_dataset, batch_size=1, shuffle=False)

params = {"model": None, "seed": args.seed, "num_classes": num_classes}

evaluate(valloader, args, params, mode='pretrain')  # without teacher training

evaluate(valloader, args, params, mode='classifier')  # stagewise training

evaluate(valloader, args, params, mode='simultaneous')  # simultaneous training

evaluate(valloader, args, params,
         mode='traditional-kd')  # traditional-kd training
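# To also score the held-out test split, the commented-out loader above
# could be enabled and passed to evaluate in the same way, e.g.:
# testloader = DataLoader(test_dataset, batch_size=1, shuffle=False)
# evaluate(testloader, args, params, mode='classifier')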
Example #6
import comet_ml
import torch
from torch.utils.data import DataLoader

import semantic_segmentation.models as models
from semantic_segmentation.arguments import get_args
from semantic_segmentation.datasets.dataset import get_dataset
from semantic_segmentation.experiments.trainer import unfreeze, train_simultaneous
from semantic_segmentation.utils.utils import *

args = get_args('Simultaneous training of UNet based on ResNet encoder')

hyper_params = {
    "model": args.model,
    "dataset": args.dataset,
    "seed": args.seed,
    "num_classes": 12,
    "batch_size": 8,
    "num_epochs": args.epoch,
    "learning_rate": 1e-4
}

torch.manual_seed(hyper_params['seed'])
if args.gpu != 'cpu':
    torch.cuda.set_device(args.gpu)
    torch.cuda.manual_seed(hyper_params['seed'])

train_dataset, valid_dataset, num_classes = get_dataset(
    args.dataset, args.percentage)
hyper_params['num_classes'] = num_classes
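# Truncated here as well; loaders analogous to the other examples would
# plausibly follow:
trainloader = DataLoader(train_dataset, batch_size=hyper_params['batch_size'], shuffle=True, drop_last=True)
valloader = DataLoader(valid_dataset, batch_size=hyper_params['batch_size'], shuffle=False)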