コード例 #1
0
def main():
    """Entry point: parse CLI options, resolve the requested color space to an
    OpenCV conversion code, and run the selected task(s).

    Raises:
        ValueError: if ``opt.color`` names an unsupported color space.
    """
    opt = parse_args()
    print(opt.__dict__)

    # Map the CLI color name onto the matching OpenCV conversion constant.
    # A dispatch table keeps the supported names in one place instead of a
    # long if/elif chain.
    color_spaces = {
        "gray": cv2.COLOR_BGR2GRAY,
        "hsv": cv2.COLOR_BGR2HSV,
        "rgb": cv2.COLOR_BGR2RGB,
        "ycrcb": cv2.COLOR_BGR2YCrCb,
        "lab": cv2.COLOR_BGR2LAB,
    }
    try:
        color_space = color_spaces[opt.color]
    except KeyError:
        # Fail fast with a clear message: the original chain left
        # `color_space` unbound for unknown names, deferring the failure to
        # a confusing NameError at first use.
        raise ValueError("unsupported color space: %r" % (opt.color,))

    # channels = tuple(opt.channels)

    # print("Task 1")
    # task1("datasets/ai_challenge_s03_c010-full_annotation.xml", color_space=color_space)
    # print("Task 2")
    # task2('datasets/AICity_data/train/S03/c010/data/', "datasets/ai_challenge_s03_c010-full_annotation.xml",color_space=color_space, channels=channels)

    # print("Task 3")
    # task3("datasets/AICity_data/train/S03/c010/data", 'datasets/ai_challenge_s03_c010-full_annotation.xml',
    #       save_to_disk=True)
    print("Task 4")
コード例 #2
0
from detectron2.engine import DefaultTrainer, DefaultPredictor
from detectron2.evaluation import COCOEvaluator
from detectron2.utils.visualizer import Visualizer
import random
from detectron2.data import MetadataCatalog
import cv2

from visualizer import plot_losses, show_results
from hooks import ValidationLoss
from kitti_mots_dataset import register_kitti_mots_dataset, get_kiti_mots_dicts
from opt import parse_args
from week6.dataloader import TrainerDA

if __name__ == '__main__':

    # Parse command-line options; `config` and `output` attributes are read
    # below (defined in the project-local opt.parse_args).
    opts = parse_args()
    batch_size = 2  # images per batch (not used in the visible lines)

    # Register the KITTI-MOTS train/test splits in Detectron2's dataset
    # catalog under the names "kitti_mots_train" / "kitti_mots_test".
    register_kitti_mots_dataset("../datasets/KITTI-MOTS/training/image_02",
                                "../datasets/KITTI-MOTS/instances_txt",
                                ("kitti_mots_train", "kitti_mots_test"),
                                image_extension="png")

    # register_kitti_mots_dataset("datasets/MOTSChallenge/train/images",
    #                             "datasets/MOTSChallenge/train/instances_txt",
    #                             ("mots_challenge_train", "mots_challenge_test"),
    #                             image_extension="jpg")

    cfg_file = opts.config  # path of the Detectron2 config to use
    output_dir = opts.output  # directory for training/eval artifacts
コード例 #3
0
from models import model_dict, model_channel_dict
import matplotlib.pyplot as plt
from image import get_mask_from_PIL_image, process_PIL_image, get_area_perimiters_from_mask, get_polsby_popper_score, get_pupil_ellipse_from_PIL_image
import asyncio
import math
import datetime
import json
from graph import print_stats

from helperfunctions import get_pupil_parameters, ellipse_area, ellipse_circumference
from Ellseg.pytorchtools import load_from_file
from Ellseg.utils import get_nparams, get_predictions
from Ellseg.args import parse_precision

# INITIAL LOADING OF ARGS
# NOTE(review): `parse_args` and `os` are used below but do not appear in the
# visible import block — confirm they are imported elsewhere in this file.
args = parse_args()
filename = args.load  # path of the model checkpoint to load
if not os.path.exists(filename):
    print("model path not found !!!")
    exit(1)  # NOTE(review): sys.exit(1) is preferred in scripts
# Look up the model configuration keyed by the checkpoint path.
MODEL_DICT_STR, CHANNELS, IS_ELLSEG, ELLSEG_MODEL = model_channel_dict[
    filename]
ELLSEG_FOLDER = 'Ellseg'
ELLSEG_FILEPATH = './' + ELLSEG_FOLDER
ELLSEG_PRECISION = 32  # precision. 16, 32, 64

# Convert the numeric precision into whatever representation Ellseg expects
# (presumably a dtype — see Ellseg.args.parse_precision; the int constant
# above is immediately replaced by this result).
ELLSEG_PRECISION = parse_precision(ELLSEG_PRECISION)

# SETTINGS
FPS = None  #10  (None = uncapped frame rate)
ROTATION = 0
コード例 #4
0
    return map_k, avg_mask_metrics


def calc_mask_metrics(out_dict, gt_mask, pred_mask):
    """Append recall/precision/f1 for one mask pair to ``out_dict``.

    Both masks are flattened to 1-D uint8 vectors; a prediction encoded as
    {0, 255} is rescaled to {0, 1} before scoring. Mutates ``out_dict`` in
    place (keys "recall", "precision", "f1" must map to lists).
    """
    flat_gt = gt_mask.astype("uint8").ravel()
    flat_pred = pred_mask.astype("uint8").ravel()
    # TODO check what happen with some masks
    if flat_pred.max() != 1:
        flat_pred = (flat_pred / 255).astype("uint8")
    metrics = (
        ("recall", recall_score),
        ("precision", precision_score),
        ("f1", f1_score),
    )
    for key, score_fn in metrics:
        out_dict[key].append(score_fn(flat_gt, flat_pred))


if __name__ == '__main__':
    # Parse CLI options, then hard-code the retrieval configuration for this
    # run, overriding whatever was passed on the command line.
    opt = parse_args()

    opt.histogram = "multiresolution"  # histogram variant to use
    opt.dist = "intersection"  # histogram distance metric
    opt.mr_splits = [5, 5]  # presumably a 5x5 sub-block grid — confirm
    opt.color = "RGB"
    opt.bins = 256  # bins per channel
    opt.concat = True
    opt.save = True

    # Run from the parent directory so relative dataset/output paths resolve.
    os.chdir("..")
    mkdir(opt.output)
    log = os.path.join(opt.output, "log.txt")
    # NOTE(review): opened in append mode and never explicitly closed in the
    # visible lines — relies on interpreter exit to flush/close.
    log_file = open(log, "a")
    print(opt, file=log_file)
コード例 #5
0
ファイル: nn.py プロジェクト: MCV-2019-M1-Project/Team3
def main():
    """Train a Siamese network on image pairs, or embed the test set.

    All configuration comes from CLI args (``parse_args``): dataset root,
    output directory, batch size, epochs, lr, margin, ngpu, test_only, etc.
    Side effects: creates ``args.output``, appends the arg dict to
    ``args.output/log.txt``, resumes from ``best_model.pth`` if present, and
    either trains (default) or embeds the test set (``--test_only``).
    """
    # Let cuDNN benchmark kernels for the (fixed) input sizes.
    torch.backends.cudnn.benchmark = True

    args = parse_args()
    mkdir(args.output)
    print(args.__dict__)
    # Log the run configuration. Using `with` closes the handle — the
    # original `file=open(...)` leaked an open file object.
    with open(os.path.join(args.output, "log.txt"), "a") as log_file:
        print(args.__dict__, file=log_file)

    train_set = create_dataset(args, True)  # training split
    val_set = create_dataset(args, False)  # validation split

    # Class-balanced sampling: weight each pair inversely to its class
    # frequency so positive and negative pairs are drawn roughly equally.
    labels = torch.tensor(train_set.pairs[2])
    p_class = 1.0 / len(labels[labels == 1])
    n_class = 1.0 / len(labels[labels != 1])
    # Cast to float before full_like: with an integer `labels` tensor,
    # full_like would inherit the int dtype and truncate the fractional
    # weights to 0, breaking WeightedRandomSampler.
    float_labels = labels.float()
    sample_probabilities = torch.where(
        labels == 1,
        torch.full_like(float_labels, p_class),
        torch.full_like(float_labels, n_class),
    )

    # One "epoch" draws as many samples as there are pairs, with replacement.
    epoch_length = labels.shape[0]
    sampler = torch.utils.data.sampler.WeightedRandomSampler(
        sample_probabilities, epoch_length
    )

    train_loader = DataLoader(
        train_set,
        batch_size=args.batch_size,
        shuffle=False,  # the sampler controls the ordering
        num_workers=args.num_workers,
        sampler=sampler,
    )
    val_loader = DataLoader(
        val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers
    )

    model = Siamese()
    model = model.cuda()
    # Resume from the best checkpoint if a previous run left one behind.
    if "best_model.pth" in os.listdir(args.output):
        model.load_state_dict(torch.load(os.path.join(args.output, "best_model.pth")))

    if args.ngpu > 1:
        model = nn.DataParallel(model, range(args.ngpu))

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Cosine-anneal the LR over the full training schedule (steps, not epochs).
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=args.epochs * len(train_loader)
    )
    criterion = torch.nn.CosineEmbeddingLoss(margin=args.margin)
    if not args.test_only:
        train(
            model, optimizer, scheduler, criterion, train_loader, val_loader, args.epochs, args.output
        )
    else:
        # Test-time preprocessing: ImageNet-normalized center crops.
        transforms = T.Compose(
            [
                T.Resize(args.size),
                T.CenterCrop(args.size),
                T.ToTensor(),
                T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]
        )
        test_set = MuseumDatasetTest(args.root, transforms, args.val_set)

        test_loader = DataLoader(
            test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers
        )
        embed(model, test_loader, args.output)