Example #1
def train():
    vis = Visualizer(server='http://turing.livia.etsmtl.ca', env='EEG')
    data_root = '/home/AN96120/python_project/Seizure Prediction/processed_data/fft_meanlog_std_lowcut0.1highcut180nfreq_bands12win_length_sec60stride_sec60/Dog_1'
    dataloader_train = get_dataloader(data_root, training=True)
    dataloader_test = get_dataloader(data_root, training=False)
    # No overlap was found between the training and test datasets.
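    # Weight each class by the inverse of its frequency to counter class imbalance.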
    weights = t.Tensor([1 / (np.array(dataloader_train.dataset.targets) == 0).mean(),
                        1 / (np.array(dataloader_train.dataset.targets) == 1).mean()])
    criterion = nn.CrossEntropyLoss(weight=weights.cuda())

    net = convNet()
    net.cuda()

    optimiser = t.optim.Adam(net.parameters(), lr=1e-4, weight_decay=1e-4)
    loss_avg = AverageValueMeter()
    epochs = 10000
    for epoch in range(epochs):
        loss_avg.reset()
        for ii, (data, targets) in enumerate(dataloader_train):
            data, targets = data.type(t.FloatTensor), targets.type(t.LongTensor)
            data = data.cuda()
            targets = targets.cuda()
            optimiser.zero_grad()
            output = net(data)
            loss = criterion(output, targets)
            loss_avg.add(loss.item())
            loss.backward()
            optimiser.step()
        vis.plot('loss', loss_avg.value()[0])

        _, auc_train = val(dataloader_train, net)
        _, auc_test = val(dataloader_test, net)
        print(auc_train, auc_test)
Example #2
def test():
    model = nn.DataParallel(
        RetinaUNet(base=config.Base,
                   InChannel=config.InputChannel,
                   OutChannel=1,
                   BackBone='fpn',
                   IncludeTop=False,
                   Godown=True,
                   IncludeSeg=False)).cuda()
    vis = Visualizer(env=config.Env, port=config.vis_port)
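    # NOTE: despite its name, test() rebuilds the train/val split below and only
    # runs validation; train_dataloader is not used here.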

    if config.Data_type.lower() == '2dplus':
        img_read = read_2dPlus
        img_lst = get_Path_2dPlus()
    elif config.Data_type.lower() == '2d':
        img_read = read_image_2D
        img_lst = get_ImgPath()
    else:
        raise ValueError("Unsupported Data_type: {}".format(config.Data_type))
    train_lst, val_lst = data_split_2D(img_lst,
                                       ratio=(1 - config.Val_percent),
                                       shuffle=False)
    train_data = RetinaDataSet(train_lst, img_read)
    train_dataloader = DataLoader(train_data,
                                  batch_size=config.TrainBatchSize,
                                  shuffle=True,
                                  num_workers=config.Num_workers)

    val_data = RetinaDataSet(val_lst, img_read, isTrain=False)
    val_dataloader = DataLoader(val_data,
                                batch_size=1,
                                shuffle=True,
                                num_workers=config.Num_workers)
    val(model, val_dataloader, vis=vis)
Example #3
    print("1: " +
          str(SE3.rmse_avg_raw(ground_truth_list, pose_estimate_list, delta)))
    if data_file_2:
        print("2: " + str(
            SE3.rmse_avg_raw(ground_truth_list, pose_estimate_list_2, delta)))
    if data_file_3:
        print("3: " + str(
            SE3.rmse_avg_raw(ground_truth_list, pose_estimate_list_3, delta)))
    if data_file_4:
        print("4: " + str(
            SE3.rmse_avg_raw(ground_truth_list, pose_estimate_list_4, delta)))

handles = []

visualizer = Visualizer.Visualizer(ground_truth_list,
                                   plot_steering=False,
                                   plot_trajectory=False,
                                   plot_rmse=False)
visualizer.visualize_ground_truth(clear=True, draw=False)

if plot_vo:
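    # Build legend handles, one colored patch per plotted trajectory
    # (make_patch presumably wraps a matplotlib Patch).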
    patch_0 = Visualizer.make_patch(color='green', label='gt')
    handles.append(patch_0)
    patch_1 = Visualizer.make_patch(color='red', label=label_1)
    handles.append(patch_1)
    visualizer.visualize_poses(pose_estimate_list, draw=False, style='-rx')
    if data_file_2:
        patch_2 = Visualizer.make_patch(color='blue', label=label_2)
        handles.append(patch_2)
        visualizer.visualize_poses(pose_estimate_list_2,
                                   draw=False,
                                   style='-bx')
Example #4
import time
import os.path
import warnings
import threading
from collections import OrderedDict

import requests
import pandas as pd

from Visualization import server
from Visualization import DataAnalyzer
from Visualization import Visualizer

url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vSoU-enLc3GpVJ8sMYHCyC2g6jt_87wFNwC0DHwjES8tikVBDGNNp5l7b4YvXPD3YzUu24esi_ajhN8/pubhtml?gid=1127098673&single=true&exportFormat=csv'
output = 'Data/policies.csv'
csv_content = requests.get(url)
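# NOTE: the policies sheet is fetched but never written to `output`;
# the pd.read_html path below is commented out.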

#html = requests.get(url).content
#df_list = pd.read_html(html)
#df = df_list[-1]

#df.to_csv(output)

url2 = 'https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide-2020-03-28.xlsx'
read_file = pd.read_excel(url2)
read_file.to_csv('Data/time_series.csv', index=None, header=True)

time.sleep(10)
started_visualization = server.run_visualisation_server()
__visualizer = Visualizer.Visualizer(server_running=True)

Example #5

im_greyscale_reference_1, im_depth_reference_1 = ref_image_list[0]
(image_height, image_width) = im_greyscale_reference_1.shape
se3_identity = np.identity(4, dtype=Utils.matrix_data_type)
# The image gradient induces a coordinate system where y is flipped, i.e. it has to be flipped here.
intrinsic_identity = Intrinsic.Intrinsic(-517.3, -516.5, 318.6, 239.5) # freiburg_1
if use_ndc:
    #intrinsic_identity = Intrinsic.Intrinsic(1, 1, 1/2, 1/2) # for ndc
    intrinsic_identity = Intrinsic.Intrinsic(-1, -516.5/517.3, 318.6/image_width, 239.5/image_height) # for ndc


camera_reference = Camera.Camera(intrinsic_identity, se3_identity)
camera_target = Camera.Camera(intrinsic_identity, se3_identity)

visualizer = Visualizer.Visualizer(ground_truth_list)

motion_cov_inv = np.identity(6, dtype=Utils.matrix_data_type)
twist_prior = np.zeros((6, 1), dtype=Utils.matrix_data_type)
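# Motion prior: identity inverse covariance and a zero twist, i.e. no prior motion information.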

for i in range(0, len(ref_image_list)):
    im_greyscale_reference, im_depth_reference = ref_image_list[i]
    im_greyscale_target, im_depth_target = target_image_list[i]

    im_depth_reference /= depth_factor
    im_depth_target /= depth_factor


    #depth_t = (im_depth_reference != 0).astype(Utils.depth_data_type_float)
    #im_depth_reference = np.add(im_depth_reference,depth_t)
    #depth_t = (im_depth_target != 0).astype(Utils.depth_data_type_float)
Example #6
import pygame
import pygame.mixer
from pygame.locals import *
from pygame.key import *

import sys
sys.path.append('../libs/')
sys.path.append('../config/')

from CarModel import CarModel, CarPose
from Visualization import Visualizer
from Config import Config

clock = pygame.time.Clock()

config = Config()

# Create the car model and the visualizer.
car = CarModel(config, CarPose(1000, 800, 0.0))
visualizer = Visualizer(config)

while True:

    # Limit the framerate; clock.tick returns the elapsed time in milliseconds.
    timeDelta = clock.tick(config.fpsLimit) / 1000.0

    car.setSlewRate(visualizer.getSlewRate())
    car.setAcceleration(visualizer.getAcceleration())
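    # The visualizer doubles as the input source: slew rate and acceleration
    # presumably come from its user controls.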

    car.update(timeDelta)
    visualizer.draw(car.getPose())
Example #7
        se3_estimate_acc_2 = np.matmul(se3_estimate_acc_2, SE3_est_2)
        pose_estimate_list_2.append(se3_estimate_acc_2)

    if data_file_3:
        SE3_est_3 = pose_estimate_list_loaded_3[i]
        se3_estimate_acc_3 = np.matmul(se3_estimate_acc_3, SE3_est_3)
        pose_estimate_list_3.append(se3_estimate_acc_3)

    if data_file_4:
        SE3_est_4 = pose_estimate_list_loaded_4[i]
        se3_estimate_acc_4 = np.matmul(se3_estimate_acc_4, SE3_est_4)
        pose_estimate_list_4.append(se3_estimate_acc_4)
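    # Each relative SE3 estimate above is chained onto the running product to build the absolute trajectory.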


delta = 30
if (count - 1) - start_count >= delta:

    print(SE3.rmse_avg_raw(ground_truth_list,pose_estimate_list, delta))

visualizer = Visualizer.Visualizer(ground_truth_list,
                                   plot_steering=False,
                                   plot_trajectory=False)
visualizer.visualize_ground_truth(clear=True, draw=False)
if plot_vo:
    visualizer.visualize_poses(pose_estimate_list, draw=False)
    if data_file_2:
        visualizer.visualize_poses(pose_estimate_list_2, draw=False, style='-ro')
    if data_file_3:
        visualizer.visualize_poses(pose_estimate_list_3, draw=False, style='-bx')
    if data_file_4:
        visualizer.visualize_poses(pose_estimate_list_4, draw=False, style='-bo')
print('visualizing..')
visualizer.show()
Example #8
def train(**kwargs):
    vis = Visualizer(env=config.Env, port=config.vis_port)
    model = nn.DataParallel(
        RetinaUNet(base=config.Base,
                   InChannel=config.InputChannel,
                   OutChannel=1,
                   BackBone='fpn',
                   IncludeTop=False,
                   Godown=False,
                   IncludeSeg=False)).to(config.Device)

    model.train()
    scaler = torch.cuda.amp.GradScaler()
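    # NOTE: the GradScaler is created but never used in the loop below
    # (no autocast context or scaler.step calls).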
    Seg_Matrix = EvalMatrix()
    Anc_Matrix = EvalMatrix()
    if config.checkpoint:
        try:
            if isinstance(model, nn.DataParallel):
                model.module.load(get_pth(model, config.checkpoint))
            else:
                model.load(get_pth(model, config.checkpoint))
            print("Load Model Successfully")
        except Exception as e:
            print("Failed to load checkpoint:", e)

    if config.Data_type.lower() == '2dplus':
        img_read = read_2dPlus
        img_lst = get_Path_2dPlus()
    elif config.Data_type.lower() == '2d':
        img_read = read_image_2D
        img_lst = get_ImgPath()
    else:
        raise ValueError("Unsupported Data_type: {}".format(config.Data_type))

    train_lst, val_lst = data_split_2D(img_lst,
                                       ratio=(1 - config.Val_percent),
                                       shuffle=True)
    train_data = RetinaDataSet(train_lst, img_read)
    train_dataloader = DataLoader(train_data,
                                  batch_size=config.TrainBatchSize,
                                  shuffle=True,
                                  num_workers=config.Num_workers)

    val_data = RetinaDataSet(val_lst, img_read, isTrain=False)
    val_dataloader = DataLoader(val_data,
                                batch_size=1,
                                shuffle=True,
                                num_workers=config.Num_workers)

    lr = config.lr

    RegCriterion = BBoxRegLoss().to(config.Device)
    AncCriterion = AnchorLoss().to(config.Device)
    SegCriterion = SegLoss().to(config.Device)
    optimizer = optim.Adam(params=model.parameters(),
                           lr=lr,
                           weight_decay=config.Weight_decay)
    # scheduler_exp = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.1)
    # scheduler_cosin = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10, eta_min=1e-5, last_epoch=-1)
    scheduler_monitor = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=0.1,
        patience=3,
        threshold_mode='rel',
        threshold=0.0001,
        cooldown=5,
        min_lr=1e-5,
        eps=1e-8)
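    # NOTE: scheduler_monitor is never stepped (its step call is commented out below);
    # the manual patience-based lr decay at the end of each epoch is used instead.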

    previousLoss = 0
    previousValLoss = 100
    debug_patient = config.Debug_Patient
    patient = config.Lr_Patient
    for epoch in range(config.Max_epoch):
        Seg_Matrix.reset()
        Anc_Matrix.reset()
        start_time = time.time()
        Seg_precision, Seg_sensi, loss_counter, AvgAncLoss, AvgAncPrec, AvgAncSensi = 0, 0, 0, 0, 0, 0
        # if epoch != 0:
        #     for param_group in optimizer.param_groups:
        #         param_group['lr'] = lr * config.lr_decay

        for ii, (img, mask, anchors_labels, bbox_labels, ancLossIdx,
                 bboxLossIdx, segIdx, _) in enumerate(tqdm(train_dataloader)):
            img_in = V(img).float().to(config.Device, dtype=torch.float)
            mask = V(mask).float().to(config.Device, dtype=torch.float)
            optimizer.zero_grad()
            seg, ancOutput, boxOutput = model(img_in)
            # ---------------------Anchor Output Statistics----------------
            # vis.show_hist(name="Train Anchor Output", tensor=ancOutput)
            Anc_precision, Anc_sensi, Anc_iou = 0, 0, 0
            box_loss, anc_loss, seg_loss, loss = 0., 0., 0., 0.
            temp_seg = 0
            if seg is not None:
                temp_seg = SegCriterion(seg, mask, segIdx)
            if temp_seg != 0 and not t.isnan(temp_seg):
                seg_loss += temp_seg

            for i in range(config.Output_features):
                # ----------------------Bounding Box Regression-----------------
                temp_reg = RegCriterion(
                    boxOutput[i],
                    V(bbox_labels[i]).to(config.Device, dtype=torch.float),
                    V(bboxLossIdx[i]).to(config.Device, dtype=torch.float))
                if temp_reg != 0 and not t.isnan(temp_reg):
                    box_loss += temp_reg / config.Output_features
                # ----------------Anchor Loss Computation----------------
                temp_anc = AncCriterion(
                    ancOutput[i],
                    V(anchors_labels[i]).to(config.Device, dtype=torch.float),
                    V(ancLossIdx[i]).to(config.Device, dtype=torch.float))
                # handle = ancOutput[i].register_hook(utils.get_grad)
                if temp_anc != 0 and not t.isnan(temp_anc):
                    anc_loss += temp_anc

                Anc_Matrix.genConfusionMat(ancOutput[i], anchors_labels[i])
                Anc_precision += (Anc_Matrix.precision() /
                                  config.Output_features)
                Anc_sensi += (Anc_Matrix.sensitive() / config.Output_features)
                Anc_iou += (Anc_Matrix.mIoU())

            if seg_loss != 0:
                loss += seg_loss
            if anc_loss != 0:
                loss += anc_loss
            if box_loss != 0:
                loss += box_loss

            if loss == 0:
                # vis.log("Nan Loss: Seg:{}, Box:{}, Anc:{}".format(myStr(seg_loss), myStr(box_loss), myStr(anc_loss)))
                continue

            loss.backward()
            # ---------------------Anchor Gradient Statistics----------------
            # vis.show_hist(name="Train Anchor Gradient", tensor=t.tensor([k.mean() for k in utils.features['loss']]))
            loss_counter += t2i(loss)
            optimizer.step()
            # scheduler_cosin.step()
            # if anc_loss != 0:
            #     scheduler_monitor.step(anc_loss)
            # ---------------------Train Evaluation-------------------------
            if seg is not None:
                Seg_Matrix.genConfusionMat(seg.clone(), mask.clone())
                Seg_precision += Seg_Matrix.precision()
                Seg_sensi += Seg_Matrix.sensitive()
            AvgAncLoss += anc_loss
            AvgAncPrec += Anc_precision
            AvgAncSensi += Anc_sensi
            if seg_loss != 0:
                vis.log('train loss in Segmentation: ' + myStr(seg_loss))
                vis.plot('Segmentation loss:', t2i(seg_loss))

            if ii % config.Print_freq == config.Print_freq - 1:
                vis.log('train loss in bounding box regression: ' +
                        myStr(box_loss))
                vis.log('train loss in Anchor Net: ' + myStr(anc_loss))
                vis.plot("Anchor Precision:", t2i(Anc_precision))
                vis.plot("Anchor Sensitive:", t2i(Anc_sensi))
                vis.plot('Bounding box regression loss', t2i(box_loss))
                vis.plot('Anchor Net Loss: ', t2i(anc_loss))
                vis.plot('Training Loss: ', t2i(loss))
        end_time = time.time()

        # -----------------Validation--------------------
        val_loss = val(model, val_dataloader, vis, boxShow=False)
        avg_loss = loss_counter / len(train_dataloader)
        vis.plot("Avg Train Anchor Loss:",
                 t2i(AvgAncLoss / len(train_dataloader)))
        vis.plot("Avg Train Anchor Precision:",
                 t2i(AvgAncPrec / len(train_dataloader)))
        vis.plot("Avg Train Anchor Sensitive:",
                 t2i(AvgAncSensi / len(train_dataloader)))
        epoch_str = (
            'Epoch: {}, Train Loss: {:.5f}, Train Seg Mean Precision: {:.5f}, '
            'Train Seg Mean Sensitivity: {:.5f}, Valid Loss: {:.5f}'.format(
                epoch, avg_loss, Seg_precision / len(train_dataloader),
                Seg_sensi / len(train_dataloader), val_loss.item()))
        print(
            epoch_str + " Time:" + str(end_time - start_time) +
            ' lr: {}'.format(optimizer.state_dict()['param_groups'][0]['lr']))

        if previousLoss != 0 and avg_loss >= previousLoss:
            debug_patient -= 1
            if patient == 0:
                patient = config.Lr_Patient
                lr = lr * config.lr_decay
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
            else:
                patient -= 1

            if debug_patient == 0:
                pdb.set_trace()
                debug_patient = config.Debug_Patient

        previousLoss = avg_loss
        # Save when the validation loss halves; previousValLoss starts at the
        # sentinel 100, so the first epoch is skipped.
        if val_loss < previousValLoss / 2 and previousValLoss != 100:
            model.module.save()
        previousValLoss = val_loss
Example #9
from Visualization import Visualizer
import time

visualizer = Visualizer.VisualizerThread(1, "Visualizer")
visualizer.start()
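# Let the visualizer thread run for five seconds before signalling it to stop.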

time.sleep(5)

visualizer.stop()
Example #10
    #ground_truth_acc[0,3] = SE3_ref_target_clean[0,3] # ds3
    ground_truth_acc[1, 3] = SE3_ref_target_clean[1, 3]
    ground_truth_list.append(ground_truth_acc)

    ref_image_list.append((im_greyscale_reference, im_depth_reference))
    target_image_list.append((im_greyscale_target, im_depth_target))

    encoder_ts = float(rgb_encoder_dict[ref_id][0])
    encoder_values = encoder_dict[encoder_ts]
    encoder_values_float = [float(encoder_values[0]), float(encoder_values[1])]
    encoder_list.append(encoder_values_float)
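    # The wheel-encoder pair above is matched to the reference frame via its RGB timestamp.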

    SE3_est = pose_estimate_list_loaded[i]

    se3_estimate_acc = np.matmul(se3_estimate_acc, SE3_est)
    pose_estimate_list.append(se3_estimate_acc)

delta = 30
if (count - 1) - start_count >= delta:
    print(SE3.rmse_avg_raw(ground_truth_list, pose_estimate_list, delta))

visualizer = Visualizer.Visualizer(ground_truth_list,
                                   plot_steering=plot_steering,
                                   title=None)
visualizer.visualize_ground_truth(clear=True, draw=False)
if plot_steering:
    visualizer.visualize_steering(encoder_list, clear=False, draw=False)
if plot_vo:
    visualizer.visualize_poses(pose_estimate_list, draw=False)
print('visualizing..')
visualizer.show()
Example #11
# -*- coding: utf-8 -*-
from models.oneD_cnn_model import oneD_conv
from cnn_based_model.dataset_cnn import get_dataloader
from torch import nn
import torch as t
import numpy as np
from tqdm import tqdm
from Visualization import Visualizer
from torchnet.meter import AverageValueMeter
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, confusion_matrix
vis = Visualizer('http://turing.livia.etsmtl.ca', env='EEG')

folder_name = 'Dog_1/features'
dataloader_train = get_dataloader(folder_name)
dataloader_test = get_dataloader(folder_name, training=False)

def val(dataloader, net):
    avg_acc = AverageValueMeter()
    avg_acc.reset()
    y_true = []
    y_predict = []
    y_predict_proba = []
    net.eval()
    with t.no_grad():
        for i, (data, target) in enumerate(dataloader):
            data = data.type(t.FloatTensor)
            data = data.cuda()
            target = target.cuda()
            output = net(data)
            decision = output.max(1)[1]
            y_predict.extend(decision.cpu().numpy().tolist())
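            # (The example is truncated here; a plausible continuation, assuming a
            # two-class softmax output and that val() returns the AUC used by callers:)
            y_predict_proba.extend(F.softmax(output, dim=1)[:, 1].cpu().numpy().tolist())
            y_true.extend(target.cpu().numpy().tolist())
    net.train()
    return confusion_matrix(y_true, y_predict), roc_auc_score(y_true, y_predict_proba)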
Example #12
def get_tensor_dimensions_impl(model, layer, image_size, for_input=False):
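    # Measure the tensor shape flowing through `layer` by attaching a temporary
    # forward hook and pushing a dummy batch through the model.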
    t_dims = None

    def _local_hook(_, _input, _output):
        nonlocal t_dims
        t_dims = _input[0].size() if for_input else _output.size()
        return _output

    handle = layer.register_forward_hook(_local_hook)
    dummy_var = t.zeros(1, 3, image_size, image_size)
    model(dummy_var)
    handle.remove()  # detach the hook so it does not fire on later forward passes
    return t_dims


if __name__ == "__main__":
    vis = Visualizer(env=config.Env, port=config.vis_port)
    model = nn.DataParallel(
        RetinaUNet(base=64, InChannel=3, OutChannel=1, BackBone='unet', IncludeTop=False, Godown=False,
                   IncludeSeg=False)).cuda(0)
    # for name in model.module.ANCNets.state_dict():
    #     if name.endswith("weight"):
    #         print(name, ": ", model.module.ANCNets.state_dict()[name].shape)

    # model = FPN(base=config.Base, InChannel=config.InputChannel, OutChannel=1, backbone='ResNet50', IncludeSeg=False).cuda(0)
    x = t.randn(1, 3, 256, 256).cuda()
    _, features, box = model(x)
    # print(len(features))
    for f in features:
        print(f.shape)
Example #13
        elif use_ackermann_cov:
            motion_cov_inv = ackermann_cov_large_inv
        else:
            motion_cov_inv = solver_manager.motion_cov_inv_final
            motion_cov_inv[2, :] = ackermann_cov_large_inv[2, :]

        twist_prior = np.multiply(1.0, solver_manager.twist_final)
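        # Carry the solver's final twist forward as the motion prior for the next iteration.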

        #twist_prior = ackermann_twist

        #twist_prior = np.add(twist_prior,solver_manager.twist_final)
        #se3_estimate_acc = np.matmul(solver_manager.SE3_est_final,se3_estimate_acc)

        #  SE3_est = SE3.twist_to_SE3(ackermann_twist)
        SE3_est = solver_manager.SE3_est_final
        se3_estimate_acc = np.matmul(se3_estimate_acc, SE3_est)
        pose_estimate_list.append(se3_estimate_acc)
        vo_twist_list.append(solver_manager.twist_final)
print("visualizing..")

if calc_vo:
    FileIO.write_vo_output_to_file(name, info, output_dir_path, vo_twist_list)

visualizer = Visualizer.Visualizer(ground_truth_list,
                                   plot_steering=plot_steering)
visualizer.visualize_ground_truth(clear=True, draw=False)
if plot_steering:
    visualizer.visualize_steering(encoder_list, clear=False, draw=False)
if calc_vo:
    visualizer.visualize_poses(pose_estimate_list, draw=False)
visualizer.show()