Example #1
def initKK():

	#global args
	global args

	#global import
	global PluginManager

	#global import
	global argparse

	#get python version
	pythonVersion = sys.version_info

	#check that python is at least 2.7
	if sys.version_info[0] == 2 and sys.version_info[1] < 7:

		#err msg
		# ->as logging isn't init'd yet, just print directly
		print('ERROR: KnockKnock requires python 2.7+ (found: %s)' % (pythonVersion))

		#bail
		return False

	#TODO: check for python 3.0?

	#try import argparse
	# ->should work now since just checked that python is 2.7+
	try:

		#import
		import argparse

	#handle exception
	# ->bail w/ error msg
	except ImportError:

		#err msg
		# ->as logging isn't init'd yet, just print directly
		print('ERROR: could not load required module (argparse)')

		#bail
		return False

	#add knock knock's lib path to system path
	# ->ensures 3rd-party libs will be imported OK
	sys.path.insert(0, os.path.join(utils.getKKDirectory(), 'libs'))

	#now can import 3rd party lib
	# ->yapsy
	from yapsy.PluginManager import PluginManager

	#parse options/args
	# ->will bail (with msg) if usage is incorrect
	args = parseArgs()

	#init output/logging
	if not utils.initLogging(args.verbosity):

		#bail
		return False

	#dbg msg
	utils.logMessage(utils.MODE_INFO, 'initialized logging')

	#check version (Mavericks/Yosemite for now)
	# ->this isn't a fatal error for now, so just log a warning for unsupported versions
	if not utils.isSupportedOS():

		#dbg msg
		utils.logMessage(utils.MODE_WARN, '%s is not an officially supported OS X version (your mileage may vary)' % ('.'.join(utils.getOSVersion())))

	#dbg msg
	else:

		#dbg msg
		utils.logMessage(utils.MODE_INFO, '%s is a supported OS X version' % ('.'.join(utils.getOSVersion())))

	#load python <-> Objc bindings
	# ->might fail if non-Apple version of python is being used
	if not utils.loadObjcBindings():

		#dbg msg
		utils.logMessage(utils.MODE_ERROR, 'python <-> Objc bindings/module not installed\n       run via /usr/bin/python or install modules via \'pip install pyobjc\' to fix')

		#bail
		return False

	#load whitelists
	whitelist.loadWhitelists()

	#init plugin manager
	if not initPluginManager():

		#bail
		return False

	#dbg msg
	utils.logMessage(utils.MODE_INFO, 'initialized plugin manager')

	#giving warning about r00t
	if 0 != os.geteuid():

		#dbg msg
		utils.logMessage(utils.MODE_INFO, 'not running as r00t...some results may be missed (e.g. CronJobs)')

	return True
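
For orientation, a minimal driver sketch showing how an entry point might gate on initKK(); this is an assumption for illustration (not code from the original project), and the scan step is only a placeholder:

#hypothetical entry point (illustrative only); assumes it lives in the same module as initKK()
if __name__ == '__main__':

    #initialize; bail on failure
    if not initKK():
        sys.exit(-1)

    #the actual scan/enumeration logic would follow here (placeholder)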
Example #2
#!/usr/bin/python
"""
This library implements some api functions for quizlet
"""

import utils
import logging
import os
import httplib
import urllib
import json

utils.initLogging()
conf = utils.getConf()


def _get_quizlet_response(method, path, params):
    accesstoken = conf['quizlet']['accesstoken']

    headers = {
        "Authorization": "Bearer %s" % accesstoken,
        "Content-Type": "application/x-www-form-urlencoded"
    }

    conn = httplib.HTTPSConnection("api.quizlet.com")

    conn.request(method, path, params, headers)
    resp = conn.getresponse()
    return resp
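
A minimal usage sketch for the helper above (Python 2); the API path and query parameters are illustrative assumptions, not endpoints taken from the snippet:

# Hypothetical call: the path and parameters below are assumptions for illustration only.
query = urllib.urlencode({"page": 1})
resp = _get_quizlet_response("GET", "/2.0/users/self/sets?" + query, None)
if resp.status == 200:
    sets = json.loads(resp.read())
    print "fetched %d sets" % len(sets)
else:
    print "request failed: %d %s" % (resp.status, resp.reason)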

Example #3
def initKK():

    #global args
    global args

    #global import
    global PluginManager

    #global import
    global argparse

    #get python version
    pythonVersion = sys.version_info

    #check that python is at least 2.7
    if sys.version_info[0] == 2 and sys.version_info[1] < 7:

        #err msg
        # ->as logging isn't init'd yet, just print directly
        print('ERROR: KnockKnock requires python 2.7+ (found: %s)' %
              (pythonVersion))

        #bail
        return False

    #TODO: check for python 3.0?

    #try import argparse
    # ->should work now since just checked that python is 2.7+
    try:

        #import
        import argparse

    #handle exception
    # ->bail w/ error msg
    except ImportError:

        #err msg
        # ->as logging isn't init'd yet, just print directly
        print('ERROR: could not load required module (argparse)')

        #bail
        return False

    #add knock knock's lib path to system path
    # ->ensures 3rd-party libs will be imported OK
    sys.path.insert(0, os.path.join(utils.getKKDirectory(), 'libs'))

    #now can import 3rd party lib
    # ->yapsy
    from yapsy.PluginManager import PluginManager

    #parse options/args
    # ->will bail (with msg) if usage is incorrect
    args = parseArgs()

    #init output/logging
    if not utils.initLogging(args.verbosity):

        #bail
        return False

    #dbg msg
    utils.logMessage(utils.MODE_INFO, 'initialized logging')

    #check version (Mavericks/Yosemite for now)
    # ->this isn't a fatal error for now, so just log a warning for unsupported versions
    if not utils.isSupportedOS():

        #dbg msg
        utils.logMessage(
            utils.MODE_WARN,
            '%s is not an officially supported OS X version (your mileage may vary)'
            % ('.'.join(utils.getOSVersion())))

    #dbg msg
    else:

        #dbg msg
        utils.logMessage(
            utils.MODE_INFO, '%s is a supported OS X version' %
            ('.'.join(utils.getOSVersion())))

    #load python <-> Objc bindings
    # ->might fail if non-Apple version of python is being used
    if not utils.loadObjcBindings():

        #dbg msg
        utils.logMessage(
            utils.MODE_ERROR,
            'python <-> Objc bindings/module not installed\n       run via /usr/bin/python or install modules via \'pip install pyobjc\' to fix'
        )

        #bail
        return False

    #load whitelists
    whitelist.loadWhitelists()

    #init plugin manager
    if not initPluginManager():

        #bail
        return False

    #dbg msg
    utils.logMessage(utils.MODE_INFO, 'initialized plugin manager')

    #giving warning about r00t
    if 0 != os.geteuid():

        #dbg msg
        utils.logMessage(
            utils.MODE_INFO,
            'not running as r00t...some results may be missed (e.g. CronJobs)')

    return True
Example #4
    logging.debug("Subplots : %s", SubplotsPerPlot)
    logging.debug("nTotal : %s", nTotal)
    logging.debug("nPlots : %s", nPlots)

    iPLotted = 0
    for i in range(nPlots):
        fig = make_subplots(SubplotsPerPlot[0], SubplotsPerPlot[1])
        for ir in range(SubplotsPerPlot[0]):
            for ic in range(SubplotsPerPlot[1]):
                logging.debug(
                    "Plot %s -- iPLotted = %s / row = %s / column = %s", i,
                    iPLotted, ir + 1, ic + 1)
                fig.add_trace(figures[iPLotted], row=ir + 1, col=ic + 1)
                fig.update_xaxes(title_text=figures_titles[iPLotted][0],
                                 row=ir + 1,
                                 col=ic + 1)
                fig.update_yaxes(title_text=figures_titles[iPLotted][1],
                                 row=ir + 1,
                                 col=ic + 1)
                iPLotted += 1
        if not batch:
            fig.show()


if __name__ == "__main__":
    initLogging(20)

    dataset = "dataset_1809.10717.csv"

    plotDataset(dataset, batch=False)
Example #5
import signal #Portability problem ???
import tempfile

import pipes

#import Gnuplot, Gnuplot.funcutils

import p2dbstore
import p2data
import utils


args = utils.argParse('reader')

#Init output (logging and verbosity)
utils.initLogging(args['verbosity'])

logger = utils.getLogger()


if args['csvdump'] is not None and args['database']:
	p2data.csvDump(args['database'], args['csvdump'])
	exit(0)

if args['last_data'] is not None:
	p2data.csvLastDataDump(args['last_data'])
	exit(0)

if 'field_list' in args and args['field_list']:
	names = p2data.colNames()
	print "Field list :"
Example #6
File: main.py Project: zyronix/maske
import backend
import processes
import convert
from output import puppet

from utils import initLogging

from os.path import basename

log = initLogging()

log.info("main: Starting run")
processes_dic = {}

log.info("main: Starting process detect")
for plugin in processes.plugins:
    log.debug("main: running %s process plugin" % plugin.__name__)
    processes_dic[plugin.__name__] = plugin.run()
log.info("main: Done with process detect")

backend_dic = {}

log.info("main: Starting package backend run")
for plugin in backend.plugins:
    log.debug("main: running %s package backend plugin" % plugin.__name__)
    for key in processes_dic.keys():
        backend_dic[plugin.__name__] = plugin.run(processes_dic[key])

log.info("main: Done with package backend run")

converted = []
Example #7
def initKK():

	#global args
	global args

	#global import
	global PluginManager

	#add knock knock's lib path to system path
	# ->ensures 3rd-party libs will be imported OK
	sys.path.insert(0, os.path.join(utils.getKKDirectory(), 'libs'))

	#now can import 3rd party lib
	# ->yapsy
	from yapsy.PluginManager import PluginManager

	#parse options/args
	# ->will bail (with msg) if usage is incorrect
	args = parseArgs()

	#init output/logging
	if not utils.initLogging(args.verbosity):

		#bail
		return False

	#dbg msg
	utils.logMessage(utils.MODE_INFO, 'initialized logging')

	#check version
	# ->only support Mavericks for now
	if not utils.isSupportedOS():

		#dbg msg
		utils.logMessage(utils.MODE_INFO, '%.1f is not a fully supported OS X version' % (utils.getOSVersion()))

		#bail
		#return False

	#dbg msg
	else:

		#dbg msg
		utils.logMessage(utils.MODE_INFO, '%.1f is a supported OS X version' % (utils.getOSVersion()))

	#load python <-> Objc bindings
	# ->might fail if non-Apple version of python is being used
	if not utils.loadObjcBindings():

		#dbg msg
		utils.logMessage(utils.MODE_ERROR, 'python <-> Objc bindings/module not installed\n       run via /usr/bin/python or install modules via \'pip install pyobjc\' to fix')

		#bail
		return False

	#load whitelists
	whitelist.loadWhitelists()

	#init plugin manager
	if not initPluginManager():

		#bail
		return False

	#dbg msg
	utils.logMessage(utils.MODE_INFO, 'initialized plugin manager')

	#giving warning about r00t
	if 0 != os.geteuid():

		#dbg msg
		utils.logMessage(utils.MODE_INFO, 'not running as r00t...some results may be missed')

	return True
Example #8
def initKK():

    #global args
    global args

    #global import
    global PluginManager

    #add knock knock's lib path to system path
    # ->ensures 3rd-party libs will be imported OK
    sys.path.insert(0, os.path.join(utils.getKKDirectory(), 'libs'))

    #now can import 3rd party lib
    # ->yapsy
    from yapsy.PluginManager import PluginManager

    #parse options/args
    # ->will bail (with msg) if usage is incorrect
    args = parseArgs()

    #init output/logging
    if not utils.initLogging(args.verbosity):

        #bail
        return False

    #dbg msg
    utils.logMessage(utils.MODE_INFO, 'initialized logging')

    #check version
    # ->only support Mavericks for now
    if not utils.isSupportedOS():

        #dbg msg
        utils.logMessage(
            utils.MODE_INFO, '%.1f is not a fully supported OS X version' %
            (utils.getOSVersion()))

        #bail
        #return False

    #dbg msg
    else:

        #dbg msg
        utils.logMessage(
            utils.MODE_INFO,
            '%.1f is a supported OS X version' % (utils.getOSVersion()))

    #load python <-> Objc bindings
    # ->might fail if non-Apple version of python is being used
    if not utils.loadObjcBindings():

        #dbg msg
        utils.logMessage(
            utils.MODE_ERROR,
            'python <-> Objc bindings/module not installed\n       run via /usr/bin/python or install modules via \'pip install pyobjc\' to fix'
        )

        #bail
        return False

    #load whitelists
    whitelist.loadWhitelists()

    #init plugin manager
    if not initPluginManager():

        #bail
        return False

    #dbg msg
    utils.logMessage(utils.MODE_INFO, 'initialized plugin manager')

    #giving warning about r00t
    if 0 != os.geteuid():

        #dbg msg
        utils.logMessage(utils.MODE_INFO,
                         'not running as r00t...some results may be missed')

    return True
Example #9
	fdpid.write(str(pid))
	fdpid.close()
	
	logger.debug("Background process started. Pid = "+str(pid))
	
	return pid


signal.signal(signal.SIGINT, gentle_exit)
signal.signal(10, gentle_exit)

#Argument parse
args = utils.argParse('monitor')

#Init output (logging and verbosity)
utils.initLogging(args['verbosity'], args['log_file'], args['log_level'], args['log_num'], args['log_size'])

logger = utils.getLogger()

#start background process to start a daemon
pidfile = args['pidfile']
if args['background']:
	exit(start_daemon(pidfile))
elif args['stop']: #or kill an existing daemon
	try:
		pidfd = open(pidfile,"r")
		pid = int(pidfd.read())
		os.kill(pid, 10)
		logger.debug("Sig 10 sent to process %s", pid)
	except:
		exit(0)
Example #10
def train_model():
    args = parser.parse_args()

    ## Logging
    log_path = "./trained_models/{}/".format(args.name)
    os.makedirs(log_path, exist_ok=True)
    initLogging(log_file=log_path + 'train.log')
    if args.tensorboard:
        logger = SummaryWriter(log_path + 'train-pre{}-nll{}'.format(
            args.pretrain_epochs, args.train_epochs))
        logger_val = SummaryWriter(log_path + 'validation-pre{}-nll{}'.format(
            args.pretrain_epochs, args.train_epochs))

    logging.info("------------- {} -------------".format(args.name))
    logging.info("Batch size : {}".format(args.batch_size))
    logging.info("Learning rate : {}".format(args.learning_rate))
    logging.info("Use Planning Coupled: {}".format(args.use_planning))
    logging.info("Use Target Fusion: {}".format(args.use_fusion))

    ## Initialize network and optimizer
    PiP = pipNet(args)
    if args.use_cuda:
        PiP = PiP.cuda()
    optimizer = torch.optim.Adam(PiP.parameters(), lr=args.learning_rate)
    crossEnt = torch.nn.BCELoss()

    ## Initialize training parameters
    pretrainEpochs = args.pretrain_epochs
    trainEpochs = args.train_epochs
    batch_size = args.batch_size

    ## Initialize data loaders
    logging.info("Train dataset: {}".format(args.train_set))
    trSet = highwayTrajDataset(path=args.train_set,
                               targ_enc_size=args.social_context_size +
                               args.dynamics_encoding_size,
                               grid_size=args.grid_size,
                               fit_plan_traj=False)
    logging.info("Validation dataset: {}".format(args.val_set))
    valSet = highwayTrajDataset(path=args.val_set,
                                targ_enc_size=args.social_context_size +
                                args.dynamics_encoding_size,
                                grid_size=args.grid_size,
                                fit_plan_traj=True)
    trDataloader = DataLoader(trSet,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=args.num_workers,
                              collate_fn=trSet.collate_fn)
    valDataloader = DataLoader(valSet,
                               batch_size=batch_size,
                               shuffle=True,
                               num_workers=args.num_workers,
                               collate_fn=valSet.collate_fn)
    logging.info(
        "DataSet Prepared : {} train data, {} validation data\n".format(
            len(trSet), len(valSet)))
    logging.info("Network structure: {}\n".format(PiP))

    ## Training process
    for epoch_num in range(pretrainEpochs + trainEpochs):
        if epoch_num == 0:
            logging.info('Pretrain with MSE loss')
        elif epoch_num == pretrainEpochs:
            logging.info('Train with NLL loss')
        ## Variables to track training performance:
        avg_time_tr, avg_loss_tr, avg_loss_val = 0, 0, 0
        ## Training status, reclaim after each epoch
        PiP.train()
        PiP.train_output_flag = True
        for i, data in enumerate(trDataloader):
            st_time = time.time()
            nbsHist, nbsMask, planFut, planMask, targsHist, targsEncMask, targsFut, targsFutMask, lat_enc, lon_enc, _ = data
            if args.use_cuda:
                nbsHist = nbsHist.cuda()
                nbsMask = nbsMask.cuda()
                planFut = planFut.cuda()
                planMask = planMask.cuda()
                targsHist = targsHist.cuda()
                targsEncMask = targsEncMask.cuda()
                lat_enc = lat_enc.cuda()
                lon_enc = lon_enc.cuda()
                targsFut = targsFut.cuda()
                targsFutMask = targsFutMask.cuda()

            # Forward pass
            fut_pred, lat_pred, lon_pred = PiP(nbsHist, nbsMask, planFut,
                                               planMask, targsHist,
                                               targsEncMask, lat_enc, lon_enc)
            if epoch_num < pretrainEpochs:
                # Pre-train with MSE loss to speed up training
                l = maskedMSE(fut_pred, targsFut, targsFutMask)
            else:
                # Train with NLL loss
                l = maskedNLL(fut_pred, targsFut, targsFutMask) + crossEnt(
                    lat_pred, lat_enc) + crossEnt(lon_pred, lon_enc)

            # Back-prop and update weights
            optimizer.zero_grad()
            l.backward()
            prev_vec_norm = torch.nn.utils.clip_grad_norm_(
                PiP.parameters(), 10)
            optimizer.step()

            # Track average train loss and average train time:
            batch_time = time.time() - st_time
            avg_loss_tr += l.item()
            avg_time_tr += batch_time

            # For every 100 batches: record loss, validate model, and plot.
            if i % 100 == 99:
                eta = avg_time_tr / 100 * (len(trSet) / batch_size - i)
                epoch_progress = i * batch_size / len(trSet)
                logging.info(f"Epoch no:{epoch_num+1}" +
                             f" | Epoch progress(%):{epoch_progress*100:.2f}" +
                             f" | Avg train loss:{avg_loss_tr/100:.2f}" +
                             f" | ETA(s):{int(eta)}")

                if args.tensorboard:
                    logger.add_scalar(
                        "RMSE" if epoch_num < pretrainEpochs else "NLL",
                        avg_loss_tr / 100, (epoch_progress + epoch_num) * 100)

                ## Validation during training:
                eval_batch_num = 20
                with torch.no_grad():
                    PiP.eval()
                    PiP.train_output_flag = False
                    for j, data in enumerate(valDataloader):
                        nbsHist, nbsMask, planFut, planMask, targsHist, targsEncMask, targsFut, targsFutMask, lat_enc, lon_enc, _ = data
                        if args.use_cuda:
                            nbsHist = nbsHist.cuda()
                            nbsMask = nbsMask.cuda()
                            planFut = planFut.cuda()
                            planMask = planMask.cuda()
                            targsHist = targsHist.cuda()
                            targsEncMask = targsEncMask.cuda()
                            lat_enc = lat_enc.cuda()
                            lon_enc = lon_enc.cuda()
                            targsFut = targsFut.cuda()
                            targsFutMask = targsFutMask.cuda()
                        if epoch_num < pretrainEpochs:
                            # During pre-training with MSE loss, validate with MSE for true maneuver class trajectory
                            PiP.train_output_flag = True
                            fut_pred, _, _ = PiP(nbsHist, nbsMask, planFut,
                                                 planMask, targsHist,
                                                 targsEncMask, lat_enc,
                                                 lon_enc)
                            l = maskedMSE(fut_pred, targsFut, targsFutMask)
                        else:
                            # During training with NLL loss, validate with NLL over multi-modal distribution
                            fut_pred, lat_pred, lon_pred = PiP(
                                nbsHist, nbsMask, planFut, planMask, targsHist,
                                targsEncMask, lat_enc, lon_enc)
                            l = maskedNLLTest(fut_pred,
                                              lat_pred,
                                              lon_pred,
                                              targsFut,
                                              targsFutMask,
                                              avg_along_time=True)
                        avg_loss_val += l.item()
                        if j == (eval_batch_num - 1):
                            if args.tensorboard:
                                logger_val.add_scalar(
                                    "RMSE" if epoch_num < pretrainEpochs else
                                    "NLL", avg_loss_val / eval_batch_num,
                                    (epoch_progress + epoch_num) * 100)
                            break
                # Clear statistic
                avg_time_tr, avg_loss_tr, avg_loss_val = 0, 0, 0
                # Revert to train mode after in-process evaluation.
                PiP.train()
                PiP.train_output_flag = True

        ## Save the model after each epoch______________________________________________________________________________
        epoCount = epoch_num + 1
        if epoCount < pretrainEpochs:
            torch.save(
                PiP.state_dict(),
                log_path + "{}-pre{}-nll{}.tar".format(args.name, epoCount, 0))
        else:
            torch.save(
                PiP.state_dict(), log_path + "{}-pre{}-nll{}.tar".format(
                    args.name, pretrainEpochs, epoCount - pretrainEpochs))

    # All epochs finish________________________________________________________________________________________________
    torch.save(PiP.state_dict(), log_path + "{}.tar".format(args.name))
    logging.info("Model saved in trained_models/{}/{}.tar\n".format(
        args.name, args.name))
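
For clarity on the schedule above: with, say, pretrain_epochs=5 and train_epochs=10, epochs 0-4 are optimized with masked MSE and epochs 5-14 with masked NLL plus the maneuver cross-entropy terms, and a checkpoint is written after every epoch. A standalone sketch of that bookkeeping (the values are illustrative):

# Illustrative walk-through of the loss schedule and checkpoint naming used above.
pretrainEpochs, trainEpochs, name = 5, 10, 'pip-demo'
for epoch_num in range(pretrainEpochs + trainEpochs):
    epoCount = epoch_num + 1
    phase = 'MSE pretrain' if epoch_num < pretrainEpochs else 'NLL + maneuver cross-entropy'
    if epoCount < pretrainEpochs:
        ckpt = "{}-pre{}-nll{}.tar".format(name, epoCount, 0)
    else:
        ckpt = "{}-pre{}-nll{}.tar".format(name, pretrainEpochs, epoCount - pretrainEpochs)
    print("epoch {:2d}: {} -> {}".format(epoch_num, phase, ckpt))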
Example #11
import torch
import logging
from tqdm import tqdm
from torch.optim import Adam
from torch.nn import CrossEntropyLoss
from data.dataloader import generate_loader
from config import Configure
from networks.RCNN import BiGruCNN
from framework import MyFrame
from utils import initLogging

name = 'HAN_rcnn_drop0.2'
mylog = 'logs/' + name + '.log'
path = 'weights/' + name + '.pkl'
initLogging(mylog)
device = 2
total_epochs = 30
valid_best_score = 0.

train_loader, valid_loader, testa_loader = generate_loader(train_bs=32)
opt = Configure()
net = BiGruCNN
loss_func = CrossEntropyLoss(size_average=True)
solver = MyFrame(net=net, loss=loss_func, opt=opt, lr=1e-3, device=device)
solver.load(path)
# solver.net.embedding.weight.requires_grad = True

no_optim_round = 0
for epoch in range(total_epochs):
    # train
Example #12
def model_evaluate():

    args = parser.parse_args()

    ## Initialize network
    PiP = pipNet(args)
    PiP.load_state_dict(
        torch.load('./trained_models/{}/{}.tar'.format(
            (args.name).split('-')[0], args.name)))
    if args.use_cuda:
        PiP = PiP.cuda()

    ## Evaluation Mode
    PiP.eval()
    PiP.train_output_flag = False
    initLogging(log_file='./trained_models/{}/evaluation.log'.format((
        args.name).split('-')[0]))

    ## Initialize dataset
    logging.info("Loading test data from {}...".format(args.test_set))
    tsSet = highwayTrajDataset(path=args.test_set,
                               targ_enc_size=args.social_context_size +
                               args.dynamics_encoding_size,
                               grid_size=args.grid_size,
                               fit_plan_traj=True,
                               fit_plan_further_ds=args.plan_info_ds)
    logging.info("TOTAL :: {} test data.".format(len(tsSet)))
    tsDataloader = DataLoader(tsSet,
                              batch_size=args.batch_size,
                              shuffle=False,
                              num_workers=args.num_workers,
                              collate_fn=tsSet.collate_fn)

    ## Loss statistic
    logging.info(
        "<{}> evaluated by {}-based NLL & RMSE, with planning input of {}s step."
        .format(args.name, args.metric, args.plan_info_ds * 0.2))
    if args.metric == 'agent':
        nll_loss_stat = np.zeros(
            (np.max(tsSet.Data[:, 0]).astype(int) + 1,
             np.max(tsSet.Data[:, 13:(13 + tsSet.grid_cells)]).astype(int) + 1,
             args.out_length))
        rmse_loss_stat = np.zeros(
            (np.max(tsSet.Data[:, 0]).astype(int) + 1,
             np.max(tsSet.Data[:, 13:(13 + tsSet.grid_cells)]).astype(int) + 1,
             args.out_length))
        both_count_stat = np.zeros(
            (np.max(tsSet.Data[:, 0]).astype(int) + 1,
             np.max(tsSet.Data[:, 13:(13 + tsSet.grid_cells)]).astype(int) + 1,
             args.out_length))
    elif args.metric == 'sample':
        rmse_loss = torch.zeros(25).cuda()
        rmse_counts = torch.zeros(25).cuda()
        nll_loss = torch.zeros(25).cuda()
        nll_counts = torch.zeros(25).cuda()
    else:
        raise RuntimeError("Wrong type of evaluation metric is specified")
    avg_eva_time = 0

    ## Evaluation process
    with torch.no_grad():
        for i, data in enumerate(tsDataloader):
            st_time = time.time()
            nbsHist, nbsMask, planFut, planMask, targsHist, targsEncMask, targsFut, targsFutMask, lat_enc, lon_enc, idxs = data
            # Initialize Variables
            if args.use_cuda:
                nbsHist = nbsHist.cuda()
                nbsMask = nbsMask.cuda()
                planFut = planFut.cuda()
                planMask = planMask.cuda()
                targsHist = targsHist.cuda()
                targsEncMask = targsEncMask.cuda()
                lat_enc = lat_enc.cuda()
                lon_enc = lon_enc.cuda()
                targsFut = targsFut.cuda()
                targsFutMask = targsFutMask.cuda()

            # Inference
            fut_pred, lat_pred, lon_pred = PiP(nbsHist, nbsMask, planFut,
                                               planMask, targsHist,
                                               targsEncMask, lat_enc, lon_enc)

            # Performance metric
            if args.metric == 'agent':
                dsIDs, targsIDs = tsSet.batchTargetVehsInfo(idxs)
                l, c = maskedNLLTest(fut_pred,
                                     lat_pred,
                                     lon_pred,
                                     targsFut,
                                     targsFutMask,
                                     separately=True)
                # Select the trajectory with the largest probability of maneuver label when evaluating by RMSE
                fut_pred_max = torch.zeros_like(fut_pred[0])
                for k in range(lat_pred.shape[0]):
                    lat_man = torch.argmax(lat_pred[k, :]).detach()
                    lon_man = torch.argmax(lon_pred[k, :]).detach()
                    indx = lon_man * 3 + lat_man
                    fut_pred_max[:, k, :] = fut_pred[indx][:, k, :]
                # Using the most probable trajectory
                ll, cc = maskedMSETest(fut_pred_max,
                                       targsFut,
                                       targsFutMask,
                                       separately=True)
                l = l.detach().cpu().numpy()
                ll = ll.detach().cpu().numpy()
                c = c.detach().cpu().numpy()
                cc = cc.detach().cpu().numpy()
                for j, targ in enumerate(targsIDs):
                    dsID = dsIDs[j]
                    nll_loss_stat[dsID, targ, :] += l[:, j]
                    rmse_loss_stat[dsID, targ, :] += ll[:, j]
                    both_count_stat[dsID, targ, :] += c[:, j]
            elif args.metric == 'sample':
                l, c = maskedNLLTest(fut_pred, lat_pred, lon_pred, targsFut,
                                     targsFutMask)
                nll_loss += l.detach()
                nll_counts += c.detach()
                fut_pred_max = torch.zeros_like(fut_pred[0])
                for k in range(lat_pred.shape[0]):
                    lat_man = torch.argmax(lat_pred[k, :]).detach()
                    lon_man = torch.argmax(lon_pred[k, :]).detach()
                    indx = lon_man * 3 + lat_man
                    fut_pred_max[:, k, :] = fut_pred[indx][:, k, :]
                l, c = maskedMSETest(fut_pred_max, targsFut, targsFutMask)
                rmse_loss += l.detach()
                rmse_counts += c.detach()

            # Time estimate
            batch_time = time.time() - st_time
            avg_eva_time += batch_time
            if i % 100 == 99:
                eta = avg_eva_time / 100 * (len(tsSet) / args.batch_size - i)
                logging.info("Evaluation progress(%):{:.2f}".format(
                    i / (len(tsSet) / args.batch_size) * 100, ) +
                             " | ETA(s):{}".format(int(eta)))
                avg_eva_time = 0

    # Result Summary
    if args.metric == 'agent':
        # Loss averaged from all predicted vehicles.
        ds_ids, veh_ids = both_count_stat[:, :, 0].nonzero()
        num_vehs = len(veh_ids)
        rmse_loss_averaged = np.zeros((args.out_length, num_vehs))
        nll_loss_averaged = np.zeros((args.out_length, num_vehs))
        count_averaged = np.zeros((args.out_length, num_vehs))
        for i in range(num_vehs):
            count_averaged[:, i] = \
                both_count_stat[ds_ids[i], veh_ids[i], :].astype(bool)
            rmse_loss_averaged[:,i] = rmse_loss_stat[ds_ids[i], veh_ids[i], :] \
                                      * count_averaged[:, i] / (both_count_stat[ds_ids[i], veh_ids[i], :] + 1e-9)
            nll_loss_averaged[:,i]  = nll_loss_stat[ds_ids[i], veh_ids[i], :] \
                                      * count_averaged[:, i] / (both_count_stat[ds_ids[i], veh_ids[i], :] + 1e-9)
        rmse_loss_sum = np.sum(rmse_loss_averaged, axis=1)
        nll_loss_sum = np.sum(nll_loss_averaged, axis=1)
        count_sum = np.sum(count_averaged, axis=1)
        rmseOverall = np.power(
            rmse_loss_sum / count_sum,
            0.5) * 0.3048  # Unit converted from feet to meter.
        nllOverall = nll_loss_sum / count_sum
    elif args.metric == 'sample':
        rmseOverall = (torch.pow(rmse_loss / rmse_counts, 0.5) * 0.3048).cpu()
        nllOverall = (nll_loss / nll_counts).cpu()

    # Print the metrics every 5 time frame (1s)
    logging.info("RMSE (m)\t=> {}, Mean={:.3f}".format(
        rmseOverall[4::5], rmseOverall[4::5].mean()))
    logging.info("NLL (nats)\t=> {}, Mean={:.3f}".format(
        nllOverall[4::5], nllOverall[4::5].mean()))
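
One detail worth spelling out from the evaluation above: the most-probable trajectory is picked with the flat maneuver index indx = lon_man * 3 + lat_man, and RMSE is converted from feet to meters with the 0.3048 factor. A tiny sketch of the index mapping (the counts of 2 longitudinal and 3 lateral maneuver classes are an assumption, not stated in the snippet):

# Illustrative: how (lon_man, lat_man) argmax pairs map onto the fut_pred list above.
for lon_man in range(2):      # assumed number of longitudinal classes
    for lat_man in range(3):  # assumed number of lateral classes (the *3 in the index)
        print("lon={}, lat={} -> fut_pred[{}]".format(lon_man, lat_man, lon_man * 3 + lat_man))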
Example #13
    model.compile(optimizer='adam', loss='mse')

    logging.info("Starting to train for %s epochs", epochs)
    model.fit(X, y, epochs=epochs, verbose=0)

    for testSeq, expectation in testSequence:
        x_input = np.array(testSeq)
        x_input = x_input.reshape((1, nsteps, nfeatures))
        yhat = model.predict(x_input, verbose=0)
        logging.info("Predicted %s as next step for %s", yhat, testSeq)
        logging.info("Expected %s", expectation)


if __name__ == "__main__":
    initLogging(10)

    if True:
        logging.info("Prediction next integer")
        seriesLength = 4
        lengths = 200
        startVal = 1
        seriesLin = [1 * x for x in range(1, lengths + 1)]

        testSeries = [
            ([(lengths + 2) + x
              for x in range(seriesLength)], lengths + 2 + seriesLength),
            ([(lengths + 20) + x
              for x in range(seriesLength)], lengths + 20 + seriesLength),
            ([(lengths + 200) + x
              for x in range(seriesLength)], lengths + 200 + seriesLength)