Beispiel #1
0
    def verify(self, classifier):
        """Run the competition verification checks against *classifier*.

        Computes the Kolmogorov-Smirnov agreement distance between
        predictions on real vs. Monte-Carlo rows of the check-agreement
        set, and the Cramer-von-Mises coefficient between predictions and
        mass on the check-correlation set.  Returns a VerificationMetrics
        built from both coefficients and the configured cutoffs.
        """
        log.start('Verifying classifier')

        self.checkAgreementData = pandas.read_csv(self.checkAgreementDataFile)
        # columns[1:-2] drops the leading id column and the trailing
        # weight/signal columns; `goodFeatures` comes from module scope -
        # presumably a feature-column subset, TODO confirm.
        columns = self.checkAgreementData.columns[1:-2]
        # NOTE(review): as_matrix() is legacy pandas (removed in 1.0);
        # modern equivalent is .values / .to_numpy().
        checkAgreementData = self.checkAgreementData[columns][goodFeatures].as_matrix()

        agreementPredictions = classifier.classify(checkAgreementData)

        # Split predictions and sample weights by the 'signal' flag:
        # 0 = real data, 1 = Monte-Carlo simulation.
        realPredictions = agreementPredictions[self.checkAgreementData['signal'].values == 0]
        monteCarloPrediction = agreementPredictions[self.checkAgreementData['signal'].values == 1]
        realWeights = self.checkAgreementData[self.checkAgreementData['signal'] == 0]['weight'].values
        monteCarloWeights = self.checkAgreementData[self.checkAgreementData['signal'] == 1]['weight'].values

        agreementCoefficient = Case.getKolmogorovSmirnovDistance(realPredictions, monteCarloPrediction, realWeights, monteCarloWeights)

        self.checkCorrelationData = pandas.read_csv(self.checkCorrelationDataFile)
        # columns[1:-1] drops the id column and the trailing mass column.
        columns = self.checkCorrelationData.columns[1:-1]
        checkCorrelationData = self.checkCorrelationData[columns][goodFeatures].as_matrix()
        masses = self.checkCorrelationData['mass']

        correlationPrediction = classifier.classify(checkCorrelationData)

        correlationCoefficient = Case.getCramerVonNeimanCoefficient(correlationPrediction, masses)

        verificationMetrics = VerificationMetrics(agreementCoefficient, self.agreementCutoff, correlationCoefficient, self.correlationCutoff)
        log.done(verificationMetrics)

        return verificationMetrics
Beispiel #2
0
    def test(self, classifier, testData, batchSize=None):
        """Evaluate *classifier* on *testData* and return TestMetrics.

        testData is an (inputData, labels) pair.  Classification happens in
        batches of `batchSize` rows (defaults to one batch covering the
        whole set); per-batch predictions are concatenated and scored with
        Case.roc_auc_truncated.
        """
        log.start('Testing classifier')

        inputData, labels = testData

        # Default: a single batch spanning the entire input.
        batchSize = batchSize if batchSize is not None else inputData.shape[0]
        # Integer division (Python 2); the +1 covers a trailing partial
        # batch, and may produce one empty batch when the size divides
        # evenly - the len() guard below skips it.
        batchesCount = inputData.shape[0] / batchSize + 1

        predictions = None
        for batchIndex in xrange(batchesCount):
            inputBatch = inputData[batchIndex * batchSize:(batchIndex + 1) * batchSize]

            if predictions is None:
                predictions = classifier.classify(inputBatch)
            else:
                # Bug fix: previously the batch was classified a second
                # time inside concatenate; reuse the result computed here.
                batchPredictions = classifier.classify(inputBatch)
                if len(batchPredictions):
                    predictions = numpy.concatenate([predictions, batchPredictions])

            log.progress('Testing classifier: {0}%'.format((batchIndex + 1) * 100 / batchesCount))

        performance = Case.roc_auc_truncated(labels, predictions)

        testMetrics = TestMetrics(performance)
        log.done(testMetrics)

        return testMetrics
Beispiel #3
0
def base_app():

    """
    Initialize application and add resources to interact with employees Database
    """
    print(' * Initializing API.')
    application = Flask(__name__)
    CORS(application, supports_credentials=False)
    log.start()
    api = Api(application)

    # Route table: (resource class, URL path) - registered in order.
    routes = (
        # Employees resources
        (features.EmployeesResource1, '/employees'),
        (features.EmployeesResource2, '/employees-by-id'),
        (features.EmployeesResource3, '/employees-like'),
        (features.EmployeesResource4, '/employees-roles'),
        (features.NewEmployeesResource, '/new-employees'),
        # Logs visualization
        (features.LogResource, '/log'),
        # Front mocking
        (features.FrontMock, '/run'),
    )
    for resource, path in routes:
        api.add_resource(resource, path)

    # Create Database
    configdb.start_database()

    return application
Beispiel #4
0
    def dump(self, submission):
        """Write *submission* as CSV into the submissions directory.

        The file is named after this case's seed; no index column is
        written.
        """
        log.start('Dumping data')

        destination = '{0}/{1}.csv'.format(self.submissionsDirectory, self.seed)
        submission.to_csv(destination, index=False)

        log.done()
Beispiel #5
0
    def run(self):
        """Spawn the worker threads as daemons, then enter the main loop."""
        # (target, thread name) pairs, started in this order.
        workers = [
            (self.snap_thread, "snap"),
            (self.duplica_thread, "duplica"),
            (self.log_thread, "log"),
            (self.banco_thread, "banco"),
        ]

        for target, name in workers:
            worker = threading.Thread(target=target, name=name, args=())
            # Daemon threads die with the process; main() below blocks.
            worker.setDaemon(True)
            worker.start()

        self.main()
def execute(oai, user, pw, host, logfile,logdir,debug,timeout):
    # Test case 01: check that no Makefile is left modified / un-added in
    # the svn working copy before building (Python 2 syntax throughout).
    case = '01'
    rv = 1
    oai.send_recv('cd $OPENAIR_TARGETS;')   
 
    try:
        log.start()
        test = '00'
        name = 'Check oai.svn.add'
        conf = 'svn st -q | grep makefile'
        diag = 'Makefile(s) changed. If you are adding a new file, make sure that it is added to the svn'
        rsp = oai.send_recv('svn st -q | grep -i makefile;') 
        # Keep the last matching Makefile status line for the diagnostic.
        for item in rsp.split("\n"):
            if "Makefile" in item:
                rsp2=item.strip() + '\n'
        # Presumably raises log.err when 'Makefile' matches rsp - TODO confirm.
        oai.find_false_re(rsp,'Makefile')
    except log.err, e:
        # NOTE(review): rsp2 is unbound here if no split line contained
        # 'Makefile' literally - would raise NameError; confirm intended.
        diag = diag + "\n" + rsp2  
               #log.skip(case, test, name, conf, e.value, logfile)
        log.skip(case, test, name, conf, '', diag, logfile)
Beispiel #7
0
def execute(oai, user, pw, host, logfile, logdir, debug, timeout):
    # Test case 01 (reformatted twin of the block above): check that no
    # Makefile is left modified / un-added in the svn working copy.
    case = "01"
    rv = 1
    oai.send_recv("cd $OPENAIR_TARGETS;")

    try:
        log.start()
        test = "00"
        name = "Check oai.svn.add"
        conf = "svn st -q | grep makefile"
        diag = "Makefile(s) changed. If you are adding a new file, make sure that it is added to the svn"
        rsp = oai.send_recv("svn st -q | grep -i makefile;")
        # Keep the last matching Makefile status line for the diagnostic.
        for item in rsp.split("\n"):
            if "Makefile" in item:
                rsp2 = item.strip() + "\n"
        # Presumably raises log.err when 'Makefile' matches rsp - TODO confirm.
        oai.find_false_re(rsp, "Makefile")
    except log.err, e:
        # NOTE(review): rsp2 is unbound here if no split line contained
        # 'Makefile' literally - would raise NameError; confirm intended.
        diag = diag + "\n" + rsp2
        # log.skip(case, test, name, conf, e.value, logfile)
        log.skip(case, test, name, conf, "", diag, logfile)
Beispiel #8
0
    def createSubmission(self, classifier, testData, batchSize=None):
        """Classify *testData* in batches and build the submission frame.

        Returns a DataFrame with the ids from self.testData and one
        prediction per row.  `batchSize` defaults to the whole set.
        """
        log.start('Creating submission')

        # Bug fix: the default previously read `input.shape[0]` - `input`
        # is the builtin function, so this raised AttributeError whenever
        # batchSize was omitted.  The intended source is the test matrix.
        batchSize = batchSize if batchSize is not None else testData.shape[0]
        # Integer division (Python 2); +1 covers a trailing partial batch
        # and may yield one empty batch, skipped by the len() guard below.
        batchesCount = testData.shape[0] / batchSize + 1

        predictions = None
        for batchIndex in xrange(batchesCount):
            inputBatch = testData[batchIndex * batchSize:(batchIndex + 1) * batchSize]

            if predictions is None:
                predictions = classifier.classify(inputBatch)
            elif len(inputBatch):
                predictions = numpy.concatenate([predictions, classifier.classify(inputBatch)])

            log.progress('Creating submission: {0}%'.format((batchIndex + 1) * 100 / batchesCount))

        submission = pandas.DataFrame({"id": self.testData["id"], "prediction": predictions})

        log.done('submission' + str(submission.shape))

        return submission
Beispiel #9
0
    def loadData(self, minified=False):
        """Load training, validation and test sets from the CSV files.

        Returns (trainingData, validationData, testData) where the first
        two are (inputMatrix, labelVector) pairs and testData is a bare
        input matrix.  The raw frames are also kept on self.  The
        `minified` flag is accepted but not used here - TODO confirm.
        """
        log.start('Loading data')

        # columns[1:-4] drops the id column and four trailing non-feature
        # columns; `goodFeatures` comes from module scope.  as_matrix() is
        # legacy pandas (removed in 1.0).
        self.trainingData = pandas.read_csv(self.trainingDataFile)
        columns = self.trainingData.columns[1:-4]
        trainingInput = self.trainingData[columns][goodFeatures].as_matrix()
        trainingLabels = self.trainingData['signal'].as_matrix()
        trainingData = trainingInput, trainingLabels

        # The check-agreement file doubles as the validation set here;
        # its layout has two trailing non-feature columns.
        self.validationData = pandas.read_csv(self.checkAgreementDataFile)
        columns = self.validationData.columns[1:-2]
        validationInput = self.validationData[columns][goodFeatures].as_matrix()
        validationLabels = self.validationData['signal'].as_matrix()
        validationData = validationInput, validationLabels

        # Test file has no label columns: only the leading id is dropped.
        self.testData = pandas.read_csv(self.testDataFile)
        columns = self.testData.columns[1:]
        testData = self.testData[columns][goodFeatures].as_matrix()

        message = 'trainingData{0}, testData{1}'.format(trainingInput.shape, testData.shape)
        log.done(message)

        return trainingData, validationData, testData
Beispiel #10
0
def execute(oai, user, pw, host, logfile,logdir,debug,cpu):
    # Test case 10: sanity-run the dlsim simulator three times, failing if
    # any crash indicator appears in its output (Python 2 syntax).
    case = '10'
    oai.send('cd $OPENAIR1_DIR;')     
    oai.send('cd SIMULATION/LTE_PHY;')   
    try:
        log.start()
        test = '200'
        name = 'Run oai.dlsim.sanity'
        conf = '-a -n 100'
        diag = 'dlsim is not running normally (Segmentation fault / Exiting / FATAL), debugging might be needed'
        # Each run tees output to its own trace file; send_expect_false
        # presumably raises log.err when the pattern is seen - TODO confirm.
        trace = logdir + '/log_' + host + case + test + '_1.txt;'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('./dlsim.rel8.'+ host + ' ' + conf + tee, 'Segmentation fault', 30)
        trace = logdir + '/log_'  + host + case + test + '_2.txt;'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('./dlsim.rel8.'+ host + ' ' + conf + tee, 'Exiting', 30)
        trace = logdir + '/log_'  + host + case + test + '_3.txt;'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('./dlsim.rel8.'+ host + ' ' + conf + tee, 'FATAL', 30)

    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile,trace)
Beispiel #11
0
def start(version, log_level, profiling):
    """Bootstrap metaTower: profiling, logging, config, events, packages.

    Populates the module-level globals and returns True on completion.
    (Python 2: print statement.)
    """
    global running, config, users, packages, events

    # intro
    print "metaTower v" + version + "\n"

    # profiling
    utils.setProfiling(profiling)

    # logging system
    log.start(log_level)

    # load initial configurations
    config = ConfigManager.ConfigManager()

    # events
    events = EventManager.EventManager()

    # packages: load everything under the packages/ directory
    packages = PackageManager.PackageManager()
    packages.loadDirectory("packages")

    # NOTE(review): `running` and `users` are declared global but never
    # assigned here - presumably initialized elsewhere; confirm.
    return True
def execute(oai, user, pw, host,logfile,logdir,debug):
    # Test case 101: clean-build the dlsim simulator and stash the binary
    # under a per-host name (Python 2 syntax).
    case = '101'
    rv  = 1; 
    oai.send('cd $OPENAIR1_DIR;')     
    oai.send('cd SIMULATION/LTE_PHY;')   

    try:
        log.start()
        test = '01'
        name = 'Compile oai.rel8.phy.dlsim.make' 
        conf = 'make dlsim'  # PERFECT_CE=1 # for perfect channel estimation
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for dlsim in $OPENAIR1_DIR/SIMULATION/LTE_PHY'
        oai.send('make clean; make cleanall;')
        oai.send('rm -f ./dlsim.rel8.'+host)
        # `makerr1` is presumably a module-level error pattern; raises
        # log.err if the build output matches it - TODO confirm.
        oai.send_expect_false('make dlsim -j4' + tee, makerr1,  1500)
        oai.send('cp ./dlsim ./dlsim.rel8.'+host)
                   
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile,trace)
        rv =0
Beispiel #13
0
    def train(classifier, trainingData, validationData, batchSize=None):
        """Fit *classifier* over the training set in mini-batches.

        trainingData is an (inputs, labels) pair; `batchSize` defaults to
        the whole set.  `validationData` is accepted for interface
        symmetry but not used in this body.  Returns TrainingMetrics with
        the elapsed wall-clock time.
        """
        log.start('Training classifier')

        samples, targets = trainingData

        if batchSize is None:
            batchSize = samples.shape[0]
        # Integer division (Python 2): a trailing partial batch is dropped.
        batchCount = samples.shape[0] / batchSize

        startedAt = time.time()

        for batch in xrange(batchCount):
            begin = batch * batchSize
            stop = begin + batchSize
            classifier.fit(samples[begin:stop], targets[begin:stop])
            log.progress('Training classifier: {0}%'.format((batch + 1) * 100 / batchCount))

        elapsed = time.time() - startedAt

        trainingMetrics = TrainingMetrics(elapsed)
        log.done(trainingMetrics)

        return trainingMetrics
Beispiel #14
0
def execute(oai, user, pw, host, logfile,logdir,debug):
    # Test case 03: sanity-run oaisim (rel10) three times, failing if any
    # crash indicator appears in its output (Python 2 syntax).
    case = '03'
    oai.send('cd $OPENAIR_TARGETS;')
    oai.send('cd SIMU/USER;')
    
    try:
        log.start()
        test = '00'
        name = 'Run oai.rel10.sf'
        conf = '-a -A AWGN -l7 -n 100'
        diag = 'OAI is not running normally (Segmentation fault / Exiting / FATAL), debugging might be needed'
        # Each run tees output to its own trace file; send_expect_false
        # presumably raises log.err when the pattern is seen - TODO confirm.
        trace = logdir + '/log_' + host + case + test + '_1.txt'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('./oaisim.rel10.' + host + ' ' + conf + tee, 'Segmentation fault', 30)
        trace = logdir + '/log_' + host + case + test + '_2.txt'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('./oaisim.rel10.' + host + ' ' + conf + tee, 'Exiting', 30)
        trace = logdir + '/log_' + host + case + test + '_3.txt'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('./oaisim.rel10.' + host + ' ' + conf + tee, 'FATAL', 30)

    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile,trace)
Beispiel #15
0
# Common 3D Dashboard processes
###############################################################
# Author: Manel Muñiz (initial version 30/01/2020)
# Last updated: 05/02/2020 (Manel Muñiz)
###############################################################

import commonlibClient as commonLib
import settings
import log
import os
import libconfig

#### MAIN ####

print("Starting common 3D Dashboard backend...")

# Optional configuration override: only loaded when the file exists.
if os.path.isfile(settings.configCommonFile):
    print("Loading configuration from file " + settings.configCommonFile)
    libconfig.loadConfig(settings.configCommonFile)

# Open the "common" log before any further work.
log.start("common")

log.logInfo('Settings log path in ' + settings.logPath)
log.logInfo('Settings clients path in ' + settings.clientsPath)
# NOTE(review): the message says "output path" but logs settings.configFile
# - possibly settings.outputPath was intended (cf. the backend script,
# which logs outputPath here); confirm against the settings module.
log.logInfo('Settings output path in ' + settings.configFile)
log.logInfo('Settings output filename is ' + settings.outputJSFilename)

# The shared config lives directly under the clients path.
configFile = str(settings.clientsPath + "common.cfg")

commonLib.processCommonFile(configFile)
Beispiel #16
0
        for item in rsp.split("\n"):
            if "Makefile" in item:
                rsp2 = item.strip() + "\n"
        oai.find_false_re(rsp, "Makefile")
    except log.err, e:
        diag = diag + "\n" + rsp2
        # log.skip(case, test, name, conf, e.value, logfile)
        log.skip(case, test, name, conf, "", diag, logfile)
    else:
        log.ok(case, test, name, conf, "", logfile)

    oai.send("cd SIMU/USER;")
    oai.send("mkdir " + logdir + ";")

    try:
        log.start()
        test = "01"
        name = "Compile oai.rel8.make"
        conf = "make"
        trace = logdir + "/log_" + case + test + ".txt;"
        tee = " 2>&1 | tee " + trace
        diag = "check the compilation errors for oai"
        oai.send("make cleanall;")
        oai.send("make cleanasn1;")
        oai.send("rm -f ./oaisim.rel8." + host)
        oai.send_expect_false("make -j4 JF=1" + tee, makerr1, timeout)
        oai.send("cp ./oaisim ./oaisim.rel8." + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
        rv = 0
    else:
Beispiel #17
0
MIN_SNR = 0    # SNR sweep lower bound - presumably dB; TODO confirm units
MAX_SNR = 40   # SNR sweep upper bound
PERF = 75      # performance threshold - presumably percent; TODO confirm
OPT = "-L"     # simulator command-line options
FRAME = 2000   # frames per simulation run
#OPT="-L -d" # 8bit decoder , activate dci decoding at UE


def execute(oai, user, pw, host, logfile, logdir, debug, cpu):

    case = '10'
    oai.send('cd $OPENAIR1_DIR;')
    oai.send('cd SIMULATION/LTE_PHY;')

    try:
        log.start()
        test = '300'
        name = 'Run oai.ulsim.sanity'
        conf = '-a -n 100'
        diag = 'ulsim is not running normally (Segmentation fault / Exiting / FATAL), debugging might be needed'
        trace = logdir + '/log_' + host + case + test + '_1.txt;'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('./ulsim.rel8.' + host + ' ' + conf + tee,
                              'Segmentation fault', 30)
        trace = logdir + '/log_' + host + case + test + '_2.txt;'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('./ulsim.rel8.' + host + ' ' + conf + tee,
                              'Exiting', 30)
        trace = logdir + '/log_' + host + case + test + '_3.txt;'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('./ulsim.rel8.' + host + ' ' + conf + tee,
def main():
    """Main procedure of DFA minimization problem generator.
    
    Parses command-line arguments and builds solution and task DFA accordingly.
    Saves result and cleans up.
    """

    # add and check parameters

    class MyFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.MetavarTypeHelpFormatter,
                      argparse.RawTextHelpFormatter):
        # Combined formatter: show defaults, use the type as metavar, and
        # keep the epilog's raw formatting.
        pass

    parser = argparse.ArgumentParser(
        description='Command-line tool to generate DFA minimization problems.',
        formatter_class=MyFormatter,
        epilog=_EPILOG)

    # _ARGUMENTS maps group name -> option tuples of
    # (flag, type, default, help[, choices]); defined elsewhere in module.
    for groupName in _ARGUMENTS:
        group = parser.add_argument_group(groupName)

        for option in _ARGUMENTS[groupName]:
            if len(option) == 4:
                group.add_argument(option[0],
                                   type=option[1],
                                   default=option[2],
                                   help=option[3])
            else:
                group.add_argument(option[0],
                                   type=option[1],
                                   default=option[2],
                                   help=option[3],
                                   choices=option[4])

    args = parser.parse_args()

    # Yes/no flags arrive as strings; normalize them to booleans.
    strToBool = lambda x: x == 'yes'

    args.ps = strToBool(args.ps)
    args.c = strToBool(args.c)
    args.pt = strToBool(args.pt)
    args.dfa = strToBool(args.dfa)
    args.tex = strToBool(args.tex)
    args.pdf = strToBool(args.pdf)
    args.shuf = strToBool(args.shuf)

    args.out = pathlib.Path(args.out)

    # Parameter sanity checks; each bad combination logs and aborts.
    if args.k > args.n:
        log.k_too_big()
        return

    if args.n < args.f:
        log.f_too_big()
        return

    if args.pt and not args.ps:
        log.invalid_p_options()
        return

    if args.k == 0 and args.e > 0:
        log.not_extendable()
        return

    if any(
            map(lambda x: x < 0, (args.k, args.n, args.f, args.dmin, args.dmax,
                                  args.e, args.u))):
        log.neg_value()
        return

    # NOTE(review): if args.out exists as a regular file, this branch is
    # taken and mkdir() raises FileExistsError - confirm intended.
    if not args.out.exists() or not args.out.is_dir():
        log.creating_output_dir()
        args.out.mkdir()
        log.done()

    log.start(args)

    # construct solution dfa

    log.building_solution(args)

    # 'enum' enumerates minimal DFAs in order; otherwise sample randomly.
    build = next_min_dfa if args.b == 'enum' else rand_min_dfa

    solDFA = build(args.k, args.n, args.f, args.dmin, args.dmax, args.ps,
                   args.out)

    # Enumeration returns None once the space is exhausted.
    if solDFA is None and args.b == 'enum':

        log.done()
        log.enum_finished()
        return

    log.done()

    # extend dfa

    log.extending_solution(args)

    # pygraph occasionally hits an internal IndexError bug; retry up to
    # 10 times before giving up.  Every failing path returns, so after
    # the loop reachDFA/taskDFA are always bound (set on the break path).
    for i in range(10):

        try:

            reachDFA, taskDFA = extend_dfa(solDFA, args.e, args.u, args.pt,
                                           args.c)

        except DFANotExtendable:

            log.failed()
            log.dfa_not_extendable(args)
            return

        except PygraphIndexErrorBug:

            log.failed()
            log.pygraph_bug('extending')

            if i == 9:
                log.pygraph_bug_abort(args)
                return

        else:

            log.done()
            break

    # generate graphical representation of solution and task dfa

    if args.dfa or args.tex or args.pdf:
        log.saving()
        save_exercise(solDFA, reachDFA, taskDFA, args.out, args.dfa, args.tex,
                      args.pdf, args.shuf)
        log.done()
    else:
        log.no_saving()

    # clean up working directory: drop LaTeX intermediates

    log.cleaning()

    for f in args.out.iterdir():
        if f.suffix in ('.toc', '.aux', '.log', '.gz', '.bbl', '.blg', '.out'):
            f.unlink()

    log.done()
Beispiel #19
0
import os
import string
import glob
import settings
import log
import libconfig
import libclient

#### MAIN ####

print("Starting 3D Dashboard backend...")

# Optional configuration override: only loaded when the file exists.
if os.path.isfile(settings.configFile):
    print("Loading configuration from file " + settings.configFile)
    libconfig.loadConfig(settings.configFile)

# Open the "backend" log before any further work.
log.start("backend")

log.logInfo('Settings log path in ' + settings.logPath)
log.logInfo('Settings clients path in ' + settings.clientsPath)
log.logInfo('Settings output path in ' + settings.outputPath)
log.logInfo('Settings output filename is ' + settings.outputJSFilename)
log.logInfo('Settings output plotly image path is ' + settings.plotlyPath)

# Process every client config file found under the clients path.
clientList = glob.glob(settings.clientsPath + "/*.cfg")

for c in clientList:
    client_process = libclient.Client()
    result = client_process.processClientConfigFile(clientFile=c)
    # log.logDebug(list(result))
Beispiel #20
0
    Date: 07/07/18
    Version: N/a
    Description: Testing Log Module
'''

# ------------------------------ Imports ------------------------------

import log

# ----------------------------- Functions -----------------------------

# N/a

# ---------------------------- Main Module ----------------------------

log.start()  # Log program start to log.txt

if (__name__ == "__main__"):

    log.write("Main Module Started")  # Write to log.txt

    # Fallback value so the greeting below cannot hit an unbound name
    # when the prompt fails (previously x stayed undefined -> NameError).
    x = "Unknown"
    try:
        x = str(input("What is your name?: "))
    except Exception:  # narrowed from a bare except
        print("Error Occured")
        log.error("Input Error")  # Log input error to log.txt

    # Bug fix: the original used f-strings with EMPTY placeholders
    # (f"Hi {}".format(x)), which is a SyntaxError in Python -
    # interpolate x directly instead.
    print(f"Hi {x}")
    log.write(f"User name is {x}")  # Write to log.txt

log.end()  # Log successful program end
Beispiel #21
0
 def _start(rej,res):
   # Promise-style executor: begin logging, then resolve with None on
   # success or reject with the raised exception.  `msg` and `addr` are
   # presumably closure variables from the enclosing scope (not visible
   # in this chunk) - TODO confirm.
   try:
     log.start(msg,addr)
     res(None)
   except Exception as e:
     rej(e)