Example #1
def Run_Algoritm(Path, Algorithm, Discretization_type, NumOfBins, NumOfNeg):
    '''
    Runs the algorithm the user chose: cleans the input files, builds the model,
    and returns the results for the user's inputs.
    :return: the results of running the chosen algorithm
    '''
    pre = PreProcessing()

    test = pd.read_csv(Path + '/test.csv')
    train = pd.read_csv(Path + '/train.csv')
    struct = pre.read_structure(Path + '/Structure.txt')

    pre.Clean_Data(train, struct, Discretization_type, NumOfBins)
    pre.Delete_Nan_Class_Row(test)
    pre.Fill_Nan_Values(test, struct)

    pre.Save_Data(train, Path, 'train')
    pre.Save_Data(test, Path, 'test')

    if Algorithm not in ['naive bayes classifier (our)', 'ID3 (our)']:
        runner = BuildAlgorithm()
        train, test = runner.Convert_Strings_To_Numbers(Path)
        return runner.Run(Algorithm, train, test, Path, NumOfNeg)
    else:
        process = Processing()
        model = process.Build_Model(Path, Algorithm, train)
        process.Save_Model(Path, model)
        train = pd.read_csv(Path + '/train.csv')
        pre.Delete_Nan_Class_Row(train)
        pre.Fill_Nan_Values(train, struct)
        return process.Running_Algorithm(Path, Algorithm, train, test)
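A minimal usage sketch for the function above; the folder path, discretization name, and numeric values are hypothetical, while the algorithm string is one of the two handled by the in-house branch:

    results = Run_Algoritm(Path='./data',
                           Algorithm='ID3 (our)',
                           Discretization_type='equal-width',
                           NumOfBins=5,
                           NumOfNeg=10)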
Example #2
def Validation(model, optimizer, lossfunction, targetvar, train_graphs,
               val_graphs, train_queue, val_queue, val_queue_fill,
               train_queue_fill, workers, batch_size, val_batches, epoch,
               epochs):
    deadcheck = False
    traincheck = False
    val_loss = torch.tensor([0], dtype=float).to(device)
    for k in range(val_batches):
        with torch.no_grad():
            batch = Processing.GrabBatch(val_queue, device)
            output = model(batch)[:, 0]
            loss = lossfunction(output, batch.y[:, targetvar])
        val_loss += loss
        deadcheck = Processing.Process_check(val_queue_fill, deadcheck, k,
                                             val_batches)
        if deadcheck and not traincheck and epoch < epochs - 1:
            train_queue_fill = Processing.Spawn_Processes(
                workers, train_graphs, train_queue, batch_size)
            traincheck = True
    deadcheck = Processing.Process_check(val_queue_fill, deadcheck, k,
                                         val_batches)
    if not deadcheck:
        print('Validation Processes Still Alive. Terminating...')
        for process in val_queue_fill:
            process.terminate()
        if epoch < epochs - 1:
            train_queue_fill = Processing.Spawn_Processes(
                workers, train_graphs, train_queue, batch_size)

    return val_loss, train_queue_fill
Example #3
def runExperiment(filter, removedWordFile, modelfile, resultFile, vocabFile):
    data = FileReader.getData(fileToRead, types, 2018)

    vocabDict = data[0]
    catDict = data[1]

    sortedVocabDict = OrderedDict(sorted(vocabDict.items()))

    removedWords = Processing.filterAndRemove(sortedVocabDict, catDict, filter)

    Utility.outputListToFile(sortedVocabDict.keys(), vocabFile)
    Utility.outputListToFile(removedWords, removedWordFile)

    Processing.smoothAllCategories(vocabDict, catDict, smoothingValue)

    ModelGenerator.outputModelToFile(modelfile, sortedVocabDict, catDict,
                                     smoothingValue)

    model = FileReader.getModel(modelfile)

    testData = FileReader.getTestData(fileToRead, 2019)

    return ModelGenerator.outputBaysianClassification(resultFile, testData,
                                                      model, types,
                                                      sortedVocabDict, catDict)
Example #4
def Predict(model, prediction_graphs, workers, pred_mini_batches, batch_size,
            currfolder, device, targetvar):
    predictions = []
    truths = []
    pred_events = []
    manager = torch.multiprocessing.Manager()
    q = manager.Queue()
    slaves = Processing.Spawn_Processes(workers, prediction_graphs, q,
                                        batch_size)
    dead_check = False
    model.eval()
    with torch.no_grad():
        for mini_batch in range(pred_mini_batches):
            data = Processing.GrabBatch(q, device)
            prediction = model(data)
            truth = Pidclass(data.y[:, targetvar]).unsqueeze(1).detach().cpu().numpy()
            pred_events.extend(data.event_no.detach().cpu().numpy())
            predictions.extend(prediction.detach().cpu().numpy())
            truths.extend(truth)
            dead_check = Processing.Process_check(slaves, dead_check,
                                                  mini_batch, pred_mini_batches)
        if not dead_check:
            for slave in slaves:
                slave.terminate()
        print('Saving results...')
        truths = pd.DataFrame(truths)
        predictions = pd.DataFrame(predictions)
        pred_events = pd.DataFrame(pred_events)
        result = pd.concat([pred_events, truths, predictions], axis=1)
        result.columns = ['event_no', 'Pid', 'Antineutrino', 'Neutrino']
        result.to_csv(currfolder + 'predictions.csv', index=False)
Example #5
def main():
    scanner = Processing.League_Scanner(model_path)
    while True:
        img = getimg()
        img = Processing.process(img, scanner)
        cv2.imshow("", img)
        cv2.waitKey(int(1000 / fps))
Example #6
def addtoredemption():
    print("hello")
    Processing.addtoredemptionhistory(session['user'], session["cardname"], session["cardno"])
    session.clear()
    session['user'] = "******"
    session["cardname"] = "UOB Platinum Card"
    session["cardno"] = "9888-6121-0824-1112"
    return redirect(url_for('allredemption'))
Example #7
def catalogueuob():
    session['user'] = "******"
    session["cardname"] = "UOB Platinum Card"
    session["cardno"] = "9888-6121-0824-1112"
    if request.method == 'POST':
        session['cart'] = True
        fullname = request.form["fullname"]
        itempoint = request.form["point"]
        currentpoint = request.form["currentpoint"]
        redeempoint = request.form["redeempoint"]
        quantity = request.form["quantity"]
        print(quantity)

        if quantity == "0":
            flash("Enter a valid quantity!", 'danger')
        else:
            Processing.cartdict(session['user'], session["cardname"],
                                session["cardno"], fullname, itempoint,
                                currentpoint, redeempoint, quantity)
            print("You have successfully redeemed {} {}".format(quantity, fullname))
    #  listqty=Processing.list_qty(session['user'],session["cardno"])
    uob_redeempts = Processing.processredeemedpoints(session['user'], session["cardname"], session["cardno"])
    uob_currentpts = Processing.processcurrentpoints(session['user'], session["cardname"], session["cardno"])
    uob_preferreditems = Processing.processpreferreduob(session['user'], session["cardname"], session["cardno"])
    uob_allitems = Processing.processallitems(session['user'], session["cardname"], session["cardno"])
    uob_retialitems = Processing.processretailitems(session['user'], session["cardname"], session["cardno"])
    uob_diningitems = Processing.processdiningitems(session['user'], session["cardname"], session["cardno"])
    uob_leisureitems = Processing.processleisureitems(session['user'], session["cardname"], session["cardno"])
    #user = session["user"], cardno = session["cardno"],

    return render_template('catalogueuob.html',
                           uobcurrentpts=uob_currentpts,
                           uobredeempts=uob_redeempts,
                           uobpreferreditems=uob_preferreditems,
                           uoballitems=uob_allitems,
                           uobretialitems=uob_retialitems,
                           uobdiningitems=uob_diningitems,
                           uobleisureitems=uob_leisureitems)
Example #8
def trying():
    uob_currentpts = Processing.processpoints()
    uob_allitems = Processing.processallitems()
    uob_retialitems = Processing.processretailitems()
    uob_diningitems = Processing.processdiningitems()
    uob_leisureitems = Processing.processleisureitems()
    return render_template('trying.html',
                           uobcurrentpts=uob_currentpts,
                           uoballitems=uob_allitems,
                           uobretialitems=uob_retialitems,
                           uobdiningitems=uob_diningitems,
                           uobleisureitems=uob_leisureitems)
Example #9
def cartuob():
    checkoutpoints = Processing.processredeemedpoints(session['user'], session["cardname"], session["cardno"])
    uobcheckoutList = Processing.checkout(session['user'], session["cardname"], session["cardno"])

    if request.method == 'POST' and 'remove' in request.form:
        position = request.form["remove"]

        print(position)
        Processing.removeitem(session['user'], session["cardname"], session["cardno"], position)
        return redirect(url_for('cartuob'))
    return render_template('cartuob.html', uobcheckout=uobcheckoutList, uobcheckoutpts=checkoutpoints)
Example #10
    def r(self):
        from importlib import reload
        import time
        import traceback
        import Processing as p

        try:
            # Do some things.
            time.sleep(1)
            p = reload(p)
            p.test(driver, self.filename)
        except Exception:
            traceback.print_exc()
Example #11
    def formAutomation(self, event):
        #  Hide frame window
        self.Hide()
        #  Close driver window
        self.driver.quit()

        print('Completed Selenium Form Tests')
        print('Processing...')

        # Processing and collating analytics data
        Processing.csv_prettifier(str(Path(self.homeDir + '/Downloads')), '',
                                  self.formOutput.GetValue(), True)

        print('\nComplete')

        sys.exit()
Example #12
    def doLive(self, lastLength):
        """
        @param lastLength: Last length of file (only read newly logged lines)
        @return: Processed data, current timestamp, current file length (-1 if finished)
        Opens the DD logfile and parses all lines up to lastLength
        No multithreading is performed, as its intended for minor(live) updates only.  
        """
        try:
            logfile = open(self.logfilename, "r")
            filedata = logfile.readlines()
            filedata = filedata[lastLength:]
            logfile.close()
        except IOError:
            return [], -1

        processedData = []

        # No reverse, already have the starting point and such.
        for L in filedata:
            data = L.strip()

            # Attempt to match data (no match for chat, petspam, logout messages, etc)
            ret = Processing.getMatch(data)
            if ret is None:
                continue

            # Store data and continue loop
            processedData.append(ret)

        # END-FOR
        return processedData, len(filedata)
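Because doLive returns both the parsed entries and how many new lines it consumed, a caller can poll the log incrementally; a sketch, assuming parser is an instance of the class above and handle is some hypothetical consumer of the parsed entries:

    import time

    offset = 0
    while True:
        entries, n_read = parser.doLive(offset)
        if n_read == -1:
            break              # log file could not be opened
        offset += n_read
        handle(entries)        # hypothetical consumer
        time.sleep(1.0)        # small live-update interval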
Example #13
    def setUp(self):

        # initialize a configuration object
        self.session = Processing.Session()

        # create simulated system input arguments
        sys_argv = [
            '/Users/abelbrown/Documents/workspace/ATG/src/Processing.py',
            '--year=2013',
            '--doy=201',
            '--expt=anet',
            '--org=aws',
            '--stn=igs.yell',
            '--stn=igs.p213',
            '-stn=igs.yell',
            '--network_id=n1',
            '--should_iterate=no',
            '--sp3_type=osf',
            '--dns=napeos',
            '--eop_type=bull_b',
            '--expt_type=RELAX',
            '--minspan=10',
        ]

        # configure the object using this list of arguments
        self.session.configure_with_args(sys_argv)
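A follow-up check one might add to this fixture's test methods, assuming configure_with_args fills the same options dict that Example #18 below assigns to directly (illustrative only):

    def test_options_parsed(self):
        # the parsed command-line flags should land in the options dict
        self.assertIn('year', self.session.options)
        self.assertIn('expt', self.session.options)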
Example #14
def preProcessExamples(posWords, negWords):
    count = 0
    for fName, example in fileToExample.items():
        phrases, tokens, example.label = Processing.getPhrasesAndTokens(
            example.content, posWords, negWords)
        example.phrases = phrases
        example.tokens = tokens
        if example.label > 0:
            example.label = 2
        elif example.label < 0:
            example.label = 1
        label = 1
        for phrase in phrases:
            addPhrase(phrase, example.rating)
        for token in tokens:
            label += addToken(token, example.rating, posWords, negWords)
        if label > 0:
            example.groundTruth = 2
        elif label < 0:
            example.groundTruth = 1

        fileToExample[fName] = example
        count += 1
    filterPhrasesAndTokens()
    calculateScore()
Example #15
 def revert(self, symbol, previous=None):
     """
     revert from StateFinder states by using the transition matrix
     """
     if int(float(symbol[2])) not in self.tab:
         return [symbol]
     if int(symbol[1]) - int(symbol[0]) < 0:
         return []
     mat = self.tab[int(float(symbol[2]))]
     sym = []
     if previous not in mat:
         (sval, slen) = self.selectStart(mat)
     else:
         (sval, slen) = self.selectNext(mat, previous)
     curt = int(symbol[0])
     while curt < int(symbol[1]):
         sym.append([
             int(curt),
             int(min(curt + (slen * self.rate), int(symbol[1]))), sval
         ])
         curt += (slen * self.rate)
         (sval, slen) = self.selectNext(mat, sval)
     if len(sym) > 0:
         sym[len(sym) - 1][1] = int(symbol[1])
     else:
         sym.append([int(symbol[0]), int(symbol[1]), sval])
     prle = Processing.RLEProcess()
     return prle.batch_process(sym)
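An illustrative call; the [start, end, state] layout of symbol is inferred from the indexing above, the concrete values are made up, and sf stands for an instance of the (unnamed) owning class:

    segments = sf.revert(['0', '1000', '3.0'])  # state 3 must be a key of sf.tab
    # each returned segment is again a [start, end, value] triple after RLE post-processing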
Example #16
def main():
    beer_dataframe = []  # DataFrame for the sample

    crafted_beer_db = MongoDatabase('mongodb://localhost:27017/',
                                    'Crafted-Brewery-Database')
    style_histogram = {}

    for i in range(0, CRAFTED_BEER_SETS):
        retrieved_data = crafted_beer_db.fetch_collection(
            'Crafted-Brewery-Beers-' + str(i))
        beer_dataframe.extend(retrieved_data)

        # Ranking of the most common beer styles
        style_histogram = Processing.data_histogram(retrieved_data,
                                                    'Style',
                                                    histogram=style_histogram)

    style_histogram.pop("N/A")

    # With all the data ready, move it into a DataFrame to ease visualization
    import seaborn as sns
    import matplotlib.pyplot as plt

    beer_dataframe = pd.DataFrame(beer_dataframe)
    pairplot = beer_dataframe.loc[:, ['ABV', 'IBU']]

    sns.pairplot(pairplot)
    plt.savefig(r'..\Results\Crafted-beer-correlation-pairplot.png')
    return
Example #17
def processing_picam(queue1, queue2, resolution, framerate):
    """
    Processing with the Pi cam
    This function is the function executed by the first thread, it launch the capture of images by the camera and
    process them by the algorithm established in the file "Processing".
    Each image returned by the processing algorithm is put on a queue.
    :param queue1: the fist queue where to put images detected
    :param queue2: the second queue where to put images detected
    :param resolution: the resolution of the camera
    :param framerate: the framerate of the camera
    """
    # Camera configuration
    camera = PiCamera()
    camera.resolution = resolution
    camera.framerate = framerate
    rawCapture = PiRGBArray(camera, size=resolution)

    # Give time to make the focus
    time.sleep(2)
    i = 0

    # Global loop
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        image = frame.array  # Convert frame to array understood by OpenCv

        images = Processing.pre_processing(image)  # Image processing
        # images_end = Processing.pre_processing_end(image)              # !!! WIP !!!
        images_end = None
        # TODO : solve end of speed limit detection

        if images is not None:
            if i % 2 == 0:
                queue1.put((images, "speed_limit"))
            else:
                queue2.put((images, "speed_limit"))
        if images_end is not None:
            if i % 2 == 0:
                queue1.put((images_end, "speed_limit_end"))
            else:
                queue2.put((images_end, "speed_limit_end"))

        # Clear the stream in preparation for the next frame
        rawCapture.truncate(0)
        if cv2.waitKey(40) & 0xFF == ord('q'):
            break

        print("END")
        print("")
        print("")

        # Reset the counter to avoid big number problems
        if i <= 11:
            i = i + 1
        else:
            i = 0

    cv2.destroyAllWindows()
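Since processing_picam is a producer that fills two queues, the consuming side might be wired up as in this sketch; the queue types, thread setup, resolution, and framerate are assumptions, not part of the source:

    import queue
    import threading

    q1, q2 = queue.Queue(), queue.Queue()
    producer = threading.Thread(target=processing_picam,
                                args=(q1, q2, (640, 480), 30),
                                daemon=True)
    producer.start()

    while True:
        images, label = q1.get()   # blocks until the camera thread detects a sign
        # ... hand the detected images to a classifier (not shown) ...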
Example #18
    def setUp(self):

        # initialize a configuration object
        self.session = Processing.Session()

        # init session object properly
        self.session.options['year'] = 2012
        self.session.options['doy'] = 201
Example #19
def alldbsredemption():
    alldbsredemptionList = []
    alldbsredemptionList = Processing.processalldbsredemption(
        session['user'], session["cardno"])
    return render_template('alldbsredemption.html',
                           alldbsredemptions=alldbsredemptionList)

Example #20
def preprocess(parentDir):
    global rating
    subdirs = ["/pos", "/neg"]
    for subdir in subdirs:
        for fileName in listdir(parentDir + subdir):
            fname = parentDir + subdir + "/" + fileName
            example = Example()
            example.content = readFileContent(fname)
            example.klass = subdir[1:]
            score = int(fileName.split("_")[2])
            example.rating = score
            rating[score] += 1
            example.name = fileName
            fileToExample[fname] = example
    negWords = Processing.getNegativeWords(parentDir)
    posWords = Processing.getPositiveWords(parentDir)
    processRatings()
    preProcessExamples(posWords, negWords)
    preProcessExamplesWithHash()
Example #21
    def pageLoads(self, event):
        # Hide frame window
        self.Hide()

        # Finding all navigation links and the associated analytics data
        AnalyticsHits.endpoint_hits(self.webURL.GetValue(), self.homeDir,
                                    self.driver)
        #  Close driver window
        self.driver.quit()

        print('Downloaded Analytics Data')
        print('Processing...')

        # Processing and collating analytics data
        Processing.csv_prettifier(str(Path(self.homeDir + '/Downloads')),
                                  'Endpoints.csv', 'Pageloads.xlsx', False)

        print('\nComplete!')

        sys.exit()
Example #22
def train_event_fillers(patients):
    # Train model
    features, labels = Processing.features_and_labels(patients)
    classifier, feature_map = Classification.train_classifier(features, labels)

    # Write models to file
    classf_file = MODEL_DIR + EVENT_FILLER_MODEL_NAME
    featmap_file = MODEL_DIR + EVENT_FILLER_FEATMAP_NAME

    joblib.dump(classifier, classf_file)
    Pickle.dump(feature_map, open(featmap_file, "wb"))
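The matching load side, mirroring the joblib/pickle pairing used above (a sketch; the Pickle name in the source is presumably an alias like the one below):

    import joblib
    import pickle as Pickle

    classifier = joblib.load(MODEL_DIR + EVENT_FILLER_MODEL_NAME)
    with open(MODEL_DIR + EVENT_FILLER_FEATMAP_NAME, "rb") as f:
        feature_map = Pickle.load(f)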
Example #23
def main():

    while True:

        screen = GetScreen('TetrisOnline')
        processed_screen = Processing.ProcessImage(screen)
        cv2.imshow('window', processed_screen)

        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
Example #24
 def test_connect_to_processing(self):
     import Processing
     engine = self.connectivityFixture()
     outports = Connectors(engine, "multi1", Connector.Port, Connector.Out)
     processing = Processing.Processing("multi2", engine)
     self.assertEqual(3, outports > processing)
     self.assertEqual([], engine.controlConnections())
     self.assertEqual([
         ('multi1', 'OutPort1', 'multi2', 'InPort1'),
         ('multi1', 'OutPort2', 'multi2', 'InPort2'),
         ('multi1', 'OutPort3', 'multi2', 'InPort3'),
     ], engine.portConnections())
Example #25
def trying():
    session["cart"] = {}
    session['user'] = "******"
    session["cardno"] = "9888-6121-0824-1112"
    #  listqty=Processing.list_qty(session['user'],session["cardno"])
    uob_currentpts = Processing.processpoints(session['user'],
                                              session["cardno"])
    uob_trenditems = Processing.processtrenditems(session['user'],
                                                  session["cardno"])
    uob_allitems = Processing.processallitems(session['user'],
                                              session["cardno"])
    uob_retialitems = Processing.processretailitems(session['user'],
                                                    session["cardno"])
    uob_diningitems = Processing.processdiningitems(session['user'],
                                                    session["cardno"])
    uob_leisureitems = Processing.processleisureitems(session['user'],
                                                      session["cardno"])
    #user = session["user"], cardno = session["cardno"],

    return render_template('trying.html',
                           uobcurrentpts=uob_currentpts,
                           uobtrenditems=uob_trenditems,
                           uoballitems=uob_allitems,
                           uobretialitems=uob_retialitems,
                           uobdiningitems=uob_diningitems,
                           uobleisureitems=uob_leisureitems)
Example #26
def getSpo2(self, numSeconds, samplerate):
    print "begin measure"
    startTime = wiringpi.millis()
    Spo2 = Sp2.Spo2Sensor(sampleAvg=8, sampleRate=samplerate)
    newSample = False
    AFthreshold = 17
    Spo2.enableAfull()
    Spo2.setFIFOAF(AFthreshold)
    interrupt = Button(7)

    while wiringpi.millis() - startTime < numSeconds * 1000:
        interrupt.when_activated = Spo2.sampleAvailable()
        if Spo2.newSample:
            Spo2.readSample()
            Spo2.newSample = False

    print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"

    print "Buffer IR: ", len(Spo2.buffer_ir)
    print "Buffer Red: ", len(Spo2.buffer_red)

    pro = pr.Processing()

    #get Red and Ir buffers
    self.IR = Spo2.buffer_ir
    self.Red = Spo2.buffer_red

    #Delay Signal to avoid start overshoot
    #self.Red = pro.delaySignal(self.Red)
    #self.IR = pro.delaySignal(self. IR)

    #Median filter to the signals
    self.IR = sp.medfilt(self.IR)
    self.Red = sp.medfilt(self.Red)

    #low pass filter at 60hz
    #self.IR = pro.NotchFilter(self.IR, 60,samplerate)
    #self.Red = pro.NotchFilter(self.Red, 60,samplerate)

    #lowpass filter at 6Hz:
    #self.IR = pro.lowPasFIRFilter(self.IR, 6,samplerate)
    #self.Red = pro.lowPasFIRFilter(self.Red, 6,samplerate)
    
    #Compute Spo2Value:
    self.Spo2Value = pro.calcSpO2(self.Red, self.IR)
    print "Spo2: ", self.Spo2Value, "%"

    #get AC component to plot the signal:
    self.Red = pro.getACcomponent(self.Red)
    self.IR = pro.getACcomponent(self.IR)
    self.IR = pro.Normalize(self.IR)
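For context, a minimal sketch of the ratio-of-ratios estimate that a calcSpO2 routine like the one called above typically performs; calcSpO2's actual internals are not shown in the source, and the linear calibration constants below are the commonly cited empirical ones, not the module's own:

    import numpy as np
    import scipy.signal as sp

    def estimate_spo2(red, ir):
        red = sp.medfilt(np.asarray(red, dtype=float))   # median filter, as in the pipeline above
        ir = sp.medfilt(np.asarray(ir, dtype=float))
        # ratio of the AC/DC components of the red and IR signals
        r = (red.std() / red.mean()) / (ir.std() / ir.mean())
        return 110.0 - 25.0 * r   # widely used linear approximation (assumed)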
Example #27
def main(camDimensions, per):
    pygame.init()
    pygame.camera.init()
    print("Finding all cameras...")
    cameras = list_real_cameras()
    print("Searching for Intel RealSense...")
    
    camNumber = findRealSense(cameras)
    if camNumber != "Error, No <Intel(R) RealSense(TM) 3D Camera> found":
        print("Identified realSense.")
        realSense = pygame.camera.Camera(camNumber, camDimensions)
        print("Running camera %s" % camNumber)
        realSense.start()
        
        # grab first frame
        img = realSense.get_image()
        
        WIDTH = img.get_width()
        HEIGHT = img.get_height()
        screen = pygame.display.set_mode((WIDTH, HEIGHT))
        pygame.display.set_caption(realSense.dev.getdisplayname())
        #print("Opening new Camera...")
        print("----------------------------------")
        #import Embedded
        mode = "waiting"
        infeltrationCount = 0
        Running = True
        while Running:
            # draw frame
            screen.blit(img, (0,0))
            pygame.display.flip()
            # grab next frame
            if per:
                loadimg = pygame.image.tostring(img, "RGBA", False)
                pil_image = Image.frombytes("RGBA", (640, 480), loadimg)
                p = Processing.colorPerc(pil_image, 136)
                print(p)
            img = realSense.get_image()
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    #realSense.stop()
                    pygame.quit()
                    Running = False
                    break
                elif e.type == pygame.KEYDOWN:
                    if e.key == pygame.K_SPACE:
                        pygame.display.set_caption("Captured")
                        name = getTimeString()
                        print("Captured: " + name)
                        pygame.image.save(realSense.get_image(),"../media/" + name + ".png")
                    pygame.display.set_caption(realSense.dev.getdisplayname())
Example #28
def Trainloop(model, optimizer, lossfunction, targetvar, device, train_graphs,
              val_graphs, workers, batch_size, train_batches, val_batches,
              epochs):
    average_train_loss_per_epoch = list()
    average_val_loss_per_epoch = list()
    for epoch in tqdm(range(epochs)):
        deadcheck = False
        valcheck = False
        model.train()
        train_loss = torch.tensor([0], dtype=float).to(device)
        if epoch == 0:
            Manager = torch.multiprocessing.Manager()
            train_queue = Manager.Queue()
            val_queue = Manager.Queue()
            train_queue_fill = Processing.Spawn_Processes(
                workers, train_graphs, train_queue, batch_size)
        for k in range(train_batches):
            with torch.enable_grad():
                model.train()
                batch = Processing.GrabBatch(train_queue, device)
                optimizer.zero_grad()
                output = model(batch)
                loss = lossfunction(output, batch.y[:, targetvar])
                loss.backward()
                optimizer.step()

            deadcheck = Processing.Process_check(train_queue_fill, deadcheck,
                                                 k, train_batches)
            train_loss += loss

            # once the training loaders are done, start filling the validation queue
            if deadcheck and not valcheck:
                val_queue_fill = Processing.Spawn_Processes(
                    workers, val_graphs, val_queue, batch_size)
                valcheck = True
            if torch.sum(torch.isnan(output)) != 0:
                raise TypeError('NAN ENCOUNTERED AT : %s / %s' % (k, train_batches))
        if not deadcheck:
            print('Training Processes Still Alive. Terminating...')
            for process in train_queue_fill:
                process.terminate()
            val_queue_fill = Processing.Spawn_Processes(
                workers, val_graphs, val_queue, batch_size)
        with torch.no_grad():
            val_loss, train_queue_fill = Validation(
                model, optimizer, lossfunction, targetvar, train_graphs,
                val_graphs, train_queue, val_queue, val_queue_fill,
                train_queue_fill, workers, batch_size, val_batches, epoch,
                epochs)
        average_train_loss_per_epoch.append(
            train_loss.item() / (train_batches * batch_size))
        average_val_loss_per_epoch.append(
            val_loss.item() / (val_batches * batch_size))
        print(train_loss.item() / (train_batches * batch_size))
    deadcheck = Processing.Process_check(train_queue_fill, deadcheck, k,
                                         train_batches)
    if not deadcheck:
        print('Training done. Terminating slaves...')
        for process in train_queue_fill:
            process.terminate()
    del batch, loss, train_loss, val_loss
    return model, average_train_loss_per_epoch, average_val_loss_per_epoch
Example #29
def evaluate_best_cfa_threshold(x_tst, y_true, thresholds):
    """
    Takes training data and a list of thresholds and calculates confusion matrices for each threshold
    :param x_tst: The test data
    :param y_true: The test labels
    :param thresholds: List of thresholds
    :return:
    """
    print("Evaluating CFA Feature...")
    y_cfas = []
    y_peakis = []
    confusion_matrices = []
    for file in tqdm(x_tst):
        #print("Current file: " + file)

        # Calculate the spectrogram
        spectrogram = Processing.cfa_preprocessing(file)

        # CFA classification
        cfa, peakis = CFA.calculate_cfa(file, spectrogram)
        y_cfas.append(cfa)
        y_peakis.append(peakis)

    for threshold in thresholds:
        print("Evaluation for CFA with threshold " + str(threshold) + ":")

        y_pred = []
        for peakis in y_peakis:
            if np.mean(peakis) < threshold:
                y_pred.append(0)  # Speech
            else:
                y_pred.append(1)  # Music

        report = sklearn.metrics.classification_report(
            y_true, y_pred, labels=labels, target_names=target_names)
        confusion_matrix = sklearn.metrics.confusion_matrix(
            y_true, y_pred, labels=labels)

        print("Report")
        print(report)

        print("Confusion Matrix")
        print(confusion_matrix)
        confusion_matrices.append(confusion_matrix)
        #pretty_print(confusion_matrix, "CFA", iteration=str(iteration) + "_" + str(threshold))
        print("----------------------------------------------------------------- \n\n")

    return confusion_matrices  # Confusion matrices of all thresholds for 1 run
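An illustrative driver; the file list, label arrays, and threshold grid are made up, while labels and target_names are module-level globals in the source:

    thresholds = [1.0, 1.5, 2.0, 2.5]
    cms = evaluate_best_cfa_threshold(test_files, test_labels, thresholds)
    # pick the threshold with the fewest off-diagonal (misclassified) counts
    best = min(range(len(thresholds)),
               key=lambda i: cms[i][0][1] + cms[i][1][0])
    print("Best CFA threshold:", thresholds[best])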
Example #30
    def post(self):
        image = request.files["file"]
        image.filename = "test.jpg"
        print(image)

        filename = secure_filename(image.filename)
        image.save(
            os.path.join(
                'C:/Users/Maciek/Documents/Github/FYP/FYP/Backend/userImage',
                filename))
        result = "null"
        result = Processing.predictResult()
        ret = result.astype(str)
        print(ret)
        return ret
Example #31
    def initialize(self):

        # create date object
        self.date = pyDate.Date(year=self.options['year'],
                                doy=self.options['doy'])

        # check for pre-existing solution if lazy
        (solutionAlreadyExists,
         key) = Resources.soln_exists(self.date, self.options['expt'],
                                      self.options['org'],
                                      self.options['network_id'])

        if solutionAlreadyExists and self.isLazy:
            raise Processing.LazyException("file exists: " + key)

        # do all the program independent stuff
        super(Session, self).initialize()

        # get the resource bucket path
        bucket = self.get_resources_path()

        # get the apr file
        apr_file = glob.glob(os.path.join(bucket, '*.apr'))

        # yell about it if not found
        if len(apr_file) != 1:
            raise GamitException('problem identifying APR resources in ' +
                                 bucket)

        # create apr file
        wlapr2apr(os.path.join(bucket, apr_file[0]))

        # get the binaries for gamit
        self.files['bin'] = Resources.get_bin('gamit', self.work_dir_path)

        # get the tables for gamit
        self.files['tables'] = Resources.get_tables('gamit',
                                                    self.work_dir_path)

        # create custom setup shell script
        self.files['setup_script_path'] = self.__create_setup_script()

        # create custom run script
        self.files['run_script_path'] = self.__create_run_script()

        # create the custom cleanup script
        self.files['teardown_script_path'] = self.__create_teardown_script()
Example #32
def main():
    MAP_WIDTH = 25
    MAP_HEIGHT = 25
    PLAYER = Objects.Player(int(MAP_WIDTH / 2), int(MAP_HEIGHT / 2))
    ITEMS = Objects.Items()
    ENEMIES = Objects.spawnEnemies(5, MAP_WIDTH, MAP_HEIGHT)
    MAP = Maps.TestMap(MAP_WIDTH, MAP_HEIGHT, PLAYER, ITEMS, ENEMIES)
    timer = 0
    while True:
        # Print Updated Map
        MAP.printMap(PLAYER, ITEMS)
        # Input from User
        inp = Processing.processInput(PLAYER, MAP, ITEMS, ENEMIES)
        # TODO: Update Map, based on input and player position
        MAP.updateMap(inp, PLAYER, ENEMIES, timer)
        timer += 1
    return 0
Example #33
def find_attributes_per_substance(classifier, feature_map, sent, previous_sent):
    """ Get all attributes assigned to each substance -- {substance: field: [Attributes]} """
    attribs_found_per_substance = {subst: {} for subst in SUBSTANCE_TYPES}

    # Get features
    attrib_feature_sets, attributes = Processing.features(sent, previous_sent)

    # Get classifications
    for attrib, features in zip(attributes, attrib_feature_sets):

        # If there is only one substance for the type of attribute, just assign to that substance
        if attrib.type in KNOWN_SUBSTANCE_ATTRIBS:
            add_attrib_with_known_substance(attribs_found_per_substance, attrib)

        # Else, use machine learning classifier
        else:
            add_attrib_using_classifier(classifier, feature_map, features, attribs_found_per_substance, attrib)

    return attribs_found_per_substance
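A sketch of walking the returned {substance: {field: [Attributes]}} mapping documented above; the sentence objects and classifier come from the surrounding pipeline:

    found = find_attributes_per_substance(classifier, feature_map, sent, previous_sent)
    for substance, fields in found.items():
        for field, attribs in fields.items():
            print(substance, field, [a.type for a in attribs])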
Example #34
def run(dt, outdir):

    #org  = ['g06']
    #expt = ['glbl','glbf','glbd','anet','gnet','capp','swpp','tigg','glbk']
    #expt = ['glbk']
    #expt = ['anet','gnet']
    expt = ['glbf']
    org = ['n08']

    for e in expt:
        for o in org:

            # construct the AWS/S3 bucket path
            soln_bucket = Processing.solution_bucket(dt.year, dt.doy, '', e, o, None)

            # ok get the snx with this bucket prefix
            files = Resources.get_resources(soln_bucket, '.mat.gz', outdir)

            # blab about it ... maybe
            if len(files) > 0: print soln_bucket, len(files)
Example #35
def run(fileName):

    org = ['g06']
    expt = ['odot']

    fparts = fileName.split('.')

    # defensive check
    if len(fparts) != 4: return

    for e in expt:
        for o in org:

            # construct the AWS/S3 bucket path
            soln_bucket = Processing.solution_bucket(fparts[1], fparts[2], '', e, o, fparts[3])

            # ok get the snx with this bucket prefix
            files = Resources.list_resources(soln_bucket, '.mat.gz')

            # blab about it ... maybe
            if len(files) == 0: print 'no solution for', fileName
Example #36
import glob
import os
import sys
sys.path.append("PIL")
import Processing

for filename in glob.glob('input/*.gif'):
    im = Processing.open(filename)
    im = im.convert('1')
    newFilename = os.path.splitext(filename)[0] + '.pbm'
    im.save(newFilename)
    cmd = ('python tspart.py "' + newFilename + '" "output/' +
           os.path.splitext(os.path.basename(filename))[0] + '.svg"')
    print(cmd)
    os.system(cmd)