def _pack_message(self, device=None):
    addresses = device or [self.device_token]
    # deliver-before must be a datetime, not "now + seconds" (timedelta from the datetime module)
    deliver_before_timestamp = datetime.now() + timedelta(minutes=5)
    # a reasonably unique push id built from the current time plus a random offset
    push_id = int(time.time() * 1000) + int(random() * 1000)
    t = get_template("bbpush.post")
    c = Context({'addresses': addresses,
                 'deliver_before_timestamp': deliver_before_timestamp,
                 'push_id': push_id,
                 'app_id': settings.BB_APP_ID})
    content = t.render(c)
    return content
def buffer_newevents(evtype=None, timeout=3000, verbose=False):
    """
    Wait for and return any new events received from the buffer between
    calls to this function
    
    timeout    = maximum time to wait in milliseconds before returning
    """
    global ftc, nEvents  # used to store the number of events processed across function calls
    if "nEvents" not in globals():  # first time: initialize to the events seen so far
        start, nEvents = ftc.poll()

    if verbose:
        print "Waiting for event(s) " + str(evtypes) + " with timeout_ms " + str(timeout_ms)

    start = time.time()
    elapsed_ms = 0
    events = []
    while len(events) == 0 and elapsed_ms < timeout:
        nSamples, curEvents = ftc.wait(-1, nEvents, timeout - elapsed_ms)
        if curEvents > nEvents:
            events = ftc.getEvents([nEvents, curEvents - 1])
            if evtype is not None:
                events = filter(lambda x: x.type in evtype, events)
        nEvents = curEvents  # update starting number events (allow for buffer restarts)
        elapsed_ms = (time.time() - start) * 1000
    return events
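A minimal usage sketch for the helper above, assuming the FieldTrip realtime-buffer Python client provides the `Client` class with `connect`, `poll`, `wait` and `getEvents` that the code relies on; the host, port and event type are illustrative.

import time
import FieldTrip  # FieldTrip realtime-buffer Python client (assumed available)

ftc = FieldTrip.Client()
ftc.connect('localhost', 1972)  # illustrative buffer address

# poll for new 'stimulus.target' events, one second at a time
for _ in range(10):
    events = buffer_newevents(evtype=['stimulus.target'], timeout=1000)
    for e in events:
        print("got event type=%s value=%s" % (e.type, e.value))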
Example #3
	def why_opinion(self):
		feature_names = self.vectorizer.get_feature_names()
		opinion_idx = list(np.argsort(self.clf.feature_log_prob_[1])) #most important are last
		opinion_idx.reverse() #most important are first
		self.opinion_idx = opinion_idx
		i_filtered=0
		i_opinion_idx = 0
		filtered_keywords = []
		stopwords = set(nltk.corpus.stopwords.words('english'))  # precompute once instead of per iteration
		stime = time.time()
		while i_filtered < 100:
			if feature_names[opinion_idx[i_opinion_idx]] not in stopwords:
				filtered_keywords.append(feature_names[opinion_idx[i_opinion_idx]])
				i_filtered += 1
			i_opinion_idx += 1
		self.fk=filtered_keywords
		print 'why while: '+str(time.time()-stime)

		news_words=[]
		opinion_words = []
		ratio = self.clf.feature_log_prob_[1]-self.clf.feature_log_prob_[0]
		for i,count in enumerate(self.vect.toarray()[0]):
			if count>0:
				opinion_words.append([count*np.exp(ratio[i]), count, ratio[i], feature_names[i]])
		opinion_words_sorted = sorted(opinion_words, key=lambda x:x[2])
		return opinion_words_sorted #, news_words_sorted
 def testLargeDirReinstall(self):
     """Benchmark and test reinstalling FSD with a directory holding a large number of people"""
     from random import choice
     
     qi = getToolByName(self.portal, 'portal_quickinstaller')
     acl = getToolByName(self.portal, 'acl_users')
     
     # pick a user and make sure they exist in acl_users before we start
     user_id = choice(self.person_ids)
     person = self.directory[user_id]
     self.failUnless(acl.getUserById(id=user_id),"Problem:  person is not listed in acl_users")
     self.failUnless(person.UID(),"Problem: expected person object %s to have a UID.  It does not" % person)
     
     # how long does it take to reinstall FSD?
     import time
     start_time = time.time()
     qi.reinstallProducts(products='FacultyStaffDirectory')
     end_time = time.time()
     elapsed_time = end_time-start_time
     reinstall_report = "\nreinstalling FSD with a directory containing %s people took %s seconds\n" % (self.numPeople, elapsed_time)
     print "\n" + ("*" * 20) + reinstall_report + ("*" * 20)
     
     # test that a person in the FSD is still a user
     self.failUnless(acl.getUserById(id=user_id),"Problem:  after reinstall person is not listed in acl_users")
     self.failUnless(person.UID(),"Problem: after reinstall expected person object %s to have a UID.  It does not" % person)
Example #5
def InsertKeyWordToDB(fromSubDir,toSubDir):
    
    db = DB()
    parser = Parser()
    
    for index in range(fromSubDir,toSubDir):
        for root,dirs,files in os.walk('test/keyword/'+str(index)+"/"):
            #each subdir: 1000record
            start = time.time()
            for afile in files:
                if afile  == '.DS_Store':
                    continue
                words = afile.split('_')
                
                aExpert = Expert(words[0].strip(),words[1].strip(),words[2].replace(".html","").strip())
                aExpert.setKeyword(parser.parseKeyword(root,afile))
                aExpert.ChangeKeywordsToString()
                #print aExpert.keywordsList
                if not db.isExpertExist(aExpert):
                    db.insertExpert(aExpert)
            end = time.time()
            db.conn.commit()
            
            print ("KeywordSubDir %d is Done!"%index),
            print time.strftime('%m-%d %H:%M:%S',time.localtime(time.time())),"total:",end-start
            f = open("KeywordsToDB.log","a")
            f.write(time.strftime('%m-%d %H:%M:%S',time.localtime(time.time()))+" keywordSubDir"+str(index)+" is Done! "+"total"+str(end-start) )
            f.close()
            
    db.close()
Example #6
def InsertPaperToDB(fromSubDir,toSubDir): 
    db = DB()
    parser = Parser()
    
    for index in range(fromSubDir,toSubDir):   
        for root,dirs,files in os.walk('test/paper/'+str(index)+"/"):
            n = 1000*index
            start = time.time()
            
            for afile in files:
                if afile  == '.DS_Store':
                    continue
                words = afile.split('_')
                papers = (parser.parsePaper(root,afile))
                for eachPaper in papers:
                    if not db.isPaperExist(eachPaper):
                        db.insertPaper(eachPaper)
                print "n:",n,
                print "Expert_ID %s is done"%words[0]
                n = n + 1 
                db.conn.commit()
            end = time.time()
            
            print ("PaperSubDir %d is Done!"%index),
            print time.strftime('%m-%d %H:%M:%S',time.localtime(time.time())),"time:",end-start,
            f = open("PaperToDB.log","a")
            f.write(time.strftime('%m-%d %H:%M:%S',time.localtime(time.time()))+" paperSubDir"+str(index)+" is Done! "+"total"+str(end-start) )
            f.close()
    db.close()      
Example #7
    def wait_for_server_status(self, server_id, desired_status, interval_time=None, timeout=None):

        interval_time = int(interval_time or self.servers_api_config.server_status_interval)
        timeout = int(timeout or self.servers_api_config.server_build_timeout)
        end_time = time.time() + timeout

        time.sleep(interval_time)
        while time.time() < end_time:
            resp = self.nova_cli_client.show_server(server_id)
            server = resp.entity

            if server.status.lower() == ServerStates.ERROR.lower():
                raise BuildErrorException('Build failed. Server with uuid "{0}" entered ERROR status.'.format(server.id))

            if server.status == desired_status:
                break
            time.sleep(interval_time)

        else:
            raise TimeoutException(
                "wait_for_server_status ran for {0} seconds and did not "
                "observe server {1} reach the {2} status.".format(timeout, server_id, desired_status)
            )

        return resp
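The `while`/`else` above relies on Python's loop-`else`: the `else` body only runs when the loop finishes without hitting `break`, i.e. on timeout. A small self-contained sketch of that pattern, with a dummy `get_status` callable standing in for the Nova client call:

import time

def wait_for(get_status, desired, interval=1, timeout=10):
    end_time = time.time() + timeout
    while time.time() < end_time:
        status = get_status()
        if status == desired:
            break
        time.sleep(interval)
    else:
        # only reached when the while condition became false without a break
        raise RuntimeError("did not reach %s within %s seconds" % (desired, timeout))
    return status

# illustrative usage with a status source that becomes ACTIVE after a few polls
states = iter(["BUILD", "BUILD", "ACTIVE"])
print(wait_for(lambda: next(states), "ACTIVE"))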
def env454run_info_upload(runobj):

    my_read_csv = dbUpload(runobj)
    start = time.time()
    my_read_csv.put_run_info()
    elapsed = (time.time() - start)
    print "put_run_info time = %s" % str(elapsed)
Example #9
def loadIntents(node_id, urllist, intPerGroup, addrate, duration):
    urlindex = 0
    group = 0
    start_id = 0
    sleeptimer = (1.000/addrate)
    tstart = time.time()
    while ( (time.time() - tstart) <= duration ):
        if urlindex < len(urllist):
            realurlind = urlindex
        else:
            realurlind = 0
            urlindex = 0

        u = str(urllist[realurlind])
        gstart = time.time()
        intents,start_id = setIntentJSN(node_id, intPerGroup, group, start_id)
        #print (str(intents))
        #print ("Starting intent id: " + str(start_id))
        result = post_json(u, intents)
        #print json.dumps(intents[group])
        #print ("post result: " + str(result))
        gelapse = time.time() - gstart
        print ("Group: " + str(group) + " with " + str(intPerGroup) + " intents were added in " + str('%.3f' %gelapse) + " seconds.")
        sleep(sleeptimer)
        urlindex += 1
        group += 1

    telapse = time.time() - tstart
    #print ( "Number of groups: " + str(group) + "; Totoal " + str(args.groups * args.intPerGroup) + " intents were added in " + str(telapse) + " seconds.")
    return telapse, group
def waitnewevents(evtypes, timeout_ms=1000, verbose=True):
    """Function that blocks until a certain type of event is recieved. 
    evttypes is a list of event type strings, recieving any of these event types termintes the block.  
    All such matching events are returned
    """
    global ftc, nEvents, nSamples, procnEvents
    start = time.time()
    update()
    elapsed_ms = 0

    if verbose:
        print "Waiting for event(s) " + str(evtypes) + " with timeout_ms " + str(timeout_ms)

    evt = None
    while elapsed_ms < timeout_ms and evt is None:
        nSamples, nEvents2 = ftc.wait(-1, procnEvents, timeout_ms - elapsed_ms)

        if nEvents2 > nEvents:  # new events to process
            procnEvents = nEvents2
            evts = ftc.getEvents((nEvents, nEvents2 - 1))
            evts = filter(lambda x: x.type in evtypes, evts)
            if len(evts) > 0:
                evt = evts

        elapsed_ms = (time.time() - start) * 1000
        nEvents = nEvents2
    return evt
    def ana_file_sync(self,pflag):
        g.tprinter('Running ana_file_sync',pflag)
   
        self.missing_data_list = np.array(['missing'])
        tbaf = 0  # number of files that have to be added to ana_file

        for i in self.data_list:

            tstart = time.time()
            if i in self.ana_file: ## consider whether this is really the quickest way!
                #g.printer('is in file',pflag)
                pass
            else:
                #g.printer('is not in file',pflag)
                self.missing_data_list = np.append(self.missing_data_list,i)
                tbaf += 1
            print time.time()-tstart
        if tbaf == 0:
            g.printer('ana_file is up to date',pflag)
        else:
            g.printer(str(tbaf)+' files have to be added to ana_file',pflag)
            
            
        for i in self.missing_data_list[1:]:

            k = os.path.split(i)

            info = k[-1].split('.')[-2].split('_')

            num_of_zeros = len(self.ana_file[0])-len(info)-1

 
            info = np.append(info,np.zeros(num_of_zeros))
            info = np.append(info,np.array(i))
            self.ana_file = np.vstack((self.ana_file,info)) 
Example #12
def DetectSound():
    # Get data from sound detection (assuming sound detection has been activated).
    data = memoryProxy.getData("SoundDetected")

    ##The SoundDetected key is organized as follows:
    ##
    ##[[index_1, type_1, confidence_1, time_1],
    ## ...,
    ##[index_n, type_n, confidence_n, time_n]]
    ##
    ##n is the number of sounds detected in the last audio buffer,
    ##index is the index (in samples) of either the sound start (if type is equal to 1) or the sound end (if type is equal to 0),
    ##time is the detection time in micro seconds
    ##confidence gives an estimate of the probability [0;1] that the sound detected by the module corresponds to a real sound.

    if len(data)==0:
        detected=False
        timestamp=time.time()
        soundInfo=[]
    else:
        detected=True
        timestamp=time.time()
        soundInfo=[]
        for snd in data:
            soundInfo.append([ snd[0], #index of sound start/end
                               snd[1], #type: 1=start, 0=end
                               snd[2]  #confidence: probability that there was a sound
                               ])
    return detected, timestamp, soundInfo
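A hedged sketch of the setup this snippet assumes, using the NAOqi Python SDK: `memoryProxy` must be an ALMemory proxy and the ALSoundDetection module must be subscribed so that the `SoundDetected` key gets populated. The robot address and subscriber name below are illustrative.

import time
from naoqi import ALProxy  # NAOqi Python SDK

ROBOT_IP, ROBOT_PORT = "nao.local", 9559  # illustrative robot address

memoryProxy = ALProxy("ALMemory", ROBOT_IP, ROBOT_PORT)
soundProxy = ALProxy("ALSoundDetection", ROBOT_IP, ROBOT_PORT)
soundProxy.subscribe("SoundDetectionExample")  # start writing SoundDetected to ALMemory

detected, timestamp, soundInfo = DetectSound()
if detected:
    print("first detected sound, confidence %s" % soundInfo[0][2])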
Example #13
 def wrapper(*args, **kargs):
     t1 = time.time()
     res = func(*args, **kargs)
     tel = time.time()-t1
     timeformated = time.strftime( "%H:%M:%S",time.gmtime(tel)) 
     print  '-'*5 + '%s took %0.3f ms' % (func.func_name + str(kargs) + str(args),   (tel)*1000.0)   + '|' + timeformated + '|'+ '-'*10 
     return res
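The fragment above is only the inner wrapper of a timing decorator. A sketch of how the enclosing decorator might look; the `timeit` name, the use of `functools.wraps` and the simplified message format are illustrative, not taken from the source project.

import time
import functools

def timeit(func):
    # hypothetical enclosing decorator for a wrapper like the one above
    @functools.wraps(func)
    def wrapper(*args, **kargs):
        t1 = time.time()
        res = func(*args, **kargs)
        tel = time.time() - t1
        print('-' * 5 + ' %s took %0.3f ms ' % (func.__name__, tel * 1000.0) + '-' * 10)
        return res
    return wrapper

@timeit
def slow_add(a, b):
    time.sleep(0.01)
    return a + b

print(slow_add(1, 2))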
Example #14
 def timer(self, object, cdata=''):
     if object:
         t = time.time()
         r = self.render(object, cdata)
         self.tagTimes[object.__class__.__name__] = time.time() - t
         return r
     return self.render(object, cdata)
Example #15
	def crawl(self):
		_starttime = time.time()
		if self.restrict is None:
			self.restrict = "http://%s.*" % self.init_domain

		print "Deadlink-crawler version 1.1"
		print "Starting crawl from URL %s at %s with restriction %s\n" % (self.init_url, strftime("%Y-%m-%d %H:%M:%S", gmtime()), "http://%s.*" % self.init_domain)

		while len(self.frontier) > 0:
			time.sleep(self.wait_time)
			
			next_time, next_url = self.frontier.next()
			
			while time.time() < next_time:
				time.sleep(0.5)
			
			try:
				self.visit_url(next_url[0], next_url[1])
			except urllib2.URLError:
				continue
		
		self.print_deadlinks(self.deadlinks)

		_elapsed = time.time() - _starttime
		
		print "\nSummary:\n--------"
		print "Crawled %d pages and checked %d links in %s time." % (self._pages, self._links, strftime("%H:%M:%S", gmtime(_elapsed)))
		print "Found a total of %d deadlinks in %d different pages" % (self._dead, self._via)
		
		if len(self.deadlinks) == 0:
			exit(0)
		else:
			exit(2)
Example #16
def illumina_files(runobj):  
    utils = PipelneUtils()
    start = time.time()
#     illumina_files_demultiplex_only(runobj)
    illumina_files = IlluminaFiles(runobj)    
    if runobj.do_perfect: 
#         illumina_files.perfect_reads()
        script_file_name = illumina_files.merge_perfect()
        utils.run_until_done_on_cluster(script_file_name)
        
        script_file_name = illumina_files.trim_primers_perfect()
        utils.run_until_done_on_cluster(script_file_name)

    else:
#         illumina_files.partial_overlap_reads()
#         pass
# TODO: test utils.run_until_done_on_cluster(illumina_files.partial_overlap_reads_cluster())
        #TODO: add cutting to 251
        script_file_name = illumina_files.partial_overlap_reads_cluster()         
        utils.run_until_done_on_cluster(script_file_name)
        
        script_file_name = illumina_files.filter_mismatches_cluster()
        utils.run_until_done_on_cluster(script_file_name)
        
#         illumina_files.filter_mismatches()
#     illumina_files.uniq_fa()
    script_file_name = illumina_files.uniq_fa_cluster()
    utils.run_until_done_on_cluster(script_file_name)
#     illumina_chimera(runobj)
    elapsed = (time.time() - start)
    print "illumina_files time = %s" % str(elapsed)
Example #17
def env454upload_main(runobj, full_upload):
    """
    Run: pipeline dbUpload testing -c test/data/JJH_KCK_EQP_Bv6v4.ini -s env454upload -l debug
    For now upload only Illumina data to env454 from files, assuming that all run info is already on env454 (run, run_key, dataset, project, run_info_ill tables)
    Tables:
    sequence_ill
    sequence_pdr_info_ill
    taxonomy
    sequence_uniq_info_ill

    """

    whole_start     = time.time()

    my_env454upload = dbUpload(runobj)
    filenames       = my_env454upload.get_fasta_file_names()
    if not filenames:
        logger.debug("\nThere is something wrong with fasta files or their names, please check pathes, contents and suffixes in %s." % my_env454upload.fasta_dir)

#     sequences = get_sequences(my_env454upload, filenames)
    for filename in filenames:
        sequences = my_env454upload.make_seq_upper(filename)
        if full_upload:
            env454upload_seq(my_env454upload, filename, sequences)
        wrapped   = wrapper(my_env454upload.get_seq_id_dict, sequences)
        get_seq_id_dict_time = timeit.timeit(wrapped, number=1)
        logger.debug("get_seq_id_dict() took %s sec to finish" % get_seq_id_dict_time)

    total_seq = env454upload_all_but_seq(my_env454upload, filenames, full_upload)
    my_env454upload.check_seq_upload()
    logger.debug("total_seq = %s" % total_seq)
    whole_elapsed = (time.time() - whole_start)
    print "The whole upload took %s s" % whole_elapsed
Example #18
def docs():
    s = time.time()
    try:
        return render_template(
            'docs.html', version=skyline_version, duration=(time.time() - s)), 200
    except:
        return 'Uh oh ... a Skyline 500 :(', 500
Example #19
def illumina_files_demultiplex_only(runobj):  
    start = time.time()
    illumina_files = IlluminaFiles(runobj)
    illumina_files.open_dataset_files()
    illumina_files.split_files(compressed = runobj.compressed)
    elapsed = (time.time() - start)
    print "illumina_files demultiplex only time = %s" % str(elapsed)
Example #20
    def do_real_import(self, vsfile, filepath,mdXML,import_tags):
        """
        Make the import call to vidispine, and wait for self._importer_timeout seconds for the job to complete.
        Raises a VSException representing the job error if the import job fails, or ImportStalled if the timeout occurs
        :param vsfile: VSFile object to import
        :param filepath: filepath of the VSFile
        :param mdXML: compiled metadata XML to import alongside the media
        :param import_tags: shape tags describing required transcodes
        :return: None
        """
        import_job = vsfile.importToItem(mdXML, tags=import_tags, priority="LOW", jobMetadata={"gnm_app": "vsingester"})

        job_start_time = time.time()
        close_sent = False
        while import_job.finished() is False:
            self.logger.info("\tJob status is %s" % import_job.status())
            if time.time() - job_start_time > self._importer_timeout:
                self.logger.error("\tJob has taken more than {0} seconds to complete, concluding that it must be stalled.".format(self._importer_timeout))
                import_job.abort()
                self.logger.error("\tSent abort signal to job")
                raise ImportStalled(filepath)
            if time.time() - job_start_time > self._close_file_timeout and not close_sent:
                vsfile.setState("CLOSED")
                close_sent = True  # only request the close once
            sleep(5)
            import_job.update(noraise=False)
Example #21
    def treat_movement(self, rp, movement):
        if movement["status"] in ["error", "done"]:
            return

        now = int(time.time())
        movement["begin"] = now
        movement["status"] = "treating"
        self.memory.update_movement(movement)

        # checking sources
        movement["src_exist"] = True
        if not os.path.exists(rp["src"]):
            now = int(time.time())
            movement["src_exist"] = False
            movement["status"] = "error"
            movement["end"] = now
            movement["error"] = "source file : %s does not exist" % (rp["src"])
            self.memory.update_movement(movement)
            return

        # adapt in function of protocol
        if movement["protocol"] == "cp":
            self.do_cp(movement, rp)
            return
        if movement["protocol"] == "ftp":
            self.do_ftp(movement, rp)
            return
        now = int(time.time())
        movement["status"] = "error"
        movement["end"] = now
        movement["error"] = "protocol %s not known" % (movement["protocol"])
        self.memory.update_movement(movement)
        rp["status"] = "done"
        self.memory.update_relative_path(rp)
Example #22
def wait_ms(prev_time_ms, wait_time_ms):
    t = time.time()*1000. - prev_time_ms
    while t < wait_time_ms:
        if t < 0:
            break
        sleep((wait_time_ms - t)/1000.)
        t = time.time()*1000. - prev_time_ms
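A small usage sketch for the pacing helper above; `do_work` and the 100 ms period are purely illustrative.

import time
from time import sleep

def do_work(i):
    # hypothetical per-iteration work taking well under 100 ms
    sleep(0.01)

prev_ms = time.time() * 1000.
for i in range(5):
    do_work(i)
    wait_ms(prev_ms, 100.)  # pad each iteration out to roughly 100 ms
    prev_ms = time.time() * 1000.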
Example #23
def test_vrr():
    import time
    xa,ya,za = 0.,0.,0.
    xb,yb,zb = 0.,0.,0.
    xc,yc,zc = 0.,0.,0.
    xd,yd,zd = 0.,0.,0.
    norma = normb = normc = normd = 1.
    alphaa = alphab = alphac = alphad = 1.

    la,ma,na = 0,0,0
    lc,mc,nc = 0,0,0

    M = 0
    t0 = time.time()
    val1 = vrr((xa,ya,za),norma,(la,ma,na),alphaa,
               (xb,yb,zb),normb,alphab,
               (xc,yc,zc),normc,(lc,mc,nc),alphac,
               (xd,yd,zd),normd,alphad,M)
    t1 = time.time()
    val2 = vrr((xc,yc,zc),normc,(lc,mc,nc),alphac,
               (xd,yd,zd),normd,alphad,
               (xa,ya,za),norma,(la,ma,na),alphaa,
               (xb,yb,zb),normb,alphab,M)
    t2 = time.time()
    print "Values:  ",val1,val2
    print "Timings: ",t1-t0,t2-t1
    return
Example #24
File: PE_0032.py Project: mbh038/PE
def alligator():

    import time
    start_time = time.time()

    product_sum = 0
    check_pandigital = False
    set_of_products = set()
    
    def pandigital(l):
            seen = set() # Start with an empty set
            for i in l:
                    if i in seen:
                            return False
                    seen.add(i) # If it is not in the seen set, then add it.
            return True
     
    for x in range(1, 2000):
            for y in range(x+1, 2000):
                    z = x * y
                    digits = str(x) + str(y) + str(z) # This takes all three numbers and joins them into one long number
                    if (len(digits) == 9 and "0" not in (digits) and pandigital(digits)):
                            if z not in set_of_products:
                                    set_of_products.add(z)
                                    product_sum = product_sum + z
    
    print ("The sum of the pandigital products is: ", product_sum)
    print("--- %s seconds ---" % (time.time() - start_time))
Example #25
 def func(*args):
   import time
   start = time.time()
   ret = f(*args)
   took = time.time() - start
   print("%s took %f" % (f.__name__,took))
   return ret
Example #26
def DetectSoundLocation():
    # Get data from sound source localization (assuming sound location has been activated).
    data = memoryProxy.getData("SoundLocated")

    ##The SoundLocated key is organized as follows:
    ##
    ##[ [time(sec), time(usec)],
    ##
    ##  [azimuth(rad), elevation(rad), confidence],
    ##
    ##  [Head Position[6D]]
    ##]

    if len(data)==0:
        detected=False
        timestamp=time.time()
        soundInfo=[]
    else:
        detected=True
        #timestamp=data[0][0]+1E-6*data[0][1] #this works but only if a sound is located
        timestamp=time.time()
        soundInfo=[]
        for snd in data:
            soundInfo.append([ snd[1][0], #azimuth angle
                               snd[1][1], #elevation angle
                               snd[1][2], #confidence: probability that there was a sound
                               snd[2]])   #Headposition 6D
    return detected, timestamp, soundInfo
Example #27
def _log_rate(output_f,d, message=None):
    """Log a message for the Nth time the method is called.
    
    d is the object returned from init_log_rate
    """
    
    import time 

    if d[2] <= 0:
        
        if message is None:
            message = d[4]
        
        # Average the rate over the length of the deque. 
        d[6].append(int( d[3]/(time.time()-d[1])))
        rate = sum(d[6])/len(d[6])
        
        # Prints the processing rate in 1,000 records per sec.
        output_f(message+': '+str(rate)+'/s '+str(d[0]/1000)+"K ") 
        
        d[1] = time.time()
        
        # If the print_rate was specified, adjust the number of records to
        # approximate that rate.
        if d[5]:
            target_rate =  rate * d[5]
            d[3] = int((target_rate + d[3]) / 2)

        d[2] = d[3]
        
          
    d[0] += 1
    d[2] -= 1
Example #28
def exe():
    init_time = time.time()
    pder = PornDetector()
    print(time.time() - init_time)
    print("ARE YOU OK?")
    while True:
        pder.porn_detector(True, False)
        print(time.time() - init_time)
Example #29
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()

        print '%r (%r, %r) %2.2f sec' % \
              (method.__name__, args, kw, te-ts)
        return result
Example #30
 def update_output(self):
     if (time.time() - self.last_output > self.OUTPUT_PERIOD):
         print("O%s A%s G%s" % (
             pf(self.data["orientation"]["data"]),
             pf(self.data["acceleration"]["data"]),
             pf(self.data["gyroscope"]["data"]),
         ))
         self.last_output = time.time()
Example #31
                        send_notification(
                            2, products_list[productlist[productcounter]])
        #TODO make a document to log all this information for debugging
        print(products_list[productlist[productcounter]][12:], "online at ",
              date, " code: ", 200, " using header #",
              headerlist[headercounter], "proxy ",
              proxy_dict[proxylist[proxycounter]]['http'], " #",
              proxylist[proxycounter])
    else:
        print(products_list[productlist[productcounter]][12:], "offline at",
              date, " code", 400, " using header #", headerlist[headercounter],
              "proxy ", proxy_dict[proxylist[proxycounter]]['http'], " #",
              proxylist[proxycounter])

        # put proxy in timeout dictionary
        time_key = time.time()
        timeoutdict[time_key] = proxy_dict[proxylist[proxycounter]]
        # take proxy out of active proxies
        proxy_dict.pop(proxylist[proxycounter])
        # reorder dictionary
        proxy_dict = proxyorganize(proxy_dict)
        # increase time per request
        delaylimit += 2
        timeoutonweb = False
        # used to make new list of random numbers
        proxycounter = proxy_dict.__len__()

    # add proxy back into active proxies
    for x in timeoutdict.keys():
        if (x + 400 < time.time()):
            proxy_dict[proxy_dict.__len__()] = timeoutdict[x]
Example #32
def train_model_and_evaluate(lods, out_data, seed=10):
    np.random.seed(seed)
    tf.compat.v1.set_random_seed(seed)
    n_knob_cols = len(lods.config['COLS_KNOBS'])

    nn_params = HYPER_PARAMS['nn_params']
    kpca_params = HYPER_PARAMS['kpca_params']

    tmp_trainval = lods.trainval
    tmp_shared_trainval = lods.shared_trainval

    if N_TRAIN_PER_JOB != -1:
        tmp_trainval = lods.trainval.get_x(N_TRAIN_PER_JOB)
    if N_SHARED_TRAIN_PER_JOB != -1:
        tmp_shared_trainval = lods.shared_trainval.get_x(
            N_SHARED_TRAIN_PER_JOB)

    if tmp_trainval is not None:
        logging.info("shape of remaining trainval (X): {}".format(
            tmp_trainval.X.shape))
    else:
        logging.info("tmp_trainval is None (perhaps because of get_x(0))")

    if tmp_shared_trainval is not None:
        logging.info("shape of remaining shared trainval (X): {}".format(
            tmp_shared_trainval.X.shape))
    else:
        logging.info(
            "tmp_shared_trainval is None (perhaps because of get_x(0))")

    if tmp_trainval is None:
        # in case we're invoking dataset.get_x(0)
        ds_train = tmp_shared_trainval
    else:
        ds_train = tmp_trainval + tmp_shared_trainval

    X_train = np.hstack([ds_train.a, ds_train.X, ds_train.Y])
    y_train = ds_train.targets.ravel()

    logging.info("Fitting KPCA on data of shape: {}".format(X_train.shape))

    # Make PCA and fit on loaded data
    fit_t = time.time()
    pca = KernelPCA(**kpca_params)
    pca.altered_centroids = None
    logging.info("Fitting KPCA on data of shape: {}".format(ds_train.Y.shape))

    if ENCODING_STRATEGY == 'shared':
        shared_train = lods.shared_trainval.get_x(N_OBS)
        pca.fit(ds_train.Y)
        encods_shared = pca.transform(shared_train.Y)
        centroids = compute_centroids(encods_shared, shared_train.a)
    else:
        encods = pca.fit_transform(ds_train.Y)
        centroids = compute_centroids(encods, ds_train.a)
    pca.centroids = centroids  # this is why I love Python! :-)
    fit_t = time.time() - fit_t
    logging.info("KPCA fitting time is: {} minutes and {} seconds".format(
        fit_t // 60, int(fit_t / 60)))

    # Adjust the X vector by transforming Y into job's centroid
    X, y = translate_X_y(X_train, y_train, pca.centroids, n_knob_cols)

    # Make and fit a NN Regressor
    logging.info("Fitting regressor on data of shapes: {}, {}".format(
        X.shape, y.shape))
    reg = NNregressor(with_calibration=True,
                      **nn_params,
                      random_state=seed,
                      v1_compat_mode=True)
    reg.fit(X, y, log_time=True)
    training_mape = reg.MAPE(X, y)
    logging.info("Training Error: {:.2f}%".format(training_mape))
    out_data['training_errs'].append(training_mape)

    if ENCODING_STRATEGY == 'shared':
        observed_traces = lods.shared_traincomplement.get_x(N_OBS)
    else:
        observed_traces = lods.traincomplement.get_x(N_OBS)

    logging.info("observed_traces description: ")
    observed_traces.describe()

    observed_traces_slices = observed_traces.slice_by_job_id(
        alias_to_id=lods.alias_to_id)

    test_aliases = sorted(list(set(lods.test.a.ravel())))

    for test_job in observed_traces_slices:
        evaluate(test_job, pca, reg, lods, observed_traces,
                 observed_traces_slices, out_data, test_aliases)

    # out_data['kpcas'].append(pca)

    # Append trained regressor information to output_data
    out_data['regressors'].append(reg.get_persist_info())

    persist_data(copyDict(out_data), DATA_FNAME)
Example #33
def main():
    
    colormap='viridis'
    job_id = os.environ['PBS_JOBID']
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    device=args.device
    
    fp16=True
    if device=="CPU":
        fp16=False

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=device)

    # Read IR
    net = IENetwork(model=model_xml, weights=model_bin)

    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"

    bn = "relu_1/Relu"
    print(bn)
    # add the last convolutional layer as output 
    net.add_outputs(bn)
    fc="predictions_1/MatMul"

    # name of the inputs and outputs
    input_blob = next(iter(net.inputs))
    out_blob = "predictions_1/Sigmoid"

    net.batch_size = 1

    exec_net = plugin.load(network=net)

    n,c,h,w=net.inputs[input_blob].shape
    files=glob.glob(os.getcwd()+args.input[0])
    
    
    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    f=open(os.path.join(args.output_dir, 'result'+job_id+'.txt'), 'w')
    f1=open(os.path.join(args.output_dir, 'stats'+job_id+'.txt'), 'w') 
    progress_file_path = os.path.join(args.output_dir, "progress"+job_id+".txt")
    print(progress_file_path)
    time_images=[]
    tstart=time.time()
    for index_f, file in enumerate(files):
        [image1,image]= read_image(file)
        t0 = time.time()
        for i in range(args.number_iter):
            res = exec_net.infer(inputs={input_blob: image1})
            #infer_time.append((time()-t0)*1000)
        infer_time = (time.time() - t0)*1000
        log.info("Average running time of one iteration: {} ms".format(np.average(np.asarray(infer_time))))
        if args.perf_counts:
            perf_counts = exec_net.requests[0].get_perf_counts()
            log.info("Performance counters:")
            print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
            for layer, stats in perf_counts.items():
                print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
                                                                  stats['status'], stats['real_time']))
        res_pb = res[out_blob]
        probs=res_pb[0][0]
        print("Probability of having disease= "+str(probs)+", performed in " + str(np.average(np.asarray(infer_time))) +" ms")
        
        # Class Activation Map    
        t0 = time.time()
        cam=class_activation_map_openvino(res, bn, fc , net, fp16)
        cam_time=(time.time() - t0) * 1000
        print("Time for CAM: {} ms".format(cam_time))


        fig,ax = plt.subplots(1,2)
        # Visualize the CAM heatmap
        cam = (cam - np.min(cam))/(np.max(cam)-np.min(cam))
        im=ax[0].imshow(cam, cmap=colormap)
        ax[0].axis('off')
        plt.colorbar(im,ax=ax[0],fraction=0.046, pad=0.04)

        # Visualize the CAM overlaid over the X-ray image 
        colormap_val=cm.get_cmap(colormap)  
        imss=np.uint8(colormap_val(cam)*255)
        im = Image.fromarray(imss)
        width, height = image.size
        cam1=resize_image(im, (height,width))
        heatmap = np.asarray(cam1)
        img1 = heatmap [:,:,:3] * 0.3 + image
        ax[1].imshow(np.uint16(img1))
        plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
        plt.savefig(os.path.join(args.output_dir, 'result'+job_id+'_'+str(index_f)+'.png'), bbox_inches='tight', pad_inches=0,dpi=300)
       
        avg_time = round((infer_time/args.number_iter), 1)
        
                    #f.write(res + "\n Inference performed in " + str(np.average(np.asarray(infer_time))) + "ms") 
        f.write("Pneumonia probability: "+ str(probs) + ", Inference performed in " + str(avg_time) + "ms \n") 
        time_images.append(avg_time)
        simpleProgressUpdate(progress_file_path,index_f* avg_time , (len(files)-1)* avg_time) 
    f1.write(str(np.average(np.asarray(time_images)))+'\n')
    f1.write(str(1))
from datetime import datetime
import argparse
from urllib import parse
import os, sys
import subprocess
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem.rdMolDescriptors import CalcMolFormula
from pubchempy import Compound, get_compounds, get_synonyms
import tqdm
from tqdm import tqdm
from tqdm import tqdm_notebook
import time

s = time.time()

# Loading SMILES
print('Loading SMILES')

# Read appropriate csv file below
SMILES = pd.read_csv('drug_central_drugs.csv', index_col=0)['SMILES'].tolist()


def multi_preprocess_smi(smi):

    new_dict = {}

    try:
        # Filter 1- Convert to Canonical Smiles
        mol = Chem.MolFromSmiles(smi)
Example #35
from pandas import DataFrame
#import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import random
import numpy as np
import timeit
import sys
import time

number_of_data_points = int(sys.argv[1])
number_of_clusters = int(sys.argv[2])


def get_random_cluster_points(number_points, number_dim):
    mu = np.random.randn()
    sigma = np.random.randn()
    p = sigma * np.random.randn(number_points, number_dim) + mu
    return p


p = get_random_cluster_points(number_of_data_points, 2)

print(time.time())
kmeans = KMeans(n_clusters=number_of_clusters, n_jobs=-1).fit(p)
print(time.time())
centroids = kmeans.cluster_centers_
Example #36
    def serial_reader(self, dev, channel, writer_packet_queue,
                      socket_packet_queue):
        """
        Thread responsible for reading from serial port, parsing the output and
        storing parsed packets into queue.

           :param dev: Device to sniff from.
           :param channel: Channel to sniff passed as argument.
           :param writer_packet_queue: Queue string packet to write.
           :param socket_packet_queue: Queue string packet to send by socket.
        """
        sleep(2)

        timeout = time.time() + self.connection_open_timeout if self.connection_open_timeout else None
        while self.running.is_set():
            try:
                self.serial = Serial(dev, timeout=1, exclusive=True)
                break
            except Exception as e:
                if timeout and time.time() > timeout:
                    self.running.clear()
                    raise Exception(
                        "Could not open serial connection to sniffer before timeout of {} seconds".format(
                            self.connection_open_timeout))
                self.logger.debug(
                    "Can't open serial device: {} reason: {}".format(dev, e))
                sleep(0.5)

        try:
            self.serial.reset_input_buffer()
            self.serial.reset_output_buffer()

            init_cmd = []
            init_cmd.append(b'')
            init_cmd.append(b'sleep')
            init_cmd.append(b'channel ' + bytes(str(channel).encode()))
            for cmd in init_cmd:
                self.serial_queue.put(cmd)

            # Function serial_write appends twice '\r\n' to each command, so we have to calculate that for the echo.
            init_res = self.serial.read(
                len(b"".join(c + b"\r\n\r\n" for c in init_cmd)))

            if not all(cmd.decode() in init_res.decode() for cmd in init_cmd):
                msg = "{} did not reply properly to setup commands. Please re-plug the device and make sure firmware is correct. " \
                      "Recieved: {}\n".format(self, init_res)
                self.logger.error(msg)

            self.serial_queue.put(b'receive')
            self.setup_done.set()

            buf = b''

            while self.running.is_set():
                ch = self.serial.read()
                if ch == b'':
                    continue
                elif ch != b'\n' and ch != '\n':
                    buf += ch
                else:
                    m = search(self.RCV_REGEX, str(buf))
                    if m:
                        packet = a2b_hex(m.group(1)[:-4])
                        rssi = int(m.group(2))
                        lqi = int(m.group(3))
                        timestamp = int(m.group(4)) & 0xffffffff
                        channel = int(channel)
                        writer_packet_queue.put(
                            self.pcap_packet(packet, self.dlt, channel, rssi,
                                             lqi,
                                             self.correct_time(timestamp)))
                        socket_packet_queue.put(
                            self.pcap_packet(packet, self.dlt, channel, rssi,
                                             lqi,
                                             self.correct_time(timestamp)))
                    buf = b''

        except (
        serialutil.SerialException, serialutil.SerialTimeoutException) as e:
            self.logger.error(
                "Cannot communicate with serial device: {} reason: {}".format(
                    dev, e))
        finally:
            self.setup_done.set()  # In case it wasn't set before.
            if self.running.is_set():  # Another precaution.
                self.stop_sig_handler()
Example #37
 def restart(self):
     self.start = time.time()
Example #38
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int)
    parser.add_argument('--lr', type=float)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--eval_every', type=int, default=10)
    parser.add_argument('--actf',
                        type=str,
                        choices=['sigmoid', 'relu', 'linear'],
                        default='linear')
    args = parser.parse_args()

    ######

    # 4.5 YOUR CODE HERE

    batch_size = args.batch_size
    lr = args.lr
    epochs = args.epochs
    eval_every = args.eval_every

    train_loader, val_loader = load_data(batch_size)
    model, loss_fnc, optimizer = load_model(lr)

    t = 0

    train_accs = []
    tvals = []
    valid_accs = []
    timearray = []
    st = time.time()
    for epoch in range(0, epochs):
        accum_loss = 0
        total_corr = 0
        for i, batch in enumerate(train_loader):
            feats, label = batch
            optimizer.zero_grad()
            predictions = model(feats)
            batch_loss = loss_fnc(input=predictions.squeeze(),
                                  target=label.float())
            accum_loss = accum_loss + batch_loss
            batch_loss.backward()
            optimizer.step()
            corr = (predictions > 0.5).squeeze().long() == label.long()
            total_corr = total_corr + int(corr.sum())

            end = time.time()
            if (t + 1) % (args.eval_every) == 0:
                valid_acc = evaluate(model, val_loader)
                train_acc = evaluate(model, train_loader)

                print("Epoch: {}, Step: {} | Loss: {} | Valid acc: {}".format(
                    epoch + 1, t + 1, accum_loss / eval_every, valid_acc))
                accum_loss = 0
                valid_accs = valid_accs + [valid_acc]
                train_accs = train_accs + [train_acc]
                tvals = tvals + [t + 1]
                timearray = timearray + [end - st]
            t = t + 1

        print("Train acc:{}".format(
            float(total_corr) / len(train_loader.dataset)))
        print("Time", timearray[len(timearray) - 1])
    #lines = plt.plot(tvals, train_accs,'r', valid_accs,'b')
    plt.plot(tvals, sg.savgol_filter(train_accs, 9, 3, mode="nearest"), 'r')
    plt.plot(tvals, sg.savgol_filter(valid_accs, 9, 3, mode="nearest"), 'b')
    plt.title("Accuracy " + " vs." + " Steps")
    plt.xlabel("Steps")
    plt.ylabel("Accuracy")
    plt.legend(['Training', 'Validation'])
    plt.show()
    plt.close()

    plt.plot(timearray, sg.savgol_filter(train_accs, 9, 3, mode="nearest"),
             'r')
    plt.plot(timearray, sg.savgol_filter(valid_accs, 9, 3, mode="nearest"),
             'b')
    plt.title("Accuracy " + " vs." + " Time")
    plt.xlabel("Time")
    plt.ylabel("Accuracy")
    plt.legend(['Training', 'Validation'])
    plt.show()
    plt.close()
def RunH2Oaiglm_ptr(arg):
    import os
    import numpy as np
    import h2o4gpu as h2o4gpu
    import time

    trainX, trainY, validX, validY, trainW, fortran, mTrain, n, mvalid, intercept, lambda_min_ratio, n_folds, n_alphas, n_lambdas, n_gpus = arg

    # assume ok with 32-bit float for speed on GPU if using this wrapper
    # (astype returns a copy, so the result has to be rebound)
    if trainX is not None:
        trainX = trainX.astype(np.float32)
    if trainY is not None:
        trainY = trainY.astype(np.float32)
    if validX is not None:
        validX = validX.astype(np.float32)
    if validY is not None:
        validY = validY.astype(np.float32)
    if trainW is not None:
        trainW = trainW.astype(np.float32)

    print("Begin Setting up Solver")
    os.system(
        "rm -f error.txt ; touch error.txt ; rm -f varimp.txt ; touch varimp.txt"
    )  ## for visualization
    enet = h2o4gpu.ElasticNetH2O(n_gpus=n_gpus,
                                 fit_intercept=intercept,
                                 lambda_min_ratio=lambda_min_ratio,
                                 n_lambdas=n_lambdas,
                                 n_folds=n_folds,
                                 n_alphas=n_alphas)
    print("End Setting up Solver")

    ## First, get backend pointers
    sourceDev = 0
    t0 = time.time()
    a, b, c, d, e = enet.prepare_and_upload_data(trainX,
                                                 trainY,
                                                 validX,
                                                 validY,
                                                 trainW,
                                                 source_dev=sourceDev)
    t1 = time.time()
    print("Time to ingest data: %r" % (t1 - t0))

    ## Solve
    if 1 == 1:
        print("Solving")
        t0 = time.time()
        order = 'c' if fortran else 'r'
        double_precision = 0  # Not used
        store_full_path = 0
        enet.fit_ptr(mTrain,
                     n,
                     mvalid,
                     double_precision,
                     order,
                     a,
                     b,
                     c,
                     d,
                     e,
                     source_dev=sourceDev)
        t1 = time.time()
        print("Done Solving")
        print("Time to train H2O AI ElasticNetH2O: %r" % (t1 - t0))
        return r, W, arrVW1, arrVW2 ,arrVW3 ,arrVW4 
        fhandle.close()
    except IOError as ex:
        print('There was an error reading the instance')
        raise Exception
        sys.exit(1)

if __name__ == "__main__":

    fname = sys.argv[1]
    pool = ThreadPool(processes=1)

    n, W, arrVW1,arrVW2 ,arrVW3 ,arrVW4  = readData(fname)


    start_time = time.time()

    X = 0
    t = pool.apply_async(heristic, (n, W, X, arrVW1))
    X1, W1 = t.get()
    time.sleep(0.0001)
    t1 = pool.apply_async(heristic, (n, W1, X1, arrVW2))
    X2, W2 = t1.get()
    time.sleep(0.0001)
    t2 = pool.apply_async(heristic, (n, W2, X2,  arrVW3))
    X3, W3 = t2.get()
    time.sleep(0.0001)
    t3 = pool.apply_async(heristic, (n, W3, X3,  arrVW4))

    Xfinal, W4 = t3.get()
    
Example #41
labels = sshmm.labels
print('\tModel labels are: ', labels)

print()
print('Testing %s algorithm load disagg...' % algo_name)
acc = Accuracy(len(labels), folds)

print()
print('Connecting to EMU2 on %s...' % (device))
emu2 = serial.Serial(device, 115200, timeout=1)

y0 = y1 = -1

while True:
    msg = emu2.readlines()
    ts = int(time.time())
    dt = datetime.datetime.fromtimestamp(ts)
    if msg == [] or msg[0].decode()[0] != '<':
        continue

    msg = ''.join([line.decode() for line in msg])

    try:
        tree = et.fromstring(msg)
    except:
        continue

    if tree.tag == 'InstantaneousDemand':
        power = int(tree.find('Demand').text, 16)
        power = int(power * precision)
def GetCurrentUnixTime():
    # current epoch time in milliseconds
    return int(time.time() * 1000.0)
 def expired(self):
     return int(time.time()) > self.expire_at
Example #44
    def train2(self, epochs, DQN, memory, game):

        batch_size = 32

        replay_memory_size = 1000
        play_steps = 100
        learning_steps = 200

        frame_repeat = 12
        display_step = 200

        loss_list = []
        total_steps = 0
        epoch_time = 0.0

        for epoch in range(epochs):

            start_time = time.time()

            # print("Playing...")
            game.new_episode()
            # game_step_count = 0
            # game_steps = []

            for play_step in range(play_steps):

                s1 = game.get_state().screen_buffer  #[3,480,640] uint8
                # a = get_eps_action(epoch, actions, preprocess(s1))
                s1 = s1.reshape([1, 3, resolution[0], resolution[1]])
                state = torch.from_numpy(preprocess(s1)).cuda()
                # best_action_index = get_best_action(state)
                # print (state.size())
                # fasdf
                q = DQN[0](Variable(state))
                val, index = torch.max(q, 1)
                index = index.data.cpu().numpy()[0]
                action = actions[index]

                reward = game.make_action(action,
                                          frame_repeat) / float(frame_repeat)
                isterminal = game.is_episode_finished()
                # game_step_count +=1
                s2 = game.get_state().screen_buffer if not isterminal else None
                memory.add_transition(s1, index, s2, isterminal, reward)

                if game.is_episode_finished():
                    # score = game.get_total_reward()
                    # train_scores.append(score)
                    # train_episodes_finished += 1
                    # game_steps.append(game_step_count)
                    game.new_episode()
                    # game_step_count = 0

            for learning_step in range(learning_steps):

                # if learning_step%10==0:
                #     print (learning_step)

                idxs = sample(range(0, memory.size), batch_size)
                s1 = memory.s1[idxs]

                batch = Variable(preprocess_pytorch(s1))

                self.optimizer.zero_grad()
                loss, dif, prec_sum = self.forward(batch, DQN)
                loss.backward()
                self.optimizer.step()

                if total_steps % display_step == 0:  # and batch_idx == 0:
                    print(
                        'Train Epoch: {}/{}'.format(epoch, epochs),
                        'epoch_time:{:.2f}'.format(epoch_time),
                        'Loss:{:.4f}'.format(loss.data.item()),
                        'dif:{:.4f}'.format(dif.data.item()),
                        'prec_sum:{:.4f}'.format(prec_sum.data.item()),
                    )

                    if total_steps != 0:
                        loss_list.append(loss.data.item())

                total_steps += 1

            epoch_time = time.time() - start_time

            if epoch % 10 == 0 and epoch > 2:
                #Save params
                save_params_v3(save_dir=exp_path_2,
                               model=self,
                               epochs=epoch + start_epoch)

                if len(loss_list) > 7:
                    #plot the training curve
                    plt.plot(loss_list[2:])
                    # save_dir = home+'/Documents/tmp/Doom/'
                    plt_path = exp_path_2 + 'training_plot.png'
                    plt.savefig(plt_path)
                    print('saved training plot', plt_path)
                    plt.close()
Example #45
 def __init__(self):
     self.start = time.time()
Example #46
import cobra
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import pandas as pd
import scipy.stats
import time

start_time = time.time()

# modifiable variables: cell # (max 8444), gene # (max 1800), threshold is % unique cells
cells = 2000
genes = 1800
threshold = .7
threads = mp.cpu_count() - 1
plot = False

modelOriginal = cobra.io.load_matlab_model('Recon3D.mat')

met_name = "etoh_"
modelOriginal.reactions.get_by_id("ETOHt").lower_bound = 1000
modelOriginal.reactions.get_by_id("ETOHt").upper_bound = 1000
modelOriginal.objective = "ALCD2x"

# read in scRNAseq data set
data = pd.read_csv('GSE115469_Data.csv', index_col=0)
# gene names should be the 0th column, which is the index column
genes_in_sc = data.index
# read in the map from gene name -> model name
f = open('map.txt', 'r')
dict_temp = f.readlines()
Example #47
 def get_time(self):
     end = time.time()
     m, s = divmod(end - self.start, 60)
     h, m = divmod(m, 60)
     time_str = "%02d:%02d:%02d" % (h, m, s)
     return time_str
Example #48
                frame.append(byte)

        str = ''
        for i in frame:
            str += chr(i)

        print len(str)

        while True:
            start = bufLen*iter
            end = bufLen+bufLen*iter
            indata = str[start:end]
            readSoFar = len(indata)

            if end > len(str) and len(decoded):
                if time.time() - lastByteTime > 2:
                    #save wav file
                    save_wav()
                    print mFramesCount
                    print iter
                    exit(0)
            elif indata:
                inbuffer += indata

            if len(inbuffer) == bufLen:
                seqNum, SI_received, PV_received = struct.unpack('BBh', inbuffer[0:4])
                seqNum = (seqNum >> 3)
                print "Frame sequence number: %d" % seqNum

                print "HDR_1 local: %d, HDR_1 received: %d" % (SI_Dec, SI_received)
                print "HDR_2 local: %d, HDR_2 received: %d" % (PV_Dec, PV_received)
Example #49
	def keyGo(self, result=None):
		if not self.timerentry_showendtime.value:
			self.timerentry_endtime.value = self.timerentry_starttime.value

		self.timer.resetRepeated()
		self.timer.timerType = {
			"wakeup": TIMERTYPE.WAKEUP,
			"wakeuptostandby": TIMERTYPE.WAKEUPTOSTANDBY,
			"autostandby": TIMERTYPE.AUTOSTANDBY,
			"autodeepstandby": TIMERTYPE.AUTODEEPSTANDBY,
			"standby": TIMERTYPE.STANDBY,
			"deepstandby": TIMERTYPE.DEEPSTANDBY,
			"reboot": TIMERTYPE.REBOOT,
			"restart": TIMERTYPE.RESTART
			}[self.timerentry_timertype.value]
		self.timer.afterEvent = {
			"nothing": AFTEREVENT.NONE,
			"wakeuptostandby": AFTEREVENT.WAKEUPTOSTANDBY,
			"standby": AFTEREVENT.STANDBY,
			"deepstandby": AFTEREVENT.DEEPSTANDBY
			}[self.timerentry_afterevent.value]

		if self.timerentry_type.value == "once":
			self.timer.begin, self.timer.end = self.getBeginEnd()

		if self.timerentry_timertype.value == "autostandby" or self.timerentry_timertype.value == "autodeepstandby":
			self.timer.begin = int(time()) + 10
			self.timer.end = self.timer.begin
			self.timer.autosleepinstandbyonly = self.timerentry_autosleepinstandbyonly.value
			self.timer.autosleepdelay = self.timerentry_autosleepdelay.value
			self.timer.autosleeprepeat = self.timerentry_autosleeprepeat.value
			# Ensure that the timer repeated is cleared if we have an autosleeprepeat
			if self.timerentry_type.value == "repeated":
				self.timer.resetRepeated()
				self.timerentry_type.value = "once" # Stop it being set again

		if self.timerentry_type.value == "repeated":
			if self.timerentry_repeated.value == "daily":
				for x in (0, 1, 2, 3, 4, 5, 6):
					self.timer.setRepeated(x)

			if self.timerentry_repeated.value == "weekly":
				self.timer.setRepeated(self.timerentry_weekday.index)

			if self.timerentry_repeated.value == "weekdays":
				for x in (0, 1, 2, 3, 4):
					self.timer.setRepeated(x)

			if self.timerentry_repeated.value == "user":
				for x in (0, 1, 2, 3, 4, 5, 6):
					if self.timerentry_day[x].value:
						self.timer.setRepeated(x)

			self.timer.repeatedbegindate = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_starttime.value)
			if self.timer.repeated:
				self.timer.begin = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_starttime.value)
				self.timer.end = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_endtime.value)
			else:
				self.timer.begin = self.getTimestamp(time(), self.timerentry_starttime.value)
				self.timer.end = self.getTimestamp(time(), self.timerentry_endtime.value)

			# when a timer end is set before the start, add 1 day
			if self.timer.end < self.timer.begin:
				self.timer.end += 86400

		self.saveTimer()
		self.close((True, self.timer))
    x = Activation('relu')(x)
    # x = Dense(256)(x)
    # x = Activation('relu')(x)
    # x = Dense(256)(x)
    # x = Activation('relu')(x)
    x = Dense(num_classes)(x)
    x = Activation('sigmoid')(x)
    model = Model(inputs=image, outputs=x)

    model.summary()
    adam = optimizers.Adam(lr=.001)
    # model.load_weights("vgg19_att.h5")
    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    start = time.time()
    model.fit(X_train,
              y_train,
              epochs=20,
              verbose=1,
              validation_data=(X_test, y_test),
              batch_size=32)
    end = time.time()
    elapsed = end - start  # avoid shadowing the time module
    print("TIME: ", elapsed)

    model.save_weights("vgg16_att.h5")

    # pred = model.predict(X_test)
    # prediction = []
    # for p in pred:
Example #51
def run(model_name,
        seed,
        target,
        number_of_patients,
        test_size,
        models,
        params,
        X,
        y,
        AUC_test_dict,
        Importances,
        Probabilities,
        save_csv=False,
        save_probabilities=True,
        save_models=False,
        imputation_method='mean',
        is_creatinine=False,
        is_continuous=False):

    start = time.time()
    time_now = datetime.now(central)
    print("Time when start: ", time_now.strftime("%H:%M:%S"))

    continuous = True
    if target == 'target_bin':
        continuous = False

    path_to_add = 'Hemoglobin/'
    if is_creatinine:
        path_to_add = 'Creatinine/'

    path_to_save = '../../Data/Generated_csv/' + path_to_add

    path_to_save_models = '../../Data/Results/' + path_to_add + 'Models/'

    #run only on a sample of the patients
    X_sample = X.iloc[:number_of_patients]
    y_sample = y.iloc[:number_of_patients]

    print(seed)

    #sample
    if not is_continuous:
        train_X, test_X, train_y, test_y = train_test_split(
            X_sample,
            y_sample,
            stratify=y_sample[target],
            test_size=test_size,
            random_state=seed)

    if is_continuous:
        train_X, test_X, train_y, test_y = train_test_split(
            X_sample, y_sample, test_size=test_size, random_state=seed)

    # function to test a model trained on all the data but tested on a filtered testing set
    filter_after = False
    if filter_after:
        test_X = test_X.loc[test_X.Hb_value_initial <= 10]
        test_y = test_y.loc[test_y.index.isin(test_X.index)]

    cols = train_X.columns
    if model_name == 'Log' or model_name == 'Lin':
        scaler = StandardScaler()
        train_X = scaler.fit_transform(train_X)
        test_X = scaler.transform(test_X)

    if save_csv:
        #save the csv for reproducibility and OCT
        train_X.to_csv(path_to_save + 'train_and_test/' + imputation_method +
                       '_train_X_seed_' + str(seed) + '.csv')
        test_X.to_csv(path_to_save + 'train_and_test/' + imputation_method +
                      '_test_X_seed_' + str(seed) + '.csv')
        train_y.to_csv(path_to_save + 'train_and_test/' + imputation_method +
                       '_train_Y_seed_' + str(seed) + '.csv')
        test_y.to_csv(path_to_save + 'train_and_test/' + imputation_method +
                      '_test_Y_seed_' + str(seed) + '.csv')

# Uncomment this code to do a grid search within the bootstrapping
#if model_name=='OCT':
#    crit='gini'
#    print('starting grid search')
#
#    grid = iai.GridSearch(
#                       iai.OptimalTreeClassifier(
#                                                 random_seed=1,
#                                                 ),
#                       max_depth=range(5, 7), #do range
#                       criterion=crit,
#                       ls_num_tree_restarts=300
#                       )
#    grid.fit(train_X, train_y[target])
#    model = grid.get_learner()
#    print(grid.score(test_X, test_y[target], criterion='auc'))
#    print(grid.get_best_params())

#elif model_name=='ORT':
#     crit='mse'
#     print('starting grid search ORT')
#
#     grid = iai.GridSearch(
#                           iai.OptimalTreeRegressor(
#                                                    random_seed=1,
#                                                    ),
#                           max_depth=range(3, 10), #do range
#                           criterion=crit,
#                           ls_num_tree_restarts=300)
#     grid.fit(train_X, train_y[target])
#     model = grid.get_learner()
#     print(grid.score(test_X, test_y[target], criterion='mse'))
#     print(grid.get_best_params())

    if model_name != 'Baseline':  # 'elif' when the commented OCT/ORT grid-search block above is re-enabled
        model = models[model_name](**params[model_name])
        model.fit(train_X, train_y[target])

    # uncomment this code if you want to load an already trained tree

    #if model_name=='OCT':
    #file_path=path_to_save_models+model_name+'_'+target+'_'+str(number_of_patients) +'_patients_test_size_'+str(test_size)+'mean'+'_seed'+str(1)
    #model=iai.read_json(file_path)

    if not is_continuous:

        if model_name == 'OCT':
            preds = model.predict_proba(test_X)['1']
        elif model_name == 'Baseline':
            preds = test_y[target + '_baseline'].values
        else:
            preds = model.predict_proba(test_X)[:, 1]
        AUC_test = roc_auc_score(test_y[target], preds)
        if not is_creatinine:
            test_X.reset_index(inplace=True)
            test_y.reset_index(inplace=True)
            # test the performances on a filtered dataset
            #subset_list = test_X.loc[test_X.Hb_value_initial<10].index
            #suberror = roc_auc_score(test_y.loc[test_y.index.isin(subset_list)][target], preds[subset_list])
            #print("Normal :",AUC_test)
            #print("Sub :",suberror)
            # if you want to output the error on a subgroup of patients
            #AUC_test=suberror

    if is_continuous:
        if model_name == 'ORT':
            preds = model.predict(test_X)
        elif model_name == 'Baseline':
            preds = test_y[target + '_baseline'].values
        else:
            preds = model.predict(test_X)
        AUC_test = mean_absolute_error(test_y[target], preds)
        if not is_creatinine:
            test_X.reset_index(inplace=True)
            test_y.reset_index(inplace=True)
            #subset_list = test_X.loc[test_X.Hb_value_initial<8].index
            #suberror = mean_absolute_error(test_y.loc[test_y.index.isin(subset_list)][target], preds[subset_list])
            #print("Normal :",AUC_test)
            #print("Sub :",suberror)
            # if you want to output the error on a subgroup of patients
            #AUC_test=suberror

    AUC_test_dict[target][model_name][seed] = AUC_test

    if save_probabilities:
        prob = pd.DataFrame()
        prob[target] = test_y[target]
        #prob['proba_'+target]=preds.reset_index()
        if model_name == 'OCT':
            prob['proba_' + target] = preds.values
        else:
            prob['proba_' + target] = preds
        Probabilities[target][model_name][seed] = prob
    if model_name == 'Baseline':
        return AUC_test

    if save_models:
        file_path = path_to_save_models + model_name + '_' + target + '_' + str(
            number_of_patients) + '_patients_test_size_' + str(
                test_size) + imputation_method + '_seed' + str(seed)
        if model_name == 'OCT' or model_name == 'ORT':
            model.write_json(file_path)
            model.write_html(file_path + '.html')
        else:
            f = open(file_path, "wb")
            pickle.dump(model, f)
            f.close()

    #add feature importance

    if model_name == 'OCT' or model_name == 'ORT':
        Importance = model.variable_importance()
    else:
        if model_name == 'Log':
            features = model.coef_[0]
        elif model_name == 'Lin':
            features = model.coef_

        else:
            features = model.feature_importances_
        Importance = pd.DataFrame(features, columns=['Importance'])
        Importance['Feature'] = list(cols)
    Importance['Model'] = model_name
    Importance['seed'] = seed
    Importance['target'] = target
    Importances[target][model_name][seed] = Importance

    print("End of one thread!")
    stop = time.time()
    print("took %.2f seconds" % ((stop - start)))
    if save_probabilities:
        return AUC_test, Importance, prob
    else:
        return AUC_test
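run() fills pre-built nested dictionaries such as AUC_test_dict[target][model_name][seed]; the calling code that creates them is not part of this example, so the snippet below is only one plausible way to set them up:

# Sketch only -- hypothetical preparation of the nested result containers
# expected by run(); the original caller is not shown in this example.
from collections import defaultdict

def nested_dict():
    return defaultdict(nested_dict)

AUC_test_dict = nested_dict()   # AUC_test_dict[target][model_name][seed] = score
Importances = nested_dict()     # Importances[target][model_name][seed] = DataFrame
Probabilities = nested_dict()   # Probabilities[target][model_name][seed] = DataFrame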
Example #52
for dilution in np.unique(toc[toc.isRef == 'N'].Dilution):
    logging.debug("Processing dilution: %0.1f" % dilution)
    h5FileName = "Case%s.hdf5" % str(dilution).replace(".", "_", 1)
    try:
        with h5py.File(h5FileName, 'r') as f:
            pass
    except IOError as e:

        time_all = []

        caseFileList = ["../2015-10-15_Plot_time_vs_region_length_rvd3_synthetic_data/depth_chart/100/top_400_positions/%s"\
                       % filename for filename in toc.Filename[toc.Dilution==dilution] ]
        logging.info('Estimate %s' % caseFileList)

        t0 = time.time()
        (r, n, loc, refb) = rvd3.load_depth(caseFileList)
        t1 = time.time()
        time_load_depth = t1 - t0
        time_all.extend([time_load_depth])

        casephi, caseq, time_ini_model_para, time_ini_var_para, time_ini_ELBO, time_opt_gam, time_opt_delta, time_conv, \
        time_opt_mu0, time_opt_M0, time_opt_M, time_update_ELBO = rvd3.ELBO_opt(r, n, seed = 19860522, pool=None)

        time_all.extend([time_ini_model_para, time_ini_var_para, time_ini_ELBO, time_opt_gam, time_opt_delta, time_conv, \
                        time_opt_mu0, time_opt_M0, time_opt_M, time_update_ELBO])

        logging.debug("Saving model in %s" % h5FileName)
        t2 = time.time()
        rvd3.save_model(h5FileName, r, n, casephi, caseq, loc, refb)
        t3 = time.time()
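The script above brackets each stage with paired time.time() calls and appends the elapsed values to time_all. A small context-manager sketch of the same pattern (not used by the original code):

# Sketch only -- equivalent to the paired t0 = time.time() ... t1 = time.time()
# measurements above, collected into a list.
import time
from contextlib import contextmanager

@contextmanager
def stage_timer(times):
    start = time.time()
    try:
        yield
    finally:
        times.append(time.time() - start)

time_all = []
with stage_timer(time_all):
    sum(range(1000000))  # placeholder for a stage such as rvd3.load_depth(...)
print(time_all)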
Example #53
    def create(self,**kwargs):
        '''PARAMS:
        -------
        overwrite = [True | False]
        backup = [True | False]

        If database exists, with overwrite FALSE and backup FALSE, the method exits with False
        If database does not exist it is created
        If database exists and overwrite without backup, it is deleted
        If database exists and backup is set, database will be backed up; regardless of overwrite.

        RETURN
        -----
            FLAG = True if a database is created, False if a database is not created.
            MESSAGE = database name + actions taken
        '''
        method = sys._getframe().f_code.co_name
        message = self.database + " "
        flag = True
        db_exists = False
        now = time.time()
        bak_name = ""
        modifier = time.strftime("%Y-%m-%d-%s", time.gmtime(now))
        overwrite = kwargs.get('overwrite',False)
        backup = kwargs.get('backup',True)

        if DEBUG: print('Class {classname} Method {method} Arguments {args}'.format(classname=self.class_name, method=method, args=kwargs))

        db_exists = self.name()[0]

        if DEBUG and db_exists: print('Database {database} exists'.format(database=self.database))

        if backup and db_exists:
            dir_name = os.path.dirname(self.database)
            base_name = os.path.basename(self.database)
            file_name, file_ext = base_name.rsplit('.', 1)
            bak_name = dir_name + "/" + file_name + "_" + modifier + "." + file_ext
            if DEBUG: print("Database backed up to filename {}".format(bak_name))
            try:
                copyfile(self.database, bak_name)
                message += "- File was backed up"
            except IOError as e:
                message += "- Error file was not backed up {error}".format(error=e)
                return False, message

        if overwrite and db_exists:
            try:
                os.remove(self.database)
                message += "- database file deleted"

            except OSError as e:
                message += "- Error file was not deleted {error}".format(error=e)
                return False, message

        self.connect()
        wal = "PRAGMA journal_mode=WAL;"
        cur = self.db_connect.cursor()
        cur.execute(wal)
        results = cur.fetchall()
        message += "- " + str(sqlite3.version) + ":" + str(results)
        if db_exists and not overwrite:
            message += "- Wal mode is enabled"
        else:
            message += "- New Database created with WAL mode enabled"
        self.db_connect.close()

        return flag, message
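create() builds the backup filename by inserting a timestamp between the database name and its extension before copying the file aside. A standalone sketch of that naming pattern (the path below is hypothetical):

# Sketch only -- the timestamped backup-name pattern used by create() above.
import os
import time

def backup_path(database):
    # note: "%s" (epoch seconds) is a glibc strftime extension, mirrored from the original
    modifier = time.strftime("%Y-%m-%d-%s", time.gmtime(time.time()))
    dir_name = os.path.dirname(database)
    file_name, file_ext = os.path.basename(database).rsplit('.', 1)
    return os.path.join(dir_name, file_name + "_" + modifier + "." + file_ext)

print(backup_path("/tmp/example.db"))  # hypothetical path, for illustration only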
Example #54
PORT = 33843  # webcam
TIME_PORT = 37133
CAPTURE_FILE_NAME = "capture.jpg"
URL_PREFIX = "data:image/jpg;base64,"
q = queue.Queue()
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    raise IOError("Camera IO Error")

#time synchronization
soc = socket(AF_INET)
soc.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
print("[*] connecting to %s:%s" % (HOST, TIME_PORT))
soc.connect((HOST, TIME_PORT))

t1 = time.time()
#print t1
soc.send(str(t1).encode())
t2 = soc.recv(1024)
t2 = float(t2)
#print t2
t3 = time.time()
#print t3
dt = t2 - (t1 + t3) / 2
#print dt
soc.close()
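# Note on dt above: t2 - (t1 + t3) / 2 is the usual round-trip offset estimate --
# the remote clock reading minus the midpoint of the local send/receive times,
# assuming symmetric network latency. Standalone sketch (not part of the
# original script):
def estimate_offset(t_send, t_remote, t_recv):
    """Estimated remote-minus-local clock offset for one request/reply pair."""
    return t_remote - (t_send + t_recv) / 2.0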


def compare_capture(cap1, cap2):
    cap1_hist = cv2.calcHist([cap1], [0], None, [256], [0, 256])
    cap2_hist = cv2.calcHist([cap2], [0], None, [256], [0, 256])
Example #55
def timestamp():
    return int(time.time())
Example #56
    def keyGo(self, result=None):
        if not self.timerentry_service_ref.isRecordable():
            self.session.openWithCallback(
                self.selectChannelSelector, MessageBox,
                _("You didn't select a channel to record from."),
                MessageBox.TYPE_ERROR)
            return
        self.timer.name = self.timerentry_name.value
        self.timer.description = self.timerentry_description.value
        self.timer.justplay = self.timerentry_justplay.value == "zap"
        self.timer.always_zap = self.timerentry_justplay.value == "zap+record"
        self.timer.rename_repeat = self.timerentry_renamerepeat.value
        if self.timerentry_justplay.value == "zap":
            if not self.timerentry_showendtime.value:
                self.timerentry_endtime.value = self.timerentry_starttime.value
        self.timer.resetRepeated()
        self.timer.afterEvent = {
            "nothing": AFTEREVENT.NONE,
            "deepstandby": AFTEREVENT.DEEPSTANDBY,
            "standby": AFTEREVENT.STANDBY,
            "auto": AFTEREVENT.AUTO
        }[self.timerentry_afterevent.value]
        self.timer.descramble = {
            "normal": True,
            "descrambled+ecm": True,
            "scrambled+ecm": False,
        }[self.timerentry_recordingtype.value]
        self.timer.record_ecm = {
            "normal": False,
            "descrambled+ecm": True,
            "scrambled+ecm": True,
        }[self.timerentry_recordingtype.value]
        self.timer.service_ref = self.timerentry_service_ref
        self.timer.tags = self.timerentry_tags

        if self.timer.dirname or self.timerentry_dirname.value != defaultMoviePath():
            self.timer.dirname = self.timerentry_dirname.value
            config.movielist.last_timer_videodir.value = self.timer.dirname
            config.movielist.last_timer_videodir.save()

        if self.timerentry_type.value == "once":
            self.timer.begin, self.timer.end = self.getBeginEnd()
        if self.timerentry_type.value == "repeated":
            if self.timerentry_repeated.value == "daily":
                for x in (0, 1, 2, 3, 4, 5, 6):
                    self.timer.setRepeated(x)

            if self.timerentry_repeated.value == "weekly":
                self.timer.setRepeated(self.timerentry_weekday.index)

            if self.timerentry_repeated.value == "weekdays":
                for x in (0, 1, 2, 3, 4):
                    self.timer.setRepeated(x)

            if self.timerentry_repeated.value == "user":
                for x in (0, 1, 2, 3, 4, 5, 6):
                    if self.timerentry_day[x].value:
                        self.timer.setRepeated(x)

            self.timer.repeatedbegindate = self.getTimestamp(
                self.timerentry_repeatedbegindate.value,
                self.timerentry_starttime.value)
            if self.timer.repeated:
                self.timer.begin = self.getTimestamp(
                    self.timerentry_repeatedbegindate.value,
                    self.timerentry_starttime.value)
                self.timer.end = self.getTimestamp(
                    self.timerentry_repeatedbegindate.value,
                    self.timerentry_endtime.value)
            else:
                self.timer.begin = self.getTimestamp(
                    time.time(), self.timerentry_starttime.value)
                self.timer.end = self.getTimestamp(
                    time.time(), self.timerentry_endtime.value)

            # when a timer end is set before the start, add 1 day
            if self.timer.end < self.timer.begin:
                self.timer.end += 86400

        if self.timer.eit is not None:
            event = eEPGCache.getInstance().lookupEventId(
                self.timer.service_ref.ref, self.timer.eit)
            if event:
                n = event.getNumOfLinkageServices()
                if n > 1:
                    tlist = []
                    ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
                    parent = self.timer.service_ref.ref
                    selection = 0
                    for x in range(n):
                        i = event.getLinkageService(parent, x)
                        if i.toString() == ref.toString():
                            selection = x
                        tlist.append((i.getName(), i))
                    self.session.openWithCallback(
                        self.subserviceSelected,
                        ChoiceBox,
                        title=_("Please select a subservice to record..."),
                        list=tlist,
                        selection=selection)
                    return
                elif n > 0:
                    parent = self.timer.service_ref.ref
                    self.timer.service_ref = ServiceReference(
                        event.getLinkageService(parent, 0))
        self.saveTimer()
        self.close((True, self.timer))
Example #57
 def __init__(self, config=None):
     self.current_time_epoch = int(time.time())
Example #58
def oauth_grant():
    '''
    This endpoint takes the following parameters:
    * code - The code parameter provided in the redirect
    * client_id - Your client ID
    * client_secret - your client secret
    '''

    application = g.db.query(OauthApp).filter_by(
        client_id=request.values.get("client_id"),
        client_secret=request.values.get("client_secret")).first()
    if not application:
        return jsonify(
            {"oauth_error": "Invalid `client_id` or `client_secret`"}), 401
    if application.is_banned:
        return jsonify({"oauth_error": f"Application `{application.app_name}` is suspended."}), 403

    if request.values.get("grant_type") == "code":

        code = request.values.get("code")
        if not code:
            return jsonify({"oauth_error": "code required"}), 400

        auth = g.db.query(ClientAuth).filter_by(
            oauth_code=code,
            access_token=None,
            oauth_client=application.id
        ).first()

        if not auth:
            return jsonify({"oauth_error": "Invalid code"}), 401

        auth.oauth_code = None
        auth.access_token = secrets.token_urlsafe(128)[0:128]
        auth.access_token_expire_utc = int(time.time()) + 60 * 60

        g.db.add(auth)

        g.db.commit()

        data = {
            "access_token": auth.access_token,
            "scopes": auth.scopelist,
            "expires_at": auth.access_token_expire_utc,
            "token_type": "Bearer"
        }

        if auth.refresh_token:
            data["refresh_token"] = auth.refresh_token

        return jsonify(data)

    elif request.values.get("grant_type") == "refresh":

        refresh_token = request.values.get('refresh_token')
        if not refresh_token:
            return jsonify({"oauth_error": "refresh_token required"}), 401

        auth = g.db.query(ClientAuth).filter_by(
            refresh_token=refresh_token,
            oauth_code=None,
            oauth_client=application.id
        ).first()

        if not auth:
            return jsonify({"oauth_error": "Invalid refresh_token"}), 401

        auth.access_token = secrets.token_urlsafe(128)[0:128]
        auth.access_token_expire_utc = int(time.time()) + 60 * 60

        g.db.add(auth)

        data = {
            "access_token": auth.access_token,
            "scopes": auth.scopelist,
            "expires_at": auth.access_token_expire_utc
        }

        return jsonify(data)

    else:
        return jsonify({"oauth_error": f"Invalid grant_type `{request.values.get('grant_type','')}`. Expected `code` or `refresh`."}), 400
Example #59
 def __init__(self, user_id: int):
     self.user_id = user_id
     self.id = uuid4().hex
     self.expire_at = int(time.time()) + CONFIRMATION_EXPIRATION_DELTA
     self.confirmed = False
Example #60
    def getConfigListValues(self):
        if not self.timerentry_service_ref.isRecordable():
            self.session.openWithCallback(
                self.selectChannelSelector, MessageBox,
                _("You didn't select a channel to record from."),
                MessageBox.TYPE_ERROR)
            return
        if self.timerentry_justplay.value == 'record':
            if not harddiskmanager.inside_mountpoint(self.timerentry_dirname.value):
                if harddiskmanager.HDDCount() and not harddiskmanager.HDDEnabledCount():
                    self.session.open(MessageBox, _("Unconfigured storage devices found!") + "\n" \
                     + _("Please make sure to set up your storage devices with the improved storage management in menu -> setup -> system -> storage devices."), MessageBox.TYPE_ERROR)
                    return
                elif harddiskmanager.HDDEnabledCount() and defaultStorageDevice() == "<undefined>":
                    self.session.open(MessageBox, _("No default storage device found!") + "\n" \
                     + _("Please make sure to set up your default storage device in menu -> setup -> system -> recording paths."), MessageBox.TYPE_ERROR)
                    return
                elif harddiskmanager.HDDEnabledCount() and defaultStorageDevice() != "<undefined>":
                    part = harddiskmanager.getDefaultStorageDevicebyUUID(defaultStorageDevice())
                    if part is None:
                        self.session.open(MessageBox, _("Default storage device is not available!") + "\n" \
                         + _("Please verify if your default storage device is attached or set up your default storage device in menu -> setup -> system -> recording paths."), MessageBox.TYPE_ERROR)
                        return
                else:
                    self.session.open(
                        MessageBox,
                        _("Recording destination for this timer does not exists."
                          ), MessageBox.TYPE_ERROR)
                    return

        self.timer.name = self.timerentry_name.value
        self.timer.description = self.timerentry_description.value
        self.timer.justplay = self.timerentry_justplay.value == "zap"
        if self.timerentry_justplay.value == "zap":
            if not self.timerentry_showendtime.value:
                self.timerentry_endtime.value = self.timerentry_starttime.value

        self.timer.resetRepeated()
        self.timer.afterEvent = {
            "nothing": AFTEREVENT.NONE,
            "deepstandby": AFTEREVENT.DEEPSTANDBY,
            "standby": AFTEREVENT.STANDBY,
            "auto": AFTEREVENT.AUTO
        }[self.timerentry_afterevent.value]
        self.timer.service_ref = self.timerentry_service_ref
        self.timer.tags = self.timerentry_tags

        if self.timer.dirname or self.timerentry_dirname.value != defaultMoviePath():
            self.timer.dirname = self.timerentry_dirname.value
            config.movielist.last_timer_videodir.value = self.timer.dirname
            config.movielist.last_timer_videodir.save()

        if self.timerentry_type.value == "once":
            self.timer.begin, self.timer.end = self.getBeginEnd()
        if self.timerentry_type.value == "repeated":
            if self.timerentry_repeated.value == "daily":
                for x in (0, 1, 2, 3, 4, 5, 6):
                    self.timer.setRepeated(x)

            if self.timerentry_repeated.value == "weekly":
                self.timer.setRepeated(self.timerentry_weekday.index)

            if self.timerentry_repeated.value == "weekdays":
                for x in (0, 1, 2, 3, 4):
                    self.timer.setRepeated(x)

            if self.timerentry_repeated.value == "user":
                for x in (0, 1, 2, 3, 4, 5, 6):
                    if self.timerentry_day[x].value:
                        self.timer.setRepeated(x)

            self.timer.repeatedbegindate = self.getTimestamp(
                self.timerentry_repeatedbegindate.value,
                self.timerentry_starttime.value)
            if self.timer.repeated:
                self.timer.begin = self.getTimestamp(
                    self.timerentry_repeatedbegindate.value,
                    self.timerentry_starttime.value)
                self.timer.end = self.getTimestamp(
                    self.timerentry_repeatedbegindate.value,
                    self.timerentry_endtime.value)
            else:
                self.timer.begin = self.getTimestamp(
                    time.time(), self.timerentry_starttime.value)
                self.timer.end = self.getTimestamp(
                    time.time(), self.timerentry_endtime.value)

            # when a timer end is set before the start, add 1 day
            if self.timer.end < self.timer.begin:
                self.timer.end += 86400