Example #1
File: main_new.py Project: jbu/Mneme
    def show_goals(self):
        t1 = time.clock()
        personis_vis = cherrypy.session.get('vismgr')
        res = personis_vis.show_goals()
        t2 = time.clock()
        write_log('notice','Show goals visualisation')
        print 'show_goals took %0.3fms' % ((t2-t1)*1000.0)
        return res
Example #2
File: main_new.py Project: jbu/Mneme
    def show_size(self):
        t1 = time.clock()
        personis_vis = cherrypy.session.get('vismgr')
        modeltree = cherrypy.session.get('modeltree')
        res = personis_vis.show_size(modeltree)

        t2 = time.clock()
        print res
        if type(res) is ListType:
            res = json.dumps(res)
        write_log('notice','Show size visualisation')
        print 'show_size took %0.3fms' % ((t2-t1)*1000.0)
        return res
Example #3
    def get_asx_dailys(self): #download and save all ASX files for the date required 
        self.get_last_save_date()

        while self.last_date < self.date:
            tic = time.clock()
            self.last_date = self.last_date + timedelta(days=1) #next day from last save
            if self.last_date.weekday() < 5: #if Monday to Friday
                #print 'Getting data for %s' % str(self.last_date)
                for dirs,tails in self.asx_dirs.iteritems():
                    self.get_asx(dirs,tails) #download the htm files to directory structure
                    if self.got_htm_file == True:
                        self.get_asx_table()  #returns self.allasxdata, a dictionary of dataframes in the ASX web data
                        self.update_warehouse(tails)
            toc = time.clock()    
            info_text = 'Data for %s processed in %s seconds' % (self.last_date,str(toc-tic))
            logger.info(info_text)
Example #4
def main():
    start_time = datetime.now()
    start_time1 = time.clock()
    ic("Thread %s is Running..." % current_thread().name)
    t = list()
    for i in range(5):
        t.append(Thread(target=thread_test, name="thread" + str(i + 1)))
        t[i].start()
    for i in range(5):
        t[i].join()

    ic("global num: ", num)
    ic("Thread %s is Ended..." % current_thread().name)
    end_time = datetime.now()
    end_time1 = time.clock()
    ic(end_time - start_time, end_time1 - start_time1)
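A note on this example: on Unix, time.clock() returned CPU time while datetime.now() measures wall time, so the two deltas printed at the end can disagree badly for code that sleeps or blocks. Python 3 separates the two meanings explicitly; a minimal sketch:

import time

t0_wall = time.perf_counter()  # wall-clock interval timer
t0_cpu = time.process_time()   # CPU-time timer
time.sleep(1)                  # burns wall time, almost no CPU time
print(time.perf_counter() - t0_wall)  # ~1.0
print(time.process_time() - t0_cpu)   # ~0.0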
Example #5
File: main_new.py Project: jbu/Mneme
    def register_app(self, data):
        t1 = time.clock()
        res = ""
        try:
            print data
            if 'nogoals' not in data:
                self.__setgoal = True
            else:
                self.__setgoal = False

            result = cherrypy.session.get('appmgr').register_app(data)

            if "Error" not in result:
                write_log('notice','Apps register operation successful')

                res = result
            else:
                res = "Error in opration "
                write_log('error','Apps register Operation Failed; '+result)

        except Exception,e:
            print e
            write_log('error','Apps regsiter Operation Failed; Error:'+str(e))

            res = "Error in opration "
Example #6
File: main_new.py Project: jbu/Mneme
    def show_sub_context(self,context=None):
        um = cherrypy.session.get('um')
        browser_activities = um.get_evidence_new(context = ['Admin'], componentid = 'browseractivity')

        t1 = time.clock()
        print context
        
        cherrypy.session['cur_context'] = context

        udata = context.decode("utf-8")
        context = udata.encode("ascii","ignore")
        context_list = context.split('/')
        modeltree = cherrypy.session.get('modeltree')
        
        genshi_tmpl = LoadGenshiTemplate(cherrypy.session.get('cur_session'), cherrypy.session.get('username'), browser_activities)
        try:
            #contexts, components = self.personis_um.all_access_model(context_list)
            contexts,components = um.get_all_component(context_list)
            if type(contexts) is ListType:
                t2 = time.clock()
                print 'show_subcontext took %0.3fms' % ((t2-t1)*1000.0)
                write_log('notice','Subdirectories Clicked: operation successful')
                for item in modeltree:
                    if item.name == context_list[0]:
                        item.visited = 1
                    else:
                        item.visited = 0
                return genshi_tmpl.browse_sub_elements(context, "", sorted(contexts), sorted(components,key=operator.attrgetter('Identifier')), self.modeltree)
            else:
                e = contexts
                write_log('error','Subdirectories Clicked: Operation Failed; Error:'+str(e))
                return genshi_tmpl.greeting_template(e, "Browse", modeltree)

        except Exception,e:
            #print e
            write_log('error','Sub browser Clicked: Operation Failed; Error:'+str(e))
            return genshi_tmpl.greeting_template(e, "Browse", modeltree)
Example #7
    def update_warehouse(self, tails):
        '''Ok, this works but is extremely inefficient in terms of the amount of drive space required for the panel object - basically it's embarrassing. Something is not quite right here - probably a user error...'''
        if self.last_date == date(2009,7,14):  #Create ASX HDF5 data file for the first time
            asx_futures = HDFStore(self.asx_path + self.asx_warehouse_file)  #open the asx data warehouse!
            ota = Panel({self.last_date: self.allasxdata['EA']})
            ben = Panel({self.last_date: self.allasxdata['EE']})
            asx_futures['OTA_' + tails] = ota
            asx_futures['BEN_' + tails] = ben
            asx_futures.close()
        else:
            tic = time.clock()
            asx_futures = HDFStore(self.asx_path + self.asx_warehouse_file)  #open the asx data warehouse!
            ota = asx_futures['OTA_' + tails]  #get ota
            ben = asx_futures['BEN_' + tails]
            asx_futures.close()
            os.remove(self.asx_path + self.asx_warehouse_file)
            toc = time.clock()
            info_text = 'Opening ' + self.asx_warehouse_file + ' took %s seconds' % (str(toc-tic))
            logger.info(info_text)
            tic = time.clock()
            ota = ota.join(Panel({self.last_date: self.allasxdata['EA']}), how='outer')  #join the new data to the existing panel data - this took a bit of figuring out. Outer is the union of the indexes, so when a new row appears for a new quarter, Nulls or NaNs fill the remainder of the dataframe
            ben = ben.join(Panel({self.last_date: self.allasxdata['EE']}), how='outer')
            toc = time.clock()
            info_text = 'Data join took %s seconds' % (str(toc-tic))
            logger.info(info_text)
            tic = time.clock()
            asx_futures = HDFStore(self.asx_path + self.asx_warehouse_file)  #open the asx data warehouse!
            asx_futures['OTA_' + tails] = ota  #overwrite ota_xxx
            asx_futures['BEN_' + tails] = ben
            asx_futures.close()  #closing ASX warehouse
            toc = time.clock()
            info_text = 'Resaving ' + self.asx_warehouse_file + ' took %s seconds' % (str(toc-tic))
            logger.info(info_text)
            to_excel = True
            if to_excel == True:
                #Spit to XLS ****THIS IS SLOW**** comment out if updating h5 file
                try:  #to local linux box
                    ota.to_excel(self.asx_path + 'OTA_' + tails + '.xls')  #spit to excel
                    ben.to_excel(self.asx_path + 'BEN_' + tails + '.xls')
                except Exception, error_text:
                    logger.error(error_text)

                try:  #to P:/ASX_dailys/
                    ota.to_excel(self.asx_path_P + 'OTA_' + tails + '.xls')  #spit to excel
                    ben.to_excel(self.asx_path_P + 'BEN_' + tails + '.xls')
                except Exception, error_text:
                    logger.error(error_text)
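The how='outer' behavior described in the join comments above is just an index union with NaN fill. Panel has long since been removed from pandas, but a quick DataFrame sketch shows the same semantics:

import pandas as pd

# Outer join takes the union of the indexes; rows missing on either side are NaN-filled.
a = pd.DataFrame({'x': [1, 2]}, index=['r1', 'r2'])
b = pd.DataFrame({'y': [3]}, index=['r3'])
print(a.join(b, how='outer'))
#       x    y
# r1  1.0  NaN
# r2  2.0  NaN
# r3  NaN  3.0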
Example #8
File: main_new.py Project: jbu/Mneme
    def add_new_evidence(self,data=None):
        #show_id = request.GET.get('name')
        res = ""
        t1 = time.clock()
        um = cherrypy.session.get('um')
        try:
            #print data
            udata = data.decode("utf-8")
            data = udata.encode("ascii","ignore")
            cur_context = cherrypy.session.get('cur_context')

            if um.add_new_evidence(cur_context, data):
                res = """ Successfully added"""
                write_log('notice','Add new evidence operation successful')

            else:
                res = """Failed to add"""
                write_log('error','Add new evidence Operation Failed; Error:'+str(res))

        except Exception, e:
            #print e
            res = """Failed to add""" + str(e)
            write_log('error','Add new evidence Operation Failed; Error:'+str(e))
Example #9
def search_image(img):
    desc = extract_image(img, conf.ENABLE_ROTATED_SEARCH)
    if desc is not False and desc is not None:
        con = sqlite_connect(db.SQLITE_DB + '_' + conf.ALGORITHM + '.db')
        cur = con.cursor()
        cur.execute("SELECT mysql_id, arr FROM descriptor")
        data = cur.fetchall()

        idx = np.array([])
        dataset = None
        for i in data:
            if dataset is None:
                dataset = i[1]
                idx = np.linspace(i[0], i[0], len(i[1]))
            else:
                dataset = np.concatenate((dataset, i[1]))
                idx = np.concatenate((idx, np.linspace(i[0], i[0], len(i[1]))))

        flann = FLANN()
        params = flann.build_index(dataset, algorithm="kdtree", trees=1, target_precision=conf.SEARCH_PRECISION,
                                   log_level="info", cores=4)

        if conf.ENABLE_ROTATED_SEARCH is False:
            desc = [desc]

        arr = []
        for d in desc:
            timer_start = time.clock()
            result, dists = flann.nn_index(d, 5, checks=params["checks"])
            print 'time for every search:', time.clock() - timer_start

            uniq, idx_count = np.unique(idx[result], return_counts=True)
            top_results = np.argwhere(idx_count > len(d) * conf.SEARCH_PRECISION)
            top_counts = idx_count[top_results]
            top_ids = uniq[top_results]
            # print uniq
            # print idx_count
            # print len(d)

            t = np.hstack((top_ids, top_counts))  # getting [id,count] array
            if len(t) > 0:
                arr.append(t)

        flann.delete_index()

        if len(arr) > 0:
            sub_case = ''
            list_ids = []
            list_sub = []
            for i in arr:
                list_sub.append((str(int(i[0][0])), str(int(i[0][1]))))
                list_ids.append((str(int(i[0][0]))))
            ids = ",".join(list_ids)

            # print 'ids', ids
            # print 'sub', list_sub
            # print 'listIDs', list_ids

            for i in list_sub:
                sub_case += " WHEN i2.id = " + i[0] + " THEN " + i[1]  # adding rank

            q = "SELECT i.id, i.src_name, i.target_type, i.target, i.user_id, i.title, u.username, i.pos, i.pinned " \
                "FROM image i " \
                "LEFT JOIN user u ON u.id = i.user_id " \
                "LEFT JOIN (" \
                "SELECT i2.id, (CASE " + sub_case + " ELSE 0 END) AS rank " \
                                                    "FROM image i2 " \
                                                    "WHERE i2.id IN (" + ids + ")) AS sub ON sub.id = i.id " \
                                                                               "WHERE i.id IN (" + ids + ") " \
                                                                                                         "ORDER BY i.pinned DESC, sub.rank DESC LIMIT 1"

            result = mysql_fetch(q, {})

            return result
    return False
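An aside on the indexing trick in this example: np.linspace(i[0], i[0], len(i[1])) builds a constant array that maps every descriptor row back to its image id. np.full is the clearer modern spelling:

import numpy as np

np.linspace(7, 7, 5)  # array([7., 7., 7., 7., 7.])
np.full(5, 7.0)       # same result, and says what it means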
Example #10
def main(argv):
    allParams = {}
    #"""
    res = readObject(allParams,argv)
    if(res!="Alright!"):
        return {"greetings":"some issue with reading the file. ErrorMsg --> "+str(res)}
    constVars['numParts'] = allParams['numParts']

    # should remove when we uncomment the above part
    """
    allParams['path'] = "../componentExamples/load_files/nr1_fvcorr.domn.193K" #"part_1.log" #"/home/amoghli13/PACE/serverless/serverless-training-set/rodinia_cfd/componentExamples/nr_4_fvcorr.domn.193K"
    constVars['numParts'] = 1
    allParams['numParts'] = 1 # should come in as a parameter 
    allParams['bucketName'] = "cfd_data"
    allParams['keySuffix'] = "misc" #"cfd_np2_part"
    allParams['curPartNum'] = 0
    allParams['exchangeNum'] = 0
    constVars['numParts'] = allParams['numParts']
    allParams["fluxKey"] = str(allParams['keySuffix'])+"_"+str(allParams["curPartNum"])
    allParams["fluxFilename"] = str(allParams['keySuffix'])+"_"+str(allParams["curPartNum"])+".log"
    #"""
    # end of removal.
    path = allParams['path']
    print ("\t path: %s "%(path))
    initParams(allParams)
    #print ("\t allParams['ff_flux_contribution_momentum_x']: %s "%(allParams['ff_flux_contribution_momentum_x']))
    readFile(allParams['path'],allParams)
    
    # Solving part.. 
    # Create arrays and set initial conditions
    varSizes = (allParams['nelr'])*constVars['NVAR']*constVars['numParts'] # defensively increasing by 1 element.
    variables = [0.0 for x in range(varSizes)]
    #print ("\t allParams['nelr']*constVars['NVAR']: %d "%(allParams['nelr']*constVars['NVAR']))
    initialize_variables(allParams['nelr'], variables, allParams['ff_variable'])

    old_variables = [0.0 for x in range(varSizes)]
    fluxes = [0.0 for x in range(allParams['nelr']*constVars['NVAR'])]
    sync_fluxes = [0.0 for x in range(varSizes)]
    step_factors = [0.0 for x in range(allParams['nelr'])]

    allParams['variables'] = variables
    allParams['old_variables'] = old_variables
    allParams['fluxes'] = fluxes
    allParams['sync_fluxes'] = sync_fluxes
    allParams['step_factors'] = step_factors

    #print ("\t allParams['variables'][0:20]: %s "%(str(allParams['variables'][0:20])))
    begin = time.clock()

    # Begin iterations
    for i in range(constVars['iterations']):
        allParams['old_variables'] = [x for x in (allParams['variables'])]
        #copy<float>(old_variables, variables, nelr*NVAR);
        # print ("\t Before --> allParams['step_factors'][0:20]: %s "%(str(allParams['step_factors'][0:20])))
        # for the first iteration we compute the time step
        compute_step_factor(allParams['nelr'], allParams['variables'], allParams['areas'], allParams['step_factors'])
        # print ("\t After --> allParams['step_factors'][0:20]: %s "%(str(allParams['step_factors'][0:20])))

        #print ("\t Before --> allParams['fluxes'][0:20]: %s "%(str(allParams['fluxes'][0:20])))
        for j in range(constVars['RK']):
            compute_flux(allParams['nelr'],allParams['elements_surrounding_elements'],allParams['normals'],allParams['variables']
                ,allParams['fluxes'],allParams['ff_variable'],allParams['ff_flux_contribution_momentum_x'],allParams['ff_flux_contribution_momentum_y']
                ,allParams['ff_flux_contribution_momentum_z'],allParams['ff_flux_contribution_density_energy'])
            #(nelr, elements_surrounding_elements, normals, variables, fluxes, ff_variable, ff_flux_contribution_momentum_x, ff_flux_contribution_momentum_y,ff_flux_contribution_momentum_z, ff_flux_contribution_density_energy);

            # should synchronize
            synchronizeFluxes(allParams) # push local flux, wait for remote fluxes
            #print ("\t Hows it going?")
            #time_step(j, allParams['nelr'], allParams['old_variables'], allParams['variables'], allParams['step_factors'], allParams['fluxes']);
            time_step(j, allParams['nelr'], allParams['old_variables'], allParams['variables'], allParams['step_factors'], allParams['sync_fluxes']);

        #print ("\t After --> allParams['fluxes'][0:20]: %s "%(str(allParams['fluxes'][0:20])))
        end = time.clock()
        diff = end - begin
        #if(i%10==0): print ("\t i: %d diff: %.6lf "%(i,diff))
        break
    print ("\t End: %s Begin: %s Time: %s "%(end,begin,diff))
    resultStr = "Took %.4lf time "%(diff)
    if(allParams["curPartNum"]==0):
        deleteVarFiles(allParams)
    return {"greetings" : resultStr}
Example #11
File: main_new.py Project: jbu/Mneme
            cur_context = cherrypy.session.get('cur_context')

            if um.add_new_evidence(cur_context, data):
                res = """ Successfully added"""
                write_log('notice','Add new evidence operation successful')

            else:
                res = """Failed to add"""
                write_log('error','Add new evidence Operation Failed; Error:'+str(res))

        except Exception, e:
            #print e
            res = """Failed to add""" + str(e)
            write_log('error','Add new evidence Operation Failed; Error:'+str(e))

        t2 = time.clock()
        print 'add_new_evidence took %0.3fms' % ((t2-t1)*1000.0)
        return res
    add_new_evidence.exposed = True

    def add_new_element(self,data):
        data = data.split('_')
        cur_context = cherrypy.session.get('cur_context')
        context = cur_context.split('/')
        if cur_context == "None" or data[3] == "Home":
            context = []
        print context
        um = cherrypy.session.get('um')
        if data[0] == 'component':
            try:
                um.make_new_component(data[1],'attribute', 'string',None,data[2],context)
Example #12
File: main_new.py Project: jbu/Mneme
    def wrapper(*arg):
        t1 = time.clock()
        res = func(*arg)
        t2 = time.clock()
        print '%s took %0.3fms' % (func.func_name, (t2-t1)*1000.0)
        return res
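This excerpt is only the inner closure of a timing decorator; the enclosing function is not shown. A minimal completion in the same Python 2 style (the name timed is an assumption, not from the source):

import time

def timed(func):  # name assumed; the outer function is missing from the excerpt
    def wrapper(*arg):
        t1 = time.clock()
        res = func(*arg)
        t2 = time.clock()
        print '%s took %0.3fms' % (func.func_name, (t2-t1)*1000.0)
        return res
    return wrapper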
Example #13
File: main_new.py Project: jbu/Mneme
    def users_json(self, rows=None, sidx=None, _search=None, searchField=None,
        searchOper=None, searchString=None, page=None, sord=None, nd=None): # 1 line
        """Returns all users from test_data in a json format that's compatible with jqGrid.""" # 2 lines
        t1 = time.clock()
        header = ["value", "flags", "source", "evidence_type", "creation_time", "time", "useby", "owner", "comment"] # 3 lines
        reslist = []
        genshi_tmpl = LoadGenshiTemplate(cherrypy.session.get('cur_session'), cherrypy.session.get('username'))
        cur_component = cherrypy.session.get('cur_component')
        cur_context = cherrypy.session.get('cur_context')        
        um = cherrypy.session.get('um')
        if cur_component != 'None':
            #print "getting new"
            context = cur_context.split()
            reslist = um.get_evidence_new(context, cur_component)
            cherrypy.session['cur_component'] = 'None'
        else:
            #print "getting default"
            cherrypy.session['cur_component'] = 'firstname'
            reslist = um.get_evidence_new()

        #users_list = test_data_to_list(test_data) # 4 lines
        evdlist = []
        i = 0
        #{'comment': None, 'evidence_type': 'explicit', 'creation_time': 1322914468.889158, 'value': 'Bob',
        #'source': 'Jane', 'flags': [], 'time': None, 'owner': 'Jane', 'objectType': 'Evidence', 'useby': None}
        myEvd = []

        if type(reslist) is ListType:
            for res in reslist:
                print "Inside user_json "
                myEvd = [0]*10
                myEvd[0] = i
                for key, value in res.__dict__.items():
                    #print "%s:%s"%(key, value)
                    for item in header:
                        if item == key:
                            #print "key: %s %s--"%(item,key)
                            if key == 'creation_time' or key == 'time' or key == 'useby':
                                if value:
                                    import datetime
                                    value = datetime.datetime.fromtimestamp(int(value)).strftime('%d/%m/%Y %H:%M:%S')
                            elif key == 'flags':
                                if value:
                                    value = ''.join(value)
                                else:
                                    value="None"
                            __index = header.index(item)
                            #print "%s in %d" %(value,__index+1)
                            myEvd[__index+1]=value
                evdlist.append(myEvd)
                i = i+1
                #print "Evidence: %d" %i
                #for val in myEvd:
                #    print val

            import my_jqGrid
            result_page = my_jqGrid.jqgrid_json(self, evdlist, header, rows=rows, sidx=sidx, _search=_search,
                searchField=searchField, searchOper=searchOper, searchString=searchString, page=page, sord=sord)

            t2 = time.clock()
            print 'users_json took %0.3fms' % ((t2-t1)*1000.0)
            write_log('notice','Show evidence list operation successful')

            return result_page

        else:
            #print reslist
            e = reslist
            write_log('error','Show evidence list Operation Failed; Error:'+str(e))
            modeltree = cherrypy.session.get('modeltree')
            return genshi_tmpl.greeting_template(e, "Evidencelist upload", modeltree)
Example #14
    def __init__(self, name):
        self.time_admitted = time.clock()
        self.name = name
Example #15
    def get_lmp(self, latest=False, start_at=False, end_at=False,
                market='hourly', grp_type='ALL', node='ALL', csv=False, **kwargs):
        # Construct_oasis_payload expects market option to be one of 'hourly', 'fivemin', 'tenmin', 'na', 'dam'
        
        # if csv = False, pulls xml files, parses SLOWLY, and returned data is 
        # a list of dicts, each of which has a main index of the timestamp
        
        # if csv=True, pulls csv files, parses more quickly, and returns Pandas
        # Panel data structure
        
        
        # Expected parameters:
        #  node: CAISO node ID.  Can be set to individual node or "ALL".  "ALL" will override grp_type
        #  grp_type: either "ALL_APNodes" or "ALL" - This will trigger day-by-day iteration
        #      NOTE: This needs to be turned off for processing individual nodes.  This will override node types
        #  market= "DAM", "HASP", "RTM"
        #  start_at and end_at can be a variety of parsable input types, with or without time codes
        #      i.e. '2013-10-12T11:45:30' or '2011-10-12'
        
        # Relevant XML Calls:
        #  PRC_LMP -        for market_run_id='DAM'
        #  PRC_HASP_LMP     for market_run_id='HASP'
        #  PRC_INTVL_LMP    for market_run_id='RTM'
        #  PRC_RTPD_LMP     No longer valid?
        
        # Max call interval:
        #  In the HASP and RTM markets, requesting more than the max interval length may result in the wrong data being returned.
        # Individual nodes: <31 days
        # Calling "ALL" or "ALL_APNODES":
        #    DAM: 1 day, returns 4 files from expanded zip. Each has 20-line header
        #    HASP: 1 hour, returns one file with all components (LMP, MCC, MCE, MCL)
        #    RTM: 1 hour, returns one file with all components (LMP, MCC, MCE, MCL)

        #PRC_LMP
        # if grp_type=="ALL" or "ALL_APNODES", we are processing full node sets:
        #   remove 'node' from the payload
        #   can only process one time step at a time,
        #       Time step for DAM = 1 day; time step otherwise = 1 hr
        #       
        # if node is not "ALL", we are dealing with a specific node:
        #   remove grp_type from payload
        #   Check to make sure that the date is less than 31 days or cut into pieces
        
        # set args
        self.handle_options(data='load', latest=latest,
                            start_at=start_at, end_at=end_at, market=market, grp_type=grp_type,node=node, **kwargs)
        
        requestSpan = self.options['end_at'] - self.options['start_at']  # This is the duration spanned by our request     
        requestStart = self.options['start_at'] #This should be a datetime object
        requestEnd = self.options['end_at'] # This should be a datetime object
        print 'Request span is:',requestSpan
        
        # ensure market and freq are set             # What is this for?
        if 'market' not in self.options:
            if self.options['forecast']:
                self.options['market'] = self.MARKET_CHOICES.dam
            else:
                self.options['market'] = self.MARKET_CHOICES.fivemin
        """if 'freq' not in self.options:              # What is the frequency used for?
            if self.options['forecast']:
                self.options['freq'] = self.FREQUENCY_CHOICES.hourly
            else:
                self.options['freq'] = self.FREQUENCY_CHOICES.fivemin
        """
        # Clean up conflicting commands
        # Check this: this may currently be buggy when requesting grp_type=ALL_APNODES but excluding 'node' in the call 
        if self.options['node']=='ALL' and self.options['grp_type']!='ALL':
            del self.options['grp_type']    # Node typically overrides the grp_type call
            
        
        # Decision fork: either we are handling "all" nodes or we are handling an individual node
        if self.options['grp_type']=='ALL' or self.options['grp_type']=='ALL_APNodes':
            # If we are processing full node sets, need to iterate across the appropriate time blocks
            del self.options['node']  # Get rid of node commands to ensure we aren't sending mixed signals.  This will override the node option.
        
            if market=='DAHR':
                print ('The DAM LMP call is not yet implemented... you should go do that.')
                
            else:  # We are not in DAM, but in HASP or RTM
                # If we are requesting all nodes in the Hour-ahead market or real-time markets, we can request at most one hour at a time

                if market=='RTHR':
                    # This is a request for the Hour-Ahead Scheduling Process (HASP)
                    oasis_API_call= 'PRC_HASP_LMP'
                else:  # market=='RTM'
                    # Assume that this is a request for the real-time market
                    oasis_API_call= 'PRC_INTVL_LMP'
                
                parsed_data = [] # Placeholder
                
                currentStartAt = requestStart       # Initialize loop conditions
                currentEndAt = currentStartAt
                
                # The contents of the following if statement can probably be refactored 
                if requestSpan.total_seconds()>3600:
                    timeStep = timedelta(hours=1)       # Increment by one hour each iteration
                    currentEndAt = currentEndAt + timeStep # Priming the pump
                    
                    # The following loop can probably be refactored significantly
                    while currentEndAt < requestEnd:
                    # Set up payload, fetch data, and parse data
                        
                        self.options['start_at']=currentStartAt
                        self.options['end_at']=currentEndAt
                        payload = self.construct_oasis_payload(oasis_API_call,csv=csv)
                        print 'Requesting data for time starting at ', (currentStartAt).strftime(self.oasis_request_time_format)
                        startRequest = time.clock()
                        oasis_data = self.fetch_oasis(payload=payload)
                        endRequest = time.clock()
                        print 'Imported data in ',endRequest-startRequest,' s'
                        parsed_data.append(self.parse_lmp(oasis_data,csv=csv))
                        print 'Parsed Data in ', time.clock()-endRequest,' s'
                        currentStartAt = currentEndAt
                        currentEndAt = currentEndAt + timeStep
                # Previous 'if' block was to get us within one time step of the finish.  This will get us the rest of the way.
                        
                #Clean up final iteration to get to the end time
                print 'Exited the loop'
                self.options['start_at']=currentStartAt
                self.options['end_at']=requestEnd
                payload = self.construct_oasis_payload(oasis_API_call,csv=csv)
                print 'Requesting data for time starting at ', (currentStartAt).strftime(self.oasis_request_time_format)
                oasis_data = self.fetch_oasis(payload=payload)
                parsed_data.append(self.parse_lmp(oasis_data,csv))
                result = parsed_data
                
                #merge dataframes if you have been pulling csv's
                if csv:
                    for i in range(len(parsed_data)):
                        if i == 0: result = parsed_data[0]
                        else:
                            result = result.append(parsed_data[i])
                    result = result.unstack()
                    result.columns = result.columns.droplevel()
        
        else:
            # If we aren't handling full node sets, we are handling individual nodes and can request up to 31 days of data at a time
            print('The single-node calls are not yet implemented... you should go do that.')
        
        
        # Return either just the most recent datapoint, or return all the parsed data
        # It seems like this could be moved to a separate function
        # Commenting out for now because it looks like it needs a specific data structure, i.e. a dict with a 'timestamp' key
        """
        if self.options['latest']: 
            # select latest
            latest_dp = None
            latest_ts = self.utcify('1900-01-01 12:00')
            now = self.utcify(datetime.utcnow(), tz_name='utc')
            for dp in parsed_data:
                if dp['timestamp'] < now and dp['timestamp'] > latest_ts:
                    latest_dp = dp
                    latest_ts = dp['timestamp']

            # return latest
            if latest_dp:
                return [latest_dp]
            else:
                return []
        else:
            # return all data
            return parsed_data
        """
        return result
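For orientation, a hypothetical call exercising the all-nodes real-time path above (CAISOClient is a stand-in name; the class that owns get_lmp is not shown in the excerpt):

# Hypothetical usage sketch, not from the source.
client = CAISOClient()
result = client.get_lmp(start_at='2013-10-12T00:00:00',
                        end_at='2013-10-12T06:00:00',
                        market='RTM',    # falls through to the PRC_INTVL_LMP call
                        grp_type='ALL',  # full node set, fetched hour by hour
                        csv=True)        # csv pulls parse faster and return pandas data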
	cv2.namedWindow("output", cv2.WINDOW_NORMAL)
        imS = cv2.resize(predicted_frame, (960, 540)) 
        cv2.imshow("output", imS)
	key = cv2.waitKey(3) & 0xFF

	if label_pair[1] < 45 and label_pair[0] > 0 and locked is True:
		f = open('Log.txt', 'a')
		f.write("\n At ")
		f.write(str(datetime.now()))
		f.write(", ")
		f.write(subjects[label_pair[0]])
		f.write(" is entering the room with a variance of " + str(label_pair[1]))
		f.close()
		GPIO.output(Motor1E,GPIO.HIGH)
		waitTime = time.clock()+10
		while time.clock() < waitTime:
			pass
		GPIO.output(Motor1E, GPIO.LOW)
		locked = False

	input_state = GPIO.input(18)
	if input_state == False:
		f = open('Log.txt', 'a')
		if locked is True:
			GPIO.output(Motor1E,GPIO.HIGH)
                	waitTime = time.clock()+10
			while time.clock() < waitTime:
				pass
                	GPIO.output(Motor1E, GPIO.LOW)
			locked = False
Example #17
def TickCount() -> float:
    return time.clock()
>>> import sys
>>> print(sys.version)
3.8.5 (tags/v3.8.5:580fbb0, Jul 20 2020, 15:43:08) [MSC v.1926 32 bit (Intel)]
>>>
>>> print(time.perf_counter(), time.localtime(time.perf_counter()))
89.4915672 time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=1, tm_min=1, tm_sec=29, tm_wday=3, tm_yday=1, tm_isdst=0)
>>> 

>>> import time
>>> time.time()
1614326427.3598132
>>> time.localtime(time.time())
time.struct_time(tm_year=2021, tm_mon=2, tm_mday=26, tm_hour=9, tm_min=0, tm_sec=57, tm_wday=4, tm_yday=57, tm_isdst=0)
>>> time.asctime(time.localtime(time.time()))
'Fri Feb 26 09:01:38 2021'
>>> time.localtime(time.clock())
Traceback (most recent call last):
  File "<pyshell#4>", line 1, in <module>
    time.localtime(time.clock())
AttributeError: module 'time' has no attribute 'clock'
>>> import calendar
>>> calendar.month(2021, 7)
'     July 2021\nMo Tu We Th Fr Sa Su\n          1  2  3  4\n 5  6  7  8  9 10 11\n12 13 14 15 16 17 18\n19 20 21 22 23 24 25\n26 27 28 29 30 31\n'

>>> calendar.month(2021, 7, w=1, l=2)
'     July 2021\n\nMo Tu We Th Fr Sa Su\n\n          1  2  3  4\n\n 5  6  7  8  9 10 11\n\n12 13 14 15 16 17 18\n\n19 20 21 22 23 24 25\n\n26 27 28 29 30 31\n\n'
>>> calendar.setfirstweekday(6)
>>> calendar.month(2021, 7)
'     July 2021\nSu Mo Tu We Th Fr Sa\n             1  2  3\n 4  5  6  7  8  9 10\n11 12 13 14 15 16 17\n18 19 20 21 22 23 24\n25 26 27 28 29 30 31\n'
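As the traceback above shows, time.clock() is gone as of Python 3.8 (it had been deprecated since 3.3). The measure-and-print idiom used throughout these examples ports directly to time.perf_counter(); a minimal Python 3 sketch:

import functools
import time

def timed(func):
    # Print each call's elapsed wall time in milliseconds.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        t1 = time.perf_counter()  # monotonic, high-resolution
        res = func(*args, **kwargs)
        t2 = time.perf_counter()
        print('%s took %0.3fms' % (func.__name__, (t2 - t1) * 1000.0))
        return res
    return wrapper

@timed
def work():
    sum(range(10**6))

work()  # prints something like: work took 12.345ms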