Example #1
def get_details(html):
	soup=BeautifulSoup(html)
	# extract the author, author link, and post body
	div_content=soup.find_all(attrs={'class': 'content clearfix'})
	# extract the post timestamps
	div_time=soup.find_all(attrs={'class':'feed_from W_textb'})
	# initialize lists for the user name, profile URL, post body, and post time
	nick_name=[]
	nickname_href=[]
	content_text=[]
	time=[]
	#print get_content[0]
	for i in range(len(div_content)):
		# find the <a> tag
		a_tag=div_content[i].find('a')
		nick_name.append(a_tag.get('nick-name'))
		nickname_href.append(a_tag.get('href'))
		# find the <p> tag
		p_tag=div_content[i].find('p')
		content_text.append(p_tag.get_text())
	# collect the post times
	for j in range(len(div_time)):
		a_time=div_time[j].find('a')
		time.append(a_time.get('title'))
	return (nick_name,nickname_href,content_text,time)
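A minimal usage sketch for get_details, assuming bs4 is available; sample_html below is a made-up fragment that simply mirrors the CSS classes the function looks for:

from bs4 import BeautifulSoup  # get_details above relies on this import

sample_html = """
<div class="content clearfix">
  <a nick-name="demo_user" href="https://weibo.com/demo_user">demo_user</a>
  <p>hello from a sample post</p>
</div>
<div class="feed_from W_textb">
  <a title="2015-01-01 12:00">Jan 1</a>
</div>
"""

names, hrefs, texts, times = get_details(sample_html)
print(names, hrefs, texts, times)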
 def diffuseave(self):
     self.difffile=open('diffusion.xvg','w')
     diffave=[]
     diffave2=[]
     for i in range(self.diffmax):
         diffave.append(0.)
         diffave2.append(0.)
     for iatom in self.atoms:
         iatom.diffusecalc(self.diffmax)
         for i in range(self.diffmax):
             diffave[i]+=iatom.diffusion[i]
             diffave2[i]+=iatom.diffusion2[i]
     for i in range(self.diffmax):
         diffave[i]/=float(self.NAtom)
         diffave2[i]/=float(self.NAtom)
     print>>self.difffile,'#Mean square displacement of all atoms'
     print>>self.difffile,'#Time     <r2>'
     print>>self.difffile,'@  title \"Mean square displacement\"'
     print>>self.difffile,'@  xaxis label \"Time (reduced time units)\"'
     print>>self.difffile,'@  yaxis label \"Mean square displacement (sigma^2)\"'
     for i in range(self.diffmax):
         print>>self.difffile,'%8.4lf %8.4lf'%(i*self.Step,diffave2[i])
     self.difffile.close()
     #Fit linear regression line to <r^2>=6Dt
     time=[]
     for i in range(self.diffmax):
         time.append(i*self.Step)
     slope,intercept,r2=regression(time,diffave2)
     print 'displace^2 vs t: slope=%lf intercept=%lf R2=%lf'\
           %(slope,intercept,r2)
     self.diffconst=slope/6.
     print 'Diffusion constant from Einstein relation=%lf (reduced units)'%(self.diffconst)
     diffusion_cgs=self.diffconst*0.1344*self.Step*100000.
     print 'Diffusion constant from Einstein relation=%lf (x10-5 cm^2/sec)'%(diffusion_cgs)
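The regression() helper called above is not shown in this snippet; a minimal least-squares sketch, assuming the (slope, intercept, r2) return signature used at the call site, might look like:

def regression(x, y):
    # ordinary least-squares fit y = slope*x + intercept, plus the R^2 of the fit
    n = float(len(x))
    mean_x = sum(x) / n
    mean_y = sum(y) / n
    sxx = sum((xi - mean_x) ** 2 for xi in x)
    sxy = sum((xi - mean_x) * (yi - mean_y) for xi, yi in zip(x, y))
    slope = sxy / sxx
    intercept = mean_y - slope * mean_x
    ss_res = sum((yi - (slope * xi + intercept)) ** 2 for xi, yi in zip(x, y))
    ss_tot = sum((yi - mean_y) ** 2 for yi in y)
    r2 = 1.0 - ss_res / ss_tot
    return slope, intercept, r2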
Example #3
 def lastUpdate(self):
     time = [self.parser.source.time()]
     h = inspect.getmro(self.__class__)
     h = h[:-3]
     for c in h:
         time.append(int(os.path.getmtime(inspect.getfile(c))+.5))
     return max(time)
Example #4
 def seconds_to_human_time(cls, seconds, suffixes=['y','w','d','h','m','s'], add_s=False, separator=' '):
     """
        convert seconds to human time
     """
     # the formatted time string to be returned
     time = []
     
     # the pieces of time to iterate over (days, hours, minutes, etc)
     # - the first piece in each tuple is the suffix (d, h, w)
     # - the second piece is the length in seconds (a day is 60s * 60m * 24h)
     parts = [(suffixes[0], 60 * 60 * 24 * 7 * 52),
           (suffixes[1], 60 * 60 * 24 * 7),
           (suffixes[2], 60 * 60 * 24),
           (suffixes[3], 60 * 60),
           (suffixes[4], 60),
           (suffixes[5], 1)]
     
     # for each time piece, grab the value and remaining seconds, and add it to
     # the time string
     for suffix, length in parts:
         value = seconds / length
         if value > 0:
             seconds = seconds % length
             time.append('%s%s' % (str(value),
                            (suffix, (suffix, suffix + 's')[value > 1])[add_s]))
         if seconds < 1:
             break
     
     return separator.join(time)
Example #5
def flux2011( ) :
  time = []
  flux = []
  fin = open( "fluxes.txt", "r" )
  fout = open( "fluxVsTime", "w" )

  for line in fin :
    a = line.split()
    if len(a) == 2 :
      time.append( 24.*float(a[0]) )  # dechrs
      flux.append( float(a[1]) )      # flux

  # ... create decimal time array 
  t = numpy.array( time, dtype=float )
  print t

  # ... retrieve tsys, rmspath, tau230, elev arrays
  rmspath = getVar( "rmspath.log", t )
  tau230 = getVar( "tau230.log", t )
  source = getSource ( "uvindex.log", t )
  elevarray = getVar15( "elev.log", t )

  # ... use elev of C1 as the elevation
  elev = numpy.empty( (len(time)), dtype=float )
  for n in range (0, len(time) ) :
    elev[n] = elevarray[n][0]

  # ... all array lengths should match, otherwise this routine will crash!
  print len(time), len(flux), len(rmspath), len(tau230), len(source), len(elev)

  fout.write("#   UThrs     S(Jy)    el    tau   path   source\n")
  for n in range (0, len(time)) : 
    fout.write("%10.5f   %6.3f   %4.1f   %4.2f  %4.0f   %s\n" % (t[n], flux[n], elev[n], tau230[n], rmspath[n], source[n] ))
  fin.close()
  fout.close()
def read_coft():
    try:
        with open('COFT','r'):
            f = open('COFT','r')
            line = f.readline()
            s = line.split(',')
            time = []
            flux_1 = []
            flux_2 = []
            flux_3 = []
            while line:
                time.append(float(s[1]))
                flux_1.append(float(s[3]))
                flux_2.append(float(s[4]))
                flux_3.append(float(s[5]))
                line = f.readline()
                s = line.split(',')

            t = np.asarray(time)
            f1 = np.asarray(flux_1)
            f2 = np.asarray(flux_2)
            f3 = np.asarray(flux_3)
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(t, f1, label='gas flow')
            ax.plot(t, f2, label='liquid flow')
            ax.plot(t, f3, label='total flow')
            ax.legend()
            plt.show()
    except IOError:
        print "COFT WAS NOT GENERATED"
    return 0
Example #7
	def fivePinjun(self,action):
		#datetime.datetime.strptime("12:10:20",'%H:%M:%S')
		#date_time = datetime.datetime.strptime("2012-10-20",'%Y-%m-%d')
		#.resample(rule="5M",how="mean")
		#time=pandas.tseries.index.DatetimeIndex
		time=[]
		for tab in xrange(len(self.df["Time"])):

			#print datetime.datetime.strptime(self.df["Date"][tab]+' '+self.df["Time"][tab],'%Y-%m-%d %H:%M:%S')
			time.append(datetime.datetime.strptime(self.df["Date"][tab]+' '+self.df["Time"][tab],'%Y-%m-%d %H:%M:%S'))
		#time=pandas.PeriodIndex(time,freq='S')

		ts=Series(np.array(self.df[self.df.columns[self.columnChooseIndex[0]+6]]), index=time)

		#self.ts1=pandas.DataFrame({"DateTime":ts.index,self.df.columns[self.columnChooseIndex[0]+6]:ts})

		temps1=ts.resample("5Min")
		self.ts1=pandas.DataFrame({"DateTime":temps1.index,self.df.columns[self.columnChooseIndex[0]+6]:temps1})
		self.dataModel = DataFrameModel()
		self.dtableView.setModel(self.dataModel)
		self.dataModel.setDataFrame(self.ts1)
		self.dataModel.signalUpdate()
		self.dtableView.resizeColumnsToContents()
		self.dtableView.show()
		self.isProcess=True
Example #8
	def Process(self):
		process=Process(self)
		process.exec_()
		time=[]
		PP=process.combine.split('\\')

		for tab in xrange(len(self.df["Time"])):
			time.append(datetime.datetime.strptime(self.df["Date"][tab]+' '+self.df["Time"][tab],'%Y-%m-%d %H:%M:%S'))
		#time=pandas.PeriodIndex(time,freq='S')

		ts=Series(np.array(self.df[self.df.columns[self.columnChooseIndex[0]+6]]), index=time)

		#self.ts1=pandas.DataFrame({"DateTime":ts.index,self.df.columns[self.columnChooseIndex[0]+6]:ts})

		#temps1=eval("ts."+str(PP[0])+'.'+'("'+str(PP[1])+'")')

		if str(PP[0])=="resample":
                        temps1=ts.resample(str(PP[1]))
		self.ts1=pandas.DataFrame({"DateTime":temps1.index,self.df.columns[self.columnChooseIndex[0]+6]:temps1})
		self.dataModel = DataFrameModel()
		self.dtableView.setModel(self.dataModel)
		self.dataModel.setDataFrame(self.ts1)
		self.dataModel.signalUpdate()
		self.dtableView.resizeColumnsToContents()
		self.dtableView.show()
		self.isProcess=True
Example #9
def elapsedTime(seconds, suffixes=["y", "w", "d", "h", "m", "s"], add_s=False, separator=" "):
    """
    Takes an amount of seconds and turns it into a human-readable amount of time.
    """
    # the formatted time string to be returned
    if seconds == 0:
        return "0s"
    time = []

    # the pieces of time to iterate over (days, hours, minutes, etc)
    # - the first piece in each tuple is the suffix (d, h, w)
    # - the second piece is the length in seconds (a day is 60s * 60m * 24h)
    parts = [
        (suffixes[0], 60 * 60 * 24 * 7 * 52),
        (suffixes[1], 60 * 60 * 24 * 7),
        (suffixes[2], 60 * 60 * 24),
        (suffixes[3], 60 * 60),
        (suffixes[4], 60),
        (suffixes[5], 1),
    ]

    # for each time piece, grab the value and remaining seconds, and add it to
    # the time string
    for suffix, length in parts:
        value = seconds / length
        if value > 0:
            seconds = seconds % length
            time.append("%s%s" % (str(value), (suffix, (suffix, suffix + "s")[value > 1])[add_s]))
        if seconds < 1:
            break

    return separator.join(time)
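A quick usage sketch, assuming whole-number seconds and floor division for the value = seconds / length step (Python 2 semantics; on Python 3 use // as in the variant further below):

print(elapsedTime(0))      # '0s'
print(elapsedTime(45))     # '45s'
print(elapsedTime(90061))  # '1d 1h 1m 1s'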
Example #10
def stop_test():
    global record_data
    global data

    record_data = False

    speed = []
    time = []
    verticalSpeed = []
    pitch = []
    altitude = []


    for i in range(0,len(data)):
        speed.append( data[i]['speed'])
        time.append( data[i]['time'])
        verticalSpeed.append(data[i]['verticalSpeed'])
        pitch.append(data[i]['pitch'])
        altitude.append(data[i]['altitude'])



    plotting.plot_chart(time,speed,"time-speed")
    plotting.plot_chart(time,verticalSpeed,"time-verticalSpeed")
    plotting.plot_chart(time,pitch,"time-pitch")
    plotting.plot_chart(time,altitude,"time-altitude")
Example #11
def plot_save(af_stat_file, plot_path):
    #at_time(sec), nonNLP_percentage, insect_percentage, poison_percentage, insecticide_percentage
    time = []
    non_nlp = []
    insect = []
    poison = []
    insecticide = []
    line_no = 0
    #Expected CSV format
    with open(af_stat_file) as f:
      for line in f:
        if line_no == 0:
          line_no = line_no + 1
          continue
        line = [x.strip() for x in line.split(',') ]
        time.append(float(line[0]))
        non_nlp.append(float(line[1]))
        insect.append(float(line[2]))
        poison.append(float(line[3]))
        insecticide.append(float(line[4]))
    
    plt.plot(time, insect)
    plt.plot(time, poison)
    plt.plot(time, insecticide)
    plt.plot(time, non_nlp)
    plt.legend(['Insect', 'Poison', 'Insecticide', 'Non-nlp'], loc='best') 
    plt.ylabel('Percentage in AF')
    plt.xlabel('Time(sec)')
    plt.savefig(plot_path+"/plot.png")
    plt.clf()
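For reference, a hypothetical af_stat_file in the layout the parser above expects (a header row that gets skipped, then at_time(sec) followed by the four percentage columns):

at_time(sec), nonNLP_percentage, insect_percentage, poison_percentage, insecticide_percentage
0, 80.0, 10.0, 5.0, 5.0
60, 70.0, 15.0, 10.0, 5.0
120, 55.0, 25.0, 10.0, 10.0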
Example #12
def getdrift_raw(filename,id3,interval,datetime_wanted):
    
  # range_time is a number in units of one day; datetime_wanted is given as a date number
  d=ml.load(filename)
  lat1=d[:,8]
  lon1=d[:,7]
  idd=d[:,0]
  year=[]
  for n in range(len(idd)):
      year.append(str(idd[n])[0:2])
  h=d[:,4]
  day=d[:,3]
  month=d[:,2]
  time1=[]
  for i in range(len(idd)):
      time1.append(date2num(datetime.datetime.strptime(str(int(h[i]))+' '+str(int(day[i]))+' '+str(int(month[i]))+' '+str(int(year[i])), "%H %d %m %y")))


  idg1=list(ml.find(idd==id3))
  idg2=list(ml.find(np.array(time1)<=datetime_wanted+interval/24))
  # '0.25' means the usual interval; it can be changed based on different drifter data
  idg3=list(ml.find(np.array(time1)>=datetime_wanted-0.1))
  idg23=list(set(idg2).intersection(set(idg3)))
  # find which data we need
  idg=list(set(idg23).intersection(set(idg1)))
  print 'the length of drifter data is  '+str(len(idg)),str(len(set(idg)))+'   . if same, no duplicate'
  lat,lon,time=[],[],[]
  
  for x in range(len(idg)):
      lat.append(round(lat1[idg[x]],4))
      lon.append(round(lon1[idg[x]],4))
      time.append(round(time1[idg[x]],4))
  # time is num
  return lat,lon,time
Example #13
def readInVariable():
    gc.disable()
    print("In readInVariable() function")
    wb = load_workbook('resources/dbMetrics.xlsx', use_iterators = True, read_only = True)
    ws = wb.get_sheet_by_name("metrics")
    numRows = ws.get_highest_row()
    #print(numRows)
    date = []
    time = []
    RCMPL = []
    Unblocked = []
    timeInitial = datetime.datetime.now()
    #print(numRow)
    #print(ws.iter_rows('A2:S'+str(numRows)))
    ws_iter = tuple(ws.iter_rows('A2:D'+str(numRows)))
    #print("11111")
    #print(type(ws_iter))
    i = 0
    j= 1
    for row in ws_iter:
        #if(i%500 == 0):
            #print(i, datetime.datetime.now()-timeInitial)
        for cell in row:
            if j == 1:
                date.append(cell.value)
            elif j == 2:
                time.append(cell.value)
            elif j == 3:
                RCMPL.append(cell.value)
            elif j == 4:
                Unblocked.append(cell.value)
            j = j+1
        j = 1
    print("Length of date ",len(date), len(RCMPL))
Example #14
 def elapsed_time(self, seconds, suffixes=['y','w','d','h','m','s'], add_s=False, separator=''):
     """
     Takes an amount of seconds and turns it into a human-readable amount
     of time.
     From http://snipplr.com/view.php?codeview&id=5713
     """
     # the formatted time string to be returned
     time = []
     
     # the pieces of time to iterate over (days, hours, minutes, etc)
     # - the first piece in each tuple is the suffix (d, h, w)
     # - the second piece is the length in seconds (a day is 60s * 60m * 24h)
     parts = [(suffixes[0], 60 * 60 * 24 * 7 * 52),
               (suffixes[1], 60 * 60 * 24 * 7),
               (suffixes[2], 60 * 60 * 24),
               (suffixes[3], 60 * 60),
               (suffixes[4], 60),
               (suffixes[5], 1)]
     
     # for each time piece, grab the value and remaining seconds, and add it to
     # the time string
     for suffix, length in parts:
         value = seconds // length
         if value > 0:
             seconds = seconds % length
             time.append('%s%s' % (str(value),
                 (suffix, (suffix, suffix + 's')[value > 1])[add_s]))
         if seconds < 1:
             break
     
     return separator.join(time)
Example #15
def float_float(T,t,n,T1,N):

	num = T/t
	num = int(num)
	print num
	conc = []
	time = []
	for i in range(0,num+1):
	    print('input concn at time')
	    print i*t
	    conc.append(float(raw_input('')))
	    time.append(i*t)

	print conc
	print time
	#time.sleep(15)

	m=[[]]
	m[0].extend([conc[0],time[0]])
	for i in range(1, num+1):
	    m.append([conc[i],time[i]])

	    # print tc
	    # conc_time.append(tc)
	    
	print m
def graph_axis_trajectory(axis, pdf_name):
    time = []
    pos = []
    vel = []
    acc = []
    for t in axis:
        time.append(t[0])
        pos.append(t[1])
        vel.append(t[2])
        acc.append(t[3])

    fig = plt.figure()
    fig.clf()
    ax1 = fig.add_subplot(311)
    ax1.plot(time, pos)
    ax1.set_xlabel('Time (s)')
    ax1.set_ylabel('Position (m)')
    ax2 = fig.add_subplot(312)
    ax2.plot(time, vel)
    ax2.set_xlabel('Time (s)')
    ax2.set_ylabel('Velocity (m/s)')
    ax3 = fig.add_subplot(313)
    ax3.plot(time, acc)
    ax3.set_xlabel('Time (s)')
    ax3.set_ylabel('Acceleration (m/s^2)')
    fig.savefig((pdf_name + ".pdf"))
Example #17
def smooth_values(timestamps, values, totals, radius):
    """
    Sliding window

    >>> t = [dt(2011, 01, 20, 0, 0), dt(2011, 01, 21, 0, 0), \
             dt(2011, 01, 22, 0, 0), dt(2011, 01, 23, 0, 0), \
             dt(2011, 01, 28, 0, 0), dt(2011, 01, 30, 0, 0), \
             dt(2011, 01, 31, 0, 0)]
    >>> v = [1,2,3,4,5,6,7]
    >>> tot = [2,3,4,5,6,7,8]
    >>> smooth_values(t, v, tot, 3)
    ([datetime.datetime(2011, 1, 20, 0, 0), datetime.datetime(2011, 1, 20, 12, 0), datetime.datetime(2011, 1, 21, 0, 0), datetime.datetime(2011, 1, 22, 0, 0), datetime.datetime(2011, 1, 24, 8, 0), datetime.datetime(2011, 1, 27, 0, 0), datetime.datetime(2011, 1, 29, 16, 0), datetime.datetime(2011, 1, 30, 12, 0), datetime.datetime(2011, 1, 31, 0, 0)], [1, 3, 6, 9, 12, 15, 18, 13, 7], [2, 5, 9, 12, 15, 18, 21, 15, 8])
    """
    time = []
    ser = []
    tot = []
    k = radius / 2
    for i in range(-(radius / 2 + 1), len(timestamps) - (radius / 2) + 1):
        v = i if i > 0 else 0
        time.append(dt_average(timestamps[v:v + k]))
        ser.append(sum(values[v:v + k]))
        tot.append(sum(totals[v:v + k]))
        if k < radius:
            k += 1
    return time, ser, tot
Example #18
def getGains( infile ) :
  time = []
  gain = [] 
  p= subprocess.Popen( ( shlex.split('gplist vis=%s options=all' % infile) ), \
     stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.STDOUT) 
  result = p.communicate()[0]
  lines = result.split("\n")
  ngains = (len(lines) - 3)/23
    # caution: this presumes 23 antennas will be listed for each time, and that
    # gains output has 3 header lines (and one blank line at the end ?)
  gainComplex = numpy.zeros( (ngains,15), dtype=complex )
  ng = -1 
  for n in range(3, len(lines) ) :
    a = lines[n].split()
    if ( (len(a) > 0) and (a[0] != "Ant") ) : 
      ng = ng + 1
      time.append( a[0] )
      nant = int(a[2])
      if ( nant != 1 ) :
        print "getGains error - unexpected ant number"
      gainComplex[ng][nant-1] = float(a[5]) + 1j * float(a[6])
    elif ( len(a) > 0 ) :
      nant = int(a[1]) 
      if ( nant < 16 ) :
        gainComplex[ng][nant-1] = float(a[4]) + 1j * float(a[5])
  return [time, gainComplex]
Example #19
def readfire(s): # reads the output of a fire file and 
		 # returns the time vector and the population vector
	
	# input file should have two columns: first column is time in generations
	# second column is population
	
	time = []
	pop = []
	
	with open(s, 'r') as input_file:
		for line in input_file:
		    temp = line.strip()
		    if 'f()' in temp:
			time = []
			pop = []
			temp = line.strip()
			a,b = temp.split()
			time.append(float(a))
			pop.append(float(b))
	
	#with open(s, 'r') as input_file:
		#throwaway = input_file.readline()
		#while throwaway.strip() != 'START HERE':
			#throwaway = input_file.readline()
		#for line in input_file:
			#print 'hello'
			#temp = line.strip()
			#a,b = temp.split()
			#time.append(float(a))
			#pop.append(float(b))
			#print a, b	
	
	print 'readfire is done'
	
	return [time, pop]
Example #20
def format_time(timespan, precision=3):
    """Formats the timespan in a human readable form"""
    if timespan >= 60.0:
        # we have more than a minute, format that in a human readable form
        parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)]
        time = []
        leftover = timespan
        for suffix, length in parts:
            value = int(leftover / length)
            if value > 0:
                leftover = leftover % length
                time.append('{0}{1}'.format(str(value), suffix))
            if leftover < 1:
                break
        return " ".join(time)
    # Unfortunately the unicode 'micro' symbol can cause problems in
    # certain terminals.
    # See bug: https://bugs.launchpad.net/ipython/+bug/348466
    # Try to prevent crashes by being more secure than it needs to
    # E.g. eclipse is able to print a mu, but has no sys.stdout.encoding set.
    units = ["s", "ms", 'us', "ns"]  # the safe value
    if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
        try:
            '\xb5'.encode(sys.stdout.encoding)
            units = ["s", "ms", '\xb5s', "ns"]
        except Exception:
            pass
    scaling = [1, 1e3, 1e6, 1e9]

    if timespan > 0.0:
        order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
    else:
        order = 3
    return "{1:.{0}g} {2}".format(precision, timespan * scaling[order],
                                  units[order])
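Illustrative calls covering both branches (minute-level formatting above 60 s and scaled sub-second units below it); assumes math and sys are imported as in the surrounding module:

print(format_time(75.5))     # '1min 15s'
print(format_time(3.5))      # '3.5 s'
print(format_time(0.00123))  # '1.23 ms'
print(format_time(4.2e-06))  # '4.2 µs' (or '4.2 us' if stdout cannot encode the micro sign)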
Example #21
def run_experiment(mode, cmd, t):
    file.write(" ".join(cmd) + "\n")
    time = []
    timeout = 120
    if mode == "stm" or mode == "gen":
        timeout = 1200
    for i in range(t):
        while (True) :
            out = timeout_command(cmd, timeout)
            if out == None:
                print "Timed out ", cmd
                continue
            r = re.search("(?<=Time = )[0-9]*\.[0-9]*", out)
            if r == None:
                print "retry ", cmd
                continue
            out = r.group(0);
            break
        time.append(float(out))
        file.write(out + " ")
    file.write("\n")
    file.write("Std dev = " + "%.3f" % (numpy.std(time)))
    file.write("\n")
    file.write("min = " + "%.3f" % (numpy.amin(time)) + "\n")
    file.write("max = " + "%.3f" % (numpy.amax(time)) + "\n")
    return (numpy.median(time), numpy.amin(time), numpy.amax(time))
Example #22
def done():
    global time
    
    time.append(datetime.datetime.now())
    standup_time()
    tabled()
    post_message('Bye!')
    reset()
Example #23
def processingTimeByPhoto(serie):
	time = []
	last = []
	for key, track in serie.items():
		time.append(track['pt1'])
		last = track['pt2']
	time.append(last)
	return time
Example #24
def gettrack_codar(lon_vel_list,lat_vel_list,u,v,startdate,hours,la,lo,uu,vv,q): # tracks particle at surface
            
            # calculate the points near la,la
            distance,index_lon,index_lat=nearxy(lon_vel_list,lat_vel_list,lo,la)
            
           
            #index_startdate=0#get the index of startdate
            # get u,v
            u1=float(u[index_lat][index_lon])
            v1=float(v[index_lat][index_lon])
            if u1<=-998.0/100:# case of no good data
                u1=uu[0]
                v1=vv[0]
                print "no good data in this lat lon at "+num2date(startdate+q+1).strftime("%d/%m/%y %H")+"h"
                
            #nsteps=scipy.floor(min(numdays,jdmat_m_num[-1])/daystep)
            # get the velocity data at this first time & place
            lat_k='lat'+str(1)
            lon_k='lon'+str(1)
            uu,vv,lon_k,lat_k,time=[],[],[],[],[]
            uu.append(u1)
            vv.append(v1)
            lat_k.append(la)
            lon_k.append(lo)
            time.append(startdate)
            
               
            # first, estimate the particle move to its new position using velocity of previous time steps
            lat1=lat_k[0]+float(vv[0]*3600)/1000/1.8535/60
            lon1=lon_k[0]+float(uu[0]*3600)/1000/1.8535/60*(scipy.cos(float(la)/180*np.pi))
                # find the closest model time for the new timestep
            '''
                jdmat_m_num_i=time[i-1]+float(1.0/24)
                time.append(jdmat_m_num_i)
                #print jdmat_m_num_i
                index_startdate=index_startdate+1

                #index_startdate=int(round(np.interp(jdmat_m_num_i,jdmat_m_num,range(len(jdmat_m_num)))))
                #find the point's index of near lat1,lon1
                index_location=nearxy(lon_vel,lat_vel,lon1,lat1)[1]
                ui=u[index_startdate][index_location]
                vi=v[index_startdate][index_location]
                #if u1<>-999.0/100:# case of good data
                vv.append(vi)
                uu.append(ui)
                
                # estimate the particle move from its new position using velocity of previous time steps
                lat_k.append(float(lat1+lat_k[i-1]+float(vv[i]*3600)/1000/1.8535/60)/2)
                lon_k.append(float(lon1+lon_k[i-1]+float(uu[i]*3600)/1000/1.8535/60*scipy.cos(float(lat_k[i])/180*np.pi))/2)
                #else:
                '''
                
                #  vv.append(0)
                #  uu.append(0)
                  # estimate the particle move from its new position using velocity of previous time steps
                #  lat_k.append(float(lat1))
                #  lon_k.append(float(lon1))      
            return lat1,lon1,time,uu,vv
Example #25
def start():
    global time
    
    if len(time) != 0:
        post_message('But we\'ve already started!')
        return
    time.append(datetime.datetime.now())
    post_message('Let\'s get started! What did you work on yesterday? What are you working on today? What, if any, are your blockers?\nWhen you\'re done, please type !next')
    next()
Example #26
def done():
    global time

    time.append(datetime.datetime.now())
    standup_time()
    tabled()
    post_summary()
    post_message("Bye!")
    reset()
Example #27
def start():
    global time

    if len(time) != 0:
        post_message("But we've already started!")
        return
    time.append(datetime.datetime.now())
    post_message("Let's get started! %s\nWhen you're done, please type !next" % start_message)
    next()
Example #28
	def retrival(self,objectIdArr):
		num=[str(len(objectIdArr))]
		time=[]
		articles=[]
		for objectId in objectIdArr:
			res=self.mongodb[objectId[2]].find_one({'_id':objectId[1]})
			time.append(res['time'])
			articles.append(re.split(u'[\u3002\uff1b\uff01\uff1f\n]#PU ',res['article'].rstrip(' ')))
		return num+time+articles
Example #29
def start():
    global time
    
    if len(time) != 0:
        post_message('But we\'ve already started!')
        return
    time.append(datetime.datetime.now())
    post_message('Let\'s get started! %s\nWhen you\'re done, please type !next' % start_message)
    next()
Example #30
  def _initPointDet(self):
    if self._isPointDet and self._useMemoryCache:
      data = []
      time = []
      for th5,datapath, timepath  in zip(self._h5s,self._paths['data'],self._paths['time']):
        data.append(th5[datapath[0]][:])
        time.append(th5[timepath[0]][:])

      self.fields = {'data':[data,time]}	
Example #31
def plot_VEQ_data(all_data):
    '''
    Plot VEQ data

    Process raw string output from Vokaturi into array of audio emotions
    Order: [Neutral, Happy, Sad, Angry, Fear][neg, neu, pos, compound, text]
  
    '''
    #Initialize all empty arrays
    time = []
    neutral = []
    happy = []
    sad = []
    angry = []
    fear = []
    compound = []
    #get all data into arrays
    for i in all_data:
        time.append(i[0])
        neutral.append(i[1])
        happy.append(i[2])
        sad.append(i[3])
        angry.append(i[4])
        fear.append(i[5])
        compound.append(i[9])

    #Sanity printing
    # print(time)
    # print(neutral)
    # print(happy)
    # print(sad)
    # print(angry)
    # print(fear)
    # print(compound)

    trace_neutral = go.Scatter(x=time,
                               y=neutral,
                               name='Neutral',
                               line=dict(color=('rgb(140, 65, 244)'),
                                         width=2.5,
                                         dash='dash'))

    trace_happy = go.Scatter(x=time,
                             y=happy,
                             name='Happy',
                             line=dict(color=('rgb(89, 244, 66)'),
                                       width=2.5,
                                       dash='dash'))

    trace_sad = go.Scatter(x=time,
                           y=sad,
                           name='Sad',
                           line=dict(color=('rgb(65, 178, 244)'),
                                     width=2.5,
                                     dash='dash'))

    trace_angry = go.Scatter(x=time,
                             y=angry,
                             name='Angry',
                             line=dict(color=('rgb(244, 65, 65)'),
                                       width=2.5,
                                       dash='dash'))

    trace_fear = go.Scatter(x=time,
                            y=fear,
                            name='Fear',
                            line=dict(color=('rgb(244, 157, 65)'),
                                      width=2.5,
                                      dash='dash'))
    trace_sentiment = go.Scatter(x=time,
                                 y=compound,
                                 name='Sentiment',
                                 line=dict(color=('rgb(0, 0, 0)'), width=4))

    data = [
        trace_neutral, trace_happy, trace_sad, trace_angry, trace_fear,
        trace_sentiment
    ]

    # Edit the layout
    layout = dict(
        title='Audio Emotion over Session',
        xaxis=dict(title='Time'),
        yaxis=dict(title='Emotion Level'),
    )

    fig = dict(data=data, layout=layout)
    py.offline.plot(fig, filename='audio_emotion.html')
Example #32
    def parse_detail_page(self, source):

        html = etree.HTML(source)
        preson_id = html.xpath(
            "//div[@class='cnt f-brk']/a[@class='s-fc7']/@href")
        song_name = "".join(
            html.xpath("//div[@class='tit']/em[@class='f-ff2']/text()"))
        singer = ''.join(
            html.xpath(
                "//div[@class='cnt']/p[1]/span/a[@class='s-fc7']/text()"))
        album = ''.join(
            html.xpath("//div[@class='cnt']/p[2]/a[@class='s-fc7']/text()"))
        comment_sum = ''.join(
            re.findall(r'<span class="j-flag">(.*?)</span>', source,
                       re.DOTALL))
        self.music['album'] = album
        # print(self.message)
        # print('++'*30)
        self.music['song_name'] = song_name
        self.music['singer'] = singer
        self.music['album'] = album
        self.music['comment_sum'] = comment_sum
        try:
            # insert the record into the database
            self.mysqlCommand.insert_musicData(self.music)
        except Exception as e:
            print("failed to insert music data", str(e))  # log why the insert failed
        # extract the like counts
        points_tags = re.findall(r'<i class="zan u-icn2 u-icn2-12">(.*?)</a>',
                                 source, re.DOTALL)
        point = []
        for i in points_tags:

            point_rag = re.sub('</i> ', '', i)
            point_rag = re.sub('</i>', '0', point_rag)
            point.append(point_rag)
        # visit each commenter's detail page
        for i in preson_id:
            preson_url = "https://music.163.com" + i
            try:
                self.request_preson_page(preson_url, song_name)
            except:
                self.driver.close()
                self.driver.switch_to.window(self.driver.window_handles[1])
        name = html.xpath("//div[@class='cnt f-brk']/a[1]/text()")
        comment_tags = re.findall(
            r'<div class="cnt f-brk">.*?</a>(.*?)</div>.*?</a>(.*?)</div>',
            source, re.DOTALL)
        comments = []
        for item in comment_tags:
            comment = str()
            for i in item:
                comment_tag = re.sub('<br />', ' ', i)

                comment_tag = re.sub('<(.*?)>', '', comment_tag)
                if item.index(i) == 1 and comment_tag != '|回复':
                    comment_tag = '\n 评论回复' + comment_tag
                comment += comment_tag
            comment = comment.rstrip('|回复')
            comment = ''.join(comment)
            comment = '""' + comment + '""'
            comments.append(comment)

        time = []
        times = html.xpath("//div[@class='time s-fc4']/text()")
        for i in times:
            time.append(i.replace(' ', ''))
        # print('++' * 30)
        for i in range(len(name)):
            self.message['song_name'] = song_name
            self.message['name'] = name[i]
            self.message['comments'] = comments[i]
            self.message['time'] = time[i]
            self.message['point'] = point[i]
            self.message['url'] = "https://music.163.com" + preson_id[i]
            self.mysqlCommand.insert_messageData(self.message)
Example #33
    def generateCharts(totalPoints, totalIssues,
                        totalPullRequest, sprintsIssues,
                        assigneesIssues, statusIssues,
                        sprintsIssuesDevs,statusIssuesDevs,
                        sprintsPoints, assigneesPoints,
                        statusPoints, sprintsPointsDevs,
                        statusPointsDevs,
                        events, graphsURLs,
                        eventGraphsURLs, uploadImage=False):
        
        startTimeAuthImgur = datetime.now()
        print("AuthImgurTime")
        print(startTimeAuthImgur)
        try:
            client = authenticateImgur()
        except:
            print("Imgur auth problem")
        print("Final AuthImgurTime")
        print(datetime.now() - startTimeAuthImgur)
        
        startTimeGraphs = datetime.now()
        print("GraphsTime")
        print(startTimeGraphs)
        #percents = [totalPoints, totalIssues, totalPullRequest]
        
        try:
            pieCharts = [sprintsIssues, sprintsPoints]
            for pie in pieCharts:
                
                pieChart(sorted(pie.items(), key=operator.itemgetter(0)))
                
                if uploadImage == True:
                    image = client.upload_from_path('C:\\Users\\vinic\\Projects\\projet\\pieCharts\\foo.png', anon=False)
                    link_img = image['link']
                    print("Pie uploaded")
                    print(link_img)
                    graphsURLs.append(link_img)
                    print("Pie appended")
        except:
            print("Pie chart error!")

        try:    
            barCharts = {'assignees':[assigneesIssues, assigneesPoints ],'status':[statusIssues, statusPoints]}
            for key, value in barCharts.items():      
                print(value[0])
                print(value[1])
                barA = sorted(value[0].items(), key=operator.itemgetter(0))
                barB = sorted(value[1].items(), key=operator.itemgetter(0))
                barChart(barA, barB, "Tarefas", "Pontos")
                if uploadImage == True:
                    image = client.upload_from_path('C:\\Users\\vinic\\Projects\\projet\\barCharts\\bar.png', anon=False)
                    link_img = image['link']
                    print("Bar uploaded")
                    print(link_img)
                    graphsURLs.append(link_img)
                    print("Bar appended")
        except:
            print("Bar chart error!")

        
        #stackedBarCharts = [sprintsIssuesDevs, statusIssuesDevs, sprintsPointsDevs, statusPointsDevs]
        #for stackedBar in stackedBarCharts:
        #    link_img = stackedBarChart(bar)
        #    image = client.upload_from_path('C:\\Users\\vinic\\Projects\\projet\\foo1.png', anon=False)
        #    link_img = image['link']
        #    print(link_img)
        #    graphsURLs.append(link_img)

        try: 
            time = []
            eventStatus = []
            eventTag = []
            print("events " + str(len(events)))
            for issueEvent in events:
                for eventID, eventData in issueEvent:
                    for eD in eventData:
                        print(eventID)
                        print(eventData)
                        if eventData['event'] == "labeled" or eventData['event'] == "unlabeled":
                            print(eventData['event'])
                            try:
                                print("Over here")      
                                if (eventData['detail']['status'].rfind("-") != -1):
                                    print("Status :" + str(status))
                                    if int(eventData['detail']['status'].split(" - ")[0]) >= 0:
                                        
                                        eventStatus.append(int(status.split(" - ")[0]))
                                        
                                else:
                                    eventStatus.append(status)

                                date_string = str(eventData['created_at'])
                                print("Date " + str(date_string))
                                try:
                                    int_time = int(time.mktime(datetime.strptime(date_string, "%Y-%m-%d %H:%M:%S").timetuple()))
                                    print("int_time: " + str(int_time))
                                    time.append(str(int_time))
                                except:
                                    time.append(str(date_string))
                                print("here")
                                    
                            except:
                                print("Error map events to charts")
                                #eventTag.append(eventData)
                                #time.append(str(eventData['created_at']))
                                
                        else:
                            print("!!!!!!")
                            
                        print("Time:")
                        print(str(len(time)))
                        print("EventStatus")
                        print(str(len(eventStatus)))
                        print("EventTag")
                        print(str(len(eventTag)))
                        print("EventGraphsURLs")
                        print(str(len(eventGraphsURLs)))
                    try:
                        if len(time) > 0 and len(eventStatus) > 0:
                            print(time[0])
                            print(len(eventStatus))
                            lineChart(time, eventStatus)
                            print("Ploted")
                        if uploadImage == True:
                            image = client.upload_from_path('C:\\Users\\vinic\\Projects\\projet\\lineCharts\\simpleChart.png', anon=False)
                            print(image)
                            link_img = image['link']
                            print("Line uploaded")
                            print(link_img)
                            eventGraphsURLs.append(link_img)
                            print("Line appended")
                            
                    except:
                        print("Simple chart error")
        except:
            print("Line chart error!")
            
        print("Final graficos")
        print(datetime.now() - startTimeGraphs)
    return z
    

for i in range(1,NSum+1):
    with nidaqmx.Task() as task:
        task.ai_channels.add_ai_voltage_chan("Dev1/ai{0}".format(port1))
        task.ai_channels.add_ai_voltage_chan("Dev1/ai{0}".format(port2))
        task.timing.cfg_samp_clk_timing(fs,samps_per_chan=number_of_samples_per_channel)
        data = np.array(task.read(number_of_samples_per_channel))/amp_gain
        
        
        z_1= np.conj(FFT_hanning_Normalised(data[0],number_of_samples_per_channel)) #Take the complex conjugate of the Fourier transform of signal 1
        z_2 = FFT_hanning_Normalised(data[1],number_of_samples_per_channel) #Take the fourier transform of 2
        cc_0 = np.multiply(z_1,z_2) #Product of the dot product of the two    
        Ldata.append(cc_0[0:int((number_of_samples_per_channel/2)+1)])
        time.append(ts)

#np.save(file_n,Ldata)
#np.save(file_t,time)
  
#print(Ldata[0])
#print(Ldata[1])

stop = timeit.default_timer()
print('Time: ', stop - start)

for j in range(1,int(NSum/segment_size)):
    for i in np.arange(j*segment_size):
        results = []
        t = []
        a = np.average(FFT_SpecConv(Ldata[i][110:140],1))
Example #35
           continue
       motion = 1
 
       (x, y, w, h) = cv2.boundingRect(contour) 
       # making green rectangle around the moving object
       cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3) 
 
   # Appending status of motion 
   motion_list.append(motion) 
 
   motion_list = motion_list[-2:] 
 
   # Appending Start time of motion 
   if motion_list[-1] == 1 and motion_list[-2] == 0:
       print("threshold") 
       time.append(datetime.now()) 
 
   # Appending End time of motion 
   if motion_list[-1] == 0 and motion_list[-2] == 1: 
       time.append(datetime.now()) 
 
   # Displaying image in gray_scale 
   cv2.imshow("Gray Frame", gray) 
 
   # Displaying the difference in currentframe to 
   # the staticframe(very first_frame) 
   cv2.imshow("Difference Frame", diff_frame) 
 
   # Displaying the black and white image in which if 
    # intensity difference greater than 30 it will appear white
   cv2.imshow("Threshold Frame", thresh_frame) 
Example #36
            liste4.append(frame[i,j][0])


    
    liste1 = str(liste1)
    liste2 = str(liste2)
    liste3 = str(liste3)
    liste4 = str(liste4)

    
    try:
        if sum(BOX1)/len(BOX1) + 1000 < len(liste1) or\
           sum(BOX1)/len(BOX1) - 1000 > len(liste1):
            cv2.rectangle(frame,(100,100), (150, 150), (255,0,255), 1)
            pos.append(1)
            time.append(c)
  
        if sum(BOX2)/len(BOX2) + 1000 < len(liste2) or\
           sum(BOX2)/len(BOX2) - 1000 > len(liste2):
            cv2.rectangle(frame,(300,100), (350, 150), (255,0,255), 1)
            pos.append(2)
            time.append(c)

        if sum(BOX3)/len(BOX3) + 500 < len(liste3) or\
           sum(BOX3)/len(BOX3) - 500 > len(liste3):
            cv2.rectangle(frame,(400,100), (450, 150), (255,0,255), 1)
            pos.append(3)
            time.append(c)

        
        if sum(BOX4)/len(BOX4) + 80 < len(liste4) or\
    url = []
    title = []
    source = []
    time = []
    ext_date = []

    with open(filename, newline='', encoding='utf-8') as f:
        column_names = ['id', 'url', 'title', 'source', 'time']
        frdr = csv.DictReader(f, fieldnames=column_names)

        for row in frdr:

            url.append(row['url'])
            title.append(row['title'])
            source.append(row['source'])
            time.append(row['time'])

        last_row = len(url)

    for l in range(last_row):
        if l == 0:
            id.append('id')
            ext_date.append('extract date')
        else:
            id.append(l)
            ext_date.append(datetime.date.today())

    with open(filename, 'w', newline='', encoding='utf-8') as f:
        column_names = ['id', 'url', 'title', 'source', 'time', 'extract date']
        frtr = csv.DictWriter(f, fieldnames=column_names)
Example #38
h = []
h.append(120000)
v = []
v.append(0)
a = []
a.append(-9.81)
dp = []
dp.append(0)
Dt = []
Dt.append(0)
Findrag = []
Findrag.append(0)
index = 0
current_height = h[0]
time = []
time.append(0)
while current_height > 1000:
    M = Mach(h[index], v[index])
    D = 0.5 * CD(M) * current_density(h[index]) * S * (v[index])**2
    D_body = 0.5 * 1.14 * current_density(
        h[index]) * 9.5e-3 * (v[index])**2  #Rough approx for bluff body drag
    dp1 = current_density(h[index]) * 1 / 2 * (v[index])**2
    accel = (D + D_body - W) / m
    a.append(accel)

    v_new = v[index] - a[index] * dt
    current_height = h[index] - v_new * dt

    v.append(v_new)
    h.append(current_height)
    time.append(time[index] + 0.1)
    index += 1  # advance to the next integration step; without this the loop never terminates
Example #39
def hello():
    flag = 30
    flag1 = 0
    flag2 = 375
    i = 0
    h = 0
    # Assigning our static_back to None
    static_back = None

    # List when any moving object appear
    motion_list = [None, None]

    # Time of movement
    time = []

    # Initializing DataFrame, one column is start
    # time and other column is end time
    df = pandas.DataFrame(columns=["Start", "End"])

    # Capturing video
    video = cv2.VideoCapture(0)

    # Infinite while loop to treat stack of image as video
    while True:
        # Reading frame(image) from video
        check, screenshot = video.read()
        frame = screenshot
        blur = cv2.blur(screenshot, (1, 1))
        # Initializing motion = 0(no motion)
        motion = 0

        # Converting color image to gray_scale image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Converting gray scale image to GaussianBlur
        # so that change can be found easily
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # In first iteration we assign the value
        # of static_back to our first frame
        if static_back is None:
            static_back = gray
            continue

        # Difference between static background
        # and current frame(which is GaussianBlur)
        diff_frame = cv2.absdiff(static_back, gray)

        # If change in between static background and
        # current frame is greater than 30 it will show white color(255)
        thresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1]
        thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)

        # Finding contour of moving object
        (_, cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)

        for contour in cnts:
            if cv2.contourArea(contour) < 10000:
                continue
            motion = 1

            if flag == 0:
                cv2.imwrite('opencv' + str(i) + '.png', blur)
                i = i + 1
                flag = 30
            flag = flag - 1

            if flag1 == 0:
                threading.Thread(target=msg_siren).start()
                flag1 = flag1 + 1
            if flag2 == 0:
                threading.Thread(target=email_send).start()
                flag2 = 375  # reset the e-mail countdown (was a no-op comparison)
            flag2 = flag2 - 1

            (x, y, w, h) = cv2.boundingRect(contour)
            # making green rectangle around the moving object
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

            h = h + 1
        # Appending status of motion
        motion_list.append(motion)

        motion_list = motion_list[-2:]

        # Appending Start time of motion
        if motion_list[-1] == 1 and motion_list[-2] == 0:
            time.append(datetime.now())

        # Appending End time of motion
        if motion_list[-1] == 0 and motion_list[-2] == 1:
            time.append(datetime.now())

        # Displaying image in gray_scale
        cv2.imshow("Gray Frame", gray)

        # Displaying the difference in currentframe to
        # the staticframe(very first_frame)
        cv2.imshow("Difference Frame", diff_frame)

        # Displaying the black and white image in which if
        # intensity difference greater than 30 it will appear white
        cv2.imshow("Threshold Frame", thresh_frame)

        # Displaying color frame with contour of motion of object
        cv2.imshow("Color Frame", frame)

        key = cv2.waitKey(1)
        # if q entered whole process will stop
        if key == ord(' '):
            mixer.quit()
            static_back = None
            flag1 = 0

        if key == ord('q'):
            # if something is moving then append the end time of movement
            if motion == 1:
                time.append(datetime.now())
            break

    # Appending time of motion in DataFrame
    for i in range(0, len(time), 2):
        df = df.append({
            "Start": time[i],
            "End": time[i + 1]
        },
                       ignore_index=True)

    # Creating a csv file in which time of movements will be saved
    df.to_csv("Time_of_movements.csv")

    video.release()
    print(h)
    # Destroying all the windows

    cv2.destroyAllWindows()
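DataFrame.append as used above was deprecated and later removed in pandas 2.0; a sketch of the same start/end pairing with the plain DataFrame constructor (the helper name here is illustrative, not from the original code):

import pandas

def motion_spans_to_dataframe(time):
    # pair consecutive (start, end) timestamps collected by the motion detector
    spans = [{"Start": time[i], "End": time[i + 1]}
             for i in range(0, len(time) - 1, 2)]
    return pandas.DataFrame(spans, columns=["Start", "End"])

# df = motion_spans_to_dataframe(time)
# df.to_csv("Time_of_movements.csv")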
Example #40
    def __init__(self):

        nodes = np.loadtxt("D:/Tidal_ananlysis_data/analysis/nodes.csv")
        print("Loading 20 %")
        t = np.loadtxt("D:/Tidal_ananlysis_data/analysis/t.csv")
        clear_output(wait=True)
        print("Loading 40 %")
        WD = np.loadtxt("D:/Tidal_ananlysis_data/analysis/d.csv")
        clear_output(wait=True)
        print("Loading 60 %")
        u = np.loadtxt("D:/Tidal_ananlysis_data/analysis/u.csv")
        clear_output(wait=True)
        print("Loading 80 %")
        v = np.loadtxt("D:/Tidal_ananlysis_data/analysis/v.csv")
        clear_output(wait=True)
        print("loading 100 %")

        idx = np.loadtxt(
            "D:/Tidal_ananlysis_data/analysis/nodes_inex(Rm_DCSM_zuno_WDmin=1.5,nl=(1, 1)).csv",
            dtype=int,
        )
        t = t[::6]
        WD = WD[::6, idx]
        v = v[::6, idx]
        u = u[::6, idx]
        nodes = nodes[idx]

        time = []
        for timestep in t:
            tt = datetime.datetime.fromtimestamp(timestep)
            time.append(tt)
        time = np.array(time)

        WD_t = np.zeros(WD.shape)
        u_t = np.zeros(u.shape)
        v_t = np.zeros(v.shape)

        q = int(0)
        qq = 1
        for node in range(len(nodes)):
            q = q + int(1)
            if q == 10:
                clear_output(wait=True)
                print(np.round(qq / len(nodes) * 1000, 3), "%")
                q = int(0)
                qq += 1
            d_n, u_n, v_n = self.Tidal_analysis(node, WD, u, v, time)
            WD_t[:, node] = d_n
            u_t[:, node] = u_n
            v_t[:, node] = v_n

        bat, nodes_bat = self.bath()
        bath = griddata(nodes_bat, bat, nodes)

        print(WD.shape)
        WD_new = np.zeros(WD.shape)
        for i in range(WD.shape[0]):
            WD_new[i] = WD_t[i] - bath

        WD_new[WD_new < 0] = 0

        self.t = t - t[0]
        self.WD = WD_new
        self.u = u_t
        self.v = v_t
        self.nodes = nodes
        self.tria = Delaunay(nodes)
Example #41
def detect_camera():
    print('mtav', Motion.objects.all())
    # Assigning our static_back to None
    static_back = None

    # List when any moving object appear
    motion_list = [None, None]

    # Time of movement
    time = []

    # Initializing DataFrame, one column is start
    # time and other column is end time
    df = pandas.DataFrame(columns=["Start", "End"])

    print(df)

    time_count = 0
    # Capturing video
    video = cv2.VideoCapture(
        'rtsp://admin:@[email protected]:554/Streaming/Channels/101')

    while True:
        time_count = time_count + 1

        # if time_count == 60:
        #     break
        # Reading frame(image) from video
        # print(video)
        check, frame = video.read()
        # print(frame,check)

        # Initializing motion = 0(no motion)
        motion = 0

        # Converting color image to gray_scale image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Converting gray scale image to GaussianBlur
        # so that change can be found easily
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # In first iteration we assign the value
        # of static_back to our first frame
        if static_back is None:
            static_back = gray
            continue

        # Difference between static background
        # and current frame(which is GaussianBlur)
        diff_frame = cv2.absdiff(static_back, gray)

        # If change in between static background and
        # current frame is greater than 30 it will show white color(255)
        thresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1]
        thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)

        # Finding contour of moving object
        cnts, _ = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)

        for contour in cnts:
            # print(cv2.contourArea(contour))
            if cv2.contourArea(contour) < 10000:
                continue
            motion = 1

            (x, y, w, h) = cv2.boundingRect(contour)
            # making green rectangle arround the moving object
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

        # Appending status of motion
        motion_list.append(motion)

        motion_list = motion_list[-2:]

        # Appending Start time of motion
        if motion_list[-1] == 1 and motion_list[-2] == 0:
            time.append(datetime.now())
            print(datetime.now(), 'start')

        # Appending End time of motion
        if motion_list[-1] == 0 and motion_list[-2] == 1:
            Motion.objects.create(start_time=datetime.now(),
                                  end_time=time[-1],
                                  camera_index=1)
            time.append(datetime.now())
            print(datetime.now(), 'end')

        # Displaying image in gray_scale
        # cv2.imshow("Gray Frame", gray)

        # # Displaying the difference in currentframe to
        # # the staticframe(very first_frame)
        # cv2.imshow("Difference Frame", diff_frame)

        # # Displaying the black and white image in which if
        # # intensity difference greater than 30 it will appear white
        # cv2.imshow("Threshold Frame", thresh_frame)

        # Displaying color frame with contour of motion of object
        cv2.imshow("Color Frame", frame)

        key = cv2.waitKey(1)
        # if q entered whole process will stop
        if key == ord('q'):
            # if something is moving then append the end time of movement
            if motion == 1:
                time.append(datetime.now())
            break

    # Appending time of motion in DataFrame
    print(time)
    for i in range(0, len(time), 2):

        df = df.append({
            "Start": time[i],
            "End": time[i + 1]
        },
                       ignore_index=True)

    # Creating a CSV file in which time of movements will be saved
    df.to_csv("Time_of_movements.csv")

    video.release()
Example #42
         fixedTime = datetime.fromtimestamp(ACTime)
         
         if l==0:
             prevACtime=fixedTime
          
         rate=False
         for ra in range(len(contestData)):
           ranu=int(ra)
           data=contestData[ranu]
           if data["id"]==subData["contest_id"] and data["rate_change"]!="-" and not(data["rate_change"]=="All" and data["start_epoch_second"]<1468670400):
             rate=True
             break
             
         if rate==True: 
             RPS+=subData["point"]
             time.append(fixedTime)
             ac.append(RPS)
             contests.append(subData["contest_id"])
             problems.append(subData["problem_id"])
             prevTime = fixedTime
             print(subData["problem_id"]+" "+data["rate_change"])
             
 if RPS>highest:
     highest=RPS
 # draw a line parallel to the x-axis from the last AC date up to today
 if prevTime.day!=tod.day and prevTime.month!=tod.month and prevTime.year!=tod.year:
   day=datetime.date(prevTime)
   while day<=tod:
     time.append(day)
     ac.append(RPS)
     day+=timedelta(1)
Example #43
    #print(parts[0])
    #print(parts[1])
    bins.append((int(parts[0]), int(parts[1])))
#for random testing:
#voltage = [3.3*random.random() for x in range(len(names))]
print(bins)

node_voltage = list()
time = list()
interval = 0.001
count = 0
if "single" in string_to_write:
    node_position = bins[0][0]
    for y in bins:
        node_voltage.append(y[1] * 3.3 / 1023)
        time.append(interval * count)
        count += 1
    plt.plot(time, node_voltage, 'bo')
    plt.axis([0, time[-1], 0, 3.3])
    plt.show()
    sys.exit(0)

#Reorganizing for the order of the breadboard layout
#Create place holders when all is not called
#organizing to operate different modes
old_voltage = [0] * 128  #create a list of zeros of 128 elements
for y in bins:
    old_voltage[y[0]] = 3.3 * y[1] / 1023

old_names = list(range(128))
Example #44
    def get_epsolid_reward(self,
                           env,
                           start_time,
                           k1=1.5,
                           k2=2.5,
                           k3=0.5,
                           total_step=10000):
        '''
        Run the full PID simulation.
        :param env: environment
        :param start_time: current start time
        :param k1: kp
        :param k2: kd
        :param k3: ki
        :return: loss value
        '''
        self.env = copy.copy(env)
        alpha = []
        theta = []
        desired_theta = []
        q = []
        time = []
        control = []
        ierror = 0
        derror_list = []
        error_list = []
        dez_list = []
        tp = 0  # peak time
        ts = total_step  # settling time
        count = 0
        loss_sin = 0
        if_alpha = False
        # ---------- control loop ----------
        for i in range(total_step):
            if count >= belief_times and ts == total_step:
                ts = i
                break
            self.env.pitch_desired = PITCH_DESIRED[i + start_time]
            error = self.env.pitch_desired * 57.3 - self.env.observation[0]
            loss_sin += math.fabs(error)
            derror = self.env.dpithch_desired * 57.3 - self.env.observation[1]
            error_list.append(error)
            derror_list.append(derror)
            ierror = ierror + error * self.env.tau
            action = k1 * error + k2 * derror + k3 * ierror
            dez_list.append(action)
            # ---------- system analysis ----------
            # TODO: overshoot time
            if (error == 0 and tp == 0):
                tp = i
            self.env.step(np.array([action]))
            alpha.append(self.env.arfa * 57.3)
            if self.env.arfa * 57.3 < -1 or self.env.arfa * 57.3 > 10:
                if_alpha = True
            theta.append(self.env.observation[0])
            desired_theta.append(self.env.pitch_desired * 57.3)
            q.append(self.env.observation[1])
            time.append(i)
            control.append(action)

            if (abs(error) <= abs(adjust_bound * self.env.pitch)):
                count += 1
            else:
                count = 0
        # ---------- system analysis ----------
        # overshoot
        Overshoot = max(np.array(theta)) - max(np.array(desired_theta))
        if Overshoot < Overshoot_target:
            Overshoot = 0
        else:
            Overshoot = (Overshoot - Overshoot_target) / Overshoot_target
        # settling time
        if ts <= ts_target:
            ts = 0
        else:
            ts = (ts - ts_target) / ts_target
        # steady-state error: average absolute error over the last 10 steps
        st_error = 0.0
        for i in range(1, 11):
            st_error += abs(error_list[-i])
        st_error /= 10.0
        if st_error < Static_error_target:
            Static_error = 0
        else:
            Static_error = (st_error -
                            Static_error_target) / Static_error_target
        done = True
        # TODO: the current reward setting is not reasonable yet
        if if_alpha:
            loss_sin += 450000
            done = False
        return loss_sin, done, ts, Overshoot, Static_error
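The control law in the loop above is a plain discrete PID: the action is k1*error + k2*derror + k3*ierror, with the integral term accumulated as ierror += error * tau each step. A minimal, self-contained sketch of just that update (the function name and defaults are illustrative; only the formula comes from the snippet above):

def pid_step(error, derror, ierror, tau, k1=1.5, k2=2.5, k3=0.5):
    # accumulate the integral term, then combine the P, D and I contributions
    ierror = ierror + error * tau
    action = k1 * error + k2 * derror + k3 * ierror
    return action, ierror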
Example #45
0
    def search_article_info(self, name, page=1):
        """Search for articles

        Args:
            name: keyword to search articles for
            page: page number to search

        Returns:
            A list whose items are {'name','url','img','zhaiyao','gzhname','gzhqrcodes','gzhurl','time'}
            name: article title
            url: article URL
            img: thumbnail of the article cover image (can be converted to a full-size image)
            zhaiyao: article summary
            time: article push time, a 10-digit timestamp
            gzhname: official account name
            gzhqrcodes: official account QR code
            gzhurl: URL of the official account's recent articles

        """
        text = self._search_article_text(name, page)
        page = etree.HTML(text)
        img = list()
        info_imgs = page.xpath(u"//div[@class='wx-rb wx-rb3']/div[1]/a/img")
        for info_img in info_imgs:
            img.append(info_img.attrib['src'])
        url = list()
        info_urls = page.xpath(u"//div[@class='wx-rb wx-rb3']/div[2]/h4/a")
        for info_url in info_urls:
            url.append(info_url.attrib['href'])
        name = list()
        info_names = page.xpath(u"//div[@class='wx-rb wx-rb3']/div[2]/h4")
        for info_name in info_names:
            cache = self._get_elem_text(info_name)
            cache = cache.replace('red_beg', '').replace('red_end', '')
            name.append(cache)
        zhaiyao = list()
        info_zhaiyaos = page.xpath(u"//div[@class='wx-rb wx-rb3']/div[2]/p")
        for info_zhaiyao in info_zhaiyaos:
            cache = self._get_elem_text(info_zhaiyao)
            cache = cache.replace('red_beg', '').replace('red_end', '')
            zhaiyao.append(cache)
        gzhname = list()
        gzhqrcodes = list()
        gzhurl = list()
        info_gzhs = page.xpath(u"//div[@class='wx-rb wx-rb3']/div[2]/div/a")
        for info_gzh in info_gzhs:
            gzhname.append(info_gzh.attrib['title'])
            gzhqrcodes.append(info_gzh.attrib['data-encqrcodeurl'])
            gzhurl.append(info_gzh.attrib['href'])
        time = list()
        info_times = page.xpath(u"//div[@class='wx-rb wx-rb3']/div[2]/div/span/script/text()")
        for info_time in info_times:
            time.append(re.findall('vrTimeHandle552write\(\'(.*?)\'\)', info_time)[0])
        returns = list()
        for i in range(len(url)):
            returns.append(
                {
                    'name': name[i],
                    'url': url[i],
                    'img': img[i],
                    'zhaiyao': zhaiyao[i],
                    'gzhname': gzhname[i],
                    'gzhqrcodes': gzhqrcodes[i],
                    'gzhurl': gzhurl[i],
                    'time': time[i]
                }
            )
        return returns
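A minimal usage sketch for the method above (the client class name and constructor are assumptions, not part of the snippet; only the search_article_info signature and the returned keys are):

api = WechatSogouApi()  # hypothetical client class exposing search_article_info
articles = api.search_article_info('python', page=1)
for item in articles:
    print(item['name'], item['gzhname'], item['time'], item['url'])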
Example #46
0
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
counter = 0

fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
plt.xlabel('Time')
plt.ylabel('Micro Volts')
plt.title('EEG Channel 1')

data = []
time = []

print("pull samples...")
while True:
    # get a new sample (you can also omit the timestamp part if you're not
    # interested in it)
    sample = inlet.pull_sample()
    counter += 1

    data.append(sample[0][0])
    time.append(counter)

    ax1.clear()
    ax1.plot(time, data)
    plt.pause(0.05)

    print(sample)
plt.show()
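For reference, pylsl's pull_sample() returns a (sample, timestamp) pair, which is why the code above indexes sample[0][0]. An equivalent, slightly clearer variant of the loop body (a sketch; the LSL timestamp could replace the running counter on the x-axis):

    sample, timestamp = inlet.pull_sample()
    data.append(sample[0])   # first channel of the sample
    time.append(timestamp)   # use the stream timestamp instead of a counter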
Example #47
0
main_div = soup.find_all(
    "div",
    attrs={"class": re.compile("cb-col cb-col-100 cb-lst-itm cb-lst-itm-lg")})
links = []
intro = []
time = []
typ = []
heading = []
print("Processing News")
for s in main_div:
    links.append("https://www.cricbuzz.com" + s.a["href"])
    s1 = s.find("div", attrs={"class": re.compile("cb-nws-intr")})
    intro.append(s1.text)
    s2 = s.find("div", attrs={"class": re.compile("cb-nws-time")})
    typ.append(s2.text)
    s3 = s.find("span", attrs={"class": re.compile("cb-nws-time")})
    time.append(s3.text)
    s4 = s.find(
        "h2", attrs={"class": re.compile("cb-nws-hdln cb-font-18 line-ht24")})
    heading.append(s4.text)
l = len(heading)
with open("out.txt", "w") as file:
    for i in range(l):
        file.write(typ[i] + "\n")
        file.write(heading[i] + "\n")
        file.write(intro[i] + "\n")
        file.write("Ref: " + links[i] + "\n")
        file.write("Posted: " + time[i] + "\n\n")
file.close()
print("Your News is ready in 'out.txt'")
Example #48
0
def generate_time():
    time = []
    for i in range(240):
        time.append(i / 10)
    return time
Example #49
0
def motiondetect():
    # Assigning our static_back to None
    static_back = None

    # List recording when any moving object appears
    motion_list = [None, None]

    # Time of movement
    time = []

    # Initializing a DataFrame; one column is the start
    # time and the other column is the end time
    df = pandas.DataFrame(columns=["Start", "End"])

    # Capturing video
    video = cv2.VideoCapture(0)

    # Infinite while loop to treat the stream of frames as video
    while True:
        # Reading frame(image) from video
        check, frame = video.read()

        # Initializing motion = 0(no motion)
        motion = 0

        # Converting color image to gray_scale image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Applying a Gaussian blur to the grayscale image
        # so that changes can be detected more easily
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # In first iteration we assign the value
        # of static_back to our first frame
        if static_back is None:
            static_back = gray
            continue

        # Difference between static background
        # and current frame(which is GaussianBlur)
        diff_frame = cv2.absdiff(static_back, gray)

        # If the difference between the static background and the
        # current frame is greater than 30, the pixel is shown as white (255)
        thresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1]
        thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)

        # Finding contour of moving object
        (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)

        for contour in cnts:
            if cv2.contourArea(contour) < 10000:
                continue
            motion = 1

            (x, y, w, h) = cv2.boundingRect(contour)
            # drawing a green rectangle around the moving object
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

        # Appending status of motion
        motion_list.append(motion)

        motion_list = motion_list[-2:]

        # Appending Start time of motion
        if motion_list[-1] == 1 and motion_list[-2] == 0:
            time.append(datetime.now())

        # Appending End time of motion
        if motion_list[-1] == 0 and motion_list[-2] == 1:
            time.append(datetime.now())

        # Displaying image in gray_scale
        cv2.imshow("Gray Frame", gray)

        # Displaying the difference between the current frame and
        # the static frame (the very first frame)
        cv2.imshow("Difference Frame", diff_frame)

        # Displaying the black-and-white image in which pixels whose
        # intensity difference is greater than 30 appear white
        cv2.imshow("Threshold Frame", thresh_frame)

        # Displaying color frame with contour of motion of object
        cv2.imshow("Color Frame", frame)

        key = cv2.waitKey(1)
        # if q entered whole process will stop
        if key == ord('q'):
            # if something is still moving when 'q' is pressed, report it (the end time could be appended here)
            if motion == 1:
                print("motion detected")
            break

    video.release()

    # Destroying all the windows
    cv2.destroyAllWindows()
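The DataFrame df above is created with Start/End columns but never filled in this snippet. A hedged sketch of how the paired timestamps collected in time could be written into it just before video.release() (the CSV filename is illustrative):

    # Pair consecutive timestamps as (Start, End) rows and save them;
    # an unmatched trailing start time is ignored.
    rows = [{"Start": time[i], "End": time[i + 1]} for i in range(0, len(time) - 1, 2)]
    df = pandas.concat([df, pandas.DataFrame(rows)], ignore_index=True)
    df.to_csv("Time_of_movements.csv")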
Example #50
0
    def fp(self, windows_density=20, denoise=False):
        windows_size = self.framerate // windows_density  # windows_size: number of frames per window
        hamming_window = np.hamming(windows_size)
        if denoise:
            noise_fft = self.denoise(2, windows_size)
            self.landmarks = []  # each landmark is a tuple (time, frequency); the list is ordered by time first, then frequency
            #!!!
            time = []
            frequency = []

            # store each window's FFT temporarily; once the largest aver_max is known, go back and extract the landmarks
            ffts = []
            max_aver_max = 0
            for i in range(0, self.nframes - windows_size, windows_size):
                window = self.wave_data[0][
                    i:i + windows_size]  # window is one analysis window (a discrete time-domain signal)
                fft = np.abs(np.fft.fft(window))  # fft is the discrete spectrum of this window
                # compensate by subtracting the noise estimate
                fft = fft - noise_fft
                # filter and keep landmarks ---- (time of occurrence, frequency)
                max1 = np.max(fft[:10])
                max2 = np.max(fft[10:20])
                max3 = np.max(fft[20:40])
                max4 = np.max(fft[40:80])
                max5 = np.max(fft[80:160])
                max6 = np.max(fft[160:511])
                aver_max = (max1 + max2 + max3 + max4 + max5 + max6) / 6
                if aver_max > max_aver_max:
                    max_aver_max = aver_max
                ffts.append(fft[:windows_size // 2])
            max_aver_max *= 0.8
            for i in range(len(ffts)):
                for j in range(windows_size //
                               2):  # only the first half of the spectrum is non-redundant, so only count landmarks from the first half
                    if ffts[i][j] > max_aver_max:
                        self.landmarks.append((int(
                            (i)), int(j * windows_density)))
                        #!!!
                        time.append(int(i))
                        frequency.append(int(j * windows_density))

            # compute anchors (the target zone is the 5 points immediately after the anchor point); store them in fps; each anchor is one fingerprint, represented as a tuple: (absolute anchor time, anchor frequency, target frequency, time difference) ------- all converted to strings!
            self.fps = []
            for i in range(0, len(self.landmarks) - 5):
                for j in range(i + 1, i + 6):
                    self.fps.append(
                        (str(self.landmarks[i][0]), str(self.landmarks[i][1]),
                         str(self.landmarks[j][1]),
                         str(self.landmarks[j][0] - self.landmarks[i][0])))
            #!!!
            plt.scatter(time, frequency)
            plt.show()
            print(len(self.landmarks))

        else:
            self.landmarks = []  # each landmark is a tuple (time, frequency); the list is ordered by time first, then frequency
            '''
            time = []
            frequency = []
            '''
            # store each window's FFT temporarily; once the largest aver_max is known, go back and extract the landmarks
            ffts = []
            max_aver_max = 0
            for i in range(0, self.nframes - windows_size, windows_size):
                window = self.wave_data[0][
                    i:i + windows_size]  # window is one analysis window (a discrete time-domain signal)
                # the next two lines apply a Hamming window (delete them to fall back to a rectangular window)
                tailored_window = np.array(window) * np.array(
                    hamming_window)  # taper the window with the Hamming window
                fft = np.abs(
                    np.fft.fft(tailored_window))  # fft is the discrete spectrum of this window
                # filter and keep landmarks ---- (time of occurrence, frequency)
                max1 = np.max(fft[:10])
                max2 = np.max(fft[10:20])
                max3 = np.max(fft[20:40])
                max4 = np.max(fft[40:80])
                max5 = np.max(fft[80:160])
                max6 = np.max(fft[160:511])
                aver_max = (max1 + max2 + max3 + max4 + max5 + max6) / 6
                if aver_max > max_aver_max:
                    max_aver_max = aver_max
                ffts.append(fft[:windows_size // 2])
            max_aver_max *= 0.8
            for i in range(len(ffts)):
                for j in range(windows_size //
                               2):  # only the first half of the spectrum is non-redundant, so only count landmarks from the first half
                    if ffts[i][j] > max_aver_max:
                        self.landmarks.append((int(
                            (i)), int(j * windows_density)))
                        '''
                        time.append(int(i))
                        frequency.append(int(j * windows_density))
                        '''
            # compute anchors (the target zone is the 5 points immediately after the anchor point); store them in fps; each anchor is one fingerprint, represented as a tuple: (absolute anchor time, anchor frequency, target frequency, time difference) ------- all converted to strings!
            self.fps = []
            for i in range(0, len(self.landmarks) - 5):
                for j in range(i + 1, i + 6):
                    self.fps.append(
                        (str(self.landmarks[i][0]), str(self.landmarks[i][1]),
                         str(self.landmarks[j][1]),
                         str(self.landmarks[j][0] - self.landmarks[i][0])))
            '''
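The comments above define each fingerprint in self.fps as (anchor time, anchor frequency, target frequency, time difference), all stored as strings. A hedged sketch, not part of the original class, of how such tuples are typically keyed for lookup (the frequency pair plus time delta as the key, the anchor time as the payload):

def build_fp_index(fps):
    # map (anchor_freq, target_freq, dt) -> list of anchor times
    index = {}
    for anchor_time, anchor_freq, target_freq, dt in fps:
        index.setdefault((anchor_freq, target_freq, dt), []).append(int(anchor_time))
    return index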
Example #51
0
    elif len(predAmp) < len(predAmpN):
        predAmpN = predAmpN[:len(predAmp)]
        cleanTimeN = cleanTimeN[:len(predAmpN)]
        nonCoalesceLen = len(predAmp)
    else:
        nonCoalesceLen = len(predAmp)
    nonCoalescePeriod = len(ampPeaks['posTime']) - nonCoalesceLen
    nonCoalescePeriodN = len(ampPeaks['negTime']) - nonCoalesceLen

    time = list()
    amp = list()
    length = max([len(cleanTime), len(cleanTimeN)])
    if abs(cleanTime[0]) > abs(cleanTimeN[0]):
        for t in xrange(length):
            if t < len(cleanTime):
                time.append(cleanTime[t])
                amp.append(predAmp[t])
            if t < len(cleanTimeN):
                time.append(cleanTimeN[t])
                amp.append(predAmpN[t])
    else:
        for t in xrange(length):
            if t < len(cleanTimeN):
                time.append(cleanTimeN[t])
                amp.append(predAmpN[t])
            if t < len(cleanTime):
                time.append(cleanTime[t])
                amp.append(predAmp[t])

    #======================================
    # During Coalescence
Example #52
0
cog = []
sog = []
yaw_gps = []
pitch_gps = []
roll_gps = []
ax = []
ay = []
az = []

for line in urlopen('https://navview.blob.core.windows.net/data-test/'+ sys.argv[1] +'.txt'):
    line = line.strip()
    raw_data.append(line.decode('utf-8').split(','))

length = len(raw_data)  
for i in range(1,length):
    time.append(raw_data[i][0])
    roll.append (raw_data[i][1])
    pitch.append(raw_data[i][2])
    wx.append (raw_data[i][3])
    wy.append(raw_data[i][4])
    wz.append (raw_data[i][5])
    lat.append(raw_data[i][6])
    long.append (raw_data[i][7])
    cog.append(raw_data[i][8])
    sog.append (raw_data[i][9])
    yaw_gps.append(raw_data[i][10])
    pitch_gps.append (raw_data[i][11])
    roll_gps.append(raw_data[i][12])
    ax.append (raw_data[i][13])
    ay.append(raw_data[i][14])
    # az.append (raw_data[i][15])
Example #53
0
    def model_simulation(self, k1=1.5, k2=2.5, k3=0.5, iterator=0):
        total_step = 1000
        self.env.reset()
        alpha = []
        theta = []
        desired_theta = []
        q = []
        time = []
        i = 1
        control = []
        ierror = 0
        derror_list = []
        error_list = []
        action_list = []
        dez_list = []

        while i < total_step:
            """ FOR DEBUG """
            # if i % 10 == 0:
            #     print(i,self.env.state[1],self.env.theta_desired)
            error = self.env.theta_desired - self.env.state[1]
            derror = -self.env.state[2]
            error_list.append(error)
            derror_list.append(derror)
            ierror = ierror + error * self.env.tau
            action = k1 * error + k2 * derror + k3 * ierror

            action_list.append(action)
            dez_list.append(self.env.delta_z)

            self.env.step(np.array([action]))
            alpha.append(self.env.state[0])
            theta.append(self.env.state[1])
            desired_theta.append(self.env.theta_desired)
            q.append(self.env.state[2])
            time.append(i)
            control.append(action)
            i = i + 1
            # "Plot the $\\alpha$ curve"
            # plt.xticks(fontproperties='Times New Roman')
            # plt.yticks(fontproperties='Times New Roman')
            # plt.xlabel("Number of Iterations")
            # plt.ylabel("Attack Angle")
            # plt.plot(alpha, label="$\\alpha$")
            # plt.legend(loc='best', prop={'family': 'Times New Roman'})
            # # For the plot legend, remember the font is set via prop as a dict, and the size key is size, not fontsize, which is easy to confuse with the xticks command
            # plt.title("$\\alpha$ In %s Epoch " % str(iterator), fontdict={'family': 'Times New Roman'})
            # plt.show()
            #
            # "Plot the $\\delta_z$ curve"
            #
            # plt.xticks(fontproperties='Times New Roman')
            # plt.yticks(fontproperties='Times New Roman')
            # plt.xlabel("Number of Iterations")
            # plt.ylabel("Elevator")
            # plt.plot(dez_list, label="$\\delta_z$")
            # plt.title("$\\delta_z$  In %s epoch " % str(iterator), fontdict={'family': 'Times New Roman'})
            # plt.legend(loc='best', prop={'family': 'Times New Roman'})
            # plt.show()
            # "Plot the theta curve"
            #
            # plt.figure(num=2)
            #
            # plt.xticks(fontproperties='Times New Roman')
            # plt.yticks(fontproperties='Times New Roman')
            # plt.xlabel("Number of Iterations")
            # plt.ylabel("Pitch Angle")
            #
            # plt.plot(theta, label="time-theta")
            # plt.plot(desired_theta, label="time-desired_theta")
            # plt.legend(loc='best', prop={'family': 'Times New Roman'})
            # plt.title("$ \\theta$  In %s epoch " % str(iterator), fontdict={'family': 'Times New Roman'})
            # plt.savefig("%sepoch.pdf" % iterator)
            # plt.show()
        return alpha, dez_list, theta, desired_theta
Example #54
0
def alertinfo(request):
    if 'emp_id' in request.session and 'Username' in request.session:
        empid = request.session['emp_id']
        obje = Alert.objects.filter(cluster="systemlog")
        for obje1 in obje:
            ip = obje1.ip_endpoint
        es = Elasticsearch([ip])
        alert_namelist = []
        alert_id = []
        doc = {"size": 10000, "query": {"match": {"emp_id": empid}}}
        res = es.search(index="alertinfo", doc_type='post', body=doc)
        for row in res['hits']['hits']:
            alert_name = row["_source"]["alert_name"]
            alert_namelist.append(alert_name)
            id = len(alert_namelist)
            alert_id.append(id)
        al = set(alert_namelist)
        alert_nameunique = list(al)
        sk = len(alert_nameunique)
        value = []
        kibanaquerylist = []
        k1 = []
        sizelist = []
        lastalertsent = []
        timestamp = []
        mailsentlist = []
        smssentlist = []
        mailsent = []
        for name in alert_nameunique:
            body = {
                "size": 10000,
                "sort": [{
                    "lastalertsent": {
                        "order": "asc"
                    }
                }],
                "query": {
                    "match": {
                        "alert_name": name
                    }
                }
            }
            result = es.search(index="alertinfo", doc_type='post', body=body)
            for row in result['hits']['hits']:
                kibanaquery = row["_source"]["kibana_query"]
                k1.append(kibanaquery)
                kibanaquerylist.append(kibanaquery)
                lastalert = row["_source"]["lastalertsent"]
                lastalertsent.append(lastalert)
                mailsent = row["_source"]["mail_sent"]
                mailsent[:] = [item for item in mailsent if item != '']
                mailsent[:] = [item for item in mailsent if item != 'None']
                mailsent = [item.encode('utf-8') for item in mailsent]
                mailsentlist.append(mailsent)
                smssent = row["_source"]["sms_sent"]
                smssent = [item.encode('utf-8') for item in smssent]
                smssent[:] = [item for item in smssent if item != '']
                smssent[:] = [item for item in smssent if item != 'None']

                smssentlist.append(smssent)

            s1 = len(k1)
            size = s1
            sizelist.append(size)
            del k1[:]
            kibana = []

        for ls in lastalertsent:
            current_timestamp = datetime.datetime.fromtimestamp(ls).strftime(
                '%Y-%m-%d-%H:%M:%S')
            timestamp.append(current_timestamp)

        kibana = []
        time = []
        mail_sent = []
        count = 0
        sms_sent = []
        for size in sizelist:
            kibana.append([kibanaquerylist[i + count] for i in range(size)])
            time.append([timestamp[i + count] for i in range(size)])
            mail_sent.append([mailsentlist[i + count] for i in range(size)])
            sms_sent.append([smssentlist[i + count] for i in range(size)])
            count += size
        allist = []

        zippedlist = zip(alert_nameunique, kibana, time, mail_sent, sms_sent,
                         alert_id)
        return render(request, 'application/alertinfo.html', {
            'kibanaquery': zippedlist,
            'alert': alert_id
        })
    else:
        return redirect('form')
Example #55
0
    def get_epsolid_reward(self, k1=1.5, k2=2.5, k3=0.5, is_test=False):
        total_step = 2000
        self.env.reset()
        alpha = []
        theta = []
        desired_theta = []
        q = []
        time = []
        i = 1
        control = []
        ierror = 0
        derror_list = []
        error_list = []
        dez_list = []
        # peak time
        tp = 0
        '''compute the settling time; if it is too large, increase the penalty'''
        ts = total_step
        count = 0
        for i in range(total_step):
            if count >= belief_times:
                ts = i - belief_times
                break
            error = self.env.theta_desired - self.env.state[1]
            derror = -self.env.state[2]
            error_list.append(error)
            derror_list.append(derror)
            ierror = ierror + error * self.env.tau
            action = k1 * error + k2 * derror + k3 * ierror

            dez_list.append(action)
            if (error == 0 and tp == 0):
                tp = i
            self.env.step(np.array([action]))
            alpha.append(self.env.state[0])
            theta.append(self.env.state[1])
            desired_theta.append(self.env.theta_desired)
            q.append(self.env.state[2])
            time.append(i)
            control.append(action)

            if (abs(error) <= abs(adjust_bound * self.env.theta_desired)):
                count += 1
            else:
                count = 0
            # ## Optimize in stages, since each stage has a different task; mimic human tuning experience: first obtain a feasible solution, then move on to the constrained optimum

            ## Although I think a maximum-value limit should be added here; maybe the environment should be changed instead
            if self.env.state[
                    0] < self.env.alpha_threshold_min or self.env.state[
                        0] > self.env.alpha_threshold_max:
                count += 1

        # overshoot
        Overshoot = max(abs(np.array(theta))) - max(
            abs(np.array(desired_theta)))
        Overshoot = 0 if Overshoot < Overshoot_target else (
            Overshoot - Overshoot_target) / Overshoot_target
        # settling time
        ts = 0 if ts <= ts_target else (ts - ts_target) / ts_target
        r = Overshoot + ts
        # r = Overshoot + ts
        # check whether the constraint criterion is satisfied
        if is_test:
            return ts
        else:
            return r
Example #56
0
with connection.cursor() as cursor:
    cursor.execute(
        "SELECT DATE_FORMAT(time, '%%H:%%i'), course, marketId FROM races WHERE date = %s ORDER BY time", (date_today,))
    markets = cursor.fetchall()
    tab = []
    for market in markets:
        tab.append((market['DATE_FORMAT(time, \'%H:%i\')']+', '+market['course']+', '+market['marketId']+'\n'))

# capture the DST flag before the name `time` is rebound to a list below
# (assumes the standard time module was imported earlier in the script)
is_dst = time.localtime().tm_isdst
time = []
# sample is for gmt+2
for row in tab:
    # not DST
    if is_dst == 0:
        if(row[0:2] == '19'):
            time.append(('20'+row[2:]))
        elif(row[0:2] == '23'):
            time.append(('00'+row[2:]))
        else:
            time.append((row[0]+str(int(row[1])+1)+row[2:]))
    elif is_dst == 1:
    # dst
        if(row[0:2] == '18'):
            time.append(('20'+row[2:]))
        elif(row[0:2] == '19'):
            time.append(('21'+row[2:]))
        elif(row[0:2] == '23'):
            time.append(('01'+row[2:]))
        else:
            time.append((row[0]+str(int(row[1])+2)+row[2:]))
    else:
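The manual hour arithmetic above is easy to get wrong around DST changes. A hedged alternative sketch (not from the original script) that shifts the leading "HH:MM" of each row with datetime and zoneinfo; the source and target timezone names are assumptions based on the "gmt+2" comment:

from datetime import date, datetime
from zoneinfo import ZoneInfo  # Python 3.9+

def shift_row(row, day=None, src="Europe/London", dst="Europe/Warsaw"):
    # row looks like "HH:MM, course, marketId"; only the leading time is shifted
    day = day or date.today()
    naive = datetime.strptime(row[:5], "%H:%M").replace(year=day.year, month=day.month, day=day.day)
    local = naive.replace(tzinfo=ZoneInfo(src)).astimezone(ZoneInfo(dst))
    return local.strftime("%H:%M") + row[5:]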
Example #57
0
import argparse
import ast
import subprocess
import time
from db_connect import get_conn

parser = argparse.ArgumentParser()
parser.add_argument('E',
                    type=int,
                    help='number of exchange transactions in a process')
parser.add_argument('P', type=int, help='number of processes')
parser.add_argument('I', help='isolation level')
args = parser.parse_args()

processes = []
for i in xrange(0, args.P):
    processes.append(
        subprocess.Popen([
            'python',
            'c:/Users/User/OneDrive/NUS/course/CS5421/assigment/project5/run_exchanges.py',
            str(args.E), args.I
        ],
                         stdout=subprocess.PIPE))  ##MODIFIED

for process in processes:
    process.wait()

time = []
for process in processes:
    time.append(ast.literal_eval(process.communicate()[0]))

print float(sum(time)) / len(time)
Example #58
0
BASE = "https://news.ycombinator.com/news?p="
session_requests = requests.session()

ct = 0
title = []
score = []
hnuser = []
time = []
for i in range(1, 51):
    URL = BASE + str(i)
    r = session_requests.get(URL)
    soup = BeautifulSoup(r.content, 'html.parser')
    for node in soup.find_all("a", "storylink"):
        title.append(node.text)
    for node in soup.find_all("span", "age"):
        time.append(node.text)
    for node in soup.find_all("a", "hnuser"):
        hnuser.append(node.text)
    for node in soup.find_all("span", "score"):
        score.append(node.text)
    if len(title) > len(hnuser):
        title.pop()
        time.pop()


class post(Document):
    title = StringField(max_length=120, required=True)
    author = StringField(max_length=120, required=True)
    time = StringField(max_length=120, required=True)
    score = StringField(max_length=50, required=True)
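A hedged sketch (not in the original snippet) of persisting the scraped lists through the mongoengine Document defined above; the connect() call and database name are assumptions:

from mongoengine import connect

connect('hackernews')  # assumed database name
for i in range(len(hnuser)):
    post(
        title=title[i],
        author=hnuser[i],
        time=time[i],
        score=score[i] if i < len(score) else '0 points',  # job posts have no score span
    ).save()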
Example #59
0
 def random_date(start, end, numero):
     time = []
     for e in range(0,numero):
         time.append(str_time_prop(start, end, '%m/%d/%Y %I:%M %p', random.random()))
     return time
Example #60
0
            continue
        motion = 1

        (x, y, w, h) = cv2.boundingRect(contour)
        # drawing a green rectangle around the moving object
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

    # we now append status of motion
    motion_list.append(motion)

    # we slice the array from the second last element
    motion_list = motion_list[-2:]

    # appending Start time of motion
    if motion_list[-1] == 1 and motion_list[-2] == 0:
        time.append(datetime.now().strftime("%H:%M:%S"))

    # appending End time of motion
    if motion_list[-1] == 0 and motion_list[-2] == 1:
        time.append(datetime.now().strftime("%H:%M:%S"))

    # displaying image in gray_scale
    cv2.imshow("Gray Frame", gray)

    # displaying the difference between the current frame and the static frame (the very first frame)
    cv2.imshow("Difference Frame", diff_frame)

    # displaying the black-and-white image in which pixels whose intensity difference is greater than 30 appear white
    cv2.imshow("Threshold Frame", thresh)

    # displaying color frame with contour of motion of object