Example #1
File: main.py Project: andrut/utils
def print_datetime_endl(t, st):
	if t - st > 3600*18:
		print "%s <=%s\n" % (Color.END_TIME, time.strftime("%Y-%m-%d %H:%M", time.gmtime(t)))
	elif t - st != 0:
		print "%s <=%s\n" % (Color.END_TIME, time.strftime("%H:%M", time.gmtime(t)))
	else:
		print ""
Example #2
def move_data():
    db = DBConnection().db
       
    mobiles = ['18310505991', '13693675352', '13581731204']
    # Translation: "Database T_LOCATION has been fully migrated to T_LOCATION_NEW; please promptly confirm the table data's correctness and completeness."
    message = "数据库T_LOCATION已经完全转移到T_LOCATION_NEW,请及确认表信息的正确性和完整性。"
    #max_row = 1000000000
    max_row = 250000000
    begin_time = time.gmtime(time.time())
    for i in range(10000, max_row, 10000):
        sql = "INSERT INTO T_LOCATION_NEW" \
              " SELECT * FROM T_LOCATION WHERE id <=%d AND id > %d -10000" \
              " and (timestamp between 0 and 1448899200)" % (i, i)
        logging.info("exectue sql:%s", sql)
        
        n = db.execute(sql)
        #time.sleep(0.1)
        logging.info("last record  row id =%s", n)
        break  # NOTE: stops after the first batch, so the SMS notification below never runs
        # if i = 250000000:
        if i == 240000000:
            for mobile in mobiles:
                SMSHelper.send(mobile, message)    
                print "send", mobile
    end_time = time.gmtime(time.time())
    L_bak = "alter table T_LOCATION rename  to T_LOCATION_bak"
    NEW_L = "alter table T_LOCATION_NEW rename  to T_LOCATION"
    
    for i in range(1, 5): 
        time.sleep(1)
        logging.info("Will rename table neame after %d second", 5-i)
    
    db.execute(L_bak)
    db.execute(NEW_L)
    logging.info("exchange tables T_LOCATION and T_LOCATION_NEW is accomplished ")
    logging.info("Move table data begin_time:%s, end_time:%s", begin_time, end_time)
    def createTestWorkspace(self):
        """ Create a workspace for testing against with ideal log values
        """
        from mantid.simpleapi import CreateWorkspace
        from mantid.simpleapi import AddSampleLog
        from mantid import kernel
        from time import gmtime, strftime, mktime
        import numpy as np

        # Create a matrix workspace
        x = np.array([1.,2.,3.,4.])
        y = np.array([1.,2.,3.])
        e = np.sqrt(np.array([1.,2.,3.]))
        wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=1,UnitX='TOF')

        # Add run_start
        tmptime = strftime("%Y-%m-%d %H:%M:%S", gmtime(mktime(gmtime())))
        AddSampleLog(Workspace=wksp,LogName='run_start',LogText=str(tmptime))

        tsp_a=kernel.FloatTimeSeriesProperty("SensorA")
        tsp_b=kernel.FloatTimeSeriesProperty("SensorB")
        tsp_c=kernel.FloatTimeSeriesProperty("SensorC")
        for i in np.arange(25):
            tmptime = strftime("%Y-%m-%d %H:%M:%S", gmtime(mktime(gmtime())+i))
            tsp_a.addValue(tmptime, 1.0*i*i)
            tsp_b.addValue(tmptime, 2.0*i*i)
            tsp_c.addValue(tmptime, 3.0*i*i)

        wksp.mutableRun()['SensorA']=tsp_a
        wksp.mutableRun()['SensorB']=tsp_b
        wksp.mutableRun()['SensorC']=tsp_c

        return wksp
Example #4
File: epgWeb.01.py Project: rohhy/phspd
    def Navigation(self, service, day_from):
      nav = "<P>"
      gmtime = time.gmtime( day_from + 24*3600 )
      
      #service menu
      for s in self.listServices():
        url = self.gmtime2url(s[0], gmtime)
        if s[0] == self.sel_service:
          nav = nav + "<B>%s</B>"%s[0]
        else:
          nav = nav + "<A href=\'%s%s%s\'>%s</A> "%(self.ip, self.scriptUrl, url, s[0])

      #date-day menu
      nav = nav + "</P>\n<P>"
      dayOfWeek = { 0:"Ne", 1:"Po", 2:"Ut", 3:"St", 4:"Ct", 5:"Pa", 6:"So" }

      for day in range(-2, 5):        
        gmtime = time.gmtime( day_from + day*24*3600 )
        date = time.strftime("%Y-%m-%d", gmtime)
        dayCode = int(time.strftime("%w", gmtime))
        date = date + "(" + dayOfWeek[dayCode] + ")"
        url = self.gmtime2url(service, gmtime)
        if day == 1:
          nav = nav + "<B>%s</B>"%date
        else:
          nav = nav + "<A href=\'%s\'>%s</A> "%(url, date)
      return nav + "</P>\n"
Example #5
		def _set_expire(seconds=0):
			'''
			Creates a time object N seconds from now
			'''
			now = calendar.timegm(time.gmtime())
			then = now + seconds
			return time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime(then))
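A quick aside on the idiom above: calendar.timegm() interprets a struct_time as UTC, so round-tripping the current time through time.gmtime() recovers the integer part of time.time(). A minimal check (not part of the original example):

import calendar
import time

# gmtime() -> timegm() yields the current epoch seconds, truncated to an integer
now = calendar.timegm(time.gmtime())
assert abs(now - time.time()) < 1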
Example #6
 def _format_data(self, start_time, timestamp, name, units, values):
     fields = _fields[:]
     file_timestamp = time.strftime('%Y%m%d%H%M',time.gmtime(start_time))
     value_timestamp = time.strftime('%Y%m%d%H%M',time.gmtime(timestamp))
     fields[_field_index['units']] = units
     fields[_field_index['commodity']] = self.commodity
     meter_id = name + '|1'
     if units:
         meter_id += '/%s' % units
     fields[_field_index['meter_id']] = meter_id
     fields[_field_index['receiver_id']] = ''
     fields[_field_index['receiver_customer_id']] = self.customer_name + '|' + self.account_name
     fields[_field_index['timestamp']] = file_timestamp
     # interval put into "MMDDHHMM" with MMDD = 0000
     fields[_field_index['interval']] = '0000%02d%02d' % (self.period / 3600, (self.period % 3600) / 60)
     fields[_field_index['count']] = str(len(values))
     value_sets =  []
     for value in values:
         try:
             value = '%f' % value
             protocol_text = ''
         except ValueError:
             value = ''
             protocol_text = 'N'
         value_set = (value_timestamp, protocol_text, value)
         value_sets.append(string.join(value_set, ','))
         value_timestamp = ''
     fields[_field_index['interval_data']] = string.join(value_sets, ',')
     return string.join(fields, ',')
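To make the "MMDDHHMM" interval encoding above concrete, here is a small sketch with a hypothetical 15-minute period:

# A 15-minute reporting period encoded as MMDDHHMM with MMDD fixed at 0000
period = 900
print('0000%02d%02d' % (period // 3600, (period % 3600) // 60))  # -> 00000015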
Example #7
 def analyzePage(self):
     maxArchSize = str2size(self.get('maxarchivesize'))
     archCounter = int(self.get('counter','1'))
     oldthreads = self.Page.threads
     self.Page.threads = []
     T = time.mktime(time.gmtime())
     whys = []
     for t in oldthreads:
         if len(oldthreads) - self.archivedThreads <= int(self.get('minthreadsleft',5)):
             self.Page.threads.append(t)
             continue  # because there are too few threads left
         #TODO: Make an option so that unstamped (unsigned) posts get archived.
         why = t.shouldBeArchived(self)
         if why:
             archive = self.get('archive')
             TStuple = time.gmtime(t.timestamp)
             vars = {
                     'counter' : archCounter,
                     'year' : TStuple[0],
                     'month' : TStuple[1],
                     'monthname' : int2month(TStuple[1]),
                     'monthnameshort' : int2month_short(TStuple[1]),
                     'week' : int(time.strftime('%W',TStuple)),
                     }
             archive = archive % vars
             if self.feedArchive(archive,t,maxArchSize,vars):
                 archCounter += 1
                 self.set('counter',str(archCounter))
             whys.append(why)
             self.archivedThreads += 1
         else:
             self.Page.threads.append(t)
     return set(whys)
Example #8
    def text_to_xml_item(self, filepath):
        """read file and generate xml"""
        pname = os.path.basename(filepath).replace(".txt", "")
        date = os.path.getmtime(filepath)
        (tags, title, content) = self.read_text(filepath)  # TODO: do exception proc.
        categories = self.get_categories(filepath)
        date_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(date))
        date_str_gmt = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(date))
        pubDate_str = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(date))
        tidied = content
        tidied = tidied.replace("\r\n", "\n")
        
        rex = re.compile(r"<pre>.*?</pre>", re.S)
        tidied = rex.sub(self.escape, tidied)

        tidied = BeautifulStoneSoup(tidied).prettify()
        tidied = tidied.replace("\n", "")
        tidied = tidied.replace(",", "&#44;")
        tidied = self.unescape(tidied)
        
        # add entry
        post_item = wordpress.Item(
            title = title,
            pubDate = pubDate_str,
            post_date = date_str,
            post_date_gmt = date_str_gmt,
            content = tidied,
            post_name = pname)
        post_item.tags = tags
        post_item.categories = categories
        self._wxr.channel.items.append(post_item)
Example #9
def main():

    from numpy import matrix
    from time import strftime, gmtime

    # zprava (the message)
    zprava = matrix([[20],[17],[2],[5],[6]])
    # klic (the key)
    klic = matrix([[18, 0,19,12,23],
                   [22,30,32,19,10],
                   [19,17, 2,32,32],
                   [11,24,20,22, 5],
                   [30, 0,19,26,22]])

    print "Brutal force started in",strftime("%H:%M:%S", gmtime())

    for a in range(26):
        print ""
        print a,
        for b in range(26):
            print ".",
            for c in range(26):
                for d in range(26):
                    for e in range(26):
                        matice = matrix([[a],[b],[c],[d],[e]])
                        nasobek = klic * matice
                        if ( (nasobek[0]%33==28) & (nasobek[1]%33==9) & (nasobek[2]%33==8) & (nasobek[3]%33==4) & (nasobek[4]%33==14)):
                            print matice

    print ""
    print "Brutal force ended in",strftime("%H:%M:%S", gmtime())                            
Example #10
def logout(environ, start_response):
    """
    Expire the tiddlyweb_user cookie when a POST is received.
    """
    uri = environ.get('HTTP_REFERER',
            server_base_url(environ) +
            environ['tiddlyweb.config'].get('logout_uri', '/'))
    path = environ.get('tiddlyweb.config', {}).get('server_prefix', '')
    cookie = Cookie.SimpleCookie()
    cookie['tiddlyweb_user'] = ''
    cookie['tiddlyweb_user']['path'] = '%s/' % path

    if 'MSIE' in environ.get('HTTP_USER_AGENT', ''):
        cookie['tiddlyweb_user']['expires'] = time.strftime(
                '%a, %d-%m-%y %H:%M:%S GMT', time.gmtime(time.time() - 600000))
    else:
        cookie['tiddlyweb_user']['max-age'] = '0'

    cookie_output = cookie.output(header='')
    start_response('303 See Other', [
        ('Set-Cookie', cookie_output),
        ('Expires', time.strftime(
            '%a, %d %b %Y %H:%M:%S GMT', time.gmtime(time.time() - 600000))),
        ('Cache-Control', 'no-store'),
        ('Location', uri),
        ])

    return [uri]
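Worth noting about this example: cookie expires attributes and the HTTP Expires header must be expressed in GMT, which is why time.gmtime() (rather than time.localtime()) feeds strftime here, and dating them far in the past makes clients discard the cookie immediately. A minimal sketch of the same trick (hypothetical cookie name; Python 2 stdlib):

import time
import Cookie  # http.cookies in Python 3

cookie = Cookie.SimpleCookie()
cookie['session'] = ''
# A date roughly a week in the past, formatted in GMT, expires the cookie at once
cookie['session']['expires'] = time.strftime(
    '%a, %d-%b-%Y %H:%M:%S GMT', time.gmtime(time.time() - 600000))
print(cookie.output(header=''))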
Example #11
 def handle_request( self, trans, url, http_method=None, **kwd ):
     if 'Name' in kwd and not kwd[ 'Name' ]:
         # Hack: specially handle parameters named "Name" if no param_value is given
         # by providing a date / time string - guarantees uniqueness, if required.
         kwd[ 'Name' ] = time.strftime( "%a, %d %b %Y %H:%M:%S", time.gmtime() )
     if 'Comments' in kwd and not kwd[ 'Comments' ]:
         # Hack: specially handle parameters named "Comments" if no param_value is given
         # by providing a date / time string.
         kwd[ 'Comments' ] = time.strftime( "%a, %d %b %Y %H:%M:%S", time.gmtime() )
     socket.setdefaulttimeout( 600 )
     # The following calls to urllib2.urlopen() will use the above default timeout.
     try:
         if not http_method or http_method == 'get':
             page = urllib2.urlopen( url )
             response = page.read()
             page.close()
             return response
         elif http_method == 'post':
             page = urllib2.urlopen( url, urllib.urlencode( kwd ) )
             response = page.read()
             page.close()
             return response
         elif http_method == 'put':
             url += '/' + str( kwd.pop( 'id' ) ) + '?key=' + kwd.pop( 'key' )
             output = self.put( url, **kwd )
     except Exception, e:
         raise  # NOTE: re-raises immediately, so the failure handling below is unreachable
         message = 'Problem sending request to the web application: %s.  URL: %s.  kwd: %s.  Http method: %s' % \
         ( str( e ), str( url ), str( kwd ), str( http_method )  )
         return self.handle_failure( trans, url, message )
Example #12
  def _InsertEvent(self, title='Tennis with Beth',
      content='Meet for a quick lesson', where='On the courts',
      start_time=None, end_time=None, recurrence_data=None):
    """Inserts a basic event using either start_time/end_time definitions
    or gd:recurrence RFC2445 icalendar syntax.  Specifying both types of
    dates is not valid.  Note how some members of the CalendarEventEntry
    class use arrays and others do not.  Members which are allowed to occur
    more than once in the calendar or GData "kinds" specifications are stored
    as arrays.  Even for these elements, Google Calendar may limit the number
    stored to 1.  The general motto to use when working with the Calendar data
    API is that functionality not available through the GUI will not be
    available through the API.  Please see the GData Event "kind" document:
    http://code.google.com/apis/gdata/elements.html#gdEventKind
    for more information"""

    event = gdata.calendar.data.CalendarEventEntry()
    event.title = atom.data.Title(text=title)
    event.content = atom.data.Content(text=content)
    event.where.append(gdata.data.Where(value=where))

    if recurrence_data is not None:
      # Set a recurring event
      event.recurrence = gdata.data.Recurrence(text=recurrence_data)
    else:
      if start_time is None:
        # Use current time for the start_time and have the event last 1 hour
        start_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime())
        end_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',
            time.gmtime(time.time() + 3600))
      event.when.append(gdata.data.When(start=start_time,
          end=end_time))

    new_event = self.cal_client.InsertEvent(event)

    return new_event
Example #13
File: pimento.py Project: j0npau1/pimento
def record(session):
    starttime = time.time()
    call ("clear")
    print "Time-lapse recording started", time.strftime("%b %d %Y %I:%M:%S", time.localtime())
    print "CTRL-C to stop\n"
    print "Frames:\tTime Elapsed:\tLength @", session.fps, "FPS:"
    print "----------------------------------------"

    while True:
        routinestart = time.time()

        send_command(session)
        
        session.framecount += 1

        # This block uses the time module to format the elapsed time and final
        # video time displayed into nice xx:xx:xx format. time.gmtime(n) will
        # return the day, hour, minute, second, etc. calculated from the
        # beginning of time. So for instance, time.gmtime(5) returns a time
        # object equivalent to 5 seconds past the beginning of time, which
        # time.strftime then formats as 00:00:05. time.gmtime does not provide
        # actual milliseconds though, so we have to calculate those separately
        # and tack them on to the end when assigning the length variable. I'm
        # sure this isn't the most elegant solution, so suggestions are welcome.
        elapsed = time.strftime("%H:%M:%S", time.gmtime(time.time()-starttime))
        vidsecs = float(session.framecount)/session.fps
        vidmsecs = str("%02d" % ((vidsecs - int(vidsecs)) * 100))
        length = time.strftime("%H:%M:%S.", time.gmtime(vidsecs)) + vidmsecs

        stdout.write("\r%d\t%s\t%s" % (session.framecount, elapsed, length))
        stdout.flush()
        time.sleep(session.interval - (time.time() - routinestart))
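The duration-formatting idiom used above works for any span under 24 hours, since time.gmtime() maps seconds-since-epoch onto 1970-01-01; a standalone sketch:

import time

# 5000 elapsed seconds rendered as H:M:S (valid for durations under one day)
print(time.strftime("%H:%M:%S", time.gmtime(5000)))  # -> 01:23:20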
Example #14
    def enumerate_recent_revisions(self, start_day, end_day):
        self.logger.info('enumerating revisions from %s through %s' %
                (ccm.util.to_date(start_day), ccm.util.to_date(end_day)))
        start_str = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(start_day))
        end_str = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(end_day))
        out = subprocess.check_output(['svn', 'log', '--xml',
            '-r', '{%s}:{%s}' % (start_str, end_str), self.url])
        xml = minidom.parseString(out)
        entries = xml.getElementsByTagName("logentry")
        rv = []
        for e in entries:
            r = Rev()
            rv.append(r)
            r.revision = e.getAttribute('revision')
            r.author = e.getElementsByTagName("author")[0].firstChild.nodeValue
            date = e.getElementsByTagName("date")[0].firstChild.nodeValue
            r.when = time.mktime(time.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ'))

            diff = subprocess.check_output(['svn', 'diff', '-c', r.revision, self.url])
            first_chars = [l[0] for l in diff.split('\n') if l]

            # '=' lines begin each file, followed by '+++' and '---' which don't count as
            # changed lines, so we'll subtract those out
            equals = first_chars.count('=')
            plus = first_chars.count('+')
            minus = first_chars.count('-')
            r.added = plus - equals
            r.removed = minus - equals

        return rv
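One caveat about the timestamp parsing above: time.mktime() interprets its struct_time argument as local time, so applying it to the UTC dates emitted by svn log --xml skews r.when by the local UTC offset; calendar.timegm() is the UTC-correct counterpart. A small demonstration:

import calendar
import time

date = "2015-06-01T12:00:00.000000Z"  # UTC, as svn log --xml emits it
st = time.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ")
utc_epoch = calendar.timegm(st)   # treats st as UTC -- matches the data
local_epoch = time.mktime(st)     # treats st as local time
print(utc_epoch - local_epoch)    # the local zone's UTC offset, in seconds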
Example #15
File: remind.py Project: thsnr/phenny
def remind(phenny, input):
    m = r_command.match(input.bytes)
    if not m:
        return phenny.reply("Sorry, didn't understand the input.")
    length, scale, message = m.groups()

    length = float(length)
    factor = scaling.get(scale, 60)
    duration = length * factor

    if duration % 1:
        duration = int(duration) + 1
    else: duration = int(duration)

    t = int(time.time()) + duration
    reminder = (input.sender, input.nick, message)

    try: phenny.rdb[t].append(reminder)
    except KeyError: phenny.rdb[t] = [reminder]

    dump_database(phenny.rfn, phenny.rdb)

    if duration >= 60:
        w = ''
        if duration >= 3600 * 12:
            w += time.strftime(' on %d %b %Y', time.gmtime(t))
        w += time.strftime(' at %H:%MZ', time.gmtime(t))
        phenny.reply('Okay, will remind%s' % w)
    else: phenny.reply('Okay, will remind in %s secs' % duration)
Example #16
def FormatDateTime(value, **kwargs):
    formatStringList = []
    if kwargs.get('dateFormat', 'short') in ('full', 'long', 'medium', 'short'):
        formatStringList.append('%Y.%m.%d')
    timeFormat = kwargs.get('timeFormat', 'short')
    if timeFormat in ('full', 'long', 'medium'):
        formatStringList.append('%H:%M:%S')
    elif timeFormat == 'short':
        formatStringList.append('%H:%M')
    formatString = ' '.join(formatStringList)
    if isinstance(value, long):
        value = value + eveLocalization.GetTimeDelta() * const.SEC
        year, month, weekday, day, hour, minute, second, msec = blue.os.GetTimeParts(value)
        day_of_year = 1
        is_daylight_savings = -1
        value = (year,
         month,
         day,
         hour,
         minute,
         second,
         weekday,
         day_of_year,
         is_daylight_savings)
    elif isinstance(value, (time.struct_time, tuple)):
        value = calendar.timegm(value)
        value = time.gmtime(value + eveLocalization.GetTimeDelta())
    elif isinstance(value, float):
        value = time.gmtime(value + eveLocalization.GetTimeDelta())
    else:
        logger.LogTraceback('datetime only accepts blue time or Python time as values, but we received a ', type(value).__name__, '.')
        return None
    return PrepareLocalizationSafeString(time.strftime(formatString, value), 'time')
Example #17
    def enumerate_recent_revisions(self, start_day, end_day):
        self.logger.info('enumerating revisions from %s through %s' %
                (ccm.util.to_date(start_day), ccm.util.to_date(end_day)))
        start_str = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(start_day))
        end_str = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(end_day))
        out = subprocess.check_output(['git', 'log',
                                       '--since', start_str, '--until', end_str,
                                       '--format=format:%H %ct %aE', '--shortstat'],
            cwd=self.local_path)
        out = out.split('\n')

        hash_re = re.compile(r'^([a-z0-9]{40}) (\d+) ([^ ]+)')
        shortstat_re = re.compile(r'\s*\d+ files? changed(, (\d+) insertions?...)?(, (\d+) deletions?...)?')  # the '...' wildcards match the literal '(+)' / '(-)'
        rv = []
        while out:
            line = out.pop(0)
            mo = hash_re.match(line)
            if mo:
                rv.append(Rev())
                rv[-1].revision, rv[-1].when, rv[-1].author = mo.groups()
            mo = shortstat_re.match(line)
            if mo:
                if mo.group(2):
                    rv[-1].added = int(mo.group(2))
                if mo.group(4):
                    rv[-1].removed = int(mo.group(4))
        return rv
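For context, git's --shortstat lines look like " 3 files changed, 10 insertions(+), 2 deletions(-)"; a quick check of the regex above against such a line:

import re

shortstat_re = re.compile(r'\s*\d+ files? changed(, (\d+) insertions?...)?(, (\d+) deletions?...)?')
mo = shortstat_re.match(' 3 files changed, 10 insertions(+), 2 deletions(-)')
print(mo.group(2), mo.group(4))  # insertion and deletion counts: '10', '2'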
Example #18
File: service.py Project: jspring11/boto
 def create_msg(self, key, params=None):
     m = self.input_queue.new_message()
     if params:
         m.update(params)
     if key.path:
         t = os.path.split(key.path)
         m['OriginalLocation'] = t[0]
         m['OriginalFileName'] = t[1]
         mime_type = mimetypes.guess_type(t[1])[0]
         if mime_type == None:
             mime_type = 'application/octet-stream'
         m['Content-Type'] = mime_type
         s = os.stat(key.path)
         t = time.gmtime(s[7])    # s[7] == st_atime
         m['FileAccessedDate'] = time.strftime(ISO8601, t)
         t = time.gmtime(s[8])    # s[8] == st_mtime
         m['FileModifiedDate'] = time.strftime(ISO8601, t)
         t = time.gmtime(s[9])    # s[9] == st_ctime (inode change time on Unix, not creation)
         m['FileCreateDate'] = time.strftime(ISO8601, t)
     m['Date'] = time.strftime(RFC1123, time.gmtime())
     m['Host'] = gethostname()
     m['Bucket'] = key.bucket.name
     m['InputKey'] = key.key
     m['Size'] = key.size
     return m
Example #19
def fetch_data(server, port, location, size):
    readsize = 0
    attempts = 5
    while True:
        try:
            t1 = time.time()
            conn = httplib.HTTPConnection(server, port)
            #conn.set_debuglevel(1)
            conn.request("GET", location)
            resp = conn.getresponse()
            if resp.status == 200:
                data = resp.read(ReadSize)  # NOTE: uses module-level ReadSize, not the `size` parameter
                t2 = time.time()
                conn.close()
                if not (len(data) == ReadSize):
                    print time.strftime("%b %d - %H:%M:%S", time.gmtime()), "http://%s%s" % (server, location), resp.status, resp.reason, "size", len(data), "time", t2-t1
                    exit (0)
            break
        except socket.error, msg:
            print "socket error %s" % msg
            break
        except httplib.HTTPException, msg:
            print time.strftime("%b %d - %H:%M:%S", time.gmtime()), "read data http://%s:%d%s, http error %s" % (server, port, location, msg)
            exit(0)  # NOTE: exits immediately; the retry logic below is unreachable
            attempts -= 1
            if attempts == 0:
                raise
            else:
                print "try %d times" % (5-attempts)
                time.sleep(1)
                continue
Example #20
def log(s0, s1):
	# s0 = "E" 			<- Log error and EXIT
	# s0 = "I"			<- Log and print message
	# s0 = ""			<- Log method
	# s1 = "Hej hej"	<- Message
	if s0 == "E":
		output = "ERROR: " + strftime("%d %b %Y %H:%M:%S", gmtime()) + " " + inspect.stack()[1][3] + "() " + s1 + "\n"
		file = open("log", "a")
		file.write(output)
		print output # Print message in terminal
		i = 1
		s = ""
		# trace back
		while inspect.stack()[i][3] != "main":
			s = s + "\n" + inspect.stack()[i][3] + "() "
			i = i + 1
			
		s = s + "\n" + inspect.stack()[i][3] + "() "	
		print s
		file = open("log", "a")
		file.write(s)
		quit()	# And quit
	elif s0 == "I":	
		msg = strftime("%d %b %Y %H:%M:%S", gmtime()) + " INFO: "+ s1 + "\n"
		file = open("log", "a")
		file.write(msg)
		print s1
	else:
		output = strftime("%d %b %Y %H:%M:%S", gmtime()) + " " + inspect.stack()[1][3] + "() " + s1 + "\n"
		file = open("log", "a")
		file.write(output)
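A hypothetical usage sketch for the log() helper above, following its own s0 convention:

# Hypothetical calls illustrating the s0 codes documented above:
log("I", "service started")     # logged to file and printed
log("", "entering main loop")   # logged with the caller's function name
log("E", "cannot open config")  # logged with a call trace, then quits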
Example #21
def main(filenms, workdir, resultsdir):

    # Change to the specified working directory
    os.chdir(workdir)

    job = set_up_job(filenms, workdir, resultsdir)
    
    print "\nBeginning PALFA search of %s" % (', '.join(job.filenms))
    print "UTC time is:  %s"%(time.asctime(time.gmtime()))
    
    try:
        search_job(job)
    except:
        print "***********************ERRORS!************************"
        print "  Search has been aborted due to errors encountered."
        print "  See error output for more information."
        print "******************************************************"
        raise
    finally:
        clean_up(job)

        # And finish up
        job.total_time = time.time() - job.total_time
        print "\nFinished"
        print "UTC time is:  %s"%(time.asctime(time.gmtime()))

        # Write the job report
        # job.write_report(job.basefilenm+".report")
        job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))
Example #22
def run_tsung(xml, target, additional_file='', run_name=''):
    """Runs tsung tests with a given xml against the given target - Replaces localhost in the xml with the target and fetches the reports dir.

    :param xml: The path to the xml file to upload.
    :param target: The machine to run the test against.
    :param additional_file: The path to additional file to upload.
    :param run_name: Prepend the log directory of tsung with this.
    :returns: A tar.gz of the logs directory.
    
    **Example Usage:**

    .. code-block:: sh 
    
        fab -i awskey.pem -u root -H machine_with_tsung run_tsung:xml=tsung-tests/test.xml,target=machine_to_test_against,additional_file=tsung-tests/test.csv

    """
    put(xml,'current_test.xml')
    if additional_file:
        put(additional_file,'.')
    run("sed -i 's|targetmachine|"+target+"|' current_test.xml")
    from time import gmtime, strftime
    logdir = run_name+strftime("%Y%m%d%H%M%S",gmtime())
    run('mkdir '+logdir)
    run("tsung -f current_test.xml -l "+logdir+' start')
    with cd(os.path.join(logdir,strftime("%Y*",gmtime()))):
        run('/usr/lib/tsung/bin/tsung_stats.pl')
    run('tar -cf '+logdir+'.tar '+logdir)
    run('gzip '+logdir+'.tar')
    get(logdir+'.tar.gz','.')
Example #23
def lambda_handler(event, context):
    print "=== Start parsing EBS backup script. ==="
    ec2 = boto3.client('ec2')
    response = ec2.describe_instances()
    namesuffix = time.strftime('-%Y-%m-%d-%H-%M')
    data = None

    # Get current day + hour (using GMT)
    hh = int(time.strftime("%H", time.gmtime()))
    day = time.strftime("%a", time.gmtime()).lower()

    exclude_list = config['exclude']

    # Loop Volumes.
    try:
        for r in response['Reservations']:
            for ins in r['Instances']:
                for t in ins['Tags']:
                    if t['Key'] == 'Name':
                        for namestr in config['exclude_name']:
                            if namestr in t['Value']:
                                print 'Excluding Instance with ID ' + ins['InstanceId']
                                exclude_list.append(ins['InstanceId'])
                    if (ins['InstanceId'] not in exclude_list) and (not any('ignore' in t['Key'] for t in ins['Tags'])):
                        for tag in ins['Tags']:
                            if tag['Key'] == config['tag']:
                                data = tag['Value']

                        if data is None and config['auto-create-tag'] == 'true':
                            print "Instance %s doesn't contains the tag and auto create is enabled." % ins['InstanceId']
                            create_backup_tag(ins, ec2)
                            data = config['default']
                        schedule = json.loads(data)
                        data = None

                        if hh == schedule['time'][day] and not ins['State']['Name'] == 'terminated':
                            print "Getting the list of EBS volumes attached to \"%s\" ..." % ins['InstanceId']
                            volumes = ins['BlockDeviceMappings']
                            for vol in volumes:
                                vid = vol['Ebs']['VolumeId']
                                print "Creating snapshot of volume \"%s\" ..." % (vid)
                                snap_res = ec2.create_snapshot(VolumeId=vid, Description=vid + namesuffix)
                                if snap_res['State'] == 'error':
                                    notify_topic('Failed to create snapshot for volume with ID %s.\nCheck Cloudwatch \
                                                 logs for more details.' % vid)
                                    sys.exit(1)
                                elif maintain_retention(ec2, vid, schedule['retention']) != 0:
                                    print "Failed to maintain the retention period appropriately."
                    else:
                        print "Instance %s is successfully ignored." % ins['InstanceId']
    except botocore.exceptions.ClientError as e:
        print 'Received Boto client error %s' % e
    except KeyError as k:
        if config['auto-create-tag'] == 'true':
            print "Inside KeyError %s" % k
            create_backup_tag(ins, ec2)
    except ValueError:
        # invalid json
        print 'Invalid value for tag \"backup\" on instance \"%s\", please check!' % (ins['InstanceId'])
    print "=== Finished parsing EBS backup script. ==="
Example #24
def pbot(message, channel=''):
    if channel: 
        msg = '[%s %s] <%s> %s' % (time.strftime('%H:%M:%S', time.gmtime()), channel, 'BOT', message)
    else: 
        msg = '[%s] <%s> %s' % (time.strftime('%H:%M:%S', time.gmtime()), 'BOT', message)

    print msg
Example #25
File: message.py Project: 10sr/hue
 def for_key(self, key, params=None, bucket_name=None):
     if params:
         self.update(params)
     if key.path:
         t = os.path.split(key.path)
         self['OriginalLocation'] = t[0]
         self['OriginalFileName'] = t[1]
         mime_type = mimetypes.guess_type(t[1])[0]
         if mime_type is None:
             mime_type = 'application/octet-stream'
         self['Content-Type'] = mime_type
         s = os.stat(key.path)
         t = time.gmtime(s[7])    # s[7] == st_atime
         self['FileAccessedDate'] = get_ts(t)
         t = time.gmtime(s[8])    # s[8] == st_mtime
         self['FileModifiedDate'] = get_ts(t)
         t = time.gmtime(s[9])    # s[9] == st_ctime (inode change time on Unix, not creation)
         self['FileCreateDate'] = get_ts(t)
     else:
         self['OriginalFileName'] = key.name
         self['OriginalLocation'] = key.bucket.name
         self['ContentType'] = key.content_type
     self['Host'] = gethostname()
     if bucket_name:
         self['Bucket'] = bucket_name
     else:
         self['Bucket'] = key.bucket.name
     self['InputKey'] = key.name
     self['Size'] = key.size
Example #26
 def __call__(self, parser, namespace, values, option_string=None):
     """
     Each action should call this logic.
     """
         
     dis_dict = {'--add': '+',
                 '--remove': '-',
                 '-a': '+',
                 '-rm': '-',
                 '--reset': '*'}
     #debug
     #print '%r %r %r' % (namespace, values, option_string)
     if option_string == '--reset':
         print 'Base reset to: ', values
         Log.history.append(('*' + str(values),
                            time.strftime(STATIC_T, time.gmtime())))
         Log.save_log()
     elif option_string == '--clear':
         Log.history = []
         Log.save_log()
         os.remove(STATIC_P)
         sys.exit('Clear all data...OK')
     else:
         try:
             Log.history.append((dis_dict[option_string] + str(values),
                                 time.strftime(STATIC_T, time.gmtime())))
         except KeyError:
             pass
         else:
             Log.save_log()
         Log.print_log()
Example #27
File: critic.py Project: jensl/critic
def handleStaticResource(req):
    if req.path == "static-resource/":
        raise request.Forbidden("Directory listing disabled!")
    resources_path = os.path.join(
        configuration.paths.INSTALL_DIR, "resources")
    resource_path = os.path.abspath(os.path.join(
        resources_path, req.path.split("/", 1)[1]))
    if not resource_path.startswith(resources_path + "/"):
        raise request.Forbidden()
    if not os.path.isfile(resource_path):
        raise request.NotFound()
    last_modified = htmlutils.mtime(resource_path)
    HTTP_DATE = "%a, %d %b %Y %H:%M:%S GMT"
    if_modified_since = req.getRequestHeader("If-Modified-Since")
    if if_modified_since:
        try:
            if_modified_since = time.strptime(if_modified_since, HTTP_DATE)
        except ValueError:
            pass
        else:
            if last_modified <= calendar.timegm(if_modified_since):
                raise request.NotModified()
    req.addResponseHeader("Last-Modified", time.strftime(HTTP_DATE, time.gmtime(last_modified)))
    if req.query and req.query == htmlutils.base36(last_modified):
        req.addResponseHeader("Expires", time.strftime(HTTP_DATE, time.gmtime(time.time() + 2592000)))
        req.addResponseHeader("Cache-Control", "max-age=2592000")
    setContentTypeFromPath(req)
    req.start()
    with open(resource_path, "r") as resource_file:
        return [resource_file.read()]
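The If-Modified-Since handling above relies on HTTP dates being GMT, so time.strftime/time.strptime with time.gmtime and calendar.timegm round-trip cleanly; a small check:

import calendar
import time

HTTP_DATE = "%a, %d %b %Y %H:%M:%S GMT"
stamp = 1450000000
header = time.strftime(HTTP_DATE, time.gmtime(stamp))   # e.g. a Last-Modified value
assert calendar.timegm(time.strptime(header, HTTP_DATE)) == stamp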
Example #28
 def run(self, objfile):
     self.key = "PETimestamp"
     self.score = 0
     
     if objfile.get_type() == 'PE32' or objfile.get_type() == 'MS-DOS':
         timeStamp = None
         
         try:
             pe = PE(data=objfile.file_data)
             peTimeDateStamp = pe.FILE_HEADER.TimeDateStamp
             timeStamp = '0x%-8X' % (peTimeDateStamp)
             try:
                 timeStamp += ' [%s UTC]' % time.asctime(time.gmtime(peTimeDateStamp))
                 peYear = time.gmtime(peTimeDateStamp)[0]
                 thisYear = time.gmtime(time.time())[0]
                 if peYear < 2000 or peYear > thisYear:
                     timeStamp += " [SUSPICIOUS]"
                     self.score = 10
             except:
                 timeStamp += ' [SUSPICIOUS]'
                 self.score = 10
             
             return timeStamp
         except PEFormatError, e:
             log.warn("Error - No Portable Executable or MS-DOS: %s" % e)        
Example #29
File: server.py Project: skl1f/ukrgadget
 def post(self, *args, **kwargs):
     art = ArticleObject()
     art.title = self.get_argument("title")
     art.slug_name = self.get_argument("uri")
     art.content = self.get_argument("content")
     art.morecontent = self.get_argument("morecontent")
     try:
         q = strptime(self.get_argument("pub_date"), "%m/%d/%Y")
         art.pub_date = strftime("%Y-%m-%d", q)
         art.rfc822_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
     except:
         q = strptime(self.get_argument("pub_date"), "%Y-%m-%d")
         art.pub_date = self.get_argument("pub_date")
         art.rfc822_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
     art.author = self.get_argument("name")
     art.category = self.get_argument("category")
     art.tags = self.get_argument("tags")
     art.posted = False
     art.twit = False
     art.absolute_url = 'http://ukrgadget/' + \
         self.get_argument("category") + '/' + self.get_argument("uri")
     art.short_url = 'http://ukrgadget.com/go/' + \
         str(self.application.database.find().count() + 1)
     ident = self.application.database.insert(art.dict())
     self.redirect('/admin/edit/' + str(ident), permanent=False, status=302)
Example #30
 def importcalenderdata(self):
     res=0
     for entry in self.mw.calenders:
         if entry[0]==self.settings.caltype:
             filter={}
             for setting in _filter_keys:
                 if self.settings._data.has_key(setting) and self.settings._data[setting]!=None:
                     if setting=='start_offset':
                         tm=time.gmtime(time.time()-(self.settings._data[setting]*24*60*60))
                         date=(tm.tm_year, tm.tm_mon, tm.tm_mday)
                         filter['start']=date
                     elif setting=='end_offset':
                         tm=time.gmtime(time.time()+(self.settings._data[setting]*24*60*60))
                         date=(tm.tm_year, tm.tm_mon, tm.tm_mday)
                         filter['end']=date
                     elif setting=="categories":
                         filter[setting]=self.settings._data[setting].split("||")
                     else:
                          filter[setting]=self.settings._data[setting]
                 else:
                     if setting=='start_offset':
                         filter['start']=None
                     if setting=='end_offset':
                         filter['end']=None
             res=entry[2](self.mw.tree.GetActivePhone(), self.settings.calender_id, filter)
             if res==1:
                 self.log("Auto Sync: Imported calender OK")
     if not res:
         self.log("Auto Sync: Failed to import calender")
     return res
Example #31
        again = input("Do you want to order something else?(y/n)")
        if again!='y' and again!='yes' and again!='Y' and again!='Yes':
            if order[0]==[]:
                print("Nothing was ordered")
                exit()
            print("Your order is:")
            for k in range(len(order[0])):
                print("%i. %s"%(k+1,order[0][k]))
            print("For the price of £%d\n" % order[1])
            orderconf = input("Is your order ok? (y/n)")
            if orderconf=='y' or orderconf=='yes' or orderconf=='Y'or orderconf=='Yes':
                #order is of the form order = [["salad","pizza","coca-cola"], 15.2]
                postcode = input("What is your postcode?")
                res = fEnd.checkAddress(postcode)
                print(res)
                while res:
                    print("Unvalid postcode (type quit to exit)")
                    
                    postcode = input("What is your postcode?")
                    res = fEnd.checkAddress(postcode)
                    if postcode == 'quit':
                        exit()
                
                print("Order registered")
                print(fEnd.setOrder(order[0],order[1],strftime("%Y-%m-%d %H:%M:%S", gmtime()),name,postcode))
                print(fEnd.getOrders())
            else:
                print("Your command was cancelled")
            break
Example #32
File: httpd.py Project: xietalent/maltrail
        def _login(self, params):
            valid = False

            if params.get("username") and params.get("hash") and params.get("nonce"):
                if params.get("nonce") not in DISPOSED_NONCES:
                    DISPOSED_NONCES.add(params.get("nonce"))
                    for entry in (config.USERS or []):
                        entry = re.sub(r"\s", "", entry)
                        username, stored_hash, uid, netfilter = entry.split(':')
                        if username == params.get("username"):
                            try:
                                if params.get("hash") == hashlib.sha256(stored_hash.strip() + params.get("nonce")).hexdigest():
                                    valid = True
                                    break
                            except:
                                if config.SHOW_DEBUG:
                                    traceback.print_exc()

            if valid:
                session_id = os.urandom(SESSION_ID_LENGTH).encode("hex")
                expiration = time.time() + 3600 * SESSION_EXPIRATION_HOURS

                self.send_response(httplib.OK)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                self.send_header(HTTP_HEADER.SET_COOKIE, "%s=%s; expires=%s; path=/; HttpOnly" % (SESSION_COOKIE_NAME, session_id, time.strftime(HTTP_TIME_FORMAT, time.gmtime(expiration))))

                if netfilter in ("", "0.0.0.0/0"):
                    netfilters = None
                else:
                    addresses = set()
                    netmasks = set()

                    for item in set(re.split(r"[;,]", netfilter)):
                        item = item.strip()
                        if '/' in item:
                            _ = item.split('/')[-1]
                            if _.isdigit() and int(_) >= 16:
                                lower = addr_to_int(item.split('/')[0])
                                mask = make_mask(int(_))
                                upper = lower | (0xffffffff ^ mask)
                                while lower <= upper:
                                    addresses.add(int_to_addr(lower))
                                    lower += 1
                            else:
                                netmasks.add(item)
                        elif '-' in item:
                            _ = item.split('-')
                            lower, upper = addr_to_int(_[0]), addr_to_int(_[1])
                            while lower <= upper:
                                addresses.add(int_to_addr(lower))
                                lower += 1
                        elif re.search(r"\d+\.\d+\.\d+\.\d+", item):
                            addresses.add(item)

                    netfilters = netmasks
                    if addresses:
                        netfilters.add(get_regex(addresses))

                SESSIONS[session_id] = AttribDict({"username": username, "uid": uid, "netfilters": netfilters, "expiration": expiration, "client_ip": self.client_address[0]})
            else:
                time.sleep(UNAUTHORIZED_SLEEP_TIME)
                self.send_response(httplib.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")

            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
            content = "Login %s" % ("success" if valid else "failed")

            if not subprocess.mswindows:
                try:
                    subprocess.check_output("logger -p auth.info -t \"%s[%d]\" \"%s password for %s from %s port %s\"" % (NAME.lower(), os.getpid(), "Accepted" if valid else "Failed", params.get("username"), self.client_address[0], self.client_address[1]), stderr=subprocess.STDOUT, shell=True)
                except Exception:
                    if config.SHOW_DEBUG:
                        traceback.print_exc()

            return content
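The credential check above is a simple nonce challenge: the client proves knowledge of the stored hash by submitting sha256(stored_hash + nonce), so the stored hash never crosses the wire in replayable form once the nonce is disposed. A sketch of the matching client-side computation (hypothetical values; Python 2 str semantics, matching the example):

import hashlib

stored_hash = "5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8"  # hypothetical sha256 of the password
nonce = "d41d8cd98f"                                                              # hypothetical one-time nonce
client_hash = hashlib.sha256(stored_hash + nonce).hexdigest()  # value sent as params["hash"]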
Example #33
File: httpd.py Project: xietalent/maltrail
        def do_GET(self):
            path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
            params = {}
            content = None
            skip = False

            if hasattr(self, "data"):
                params.update(urlparse.parse_qs(self.data))

            if query:
                params.update(urlparse.parse_qs(query))

            for key in params:
                if params[key]:
                    params[key] = params[key][-1]

            if path == '/':
                path = "index.html"

            path = path.strip('/')
            extension = os.path.splitext(path)[-1].lower()

            if hasattr(self, "_%s" % path):
                content = getattr(self, "_%s" % path)(params)

            else:
                path = path.replace('/', os.path.sep)
                path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()

                if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
                    path = "%s.html" % path

                if ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS):
                    mtime = time.gmtime(os.path.getmtime(path))
                    if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE)

                    if if_modified_since and extension not in (".htm", ".html"):
                        if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0]
                        if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)):
                            self.send_response(httplib.NOT_MODIFIED)
                            self.send_header(HTTP_HEADER.CONNECTION, "close")
                            skip = True

                    if not skip:
                        content = open(path, "rb").read()
                        last_modified = time.strftime(HTTP_TIME_FORMAT, mtime)
                        self.send_response(httplib.OK)
                        self.send_header(HTTP_HEADER.CONNECTION, "close")
                        self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
                        self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified)
                        if extension not in (".htm", ".html"):
                            self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT")        # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/
                            self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate")  # Reference: http://stackoverflow.com/a/5084555
                        else:
                            self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache")

                else:
                    self.send_response(httplib.NOT_FOUND)
                    self.send_header(HTTP_HEADER.CONNECTION, "close")
                    content = '<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0]

            if content is not None:
                for match in re.finditer(r"<\!(\w+)\!>", content):
                    name = match.group(1)
                    _ = getattr(self, "_%s" % name.lower(), None)
                    if _:
                        content = self._format(content, **{ name: _() })

                if "gzip" in self.headers.getheader(HTTP_HEADER.ACCEPT_ENCODING, ""):
                    self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
                    _ = cStringIO.StringIO()
                    compress = gzip.GzipFile("", "w+b", 9, _)
                    compress._stream = _
                    compress.write(content)
                    compress.flush()
                    compress.close()
                    content = compress._stream.getvalue()

                self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))

            self.end_headers()

            if content:
                self.wfile.write(content)

            self.wfile.flush()
            self.wfile.close()
Example #34
 def renew_premium_time(self):
     return time.strftime("%d %b %Y at %H:%M:%S",
                          time.gmtime(self.premium_expires_utc))
Example #35
import re
import subprocess
import time
import urllib
from argparse import ArgumentParser
import logging
import shutil

from . import _
from . import common
from . import index
from . import update
from .exception import FDroidException

config = None
options = None
start_timestamp = time.gmtime()

BINARY_TRANSPARENCY_DIR = 'binary_transparency'

AUTO_S3CFG = '.fdroid-deploy-s3cfg'
USER_S3CFG = 's3cfg'
REMOTE_HOSTNAME_REGEX = re.compile(r'\W*\w+\W+(\w+).*')


def update_awsbucket(repo_section):
    """Upload the contents of the directory `repo_section` (including subdirectories) to the AWS S3 "bucket".

    The contents of that subdir of the
    bucket will first be deleted.

    Requires AWS credentials set in config.yml: awsaccesskeyid, awssecretkey
Example #36
def helpscout_webhook():
    return jsonify({"html": "<b>test</b> test %s" % time.gmtime()})
Example #37
import time

epoch = time.gmtime(0)
print(time.strftime('%c', epoch)) #01/01/70 00:00:00
print(time.strftime('%x', epoch)) #01/01/70
print(time.strftime('%X', epoch)) #00:00:00
print(time.strftime('%A, %B %d, %Y, %I:%M %p', epoch))
#Thursday, January 01, 1970, 12:00 AM

independence_day = time.strptime('07/04/1776','%m/%d/%Y')
Example #38
 def getTime():
     return time.gmtime()
Example #39
def getTimeStamp():
    import calendar
    import time
    return calendar.timegm(time.gmtime())
Example #40
        for user, item_i, item_j in train_loader:

            model.zero_grad()
            prediction_i, prediction_j = model(user, item_i, item_j)
            loss = -(prediction_i - prediction_j).sigmoid().log().sum()  # BPR pairwise ranking loss
            loss.backward()
            optimizer.step()
            # writer.add_scalar('data/loss', loss.item(), count)
            count += 1

        model.eval()
        HR, NDCG = evaluate.metrics(model, test_loader, args.top_k)

        elapsed_time = time.time() - start_time
        print("The time elapse of epoch {:03d}".format(epoch) + " is: " +
              time.strftime("%H: %M: %S", time.gmtime(elapsed_time)))
        print('Top_Ks:', args.top_k, '\tHR:', HR, '\tNDCG', NDCG)
        HR_total.append(HR)
        NDCG_total.append(NDCG)
        if HR[0] > best_hr[0]:
            best_hr, best_ndcg, best_epoch = HR, NDCG, epoch
            if args.out:
                if not os.path.exists(config.model_path):
                    os.mkdir(config.model_path)
                torch.save(model, '{}BPR.pt'.format(config.model_path))
    HR_df = pd.DataFrame(
        HR_total,
        columns=['Top-1', 'Top-3', 'Top-5', 'Top-10', 'Top-20', 'Top-50'])
    NDCG_df = pd.DataFrame(
        NDCG_total,
        columns=['Top-1', 'Top-3', 'Top-5', 'Top-10', 'Top-20', 'Top-50'])
Example #41
sys.path.append("../../configuration")

if os.path.isfile("log.log"):
	os.remove("log.log")
log = open("log.log", "w")
from scripts import *
from buildsite import *
from process import *
from tools import *
from directories import *

printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Setup ig")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%M GMT", time.gmtime(time.time())))
printLog(log, "")

# Setup source directories
printLog(log, ">>> Setup source directories <<<")
for dir in IgLandSourceDirectories:
	mkPath(log, DatabaseDirectory + "/" + dir)
for dir in IgOtherSourceDirectories:
	mkPath(log, DatabaseDirectory + "/" + dir)
mkPath(log, DatabaseDirectory + "/" + LigoBaseSourceDirectory)

# Setup export directories
printLog(log, ">>> Setup export directories <<<")
mkPath(log, ExportBuildDirectory + "/" + IgStaticLandExportDirectory)
mkPath(log, ExportBuildDirectory + "/" + IgStaticOtherExportDirectory)
mkPath(log, ExportBuildDirectory + "/" + IgStaticTagExportDirectory)
Example #42
    def getTime():
        return time.localtime()
        
def sendCommand(command, data):
    command = preamble + command + data + postamble
    ser.write(bytes.fromhex(command))
    ser.read_until(bytes.fromhex(postamble)) # the send command (why, oh why?)
    response = ser.read_until(bytes.fromhex(postamble)) 
    if (response != bytes.fromhex(responseOk)):
        exit('Error: Command ' + command + ' did not receive OK from radio.')
        
print('Setting Icom clock to', getTime().tm_zone, '. This may take up to 1 minute to complete.')

# Set UTC offset on radio
if(time.localtime().tm_gmtoff < 0):
    offsetData = time.strftime('%H%M', time.gmtime(time.localtime().tm_gmtoff * -1))
    if (swapclock):
        offsetData += '00'
    else:
        offsetData += '01'
else :
    offsetData = time.strftime('%H%M', time.gmtime(time.localtime().tm_gmtoff))
    if (swapclock):
        offsetData += '01'
    else:
        offsetData += '00'
sendCommand(setUtcOffsetCommand, offsetData)

# Set date on radio
dateData = time.strftime('%Y%m%d', getTime())
sendCommand(setDateCommand, dateData)
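A note on the UTC-offset formatting trick above: feeding the offset in seconds to time.gmtime() and formatting with %H%M yields the HHMM string the radio expects, as long as the offset is non-negative (hence the * -1 applied to negative offsets):

import time

# Format a +05:30 offset (in seconds) as the HHMM string used by the radio
offset_seconds = 5 * 3600 + 30 * 60
print(time.strftime('%H%M', time.gmtime(offset_seconds)))  # -> 0530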
Example #43
def main():  # pylint: disable=too-many-statements, too-many-branches, too-many-locals
    """ hey, it's main """
    global JIRA_BASE_URL  #pylint: disable=global-statement
    global BACKWARD_INCOMPATIBLE_LABEL  #pylint: disable=global-statement
    global SORTTYPE  #pylint: disable=global-statement
    global SORTORDER  #pylint: disable=global-statement
    global NUM_RETRIES  #pylint: disable=global-statement
    global EXTENSION  #pylint: disable=global-statement

    options = parse_args()

    if options.output_directory is not None:
        # Create the output directory if it does not exist.
        try:
            if not os.path.exists(options.output_directory):
                os.makedirs(options.output_directory)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(
                    options.output_directory):
                pass
            else:
                print("Unable to create output directory %s: %u, %s" % \
                        (options.output_directory, exc.errno, exc.message))
                sys.exit(1)
        os.chdir(options.output_directory)

    if options.base_url is not None:
        JIRA_BASE_URL = options.base_url

    if options.incompatible_label is not None:
        BACKWARD_INCOMPATIBLE_LABEL = options.incompatible_label

    if options.extension is not None:
        EXTENSION = options.extension

    projects = options.projects

    if options.range is True:
        versions = [
            Version(v)
            for v in GetVersions(options.versions, projects).getlist()
        ]
    else:
        versions = [Version(v) for v in options.versions]
    versions.sort()

    SORTTYPE = options.sorttype
    SORTORDER = options.sortorder

    if options.title is None:
        title = projects[0]
    else:
        title = options.title

    if options.retries is not None:
        NUM_RETRIES = options.retries[0]

    haderrors = False

    for version in versions:
        vstr = str(version)
        linter = Linter(vstr, options)
        jlist = sorted(JiraIter(vstr, projects))
        if not jlist and not options.empty:
            print("There is no issue which has the specified version: %s" %
                  version)
            continue

        if vstr in RELEASE_VERSION:
            reldate = RELEASE_VERSION[vstr]
        elif options.usetoday:
            reldate = strftime("%Y-%m-%d", gmtime())
        else:
            reldate = "Unreleased (as of %s)" % strftime("%Y-%m-%d", gmtime())

        if not os.path.exists(vstr) and options.versiondirs:
            os.mkdir(vstr)

        if options.versionfiles and options.versiondirs:
            reloutputs = Outputs(
                "%(ver)s/RELEASENOTES.%(ver)s%(ext)s",
                "%(ver)s/RELEASENOTES.%(key)s.%(ver)s%(ext)s", [], {
                    "ver": version,
                    "date": reldate,
                    "title": title,
                    "ext": EXTENSION
                })
            choutputs = Outputs("%(ver)s/CHANGELOG.%(ver)s%(ext)s",
                                "%(ver)s/CHANGELOG.%(key)s.%(ver)s%(ext)s", [],
                                {
                                    "ver": version,
                                    "date": reldate,
                                    "title": title,
                                    "ext": EXTENSION
                                })
        elif options.versiondirs:
            reloutputs = Outputs("%(ver)s/RELEASENOTES%(ext)s",
                                 "%(ver)s/RELEASENOTES.%(key)s%(ext)s", [], {
                                     "ver": version,
                                     "date": reldate,
                                     "title": title,
                                     "ext": EXTENSION
                                 })
            choutputs = Outputs("%(ver)s/CHANGELOG%(ext)s",
                                "%(ver)s/CHANGELOG.%(key)s%(ext)s", [], {
                                    "ver": version,
                                    "date": reldate,
                                    "title": title,
                                    "ext": EXTENSION
                                })
        elif options.versionfiles:
            reloutputs = Outputs("RELEASENOTES.%(ver)s%(ext)s",
                                 "RELEASENOTES.%(key)s.%(ver)s%(ext)s", [], {
                                     "ver": version,
                                     "date": reldate,
                                     "title": title,
                                     "ext": EXTENSION
                                 })
            choutputs = Outputs("CHANGELOG.%(ver)s%(ext)s",
                                "CHANGELOG.%(key)s.%(ver)s%(ext)s", [], {
                                    "ver": version,
                                    "date": reldate,
                                    "title": title,
                                    "ext": EXTENSION
                                })
        else:
            reloutputs = Outputs("RELEASENOTES%(ext)s",
                                 "RELEASENOTES.%(key)s%(ext)s", [], {
                                     "ver": version,
                                     "date": reldate,
                                     "title": title,
                                     "ext": EXTENSION
                                 })
            choutputs = Outputs("CHANGELOG%(ext)s", "CHANGELOG.%(key)s%(ext)s",
                                [], {
                                    "ver": version,
                                    "date": reldate,
                                    "title": title,
                                    "ext": EXTENSION
                                })

        if options.license is True:
            reloutputs.write_all(ASF_LICENSE)
            choutputs.write_all(ASF_LICENSE)

        relhead = '# %(title)s %(key)s %(ver)s Release Notes\n\n' \
                  'These release notes cover new developer and user-facing ' \
                  'incompatibilities, important issues, features, and major improvements.\n\n'
        chhead = '# %(title)s Changelog\n\n' \
                 '## Release %(ver)s - %(date)s\n'\
                 '\n'

        reloutputs.write_all(relhead)
        choutputs.write_all(chhead)

        incompatlist = []
        importantlist = []
        buglist = []
        improvementlist = []
        newfeaturelist = []
        subtasklist = []
        tasklist = []
        testlist = []
        otherlist = []

        for jira in jlist:
            if jira.get_incompatible_change():
                incompatlist.append(jira)
            elif jira.get_important():
                importantlist.append(jira)
            elif jira.get_type() == "Bug":
                buglist.append(jira)
            elif jira.get_type() == "Improvement":
                improvementlist.append(jira)
            elif jira.get_type() == "New Feature":
                newfeaturelist.append(jira)
            elif jira.get_type() == "Sub-task":
                subtasklist.append(jira)
            elif jira.get_type() == "Task":
                tasklist.append(jira)
            elif jira.get_type() == "Test":
                testlist.append(jira)
            else:
                otherlist.append(jira)

            line = '* [%s](' % (sanitize_text(jira.get_id())) + JIRA_BASE_URL + \
                   '/browse/%s) | *%s* | **%s**\n' \
                   % (sanitize_text(jira.get_id()),
                      sanitize_text(jira.get_priority()), sanitize_text(jira.get_summary()))

            if jira.get_release_note() or \
               jira.get_incompatible_change() or jira.get_important():
                reloutputs.write_key_raw(jira.get_project(), "\n---\n\n")
                reloutputs.write_key_raw(jira.get_project(), line)
                if not jira.get_release_note():
                    line = '\n**WARNING: No release note provided for this change.**\n\n'
                else:
                    line = '\n%s\n\n' % (processrelnote(
                        jira.get_release_note()))
                reloutputs.write_key_raw(jira.get_project(), line)

            linter.lint(jira)

        if linter.enabled:
            print(linter.message())
            if linter.had_errors():
                haderrors = True
                if os.path.exists(vstr):
                    shutil.rmtree(vstr)
                continue

        reloutputs.write_all("\n\n")
        reloutputs.close()

        if options.skip_credits:
            change_header21 = "| JIRA | Summary | Priority | " + \
                     "Component |\n"
            change_header22 = "|:---- |:---- | :--- |:---- |\n"
        else:
            change_header21 = "| JIRA | Summary | Priority | " + \
                         "Component | Reporter | Contributor |\n"
            change_header22 = "|:---- |:---- | :--- |:---- |:---- |:---- |\n"

        if incompatlist:
            choutputs.write_all("### INCOMPATIBLE CHANGES:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(incompatlist, options.skip_credits,
                                 JIRA_BASE_URL)

        if importantlist:
            choutputs.write_all("\n\n### IMPORTANT ISSUES:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(importantlist, options.skip_credits,
                                 JIRA_BASE_URL)

        if newfeaturelist:
            choutputs.write_all("\n\n### NEW FEATURES:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(newfeaturelist, options.skip_credits,
                                 JIRA_BASE_URL)

        if improvementlist:
            choutputs.write_all("\n\n### IMPROVEMENTS:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(improvementlist, options.skip_credits,
                                 JIRA_BASE_URL)

        if buglist:
            choutputs.write_all("\n\n### BUG FIXES:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(buglist, options.skip_credits, JIRA_BASE_URL)

        if testlist:
            choutputs.write_all("\n\n### TESTS:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(testlist, options.skip_credits, JIRA_BASE_URL)

        if subtasklist:
            choutputs.write_all("\n\n### SUB-TASKS:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(subtasklist, options.skip_credits,
                                 JIRA_BASE_URL)

        if tasklist or otherlist:
            choutputs.write_all("\n\n### OTHER:\n\n")
            choutputs.write_all(change_header21)
            choutputs.write_all(change_header22)
            choutputs.write_list(otherlist, options.skip_credits,
                                 JIRA_BASE_URL)
            choutputs.write_list(tasklist, options.skip_credits, JIRA_BASE_URL)

        choutputs.write_all("\n\n")
        choutputs.close()

    if options.index:
        buildindex(title, options.license)
        buildreadme(title, options.license)

    if options.prettyindex:
        buildprettyindex(title, options.license)

    if haderrors is True:
        sys.exit(1)
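# A small illustration of the path templates used above: Outputs (defined
# earlier in this script) fills them with %-style dict formatting. The
# values below are made up.
template = "%(ver)s/RELEASENOTES.%(key)s.%(ver)s%(ext)s"
print(template % {"ver": "3.3.0", "key": "HADOOP", "ext": ".md"})
# -> 3.3.0/RELEASENOTES.HADOOP.3.3.0.md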
Example #44
0
        self.print("%s = " % var, end="")
        self.pprint(data)
        self.print("")

    def print(self, *args, **kwargs):
        print(*args, file=self._stream, **kwargs)


output = open(os.path.join(os.path.dirname(__file__), 'edost', 'codes.py'),
              'w')
printer = Printer(stream=output, indent=4, width=1000)
printer.print(
    "# coding: utf-8\n"
    "# This file was automatically generated on %s\n"
    "from __future__ import unicode_literals\n" %
    strftime("%Y-%m-%d %H:%M:%S UTC", gmtime()), )


def generate_tariff_codes():
    soup = BeautifulSoup(
        urllib2.urlopen("http://edost.ru/kln/help.html").read().decode(
            'cp1251'))
    a = soup.find('a', attrs={'name': 'DeliveryCode'})
    codes = []
    for tr in a.find('table', attrs={'width': '100%'}).find_all('tr')[1:]:
        tds = tr.find_all('td')
        tariff_id = int(tds[0].p.text)
        tariff_name = tds[1].p.text
        codes.append((tariff_id, tariff_name))
    printer.print_var("EDOST_TARIFFS", codes)
Example #45
0
            self.util.click('done')
        elif self.util.pixel('done', 'done disabled', img):
            self.util.click('no')
            self.util.click('done')
        elif self.util.pixel('title', img):
            self.util.click('title ok')
        elif self.util.pixel('lose', img):
            lose += 1
            self.util.click('lose')
            self.util.click('lose ok')
            img.writeToFile('lose_' + time.strftime('%d_%H:%M:%S') + '.png')
        elif self.util.pixel('stage', img):
            stage += 1
            self.util.click('activity', 2)
            self.util.click('quest4', 2)
            self.util.click('friend6', 2)
            self.util.click('step3', 2)
            self.util.click('step4', 2)
            self.util.click('step5', 2)
            combo = 0

        current = self.device.getProperty('am.current.comp.class')
        end = time.time()
        print '     end:', time.localtime(end)
        print '   start:', time.localtime(start)
        print 'duration:', time.strftime('%H:%M:%S', time.gmtime(end - start))
        print '    play:', play
        print '   stage:', stage
        print '    each:', (play / stage)
        print '    lose:', lose
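# The duration line above relies on time.gmtime wrapping raw seconds into a
# struct_time; this only reads correctly for spans under 24 hours:
import time

print(time.strftime('%H:%M:%S', time.gmtime(3661)))   # 01:01:01
print(time.strftime('%H:%M:%S', time.gmtime(90000)))  # 01:00:00 (the day is lost)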
Example #46
0
def yum_updates(_broker):
    """
    This datasource provides a list of available updates on the system.
    It uses the yum python library installed locally, and collects list of
    available package updates, along with advisory info where applicable.

    Sample data returned::

        {
          "releasever": "8",
          "basearch": "x86_64",
          "update_list": {
            "NetworkManager-1:1.22.8-4.el8.x86_64": {
              "available_updates": [
                {
                  "package": "NetworkManager-1:1.22.8-5.el8_2.x86_64",
                  "repository": "rhel-8-for-x86_64-baseos-rpms",
                  "basearch": "x86_64",
                  "releasever": "8",
                  "erratum": "RHSA-2020:3011"
                }
              ]
            }
          },
          "metadata_time": "2021-01-01T09:39:45Z"
        }

    Returns:
        list: List of available updates
    Raises:
        SkipComponent: Raised when neither dnf nor yum is found
    """
    if UpdatesManager is None:
        raise SkipComponent()

    with UpdatesManager() as umgr:
        umgr.load()

        response = {
            "releasever": umgr.releasever,
            "basearch": umgr.basearch,
            "update_list": {},
        }

        for pkg in umgr.installed_packages():
            nevra, updates_list = umgr.updates(pkg)
            if updates_list:
                out_list = []
                update_list = umgr.sorted_pkgs(updates_list)

                for p in update_list:
                    pkg_dict = {
                        "package": umgr.pkg_nevra(p),
                        "repository": umgr.pkg_repo(p),
                        "basearch": response["basearch"],
                        "releasever": response["releasever"],
                    }

                    erratum = umgr.advisory(p)
                    if erratum:
                        pkg_dict["erratum"] = erratum

                    out_list.append(pkg_dict)

                response["update_list"][nevra] = {
                    "available_updates": out_list
                }

        ts = umgr.last_update()
        if ts:
            response["metadata_time"] = time.strftime("%FT%TZ",
                                                      time.gmtime(ts))

    return DatasourceProvider(
        content=json.dumps(response),
        relative_path='insights_commands/yum_updates_list')
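# The "%FT%TZ" format above uses glibc shorthands (%F == %Y-%m-%d,
# %T == %H:%M:%S); the expanded form is portable and reproduces the
# metadata_time shown in the sample data:
import time

print(time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(1609493985)))
# -> 2021-01-01T09:39:45Z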
Example #47
0
def datetime_diff(time1, time2):
    t1 = datetime.datetime.strptime(time1, datetime_str_format)
    t2 = datetime.datetime.strptime(time2, datetime_str_format)
    diff = max(t1, t2) - min(t1, t2)
    # NB: diff.seconds discards whole days; see the checked variant below
    return time.strftime(hour_str_format, time.gmtime(diff.seconds))
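# A hedged variant that refuses spans gmtime cannot represent; it assumes
# datetime_str_format and hour_str_format are defined as in the original:
import datetime
import time

def datetime_diff_checked(time1, time2):
    t1 = datetime.datetime.strptime(time1, datetime_str_format)
    t2 = datetime.datetime.strptime(time2, datetime_str_format)
    total = abs((t1 - t2).total_seconds())
    if total >= 24 * 3600:
        raise ValueError("difference exceeds 24 hours")
    return time.strftime(hour_str_format, time.gmtime(total))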
Example #48
0
def date_time_string(self, t):
    return time.strftime('%a, %d %b %Y %H:%M:%S', time.gmtime(t))
Example #49
0
def get_current_hour_gmt():
    return strftime("%H", gmtime())
Example #50
0
def DisplayDateTime():
	# Month day, Year, Hour:Minute:Seconds
	date_time = time.strftime("%B %d, %Y, %H:%M:%S", time.gmtime())
	print("Program run: ", date_time)
Example #51
0
def unixtime2date(t):
    return time.strftime("%Y-%m-%d", time.gmtime(t))
Example #52
0
            # print('success!')
            try:
                # print("try connect db")
                conn = psycopg2.connect("dbname='experiments' user='******' host='131.217.63.180' port='5432'")
            except Exception as e:
                # print("can't connect to postgresql server\n %s" % e)
                pass
            else:
                # print("connected to postgresql db")
                cur = conn.cursor()
                # tell the db that the extraction copy is done
                sql = """UPDATE catalog_experimentinstance SET extract='u' WHERE experiment_id LIKE %s and station LIKE %s;"""

                try:
                    cur.execute(sql, (Exper, station,))
                except Exception as e:
                    print("update failed")
                else:
                    cur.execute("""COMMIT;""")
                    # print("updated %s %s " % (exp, station))
                cur.close()
                conn.close()
            d = time.gmtime()
            day = int(d[7])  # tm_yday: 1-based day of the year
            body = 'On day %s copy of %s mk5 to flexbuff completed' % (day, exp)
            for rec in to_addr:
                cmd='echo %s | mail -s %s  %s' %(body,subject,rec)
                send=subprocess.call(cmd, shell=True)
            #cmd='echo %s | mail -s %s  [email protected]' %(body,subject)
            log.close()
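# gmtime()[7] used above is tm_yday, the 1-based day of the year:
import time

print(time.gmtime(0)[7])       # 1 (1970-01-01)
print(time.gmtime(0).tm_yday)  # same field, accessed by name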
Example #53
0
    def __enrich_items(self):

        time_start = time.time()

        # logger.info('%s starts for %s ', 'enrichment', self.backend_section)
        logger.info('[%s] enrichment phase starts', self.backend_section)
        print("Enrichment for {}: starting...".format(self.backend_section))

        cfg = self.config.get_conf()

        if 'scroll_size' in cfg['general']:
            ElasticItems.scroll_size = cfg['general']['scroll_size']

        if 'bulk_size' in cfg['general']:
            ElasticSearch.max_items_bulk = cfg['general']['bulk_size']

        no_incremental = False
        github_token = None
        pair_programming = False
        node_regex = None
        if 'github' in cfg and 'backend_token' in cfg['github']:
            github_token = cfg['github']['backend_token']
        if 'git' in cfg and 'pair-programming' in cfg['git']:
            pair_programming = cfg['git']['pair-programming']
        if 'jenkins' in cfg and 'node_regex' in cfg['jenkins']:
            node_regex = cfg['jenkins']['node_regex']
        only_studies = False
        only_identities = False

        # repos could change between executions because changes in projects
        repos = TaskProjects.get_repos_by_backend_section(self.backend_section, raw=False)

        if not repos:
            logger.warning("No enrich repositories for %s", self.backend_section)

        # Get the metadata__timestamp value of the last item inserted in the enriched index before
        # looping over the repos which data is stored in the same index. This is needed to make sure
        # that the incremental enrichment works for data sources that are collected globally but only
        # partially enriched.
        elastic_enrich = get_elastic(cfg['es_enrichment']['url'], cfg[self.backend_section]['enriched_index'])
        last_enrich_date = elastic_enrich.get_last_item_field("metadata__timestamp")
        if last_enrich_date:
            last_enrich_date = last_enrich_date.replace(second=0, microsecond=0, tzinfo=None)

        for repo in repos:
            # First process p2o params from repo
            p2o_args = self._compose_p2o_params(self.backend_section, repo)
            filter_raw = p2o_args['filter-raw'] if 'filter-raw' in p2o_args else None
            filters_raw_prefix = p2o_args['filter-raw-prefix'] if 'filter-raw-prefix' in p2o_args else None
            jenkins_rename_file = p2o_args['jenkins-rename-file'] if 'jenkins-rename-file' in p2o_args else None
            url = p2o_args['url']
            # Second process perceval params from repo
            backend_args = self._compose_perceval_params(self.backend_section, url)
            studies_args = None

            backend = self.get_backend(self.backend_section)
            if 'studies' in self.conf[self.backend_section] and \
                    self.conf[self.backend_section]['studies']:
                studies_args = self.__load_studies()

            logger.info('[%s] enrichment starts for %s', self.backend_section, repo)
            es_enrich_aliases = self.select_aliases(cfg, self.backend_section)

            try:
                es_col_url = self._get_collection_url()
                enrich_backend(es_col_url, self.clean, backend, backend_args,
                               self.backend_section,
                               cfg[self.backend_section]['raw_index'],
                               cfg[self.backend_section]['enriched_index'],
                               None,  # projects_db is deprecated
                               cfg['projects']['projects_file'],
                               cfg['sortinghat']['database'],
                               no_incremental, only_identities,
                               github_token,
                               False,  # studies are executed in its own Task
                               only_studies,
                               cfg['es_enrichment']['url'],
                               None,  # args.events_enrich
                               cfg['sortinghat']['user'],
                               cfg['sortinghat']['password'],
                               cfg['sortinghat']['host'],
                               None,  # args.refresh_projects,
                               None,  # args.refresh_identities,
                               author_id=None,
                               author_uuid=None,
                               filter_raw=filter_raw,
                               filters_raw_prefix=filters_raw_prefix,
                               jenkins_rename_file=jenkins_rename_file,
                               unaffiliated_group=cfg['sortinghat']['unaffiliated_group'],
                               pair_programming=pair_programming,
                               node_regex=node_regex,
                               studies_args=studies_args,
                               es_enrich_aliases=es_enrich_aliases,
                               last_enrich_date=last_enrich_date,
                               projects_json_repo=repo)
            except Exception as ex:
                logger.error("Something went wrong producing enriched data for %s . "
                             "Using the backend_args: %s ", self.backend_section, str(backend_args))
                logger.error("Exception: %s", ex)
                raise DataEnrichmentError('Failed to produce enriched data for ' + self.backend_section)

            logger.info('[%s] enrichment finished for %s', self.backend_section, repo)

        spent_time = time.strftime("%H:%M:%S", time.gmtime(time.time() - time_start))
        logger.info('[%s] enrichment phase finished in %s', self.backend_section, spent_time)
        print("Enrichment for {}: finished after {} hours".format(self.backend_section,
                                                                  spent_time))
Example #54
0
def getLastOpenDate(path):
    lastTimeOpened = os.path.getatime(path)
    formatted = strftime("%m/%Y", gmtime(lastTimeOpened)).split("/")
    returner = [int(formatted[0]), int(formatted[1])]
    return returner
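# getLastOpenDate returns [month, year] as ints; for an access time at the
# epoch the pipeline works out like this:
from time import gmtime, strftime

formatted = strftime("%m/%Y", gmtime(0)).split("/")
print([int(formatted[0]), int(formatted[1])])  # [1, 1970]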
Example #55
0
File: main.py Project: hsd1503/RDPD
    try:
        os.stat('res')
    except OSError:
        os.mkdir('res')

    dataset = 'mimic_diag'  # or 'pamap' or 'ptbdb'

    is_budget_save = True

    is_restore = False
    if is_restore:
        restore_run_id = ''
        with open('res/{0}.pkl'.format(restore_run_id), 'rb') as fin:
            restore_res = dill.load(fin)

    suffix = 'mimic'
    run_id = '{0}_{1}'.format(strftime("%Y%m%d_%H%M%S", gmtime()), suffix)

    ### ---------------------------- hyper-parameters ----------------------------

    n_epoch = 10
    n_run = 1
    temperature_list = [5]
    data_typ_list = list(range(n_run))

    ### poor data modalities
    if dataset == 'pamap':
        view_list = [
            list(range(1, 18)),
            list(range(18, 35)),
            list(range(35, 52))
        ]
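# The run_id pattern above ("%Y%m%d_%H%M%S" plus a suffix) is reproducible
# from any epoch value; a quick check:
from time import gmtime, strftime

print('{0}_{1}'.format(strftime("%Y%m%d_%H%M%S", gmtime(0)), 'mimic'))
# -> 19700101_000000_mimic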
Example #56
0
    for root, subdirs, files in os.walk(rootdir):
        for filename in files:
            try:
                pathname = os.path.join(root, filename)
                add(pathname)
            except:
                traceback.print_exc()

filecount = 0
playlist = open(output, 'w')
spreadsheet_output = open(spreadsheet, 'w')
playlist.write('#EXTM3U\n')
basenames = sorted(timesForBasenames.keys())
for basename in basenames:
    timesAndPaths = timesForBasenames[basename]
    times = sorted(timesAndPaths.keys())
    times.reverse()
    filecount = filecount + 1
    for tyme in times:
        timestring = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(tyme))
        print('[%5d %s] %s\n                            %s' % (filecount, timestring, basename, timesAndPaths[tyme]))
    playlist.write('#EXTINF:-1,%s\n' % basename)
    pathname = timesAndPaths[times[0]]
    playlist.write('%s\n' % pathname)
    spreadsheet_output.write('Michael Gogins\t%s\t%s\t%s\n' % (basename, pathname, timestring))
    print()
playlist.write('\n')

print('Finished with', filecount, 'files.')

Example #57
0
File: reminder_job.py Project: EQ4/DRR
def find_requests_and_send_mail(config):
  os.chdir(os.path.dirname(os.path.realpath(__file__)))
  db = DB.connect(db_file='../db/main.db')
  now = time.time()
  what_we_be_done_with = db['c'].execute('select * from reminders where end_time < %d' % now).fetchall()

  for row in DB.map(what_we_be_done_with, 'reminders', db=db):
    row['link'] = "http://indycast.net/%s/slices/%s_%d" % ( row['station'], time.strftime("%Y%m%d%H%M", time.gmtime(row['start_time'] - row['offset'] * 60)), (row['end_time'] - row['start_time']) / 60)

    if len(row['notes']):
      row['link'] += '/%s_on_%s' % (re.sub(r'[^\w]', '_', row['notes']).strip('_'), row['station'])

    row['link'] += '.mp3'

    email = do_template(template_file='email_reminder_template.txt', settings=row)
    res = misc.send_email(config=config, who=row['email'], subject=email['subject'], body=email['body'])
    db['c'].execute('delete from reminders where id = %d' % row['id'])
    db['conn'].commit()

  return None
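# The slice link above encodes the offset-adjusted start as YYYYMMDDHHMM in
# UTC; with made-up values:
import time

start_time, offset = 86400, 5
print(time.strftime("%Y%m%d%H%M", time.gmtime(start_time - offset * 60)))
# -> 197001012355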
Example #58
0
File: utils.py Project: neildesai100/crowd
def get_ts(ts=None):
    if not ts:
        ts = time.gmtime()
    return time.strftime(ISO8601, ts)
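# ISO8601 is a constant defined elsewhere in utils.py; a common value
# (assumed here) makes get_ts produce Zulu timestamps:
import time

ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
print(time.strftime(ISO8601, time.gmtime(0)))  # 1970-01-01T00:00:00Z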
Example #59
0
        staircase=True)

    # train_step = tf.train.AdamOptimizer(5e-3).minimize(loss)
    # train_step = tf.train.GradientDescentOptimizer(1e-4).minimize(loss)
    # train_step = tf.train.MomentumOptimizer(1e-4, 0.95).minimize(loss)
    train_step = tf.train.AdamOptimizer(learning_rate,
                                        0.95).minimize(loss,
                                                       global_step=global_step)

    init = tf.initialize_all_variables()
    sess = tf.InteractiveSession()
    sess.run(init)

    loss_train_record = list()  # np.zeros(n_epoch)
    loss_valid_record = list()  # np.zeros(n_epoch)
    start_time = time.gmtime()

    # early stopping
    best_valid = np.inf
    best_valid_epoch = 0

    current_epoch = 0

    while current_epoch < NUM_EPOCHS:
        # Shuffle data
        shuffled_index = np.arange(train_size)
        np.random.shuffle(shuffled_index)
        train_dataset = train_dataset[shuffled_index]
        train_labels = train_labels[shuffled_index]

        for step in xrange(train_size / BATCH_SIZE):
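# start_time above is a struct_time, which cannot be subtracted directly;
# converting with calendar.timegm gives elapsed seconds that gmtime can
# then format (for runs under 24 hours):
import calendar
import time

start_time = time.gmtime()
elapsed = calendar.timegm(time.gmtime()) - calendar.timegm(start_time)
print(time.strftime('%H:%M:%S', time.gmtime(elapsed)))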
Example #60
0
                              silence=True,
                              name_provider=settings.name_provider)
        else:
            settings.log('[%s] >>>>>>>%s<<<<<<<' %
                         (settings.name_provider_clean, browser.status))
            settings.dialog.notification(settings.name_provider,
                                         browser.status, settings.icon, 1000)
    del settings
    del browser


if Addon().getSetting('service') == 'true':
    persistent = Addon().getSetting('persistent')
    name_provider = re.sub(
        '.COLOR (.*?)]', '',
        Addon().getAddonInfo('name').replace('[/COLOR]', ''))
    every = 28800  # seconds
    previous_time = time()
    log("[%s]Update Service starting..." % name_provider)
    update_service()
    while (not abortRequested) and persistent == 'true':
        if time() >= previous_time + every:  # verification
            previous_time = time()
            update_service()
            log('[%s] Update List at %s' %
                (name_provider, asctime(localtime(previous_time))))
            log('[%s] Next Update in %s' %
                (name_provider, strftime("%H:%M:%S", gmtime(every))))
        sleep(500)
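# gmtime(every) renders the 28800-second interval as a clock time, which is
# why the "Next Update in" log reads 08:00:00:
from time import gmtime, strftime

print(strftime("%H:%M:%S", gmtime(28800)))  # 08:00:00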