Example #1
 def run(self,max_wait_time=60*60):
     self.log_file='%s/Tracking/SlurmTracker.log'%my_path() 
     self.pickle_file='%s/Tracking/SlurmTracker.pickle'%my_path()
     self.max_wait_time=max_wait_time
     time.sleep(10)
     while True:
         self.last_check=time.ctime()
         log=open(self.log_file,'w+')
         #Kill daemon when pidfile is erased
         pid = self.checkpid()
         if not pid:
             log.write("Daemon killed remotely.\n")
             log.write("Daemon stop at %s\n"%time.ctime(time.time()))              
             log.close()
             break
         
         #List jobs
         log.write("Job list at %s\n"%time.ctime(time.time()))
         self.listjobs()
         log.write('\n'.join(['\t'.join([str(j.jobid),j.state]) for j in self.joblist])+'\n')
        
         #Check every job         
         self.checkjobs()             
         if len(self.joblist)==0:
             log.close()
             break
         with open(self.pickle_file,'wb') as f:
             pickle.dump(self,f)
         log.write('Waiting %i seconds\n'%(self.wait_time+10))
         log.close()  
         time.sleep(10+self.wait_time) #If end near, check in 10s
     log=open(self.log_file,'a+')        
     log.write("No more processes active, Daemon stop at %s\n"%time.ctime(time.time()))  
     log.close()
     os.remove(self.pickle_file)
Example #2
 def __init__(self,
              datestart, datestop, n_dates, columns,
              statusfile=None,
              status_interval=None,
              outfile=None,
              max_size=None,
              ):
     self.statusfile = statusfile
     self.status_interval = status_interval
     self.outfile = outfile
     self.max_size = max_size
     self.total_rows = n_dates
     self.percent_complete = 0
     self.row_interval = 100       # Check time every 100 rows
     self.last_time = 0.0
     self.process_start = time.ctime()
     self.current_row = 0
     self.current_time = time.ctime()
     self.datestart = Chandra.Time.DateTime(datestart).date
     self.datestop = Chandra.Time.DateTime(datestop).date
     self.columns = ' '.join([x.name for x in columns])
     self.error = None
     self.filesize = 0
     self.print_attrs = ('current_row', 'total_rows', 'percent_complete',
                         'process_start', 'current_time',
                         'datestart', 'datestop', 
                         'columns', 'status', 'error')
Example #3
def run_debugger(testname, pythonfile, pydb_opts='', args='',
                 outfile=None):
    global srcdir, builddir, pydir

    rightfile   = os.path.join(srcdir, 'data', "%s.right" % testname)

    os.environ['PYTHONPATH']=os.pathsep.join(sys.path)
    cmdfile     = os.path.join(srcdir, "%s.cmd"   % testname)
    outfile     = "%s.out" % testname
    outfile_opt = '--output=%s ' % outfile

    # print "builddir: %s, cmdfile: %s, outfile: %s, rightfile: %s" % \
    # (builddir, cmdfile, outfile, rightfile)

    if os.path.exists(outfile): os.unlink(outfile)

    cmd = "%s --command %s %s %s %s %s" % \
          (pydb_path, cmdfile, outfile_opt, pydb_opts, pythonfile, args)
    
    os.system(cmd)
    fromfile  = rightfile
    fromdate  = time.ctime(os.stat(fromfile).st_mtime)
    fromlines = open(fromfile, 'U').readlines()
    tofile    = outfile
    todate    = time.ctime(os.stat(tofile).st_mtime)
    tolines   = open(tofile, 'U').readlines()
    diff = list(difflib.unified_diff(fromlines, tolines, fromfile,
                                     tofile, fromdate, todate))
    if len(diff) == 0:
        os.unlink(outfile)
    for line in diff:
        print line,
    return len(diff) == 0
Example #4
def main():	
	time1 = time.time()
	print time.ctime()

	get_column_urls()
	
	# use 5 threads to get article_urls
	for i in xrange(WORKER_THREAD_NUM):
		producer_thread = threading.Thread(target=get_article_urls)
		producer_thread.setDaemon(True)
		producer_thread.start()
		producer_threads.append(producer_thread)
	for producer_thread in producer_threads:
		producer_thread.join()

	# after getting all article_urls , start 'worker'
	for i in xrange(WORKER_THREAD_NUM):
		thread = threading.Thread(target=worker)
		thread.setDaemon(True)   # need this??
		thread.start()
		threads.append(thread)
	for thread in threads:
		thread.join()

	time2 = time.time()
	print 'finished in %s seconds' % (time2-time1)
Example #5
    def get(self):
        settings = get_server_settings()
        secret = self.request.headers.get("X-Nuntiuz-Secret", None)
        if secret != settings.jabberSecret:
            logging.error("Received unauthenticated apple certificate request, ignoring ...")
            return
        app_id = self.request.get("id")
        if not app_id:
            return
        app = get_app_by_id(app_id)
        if not app:
            return

        if app.apple_push_cert_valid_until < now() + 30 * DAY:
            send_mail(settings.dashboardEmail,
                      settings.supportWorkers,
                      "The APN cert of %s is about to expire" % app_id,
                      "The APN cert of %s is valid until %s GMT" % (app_id, time.ctime(app.apple_push_cert_valid_until)))
        if app.apple_push_cert_valid_until < now() + 15 * DAY:
            logging.error("The APN cert of %s is valid until %s GMT" % (app_id, time.ctime(app.apple_push_cert_valid_until)))

        result = json.dumps(dict(cert=app.apple_push_cert, key=app.apple_push_key, valid_until=app.apple_push_cert_valid_until))
        self.response.headers['Content-Type'] = 'application/binary'
        _, data = encrypt_for_jabber_cloud(secret, result)
        self.response.write(data)
Example #6
def cmd_time(args):
  '''show autopilot time'''
  tusec = mpstate.master().field('SYSTEM_TIME', 'time_unix_usec', 0)
  if tusec == 0:
      print("No SYSTEM_TIME time available")
      return
  print("%s (%s)\n" % (time.ctime(tusec * 1.0e-6), time.ctime()))
Example #7
def func(args):
    # Get modification times
    fromdate = time.ctime(os.stat(args.FILE1).st_mtime)
    todate = time.ctime(os.stat(args.FILE2).st_mtime)

    # Open fromfile
    try:
        with open(args.FILE1, 'U') as fd:
            fromlines = fd.readlines()
    except IOError:
        print("Error opening file " + args.FILE1, file=sys.stderr)
        return

    # Open tofile
    try:
        with open(args.FILE2, 'U') as fd:
            tolines = fd.readlines()
    except IOError:
        print("Error opening file " + args.FILE2, file=sys.stderr)
        return

    # Create diff
    if args.unified:
        diff = difflib.unified_diff(fromlines, tolines, args.FILE1, args.FILE2,
                                    fromdate, todate, n=args.lines)
    elif args.ndiff:
        diff = difflib.ndiff(fromlines, tolines)
    elif args.html:
        diff = difflib.HtmlDiff().make_file(fromlines, tolines, args.FILE1,
                                            args.FILE2, context=args.context,
                                            numlines=args.lines)
    else:
        diff = difflib.context_diff(fromlines, tolines, args.FILE1, args.FILE2,
                                    fromdate, todate, n=args.lines)

    # we're using writelines because diff is a generator
    sys.stdout.writelines(diff)
Example #8
def time_cuo():
    # 10 digits (seconds)

    print(int(time.time()))

    # 13 digits (milliseconds)
    # 1521204495005
    print(int(time.time()*1000))

    # Convert a 13-digit (millisecond) timestamp to a time string
    t_1= 1540544495276/1000.00
    print('t_1', time.ctime(t_1))

    t_2= 1533825084
    print('t_2', time.ctime(t_2))

    t_s=1540544411037/1000.0
    t2 = 1503048528 * 1.00
    updateTime = 1504602551076 / 1000.00
    #t3 = time.ctime(t)
    t4 = time.ctime(t2)
    t5=time.ctime(t_s)
    print('t5',t5)

    #print('t3', t3)
    print('t4', t4)
    curr_d = datetime.datetime.fromtimestamp(updateTime)
    print('curr_d',curr_d)
    d_time = datetime.datetime.fromtimestamp(t_s)
    print(d_time)
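The 10-digit and 13-digit values above differ only in units: time.time() yields seconds, while many APIs report milliseconds, and both time.ctime() and datetime.datetime.fromtimestamp() expect seconds. A minimal sketch of the conversion, reusing one of the millisecond values from the example:

import datetime
import time

ms = 1540544495276                 # 13-digit timestamp: milliseconds since the epoch
s = ms / 1000.0                    # scale to seconds before formatting
print(time.ctime(s))
print(datetime.datetime.fromtimestamp(s))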
Example #9
def refresh_data():
    global metrics
    global targets_all
    global graphs_all
    logger.debug('refresh_data() start')
    try:
        stat_metrics = backend.stat_metrics()
        if last_update is not None and stat_metrics.st_mtime < last_update:
            nice_metrics_mtime = time.ctime(stat_metrics.st_mtime)
            nice_last_update = time.ctime(last_update)
            logger.debug('refresh_data() not needed. metrics.json mtime %s, last_update %s', nice_metrics_mtime, nice_last_update)
            return ("metrics.json is last updated %s, "
                    "rebuilding data structures wouldn't make sense cause they were "
                    "last rebuilt %s" % (nice_metrics_mtime, nice_last_update))
        (metrics, targets_all, graphs_all) = backend.update_data(stat_metrics)
        if 'metrics_file' in errors:
            del errors['metrics_file']
        logger.debug('refresh_data() end ok')
        return 'ok'
    except MetricsError, e:
        errors['metrics_file'] = (e.msg, e.underlying_error)
        logger.error("[%s] %s", e.msg, e.underlying_error)
        logger.error('refresh_data() failed')
        response.status = 500
        return "errors: %s" % ' '.join('[%s] %s' % (k, v) for (k, v) in errors.items())
Example #10
def get_repository_info(recipe_path):
    """This tries to get information about where a recipe came from.  This is different
    from the source - you can have a recipe in svn that gets source via git."""
    try:
        if exists(join(recipe_path, ".git")):
            origin = check_output_env(["git", "config", "--get", "remote.origin.url"],
                                      cwd=recipe_path)
            rev = check_output_env(["git", "rev-parse", "HEAD"], cwd=recipe_path)
            return "Origin {}, commit {}".format(origin, rev)
        elif isdir(join(recipe_path, ".hg")):
            origin = check_output_env(["hg", "paths", "default"], cwd=recipe_path)
            rev = check_output_env(["hg", "id"], cwd=recipe_path).split()[0]
            return "Origin {}, commit {}".format(origin, rev)
        elif isdir(join(recipe_path, ".svn")):
            info = check_output_env(["svn", "info"], cwd=recipe_path)
            server = re.search("Repository Root: (.*)$", info, flags=re.M).group(1)
            revision = re.search("Revision: (.*)$", info, flags=re.M).group(1)
            return "{}, Revision {}".format(server, revision)
        else:
            return "{}, last modified {}".format(recipe_path,
                                             time.ctime(os.path.getmtime(
                                                 join(recipe_path, "meta.yaml"))))
    except CalledProcessError:
        log.debug("Failed to checkout source in " + recipe_path)
        return "{}, last modified {}".format(recipe_path,
                                             time.ctime(os.path.getmtime(
                                                 join(recipe_path, "meta.yaml"))))
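Both return paths in the fallback build a human-readable "last modified" string from the mtime of the recipe's meta.yaml. A minimal standalone sketch of the same pattern, using the running script itself as a stand-in for the hypothetical recipe file:

import os
import time

path = __file__                             # any existing file; the original uses meta.yaml
stamp = time.ctime(os.path.getmtime(path))  # e.g. 'Fri Nov 30 10:12:01 2018'
print("{}, last modified {}".format(path, stamp))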
Example #11
def test2():
    now = time.strftime("%Y-%m-%d")
    print(now)
    print(type(now))
    now_time = datetime.datetime.now()
    print(now_time)
    print(type(now_time))
    today = time.strftime("_%Y_%m_%d")

    print(today)

    aa = datetime.datetime(2016, 8, 7)
    # Check which day of the week the given date is
    print(aa.weekday())

    print(str(int(time.time() * 1000)))
    t1 = time.time()
    print(t1)
    print(time.ctime())
    # t = 1490060308998 * 1.00 / 1000
    t = 1503048528 * 1.00
    print(time.ctime(t1))

    print(time.ctime(t))
    t3 = time.ctime(t)

    print(time.gmtime(t))
    # print(time.strptime(t3,'%Y-%m-%d'))
    print(type(t3))
Example #12
    def checkFilesChanged(self, files, unchanged_for = 60):
        now = time.time()
        file_too_new = False

        file_time = []
        for cur_file in files:

            # File got removed while checking
            if not os.path.isfile(cur_file):
                file_too_new = now
                break

            # File has changed in last 60 seconds
            file_time = self.getFileTimes(cur_file)
            for t in file_time:
                if t > now - unchanged_for:
                    file_too_new = tryInt(time.time() - t)
                    break

            if file_too_new:
                break

        if file_too_new:
            try:
                time_string = time.ctime(file_time[0])
            except:
                try:
                    time_string = time.ctime(file_time[1])
                except:
                    time_string = 'unknown'

            return file_too_new, time_string

        return False, None
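getFileTimes() is not shown in the snippet; judging by how file_time[0] and file_time[1] are fed to time.ctime(), it presumably returns the file's modification and change times. A minimal sketch of such a helper under that assumption (hypothetical name and behavior, not the original implementation):

import os
import time

def get_file_times(path):
    # Hypothetical stand-in for getFileTimes(): return [mtime, ctime] for path.
    st = os.stat(path)
    return [st.st_mtime, st.st_ctime]

mtime, ctime_ = get_file_times(__file__)
print(time.ctime(mtime), time.ctime(ctime_))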
Example #13
    def time_diff(self, session, clock="sys"):
        """
        Calculate system/hardware time difference between host and guest;

        :param session: ShellSession object;
        :param clock: 'hw' or 'sys'
        :return: time difference
        :rtype: float
        """
        time_command = self.params.get("%s_time_command" % clock)
        time_filter_re = self.params.get("%s_time_filter_re" % clock)
        time_format = self.params.get("%s_time_format" % clock)
        host_time, guest_time = self.get_time(session,
                                              time_command,
                                              time_filter_re,
                                              time_format)
        guest_ctime = time.ctime(guest_time)
        host_ctime = time.ctime(host_time)
        debug_msg = "Host %s Time: %s = %s epoch seconds " % (clock,
                                                              host_ctime,
                                                              host_time)
        logging.info(debug_msg)
        debug_msg = "Guest %s Time: %s = %s epoch seconds" % (clock,
                                                              guest_ctime,
                                                              guest_time)
        logging.info(debug_msg)
        return abs(host_time - guest_time)
Example #14
def timestamp ( ):

#*****************************************************************************80
#
## TIMESTAMP prints the date as a timestamp.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license. 
#
#  Modified:
#
#    06 April 2013
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    None
#
  import time

  t = time.time ( )
  print time.ctime ( t )

  return None
Example #15
def Socket_TestNext(ippool):
	global Flag, Wait_for_SSL, Succ, InProcess, isEnd 
	Lock.acquire()
	InProcess.append(1)
	Lock.release()
	while not isEnd:
		try:
			Lock.acquire()
			ip = ippool.pop(random.randrange(len(ippool)))
			Lock.release()
			if len(ippool)%1000 == 0:
				print len(Succ),"/",len(ippool), "/",len(Wait_for_SSL),"/",len(InProcess)
				print time.ctime(time.time())
			#print time.time()
			Soc = Socket_Test.Socket_Test(ip)
			if Soc[0] == True:
				Lock.acquire()
				Wait_for_SSL.append(Soc)
				Lock.release()
			gevent.sleep()
		except KeyboardInterrupt:
			isEnd = True
			return 0
		except (ValueError , IndexError):
			Lock.release()
			Flag = True
			Lock.acquire()
			InProcess.pop()
			Lock.release()
			return 0
	Lock.acquire()
	InProcess.pop()
	Lock.release()
	return 0
Example #16
File: cli.py Project: dmnks/dnf
    def _process_demands(self):
        demands = self.demands
        repos = self.base.repos

        if not demands.cacheonly:
            if demands.freshest_metadata:
                for repo in repos.iter_enabled():
                    repo.md_expire_cache()
            elif not demands.fresh_metadata:
                for repo in repos.values():
                    repo.md_lazy = True

        if demands.sack_activation:
            lar = self.demands.available_repos
            self.base.fill_sack(load_system_repo='auto',
                                load_available_repos=lar)
            if lar:
                repos = list(self.base.repos.iter_enabled())
                if repos:
                    mts = max(repo.metadata.timestamp for repo in repos)
                    # do not bother users with fractions of seconds
                    age = int(min(repo.metadata.age for repo in repos))
                    for repo in repos:
                        logger.debug(_("%s: using metadata from %s."),
                                     repo.id,
                                     time.ctime(repo.metadata.md_timestamp))
                    if age != 0:
                        logger.info(_("Last metadata expiration check: "
                                    "%s ago on %s."),
                                    datetime.timedelta(seconds=age),
                                    time.ctime(mts))
            self.base.plugins.run_sack()
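The "Last metadata expiration check" message pairs a human-readable age (datetime.timedelta(seconds=age)) with the absolute timestamp from time.ctime(mts). A minimal sketch of the same formatting with made-up numbers:

import datetime
import time

mts = time.time() - 754        # hypothetical newest metadata timestamp
age = int(time.time() - mts)   # whole seconds, as in the snippet
print("Last metadata expiration check: %s ago on %s."
      % (datetime.timedelta(seconds=age), time.ctime(mts)))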
Example #17
  def get(self):
    """Responds to a single request for performance counter data.  Each request has two required
    parameters in the query string, 'start' and 'end', which specify the beginning and end of the
    time range to be queried.  The times should be expressed as the number of seconds since the
    unix epoch.
    """
    start_time = float(self.get_argument('start'))
    end_time = float(self.get_argument('end'))

    # Select an appropriate interval resolution based on the requested time span.
    selected_interval = metric.LOGS_INTERVALS[-1]
    group_key = metric.Metric.EncodeGroupKey(metric.LOGS_STATS_NAME, selected_interval)
    logging.info('Query performance counters %s, range: %s - %s, resolution: %s'
                  % (group_key, time.ctime(start_time), time.ctime(end_time), selected_interval.name))

    self.set_header('Content-Type', 'application/json; charset=UTF-8')

    metrics = list()
    start_key = None
    while True:
      new_metrics = yield gen.Task(metric.Metric.QueryTimespan, self._client, group_key,
                                   start_time, end_time, excl_start_key=start_key)
      if len(new_metrics) > 0:
        metrics.extend(new_metrics)
        start_key = metrics[-1].GetKey()
      else:
        break
    data = {'group_key': group_key, 'start_time': start_time, 'end_time': end_time}
    data['data'] = _SerializeMetrics(metrics)

    self.write(json.dumps(data))
    self.finish()
Example #18
 def getFooter(self):
     now = time.time()
     text =""
     text+= "<center>"
     text+= "<table width=100%% border=0 cellspacing=0 cellpadding=0>"
     text+= "    <tr><td colspan=2 height=10><spacer type=block height=10 width=0></td></tr>"
     text+= "    <tr><td colspan=2 bgcolor=#cc0066 height=5><spacer type=block height=5 width=0></td></tr>"
     text+= "    <tr><td colspan=2 height=5><spacer type=block height=5 width=0></td></tr>"
     text+= "    <TR>"
     text+= "        <TD><FONT size=1 >created:  %s</font></TD>" % time.ctime(now)
     #text+= "        <TD ALIGN=RIGHT><FONT size=1 >last modified: %s" % time.ctime(now)
     text+= "        <TD ALIGN=RIGHT><FONT size=1 >last modified: %s by" % time.ctime(now)
     #text+= "        <A STYLE=""color: #0000cc"" HREF=""mailto:[email protected]"">[email protected]</A></FONT></TD>"
     if sys.platform == 'win32':
         try:
             user = os.getenv('USERNAME')
             text+= "        <A STYLE=""color: #0000cc"">%s</A></FONT></TD>" % user
         except:
             text +="</FONT></TD>"
     else:
         try:
             user = os.getenv("USER")
             text+= "        <A STYLE=""color: #0000cc"">%s</A></FONT></TD>" % user
         except:
             text +="</FONT></TD>"
     text+= "    </TR>"
     text+= "</TABLE>"
     text+= "</center>"
     text+= "</BODY>"
     text+= "</HTML>"
     return text
Example #19
def insert_followers_id_2(table_name, target_twitter_id, followers_ids, start_n=0):
    con = pymysql.connect(host = db_info['db_host'],
                            user = db_info['db_user'], 
                            password = db_info['db_password'], 
                            db = db_info['db_name']);
    sqlbase = 'INSERT INTO '+ db_name+'.'+table_name+''' (twitter_Id,follower_Id)
                VALUES
                %s;'''    
    sql_values=' '
    sql_value_base='('+str(target_twitter_id)+', %s)' #changed something
    len_ids=len(followers_ids)-1
    start_time = time.ctime()    
    i=start_n
    if i==0:
        print("Let's do this", start_time)
    try:
        #for i in followers_ids:
        while i <= len_ids:
            
            #with con.cursor() as cursor:
                # insert id
            one_value=sql_value_base % (followers_ids[i])
            #print(one_value)                
                #print(sql)
                #cursor.execute(sql)
                #con.commit()
            if i!=len_ids:
                if i==0 or i%100!=0:
                    sql_values=sql_values+one_value+','                
                elif i%100==0 and i!=0:
                    sql_values=sql_values+one_value
                    sql = sqlbase%sql_values
                    #print(sql)                    
                    with con.cursor() as cursor:
                        cursor.execute(sql)                
                    con.commit()                
                    now_time=time.ctime()                
                    print('%s followers id from %s to %s inserted: %s'%(target_twitter_id, i-100,i,now_time))
                    sql_values=' '
            elif i==len_ids:
                sql_values=sql_values+one_value
                sql=sqlbase%sql_values
                #print(sql)                
                with con.cursor() as cursor:
                    cursor.execute(sql)                
                con.commit()
                now_time=time.ctime()                
                print('Finished at number: %s'%(i+1))
 
            i+=1
    except Exception as e:
        print("Error %d: %s" % (e.args[0],e.args[1]))
        print("End at number:", i)
     
    con.close()
    end_time = time.ctime()
    print('start:',start_time)
    print('end:', end_time)
    print('Total ids inserted:',i)
    return i
Example #20
def main():
    nfuncs = range(len(funcs))

    print '*** SINGLE THREAD'
    for i in nfuncs:
        print 'starting', funcs[i].__name__, \
            'at:', ctime()
        print funcs[i](n)
        print funcs[i].__name__, 'finished at:', \
            ctime()

    print '\n*** MULTIPLE THREADS'
    threads = []
    for i in nfuncs:
        t = MyThread(funcs[i], (n,),
                     funcs[i].__name__)
        threads.append(t)

    for i in nfuncs:
        threads[i].start()

    for i in nfuncs:
        threads[i].join()
        print threads[i].getResult()

    print 'all DONE'
Example #21
 def run(self):
     print 'starting', self.name, 'at:', \
         ctime()
     res = apply(self.func, self.args)
     self.res = res
     print self.name, 'finished at:', \
         ctime()
Example #22
File: core.py Project: slushecl/dev
    def datread(self):
        """\
Reads in data from .dat or .npy file paths and if .dat is newer than .npy saves
it in .npy format for faster reading in future."""
        for vals in self.files:
            if vals['type'] == 'dat' and vals['dir'] == 'pull':
                # self.datfileloc = vals['dir']
                pth = vals['path']
                pthnpy = pth.replace('.dat', '.npy', 1)
                try:
                    print("""\
DAT File:\t{0}
DAT: time last modified:\t{2}
NPY File:\t{1}
NPY: time last modified:\t{3}""".format(pth, pthnpy, \
                                        time.ctime(path.getmtime(pth)), \
                                        time.ctime(path.getmtime(pthnpy))))
                    if path.getmtime(pth) <= path.getmtime(pthnpy):
                        print('DATFILE: is older than npy... Continue.\n')
                    else:
                        print('DATFILE: is newer than npy... Remaking.\n')
                        os.remove(pthnpy)
                except OSError:
                    pass
                if vals['dir'] == 'pull':
                    self.datcheck = True
                    self.path = pth
                    if path.exists(pthnpy):
                        self.dat = np.load(pthnpy)
                    else:
                        self.dat = np.loadtxt(pth)
                        np.save(pthnpy, self.dat)
Example #23
def up(cursor):
    to_build = _need_collxml(cursor)
    num_todo = len(to_build)

    batch_size = 100
    logger.info('collection.xml to generate: {}'.format(num_todo))
    logger.info('Batch size: {}'.format(batch_size))

    start = time.time()
    guesstimate = 0.01 * num_todo
    guess_complete = guesstimate + start
    logger.info('Completion guess: '
                '"{}" ({})'.format(time.ctime(guess_complete),
                                   timedelta(0, guesstimate)))

    num_complete = 0
    for batch in _batcher(to_build, batch_size):
        coll_idents = tuple([i[0] for i in batch])
        logger.debug('coll_idents {}'.format(coll_idents))

        for coll_ident in coll_idents:
            _build_collxml(coll_ident, cursor)

        cursor.connection.commit()
        num_complete += len(batch)
        percent_comp = num_complete * 100.0 / num_todo
        elapsed = time.time() - start
        remaining_est = elapsed * (num_todo - num_complete) / num_complete
        est_complete = start + elapsed + remaining_est
        logger.info('{:.1f}% complete '
                    'est: "{}" ({})'.format(percent_comp,
                                            time.ctime(est_complete),
                                            timedelta(0, remaining_est)))

    logger.info('Total runtime: {}'.format(timedelta(0, elapsed)))
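The progress line estimates completion by assuming the remaining work proceeds at the average rate of the completed batches: remaining_est = elapsed * (num_todo - num_complete) / num_complete, and time.ctime(start + elapsed + remaining_est) turns that into a wall-clock ETA. The same arithmetic with made-up numbers:

import time
from datetime import timedelta

start = time.time() - 120          # pretend the run started two minutes ago
num_todo, num_complete = 1000, 400
elapsed = time.time() - start
remaining_est = elapsed * (num_todo - num_complete) / num_complete
est_complete = start + elapsed + remaining_est
print('est: "{}" ({})'.format(time.ctime(est_complete), timedelta(0, remaining_est)))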
Example #24
def set_up_landmines(target):
    """Does the work of setting, planting, and triggering landmines."""
    out_dir = get_target_build_dir(builder(), target, platform() == "ios")

    landmines_path = os.path.join(out_dir, ".landmines")
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    new_landmines = get_landmines(target)

    if not os.path.exists(landmines_path):
        with open(landmines_path, "w") as f:
            f.writelines(new_landmines)
    else:
        triggered = os.path.join(out_dir, ".landmines_triggered")
        with open(landmines_path, "r") as f:
            old_landmines = f.readlines()
        if old_landmines != new_landmines:
            old_date = time.ctime(os.stat(landmines_path).st_ctime)
            diff = difflib.unified_diff(
                old_landmines,
                new_landmines,
                fromfile="old_landmines",
                tofile="new_landmines",
                fromfiledate=old_date,
                tofiledate=time.ctime(),
                n=0,
            )

            with open(triggered, "w") as f:
                f.writelines(diff)
        elif os.path.exists(triggered):
            # Remove false triggered landmines.
            os.remove(triggered)
Example #25
def MPrun(hostname, scriptfile): 

    '''
    Integrate resource allocation information and send it to salt-master
    :return: send info -> salt-master
    '''

    sys.stdout.write("[ %s ] Start to get the data...\n" % time.ctime())
    sys.stdout.flush()
    global sendinfo
    sendinfo = {} 
    for f in os.listdir(scriptfile):
        if os.path.isfile("%s/%s" % (scriptfile, f)):
            result = commands.getstatusoutput("/usr/bin/python %s/%s" % (scriptfile, f))
            if result[0] == 0 and isinstance(eval(result[1]), dict):
                for k in eval(result[1]).keys():
                    sendinfo[k] = eval(result[1])[k]
            else:
                sys.stdout.write("[ %s ] ERROR : Please check the script for %s and the script return value must be dictionary type\n" % (time.ctime(), f))
                sys.stdout.flush()
                time.sleep(0.5)
                continue
        else:
            sys.stdout.write("[ %s ] ERROR : No such file %s\n" % (time.ctime(), f))
            sys.stdout.flush()
            time.sleep(0.5)
            continue
    sys.stdout.write("\n[ %s ] system resource configuration information :\n%s\n\n" % (time.ctime(), sendinfo))
    sys.stdout.flush()
    sendsalt()

    return
Example #26
def download_list(srcdirs = [], srcfiles = [], hostname = 'http://127.0.0.1'):
    dstdirs, dstfiles = visitdir(dstmonitorpath, dstwwwroot)

    downloadfiles = set(srcfiles) - set(dstfiles)
    #rmdirs = set(dstdirs) - set(srcdirs)
    rmfiles = set(dstfiles) - set(srcfiles)

    logger.warn("rmfiles count=%s", len(rmfiles))
    download_count = len(downloadfiles)
    logger.warn("download tasks count = %s", download_count)

    expires_time = WHOLE_SYNC_TASK_EXPIRES_TIME + 3600

    for f, sz in rmfiles:
        try:
            file = join(dstdir, f)
            localfilesz = None
            if os.path.exists(file):
                localfilesz = getsize(file)
            else:
                logger.info('file=%s not exist, skip', file)
                continue

            last_modified_time = os.path.getmtime(file)
            time_now = time.time()

            if time_now - last_modified_time  < expires_time:
                logger.info('file: %s now_time(%s) - last_modified_time(%s) = %ss less than whole_sync_task_expires_time(%ss), skip',
                             file, time.ctime(time_now), time.ctime(last_modified_time), time_now - last_modified_time, expires_time)
                continue


            is_skip = False
            for dlf, dlsz in downloadfiles:
                if f == dlf:
                    logger.info('file:%s is in download list, suggest_size(%s) =? scan_size(%s), real_size:%s, skip',
                                f, dlsz, sz, localfilesz)
                    is_skip = True
                    break

            if not is_skip:
                logger.info('Going to rm file: %s, filesize=%s', file, localfilesz)
                rmfile(None, file)
        except Exception as e:
            logger.exception('check rm file: %s exception', f)


    if download_count > 0:
        ThreadPool.initialize()
        for (f, filesize) in downloadfiles:
            url = join(hostname, f)
            ThreadPool.add_task_with_param(download, url)
        ThreadPool.wait_for_complete(timeout=3600*10)

        logger.warn("complete downloads")
    
        ThreadPool.clear_task()
        ThreadPool.stop()

    return 'OK'
Example #27
def set_up_landmines(target, new_landmines):
  """Does the work of setting, planting, and triggering landmines."""
  out_dir = get_target_build_dir(landmine_utils.builder(), target,
                                 landmine_utils.platform() == 'ios')

  landmines_path = os.path.join(out_dir, '.landmines')
  if not os.path.exists(out_dir):
    os.makedirs(out_dir)

  if not os.path.exists(landmines_path):
    with open(landmines_path, 'w') as f:
      f.writelines(new_landmines)
  else:
    triggered = os.path.join(out_dir, '.landmines_triggered')
    with open(landmines_path, 'r') as f:
      old_landmines = f.readlines()
    if old_landmines != new_landmines:
      old_date = time.ctime(os.stat(landmines_path).st_ctime)
      diff = difflib.unified_diff(old_landmines, new_landmines,
          fromfile='old_landmines', tofile='new_landmines',
          fromfiledate=old_date, tofiledate=time.ctime(), n=0)

      with open(triggered, 'w') as f:
        f.writelines(diff)
    elif os.path.exists(triggered):
      # Remove false triggered landmines.
      os.remove(triggered)
Example #28
def github_list_pull_requests(urls, numbers_only=False):
    """
    Returns the pull requests numbers.

    It returns a tuple of (nonmergeable, mergeable), where "nonmergeable"
    and "mergeable" are lists of the pull requests numbers.
    """
    pulls = github_get_pull_request_all(urls)
    formated_pulls = []
    print "Total pull count", len(pulls)
    sys.stdout.write("Processing pulls...")
    for pull in pulls:
        n = pull["number"]
        sys.stdout.write(" %d" % n)
        sys.stdout.flush()
        pull_info = github_get_pull_request(urls, n)
        mergeable = pull_info["mergeable"]
        if pull["head"]["repo"]:
            repo = pull["head"]["repo"]["html_url"]
        else:
            repo = None
        branch = pull["head"]["ref"]
        created_at = pull["created_at"]
        created_at = time.strptime(created_at, "%Y-%m-%dT%H:%M:%SZ")
        created_at = time.mktime(created_at)
        username = pull["head"]["user"]["login"]
        user_info = github_get_user_info(urls, username)
        author = "\"%s\" <%s>" % (user_info.get("name", "unknown"),
                                  user_info.get("email", ""))
        formated_pulls.append((created_at, n, repo, branch, author, mergeable))
    formated_pulls.sort(key=lambda x: x[0])
    print "\nPatches that cannot be merged without conflicts:"
    nonmergeable = []
    for created_at, n, repo, branch, author, mergeable in formated_pulls:
        if mergeable: continue
        nonmergeable.append(int(n))
        if numbers_only:
            print n,
        else:
            print "#%03d: %s %s" % (n, repo, branch)
            print unicode("      Author   : %s" % author).encode('utf8')
            print "      Date     : %s" % time.ctime(created_at)
    if numbers_only:
        print
    print
    print "-"*80
    print "Patches that can be merged without conflicts:"
    mergeable_list = []
    for last_change, n, repo, branch, author, mergeable in formated_pulls:
        if not mergeable: continue
        mergeable_list.append(int(n))
        if numbers_only:
            print n,
        else:
            print "#%03d: %s %s" % (n, repo, branch)
            print unicode("      Author   : %s" % author).encode('utf8')
            print "      Date     : %s" % time.ctime(last_change)
    if numbers_only:
        print
    return nonmergeable, mergeable_list
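The snippet turns GitHub's ISO 8601 created_at strings into epoch values with time.strptime() plus time.mktime(), then renders them with time.ctime(). A minimal sketch of that round trip with a hypothetical timestamp (note that mktime() interprets the parsed struct as local time, just as the code above does):

import time

created_at = "2013-04-06T12:30:00Z"   # hypothetical GitHub-style timestamp
parsed = time.strptime(created_at, "%Y-%m-%dT%H:%M:%SZ")
epoch = time.mktime(parsed)           # interpreted as local time
print(time.ctime(epoch))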
Example #29
    def run_brute_force(self, keys):
        """You must set the parameter self.logfile before calling this
        method.  This should probably happen in the __init__ method of
        a derived class."""
        self.bfparams = copy.copy(self.params)
        self.build_param_mat()
        self.build_ind_mat()

        self.bflabels = self.bfkeys+['e','p1','p2']

        self.clear_log()

        start_line = 'start time: ' + time.ctime()
        print(start_line)
        self.str_out(start_line+'\n')

        for row in self.bfinds:
            self.bfparams = self.set_params(row, self.bfparams)
            e, th_bode, a_v_bode = self.cost(self.bfparams, extra=1)
##             e, th_bode, a_v_bode = _cost(self.bfparams,
##                                        extra=1, \
##                                        func=self.func, \
##                                        con_dict=self.con_dict)
            p1, p2 = self.find_peaks(a_v_bode)
            self.save_row(self.bfparams, e, p1, p2)


        print('end time: ' + time.ctime())
Example #30
async def test_run_task_timeout(context):
    """`run_task` raises `ScriptWorkerTaskException` and kills the process
    after exceeding `task_max_timeout`.
    """
    temp_dir = os.path.join(context.config['work_dir'], "timeout")
    context.config['task_script'] = (
        sys.executable, TIMEOUT_SCRIPT, temp_dir
    )
    # With shorter timeouts we hit issues with the script not managing to
    # create all 6 files
    context.config['task_max_timeout'] = 5

    pre = arrow.utcnow().timestamp
    with pytest.raises(ScriptWorkerTaskException):
        await swtask.run_task(context, noop_to_cancellable_process)
    post = arrow.utcnow().timestamp
    # I don't love these checks, because timing issues may cause this test
    # to be flaky. However, I don't want a non- or long- running test to pass.
    # Did this run at all?
    assert post - pre >= 5
    # Did this run too long? e.g. did it exit on its own rather than killed
    # If this is set too low (too close to the timeout), it may not be enough
    # time for kill_proc, kill_pid, and the `finally` block to run
    assert post - pre < 10
    # Did the script generate the expected output?
    files = {}
    for path in glob.glob(os.path.join(temp_dir, '*')):
        files[path] = (time.ctime(os.path.getmtime(path)), os.stat(path).st_size)
        print("{} {}".format(path, files[path]))
    for path in glob.glob(os.path.join(temp_dir, '*')):
        print("Checking {}...".format(path))
        assert files[path] == (time.ctime(os.path.getmtime(path)), os.stat(path).st_size)
    assert len(list(files.keys())) == 6
    # Did we clean up?
    assert context.proc is None
Example #31
dbc.execute("CREATE TABLE IF NOT EXISTS Pings (url TEXT, time TEXT, response INTEGER)")


#
# Check if the database is valid
#
if not isSQLite3(dbFile):
    sys.stderr.write("The file '{}' is not a valid SQLite database.\n".format(dbFile))
    sys.exit(1)



#
# get current time for logging
#
rTime = time.ctime(time.time())

try:
    # url must start with "http://" or the request fails
    print("Connecting to: {}".format(url))
    r = requests.head(url, timeout=5)
    code = r.status_code

except requests.ConnectionError:
    code = 0



#
# insert values into database
#
Example #32
    def __init__(
        self,
        energy_levels,
        isotope,
        levelsfmt,  # ='cdsd-pc',
        use_cached=True,
        use_json=None,
        verbose=True,
    ):

        # %% Init

        # Initialize PartitionFunctionCalculator for this electronic state
        ElecState = ElectronicState("CO2", isotope, "X", "1Σu+")
        super(PartFuncCO2_CDSDcalc, self).__init__(ElecState)

        # Check inputs ('return' is not mentioned in the signature; it will just return
        # after the cache name is given)
        assert use_cached in [True, False, "regen", "force", "return"]
        if isotope not in [1, 2]:
            raise ValueError(
                "CDSD Energies not defined for isotope: {0}".format(isotope))
        if use_json is not None:
            warn(
                DeprecationWarning(
                    "use_json replaced with faster HDF5-based use_cached"))
        # Get vibrational level definitions that match Energy Database (in particular
        # how Evib and Erot are calculated)
        # This is needed to be able to match the levels in the Line Database and
        # the levels in the Energy database
        if levelsfmt == "cdsd-p":
            viblvl_label = "p"
        elif levelsfmt == "cdsd-pc":
            viblvl_label = "pc"
        elif levelsfmt == "cdsd-pcN":
            viblvl_label = "pcN"
        elif levelsfmt == "cdsd-hamil":
            viblvl_label = "pcJN"
        elif levelsfmt is None:
            # don't label the levels. We won't be able to use the EnergyDatabase to fetch
            # vibrational energies for lines, however it can still be used to
            # calculate Partition functions independently from a Spectrum calculation
            viblvl_label = None
        else:
            raise ValueError(
                "Unknown Energy database format: levelsfmt = `{0}`".format(
                    levelsfmt) +
                ". Use one of: `cdsd-p`, `cdsd-pc`, `cdsd-pcN`,`cdsd-hamil`")

        # Store defaults
        self.verbose = verbose
        self.use_cached = use_cached
        self.levelsfmt = levelsfmt
        self.viblvl_label = viblvl_label
        self.last_modification = time.ctime(
            getmtime(getTestFile(r"co2_cdsd_hamiltonian_fragment.levels")))
        if verbose >= 2:
            print("Last modification time: {0}".format(self.last_modification))

        # Get variables to store in metadata  (after default values have been set)
        molecule = "CO2"  # will be stored in cache file metadata
        last_modification = time.ctime(
            getmtime(getTestFile(r"co2_cdsd_hamiltonian_fragment.levels")))

        _discard = [
            "self",
            "energy_levels",
            "verbose",
            "ElecState",
            "electronic_state",
            "use_json",
            "use_cached",
        ]
        # (dev) locals() automatically stores all variables: levelsfmt, viblvl_label, etc.
        metadata = filter_metadata(locals(), discard_variables=_discard)

        # %% Get levels
        # Function of use_cached value:
        # ... if True, use (and generate if it doesn't exist) the cache file.
        # ... if 'regen', regenerate the cache file. If 'force', raise an error
        # ... if the file doesn't exist.
        # If file is deprecated, regenerate it unless 'force' was used

        # Load cache file if exists

        cachefile = energy_levels + ".h5"
        self.cachefile = cachefile

        # If return, return after cachefile generated (used for tests)
        if use_cached == "return":
            return

        df = load_h5_cache_file(
            cachefile,
            use_cached,
            metadata=metadata,
            current_version=radis.__version__,
            last_compatible_version=OLDEST_COMPATIBLE_VERSION,
            verbose=verbose,
        )

        if df is None:  # Read normal file
            df = pd.read_csv(energy_levels, comment="#", delim_whitespace=True)
            df = self._add_degeneracies(df)
            df = self._add_levels(df)

        self.df = df  # Store

        if use_cached and not exists(cachefile):
            save_to_hdf(
                self.df,
                cachefile,
                metadata=metadata,
                version=radis.__version__,
                key="df",
                overwrite=True,
                verbose=verbose,
            )
def super_player(file_, time):
	for i in range(2):
		print("I was listening to %s !%s" % (file_, ctime()))
		sleep(3)
	t = threading.Thread(target=super_player, args=(file_, time))
	threads.append(t)



# # Create the list of threads
# threads = []
#
# # Create thread 1
# t1 = threading.Thread(target=music, args=('葫芦娃', 2))
# threads.append(t1)
#
# # Create thread 2
# t2 = threading.Thread(target=movie, args=('蜡笔小新', 2))
# threads.append(t2)

if __name__ == '__main__':
	# music('葫芦娃', 3)
	# movie("蜡笔小新", 3)
	# print("over  %s" % ctime())
	for t in files:
		# t.start()
		threads[t].start()

	# Daemon threads
	for t in files:
		# t.join()
		threads[t].join()
	print("结束! %s" % ctime())

    @property
    def vertices(self):
        for vertex in self.bmesh.vertices:
            yield vertex.co

    @property
    def polygons(self):
        pass

    @polygons.setter
    def polygons(self, value):
        self.polygons.append(Polygon(**value))


if __name__ == '__main__':
    before = time.ctime(time.time())

    object = bpy.context.active_object
    bmesh = object.data

    export_mesh = ProcessBMesh(bmesh)

    # Create a rhom mesh
    mesh = rhom.Mesh()

    # Generate the PointTable values
    for x, y, z in export_mesh.vertices:
        mesh.addPoint(rhom.Point(x, y, z))

    # Write the mesh to disk
    rhom.writeMesh('Foo.obn', mesh)
Example #36
def show_time(request):
    t1 = time.ctime()
    return render(request, "index.html", {"time1": t1})
Example #37
'''
Exercise: time function example 1.
'''

if __name__ == '__main__':
    import time
    print(time.ctime(time.time()))
    print(time.asctime(time.localtime(time.time())))
    print(time.asctime(time.gmtime(time.time())))
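For a fixed epoch value t, time.ctime(t) is documented to be equivalent to time.asctime(time.localtime(t)), while time.asctime(time.gmtime(t)) renders the same instant in UTC, which is exactly what the three prints above show. A minimal check:

import time

t = time.time()
assert time.ctime(t) == time.asctime(time.localtime(t))  # same local-time rendering
print(time.asctime(time.gmtime(t)))                      # same instant, rendered as UTC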
Example #38
 def run(self):
     logging.info("starting %s at: %s" % (self.name, time.ctime()))
     self.res = self.func(*self.args)
     logging.info("finished %s at: %s" % (self.name, time.ctime()))
Example #39
#Last modified: 11/30/2017 by Jonathan Engelbert
#
### No Known Issues
################################################################################################

import arcpy
from arcpy import env
import sys, string, os, time, datetime

# SET TO OVERWRITE
arcpy.env.overwriteOutput = True

# Logging script
myStartDate = str(datetime.date.today())
myStartTime = time.clock()
theStartTime = time.ctime()
print theStartTime

try:
    myStartDate = str(datetime.date.today())
    myStartTime = time.clock()
    theStartTime = time.ctime()
    # thisfile = os.path.realpath(__file__)
    file = open("C:/ETLs/TIM/TIMUpdates/Logs/" + myStartDate + "Transbase2" + ".txt", "w")
    file.write(theStartTime + "\n")
    when =datetime.date.today()
    theDate = when.strftime("%d")
    theDay=when.strftime("%A")
    print theDay

 ################################################################################################
Example #40
def main():  # pylint: disable=too-many-statements,too-many-branches,too-many-locals
    """main routine"""

    if int(platform.python_version().split('.')[0]) < 3:
        LOGGER.fatal("%s needs at least python version 3, currently %s",
                     ME, platform.python_version())
        sys.exit(1)

    start_time = int(time.time())
    _parser = ArgumentParser()
    _parser.add_argument("-c", "--cfile", dest="configfile", default=ME+".cfg",
                         help="Configuration file", metavar="FILE", required=True)
    _parser.add_argument("-v", "--verbosity", action="count", default=0,
                         help="increase output verbosity overriding the default")
    _parser.add_argument("-p", "--parameter", action="store",
                         help="show parameter from configfile")
    _args = _parser.parse_args()

    set_logfile(LOGGER, _args.configfile+".log")

    _config = get_config(_args.configfile, ME)

    if _args.parameter:
        if _args.parameter == 'password':
            print('parameter {}: {}\n'.format(_args.parameter,
                                              decrypted(_config[_args.parameter+'_enc'])))
        else:
            print('parameter {}: {}\n'.format(
                _args.parameter, _config[_args.parameter]))
        sys.exit(0)

    if _args.verbosity:
        newLevel = logging.getLogger().getEffectiveLevel() - (_args.verbosity*10)

        if newLevel < 0:
            newLevel = 0
        LOGGER.warning("Changing loglevel from %d to %d", logging.getLogger().getEffectiveLevel(),
                       newLevel)
        logging.getLogger().setLevel(newLevel)
    LOGGER.debug("log level %d", logging.getLogger().getEffectiveLevel())
    LOGGER.warning("start python-%s %s-%s pid=%s Connecting ...\n",
                   platform.python_version(), ME, VERSION, os.getpid()
                   )

    if _config['password']:
        LOGGER.warning(
            "first encrypted the plaintext password and removed from config\n")
    # we need the password ....
    _config['password'] = decrypted(_config['password_enc'])

# add a few seconds extra to allow the driver timeout handling to do its job.
# for example, cx_oracle has a cancel routine that we call after a timeout. If
# there is a network problem, the cancel gets a ORA-12152: TNS:unable to send break message
# setting this defaulttimeout should speed this up
    socket.setdefaulttimeout(_config['sqltimeout']+3)

    LOGGER.warning("%s found db_type=%s, driver %s; checking for driver\n",
                   ME,
                   _config['db_type'], _config['db_driver'])

    if not os.path.exists(
            os.path.join(_config['checks_dir'], _config['db_type'])):
        raise ValueError("db_type "+_config['db_type'] +
                         " does not exist in the "+_config['checks_dir']+" directory")
    db_driver = load_driver(_config)
    driver_errors = load_driver_errors(_config)
    db_connections = load_db_connections(_config)
    LOGGER.info(db_connections)
    LOGGER.info(driver_errors)

    LOGGER.info("hostname in zabbix: %s", _config['hostname'])
    #  hide password, hoping username != password ;-)
    LOGGER.info("connect string    : %s\n",
                db_connections.connect_string(_config).replace(_config['password'],
                                                               '******'))
    LOGGER.info('using sql_timeout : %ds\n', _config['sqltimeout'])
    LOGGER.info("out_file          : %s\n", _config['out_file'])

    if _config['site_checks']:
        LOGGER.info("site_checks       : %s\n", _config['site_checks'])

    if LOG_CONF:
        sys_files = 4
    else:
        sys_files = 3
    check_files = [{'name': __file__, 'lmod': os.path.getmtime(__file__)},
                   {'name': db_connections.__file__,
                    'lmod': os.path.getmtime(db_connections.__file__)},
                   {'name': driver_errors.__file__,
                    'lmod': os.path.getmtime(driver_errors.__file__)}
                   ]

    if LOG_CONF:
        check_files.append(
            {'name': LOG_CONF, 'lmod': os.path.getmtime(LOG_CONF)})

    for i in range(sys_files):
        to_outfile(_config,
                   "{}[checks,{},name]".format(ME, i), check_files[i]['name'])
        to_outfile(_config,
                   "{}[checks,{},lmod]".format(ME, i), int(check_files[i]['lmod']))

    conn_counter = 0
    conn_errors = 0
    query_counter = 0
    query_errors = 0

    sleep_c = 0
    sleep_s = 1
    prev_err = 0

    while True:
        try:
            for i in range(sys_files):
                if check_files[i]['lmod'] != os.stat(check_files[i]['name']).st_mtime:
                    LOGGER.warning("%s Changed, from %s to %s restarting ..\n",
                                   check_files[i]['name'],
                                   time.ctime(check_files[i]['lmod']),
                                   time.ctime(os.path.getmtime(check_files[i]['name'])))
                    os.execv(__file__, sys.argv)

            # reset list in case of a just new connection that reloads the config
            check_files = [{'name': __file__, 'lmod': os.path.getmtime(__file__)},
                           {'name': db_connections.__file__,
                            'lmod': os.path.getmtime(db_connections.__file__)},
                           {'name': driver_errors.__file__,
                            'lmod': os.path.getmtime(driver_errors.__file__)}]

            if LOG_CONF:
                check_files.append(
                    {'name': LOG_CONF, 'lmod': os.path.getmtime(LOG_CONF)})

            _config = get_config(_args.configfile, ME)
            _config['password'] = decrypted(_config['password_enc'])

            _start = timer()

            #  hide password, hoping username != password ;-)
            LOGGER.info('connecting to %s\n',
                        db_connections.connect_string(_config).replace(_config['password'],
                                                                       '******'))
            conn_has_cancel = False
            _conn = db_connections.connect(db_driver, _config)

            if "cancel" in dir(_conn):
                conn_has_cancel = True
            LOGGER.info(_conn)
            conn_counter += 1
            to_outfile(_config, ME+"[connect,status]", 0)
            _cursor = _conn.cursor()
            connect_info = db_connections.connection_info(_conn)
            LOGGER.info('connected db_url %s type %s db_role %s version %s\n'
                        '%s user %s %s sid,serial %d,%d instance %s as %s cancel:%s\n',
                        _config['db_url'], connect_info['instance_type'],
                        connect_info['db_role'],
                        connect_info['dbversion'],
                        datetime.datetime.fromtimestamp(time.time()),
                        _config['username'], connect_info['uname'],
                        connect_info['sid'],
                        connect_info['serial'],
                        connect_info['iname'],
                        _config['role'], conn_has_cancel)

            if connect_info['db_role'] in ["PHYSICAL STANDBY", "SLAVE"]:
                checks_file = os.path.join(_config['checks_dir'],
                                           _config['db_type'], "standby" +
                                           "." + connect_info['dbversion'] + ".cfg")
            else:
                checks_file = os.path.join(_config['checks_dir'],
                                           _config['db_type'],
                                           connect_info['db_role'].lower() + "." +
                                           connect_info['dbversion']+".cfg")

            _files = [checks_file]
            check_files.append({'name': checks_file, 'lmod': 0})

            if _config['site_checks']:
                for addition in _config['site_checks'].split(","):
                    addfile = os.path.join(_config['checks_dir'], _config['db_type'],
                                           addition + ".cfg")
                    check_files.append({'name': addfile, 'lmod': 0})
                    _files.extend([addfile])
            LOGGER.info('using checks from %s\n', _files)

            for checks_file in check_files:
                if not os.path.exists(checks_file['name']):
                    raise ValueError(
                        "Configfile " + checks_file['name'] + " does not exist")
            # all checkfiles exist

            sleep_c = 0
            sleep_s = 1
            prev_err = 0
            con_mins = 0
            open_time = int(time.time())

            while True:
                LOGGER.debug("%s while True\n", ME)

                if connect_info['db_role'] != db_connections.current_role(_conn, connect_info):
                    LOGGER.info("db_role changed from %s to %s",
                                connect_info['db_role'],
                                db_connections.current_role(_conn, connect_info))
                    # re connect to get the correct monitoring config again

                    break
                # keep this to compare for when to dump stats
                now_run = int(time.time())
                run_timer = timer()  # keep this to compare for when to dump stats
                # loading checks from the various checkfiles:
                need_to_load = "no"

                # pylint: disable=consider-using-enumerate

                for i in range(len(check_files)):  # indices 0 .. sys_files-1 are the script and its modules
                    try:
                        current_lmod = os.path.getmtime(check_files[i]['name'])
                    except OSError as _e:
                        LOGGER.warning("%s: %s\n",
                                       check_files[i]['name'], _e.strerror)
                        # ignore the error, maybe temporary due to an update
                        current_lmod = check_files[i]['lmod']

                    if check_files[i]['lmod'] != current_lmod:
                        if i < sys_files:  # it is the script, a module or LOG_CONF
                                           # that changed
                            LOGGER.warning("%s changed, from %s to %s restarting ...\n",
                                           check_files[i]['name'],
                                           time.ctime(check_files[i]['lmod']),
                                           time.ctime(current_lmod))
                            os.execv(__file__, sys.argv)
                        else:
                            if check_files[i]['lmod'] == 0:
                                LOGGER.info("checks loading %s\n",
                                            check_files[i]['name'])
                                need_to_load = "yes"
                            else:
                                LOGGER.warning("checks changed, reloading %s\n",
                                               check_files[i]['name'])
                                need_to_load = "yes"

                if need_to_load == "yes":
                    to_outfile(_config, ME + "[version]", VERSION)
                    to_outfile(
                        _config, ME + "[config,db_type]", _config['db_type'])
                    to_outfile(
                        _config, ME + "[config,db_driver]", _config['db_driver'])
                    to_outfile(
                        _config, ME + "[config,instance_type]", _config['instance_type'])
                    to_outfile(_config, ME + "[conn,db_role]",
                               connect_info['db_role'])
                    to_outfile(
                        _config, ME + "[conn,instance_type]", connect_info['instance_type'])
                    to_outfile(_config, ME + "[conn,dbversion]",
                               connect_info['dbversion'])
                    to_outfile(
                        _config, ME + "[connect,instance_name]", connect_info['iname'])
                    # sometimes the instance_name query follows within a second,
                    # missing the event, so give it some more time
                    time.sleep(3)
                    objects_list = []
                    sections_list = []
                    file_list = []
                    all_checks = []

                    for i in range(len(check_files)):
                        _e = collections.OrderedDict()
                        _e = {"{#CHECKS_FILE}": i}
                        file_list.append(_e)

                    files_json = '{\"data\":'+json.dumps(file_list)+'}'
                    to_outfile(_config, ME+".files.lld", files_json)

                    for i in range(sys_files, len(check_files)):
                        # #0 is executable that is also checked for updates
                        # #1 db_connections module
                        # #2 driver_errors module
                        # #3 LOG_CONF if it exists ...
                        # so, skip those and pick the real check_files
                        _checks = configparser.RawConfigParser()
                        try:
                            # use a context manager so the file is closed even when
                            # parsing fails
                            with open(check_files[i]['name'], 'r') as check_file:
                                to_outfile(_config, "{}[checks,{},name]".format(ME, i),
                                           check_files[i]['name'])
                                to_outfile(_config, "{}[checks,{},lmod]".format(ME, i),
                                           str(int(os.stat(check_files[i]['name']).st_mtime)))
                                try:
                                    _checks.read_file(check_file)
                                    to_outfile(_config, ME + "[checks," + str(i) +
                                               ",status]", 0)
                                except configparser.Error:
                                    to_outfile(_config, ME + "[checks," + str(i) +
                                               ",status]", 13)
                                    LOGGER.critical("file %s has parsing errors ->(13)\n",
                                                    check_files[i]['name'])
                        except IOError as io_error:
                            to_outfile(
                                _config, ME + "[checks," + str(i) + ",status]", 11)
                            LOGGER.critical("file %s IOError %s %s ->(11)\n",
                                            check_files[i]['name'],
                                            io_error.errno, io_error.strerror)

                        check_files[i]['lmod'] = os.stat(
                            check_files[i]['name']).st_mtime
                        all_checks.append(_checks)

                        for section in sorted(_checks.sections()):
                            sec_mins = int(_checks.get(section, "minutes"))

                            if sec_mins == 0:
                                LOGGER.info(
                                    "%s run at connect only\n", section)
                            else:
                                LOGGER.info("%s run every %d minutes\n",
                                            section, sec_mins)
                            # dump own discovery items of the queries per section
                            _e = collections.OrderedDict({"{#SECTION}": section})
                            sections_list.append(_e)
                            _x = dict(_checks.items(section))

                            for key, sqls in sorted(_x.items()):
                                if sqls and key != "minutes":
                                    _d = collections.OrderedDict(
                                        {"{#SECTION}": section, "{#KEY}": key})
                                    objects_list.append(_d)
                                    LOGGER.info("%s: %s\n",
                                                key,
                                                sqls[0: 60].
                                                replace('\n', ' ').replace('\r', ' '))
                    # checks are loaded now.
                    sections_json = '{\"data\":'+json.dumps(sections_list)+'}'
                    LOGGER.debug("lld key: %s json: %s\n",
                                 ME+".lld", sections_json)
                    to_outfile(_config, ME+".section.lld", sections_json)
                    rows_json = '{\"data\":'+json.dumps(objects_list)+'}'
                    LOGGER.debug("lld key: %s json: %s\n",
                                 ME+".lld", rows_json)
                    to_outfile(_config, ME + ".query.lld", rows_json)
                    # sqls can contain multiple statements per key. sqlparse to split them
                    # now. Otherwise use a lot of extra cycles when splitting at runtime
                    # all_sql = { (section, key): [statement, ...] }
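                    # e.g. (hypothetical keys) all_sql[("uptime", "inst.uptime")] might hold
                    # ["select 'inst.uptime', ...", "select ..."] after splitting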
                    all_sql = {}

                    for _checks in all_checks:
                        for section in sorted(_checks.sections()):
                            _x = dict(_checks.items(section))

                            for key, sqls in sorted(_x.items()):
                                if sqls and key != "minutes":
                                    all_sql[(section, key)] = []

                                    for statement in sqlparse.split(sqls):
                                        all_sql[(section, key)].append(
                                            statement)

                # checks discovery is also printed
                #
                # assume we are still connected; if not, an exception will tell the real story
                to_outfile(_config, ME + "[connect,status]", 0)
                to_outfile(_config, ME + "[uptime]",
                           int(time.time() - start_time))
                to_outfile(_config, ME + "[opentime]",
                           int(time.time() - open_time))

                # the connect status is only meaningful once a query has been executed ....

                for _checks in all_checks:
                    for section in sorted(_checks.sections()):
                        section_timer = timer()  # keep this to compare for when to dump stats
                        sec_mins = int(_checks.get(section, "minutes"))

                        if ((con_mins == 0 and sec_mins == 0) or
                                (sec_mins > 0 and con_mins % sec_mins == 0)):
                            # time to run the checks again from this section
                            _x = dict(_checks.items(section))
                            _cursor = _conn.cursor()

                            for key, sqls in sorted(_x.items()):
                                if sqls and key != "minutes":
                                    LOGGER.debug("%s section: %s key: %s\n",
                                                 ME, section, key)
                                    try:
                                        query_counter += 1

                                        if conn_has_cancel:
                                            # pymysql has no cancel but does have
                                            # timeout in connect
                                            sqltimeout = threading.Timer(
                                                _config['sqltimeout'],
                                                cancel_sql, [_conn, section, key])
                                            sqltimeout.start()
                                        _start = timer()

                                        for statement in all_sql[(section, key)]:

                                            LOGGER.debug("%s section: %s key: %s sql: %s\n",
                                                         ME, section, key, statement)
                                            _cursor.execute(statement)
                                        startf = timer()
                                        # output for the preparing queries is ignored;
                                        # output for the last query must include the
                                        # complete key and value
                                        rows = _cursor.fetchall()

                                        if conn_has_cancel:
                                            sqltimeout.cancel()

                                        if "discover" in section:
                                            objects_list = []

                                            for row in rows:
                                                _d = collections.OrderedDict()

                                                for col, col_desc in enumerate(_cursor.description):
                                                    _d[col_desc[0]] = row[col]
                                                objects_list.append(_d)
                                            rows_json = '{\"data\":' + \
                                                json.dumps(objects_list)+'}'
                                            LOGGER.debug("DEBUG lld key: %s json: %s\n", key,
                                                         rows_json)
                                            to_outfile(_config, key, rows_json)
                                            to_outfile(_config, ME +
                                                       "[query," + section + "," +
                                                       key + ",status]", 0)
                                        else:
                                            if rows and len(rows[0]) == 2:
                                                _config['section'] = section
                                                _config['key'] = key

                                                for row in rows:
                                                    to_outfile(
                                                        _config, row[0], row[1])
                                                to_outfile(_config, ME +
                                                           "[query," + section + "," +
                                                           key + ",status]", 0)
                                            elif not rows:
                                                to_outfile(_config, ME + "[query," +
                                                           section + "," +
                                                           key + ",status]", 0)
                                            else:
                                                LOGGER.critical('key=%s.%s ZBXDB-%d: '
                                                                'SQL format error: %s\n',
                                                                section, key, 2,
                                                                "expect key,value pairs")
                                                to_outfile(_config, ME +
                                                           "[query," + section + "," +
                                                           key + ",status]", 2)
                                        _config['section'] = ""
                                        _config['key'] = ""
                                        fetchela = timer() - startf
                                        elapsed_s = timer() - _start
                                        to_outfile(_config, ME + "[query," +
                                                   section + "," +
                                                   key + ",ela]", elapsed_s)
                                        to_outfile(_config, ME + "[query," +
                                                   section + "," +
                                                   key + ",fetch]", fetchela)
                                    # except (db_driver.DatabaseError,
                                        # socket.timeout) as dberr:
                                    except Exception as dberr:
                                        if conn_has_cancel:
                                            sqltimeout.cancel()
                                        ecode, emsg = driver_errors.db_errorcode(
                                            db_driver, dberr)

                                        elapsed_s = timer() - _start
                                        query_errors += 1
                                        to_outfile(_config, ME + "[query," +
                                                   section + "," +
                                                   key + ",status]", ecode)
                                        to_outfile(_config, ME + "[query," +
                                                   section + "," +
                                                   key + ",ela]", elapsed_s)
                                        LOGGER.info('key=%s.%s ZBXDB-%s: '
                                                    'Db execution error: %s\n',
                                                    section, key, ecode, emsg.strip())

                                        if driver_errors.db_error_needs_new_session(db_driver,
                                                                                    ecode):
                                            raise

                                        LOGGER.debug("%s commit\n", ME)
                                        _conn.commit()

                                        LOGGER.debug("%s committed\n", ME)
                            # end of this section's checks
                            to_outfile(_config, ME + "[query," + section + ",,ela]",
                                       timer() - section_timer)
                # release locks that might have been taken

                LOGGER.debug("%s commit 2\n", ME)

                _conn.commit()

                LOGGER.debug("%s committed.\n", ME)
                # dump metric for summed elapsed time of this run
                to_outfile(_config, ME + "[query,,,ela]",
                           timer() - run_timer)
                to_outfile(_config, ME + "[cpu,user]",
                           resource.getrusage(resource.RUSAGE_SELF).ru_utime)
                to_outfile(_config, ME + "[cpu,sys]",
                           resource.getrusage(resource.RUSAGE_SELF).ru_stime)
                to_outfile(_config, ME + "[mem,maxrss]",
                           resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
                # passed all sections

                if ((now_run - start_time) % 3600) == 0:
                    gc.collect()
                    # dump stats
                    LOGGER.info("connect %d times, %d fail; started %d queries, "
                                "%d fail memrss:%d user:%f sys:%f\n",
                                conn_counter, conn_errors, query_counter,
                                query_errors,
                                resource.getrusage(
                                    resource.RUSAGE_SELF).ru_maxrss,
                                resource.getrusage(
                                    resource.RUSAGE_SELF).ru_utime,
                                resource.getrusage(resource.RUSAGE_SELF).ru_stime)
                # try to keep activities on the same starting second:
                sleep_time = 60 - ((int(time.time()) - start_time) % 60)

                LOGGER.debug("Sleeping for %d seconds\n", sleep_time)
                time.sleep(sleep_time)
                con_mins = con_mins + 1  # not really mins since the checks could
                #                       have taken longer than 1 minute to complete
            # end while True
        # except (db_driver.DatabaseError, socket.timeout, ConnectionResetError) as dberr:
        except Exception as dberr:
            err_code, err_msg = driver_errors.db_errorcode(db_driver, dberr)
            elapsed_s = timer() - _start
            to_outfile(_config, ME + "[connect,status]", err_code)

            if not driver_errors.db_error_needs_new_session(db_driver, err_code):
                # from a killed session, crashed instance or similar
                conn_errors += 1

            if prev_err != err_code:
                sleep_c = 0
                sleep_s = 1
                prev_err = err_code
            sleep_c += 1

            if sleep_c >= 10:
                if sleep_s <= 301:
                    # don't sleep longer than 5 mins after connect failures
                    sleep_s += 10
                sleep_c = 0
            LOGGER.warning('(%d.%d)connection error: [%s] %s for %s@%s\n',
                           sleep_c, sleep_s, err_code,
                           err_msg.strip().replace('\n', ' ').replace('\r', ' '),
                           _config['username'], _config['db_url'])
            # set_trace()
            time.sleep(sleep_s)
        except (KeyboardInterrupt, SystemExit):
            exit(0)
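
# A minimal standalone sketch (assumed names, not part of the script above) of the
# minute-boundary alignment used near the end of the monitoring loop: sleeping
# 60 - (elapsed % 60) seconds keeps every iteration starting on the same second of
# the minute, so the con_mins counter stays roughly in step with wall-clock minutes.
import time

def sleep_to_minute_boundary(start_time):
    """Sleep so the next iteration starts on the same second-of-minute as start_time."""
    sleep_time = 60 - ((int(time.time()) - int(start_time)) % 60)
    time.sleep(sleep_time)
    return sleep_time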
Example #41
0
 def __repr__(self):
     return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
         % (self.nVersion, self.nServices, time.ctime(self.nTime),
            repr(self.addrTo), repr(self.addrFrom), self.nNonce,
            self.strSubVer, self.nStartingHeight)
        # logging INFO
        logg.info('default ADDR value implemented')

    return addr, port


def get_msg(msg_in):
    json_in = json.loads(msg_in)
    return json_in


def send_msg(destination_host, msg):
    g = json.dumps(msg)
    destination_host.send(g.encode('utf-8'))


# append a message to the list stored under root_value in a JSON file
def msg_to_json(filename, root_value, message):
    with open(filename) as f:
        data = json.load(f)
        temp = data[root_value]
        temp.append(message)
    with open(filename, 'w') as new_f:
        json.dump(data, new_f, indent=4)


timestr = time.ctime(time.time()) + "\n"



Example #43
0
def print_progress_info(actual, total):
    '''print progress info actual/total'''
    print('# %s %2.2f%% (%s/%s)' % (time.ctime(), (actual * 100.0 / total) if total else 0, actual, total))
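
# Example usage (illustrative values): print_progress_info(25, 200) prints something like
# '# Mon Jan  1 00:00:00 2024 12.50% (25/200)'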
#coding=utf-8
# Create a TCP server
from socket import *
from time import ctime

HOST=''
PORT=21567
BUFSIZ=1024
ADDR=(HOST,PORT)

tcpSerSock=socket(AF_INET,SOCK_STREAM) # create the server socket
tcpSerSock.bind(ADDR) # bind the socket to the address
tcpSerSock.listen(5)  # listen for connections; argument is the max number of queued requests

while True:
    print('waiting for connection...')
    tcpCliSock,addr =tcpSerSock.accept()
    print('...connected from:',addr)

    while True:
        data =tcpCliSock.recv(BUFSIZ).decode()
        print('data=',data)
        print("END")
        if not data:
            break
        tcpCliSock.send(('[%s] %s' %(ctime(),data)).encode())

    tcpCliSock.close()
tcpSerSock.close()
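
# A minimal client sketch (assumed counterpart, not part of the original snippet) for the
# timestamp echo server above: connect to the same HOST/PORT, send one line and read back
# the '[ctime] data' reply the server produces.
from socket import socket, AF_INET, SOCK_STREAM

def echo_once(host='127.0.0.1', port=21567, text='hello'):
    cli = socket(AF_INET, SOCK_STREAM)
    cli.connect((host, port))
    cli.send(text.encode())
    reply = cli.recv(1024).decode()  # e.g. '[Mon Jan  1 00:00:00 2024] hello'
    cli.close()
    return reply

# usage (with the server above running): print(echo_once('127.0.0.1', 21567, 'hi'))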
Example #45
0
class PythonShellWidget(TracebackLinksMixin, ShellBaseWidget,
                        GetHelpMixin):
    """Python shell widget"""
    QT_CLASS = ShellBaseWidget
    INITHISTORY = ['# -*- coding: utf-8 -*-',
                   '# *** Spyder Python Console History Log ***',]
    SEPARATOR = '%s##---(%s)---' % (os.linesep*2, time.ctime())
    go_to_error = Signal(str)
    
    def __init__(self, parent, history_filename, profile=False):
        ShellBaseWidget.__init__(self, parent, history_filename, profile)
        TracebackLinksMixin.__init__(self)
        GetHelpMixin.__init__(self)

        # Local shortcuts
        self.shortcuts = self.create_shortcuts()

    def create_shortcuts(self):
        array_inline = config_shortcut(lambda: self.enter_array_inline(),
                                       context='array_builder',
                                       name='enter array inline', parent=self)
        array_table = config_shortcut(lambda: self.enter_array_table(),
                                      context='array_builder',
                                      name='enter array table', parent=self)
        inspectsc = config_shortcut(self.inspect_current_object,
                                    context='Console',
                                    name='Inspect current object',
                                    parent=self)
        return [inspectsc, array_inline, array_table]
        
    def get_shortcut_data(self):
        """
        Returns shortcut data, a list of tuples (shortcut, text, default)
        shortcut (QShortcut or QAction instance)
        text (string): action/shortcut description
        default (string): default key sequence
        """
        return [sc.data for sc in self.shortcuts]

    #------ Context menu
    def setup_context_menu(self):
        """Reimplements ShellBaseWidget method"""
        ShellBaseWidget.setup_context_menu(self)
        self.copy_without_prompts_action = create_action(self,
                                     _("Copy without prompts"),
                                     icon=ima.icon('copywop'),
                                     triggered=self.copy_without_prompts)
        clear_line_action = create_action(self, _("Clear line"),
                                     QKeySequence(get_shortcut('console',
                                                               'Clear line')),
                                     icon=ima.icon('editdelete'),
                                     tip=_("Clear line"),
                                     triggered=self.clear_line)
        clear_action = create_action(self, _("Clear shell"),
                                     QKeySequence(get_shortcut('console',
                                                               'Clear shell')),
                                     icon=ima.icon('editclear'),
                                     tip=_("Clear shell contents "
                                           "('cls' command)"),
                                     triggered=self.clear_terminal)
        add_actions(self.menu, (self.copy_without_prompts_action,
                    clear_line_action, clear_action))
          
    def contextMenuEvent(self, event):
        """Reimplements ShellBaseWidget method"""
        state = self.has_selected_text()
        self.copy_without_prompts_action.setEnabled(state)
        ShellBaseWidget.contextMenuEvent(self, event)

    @Slot()
    def copy_without_prompts(self):
        """Copy text to clipboard without prompts"""
        text = self.get_selected_text()
        lines = text.split(os.linesep)
        for index, line in enumerate(lines):
            if line.startswith('>>> ') or line.startswith('... '):
                lines[index] = line[4:]
        text = os.linesep.join(lines)
        QApplication.clipboard().setText(text)
    
    
    #------ Key handlers
    def postprocess_keyevent(self, event):
        """Process keypress event"""
        ShellBaseWidget.postprocess_keyevent(self, event)
        if QToolTip.isVisible():
            _event, _text, key, _ctrl, _shift = restore_keyevent(event)
            self.hide_tooltip_if_necessary(key)
                
    def _key_other(self, text):
        """1 character key"""
        if self.is_completion_widget_visible():
            self.completion_text += text
                
    def _key_backspace(self, cursor_position):
        """Action for Backspace key"""
        if self.has_selected_text():
            self.check_selection()
            self.remove_selected_text()
        elif self.current_prompt_pos == cursor_position:
            # Avoid deleting prompt
            return
        elif self.is_cursor_on_last_line():
            self.stdkey_backspace()
            if self.is_completion_widget_visible():
                # Removing only last character because if there was a selection
                # the completion widget would have been canceled
                self.completion_text = self.completion_text[:-1]
                
    def _key_tab(self):
        """Action for TAB key"""
        if self.is_cursor_on_last_line():
            empty_line = not self.get_current_line_to_cursor().strip()
            if empty_line:
                self.stdkey_tab()
            else:
                self.show_code_completion(automatic=False)
                
    def _key_ctrl_space(self):
        """Action for Ctrl+Space"""
        if not self.is_completion_widget_visible():
            self.show_code_completion(automatic=False)
                
    def _key_pageup(self):
        """Action for PageUp key"""
        pass
    
    def _key_pagedown(self):
        """Action for PageDown key"""
        pass
                
    def _key_escape(self):
        """Action for ESCAPE key"""
        if self.is_completion_widget_visible():
            self.hide_completion_widget()

    def _key_question(self, text):
        """Action for '?'"""
        if self.get_current_line_to_cursor():
            last_obj = self.get_last_obj()
            if last_obj and not last_obj.isdigit():
                self.show_object_info(last_obj)
        self.insert_text(text)
        # In case calltip and completion are shown at the same time:
        if self.is_completion_widget_visible():
            self.completion_text += '?'
                
    def _key_parenleft(self, text):
        """Action for '('"""
        self.hide_completion_widget()
        if self.get_current_line_to_cursor():
            last_obj = self.get_last_obj()
            if last_obj and not last_obj.isdigit():
                self.insert_text(text)
                self.show_object_info(last_obj, call=True)
                return
        self.insert_text(text)
        
    def _key_period(self, text):
        """Action for '.'"""
        self.insert_text(text)
        if self.codecompletion_auto:
            # Enable auto-completion only if last token isn't a float
            last_obj = self.get_last_obj()
            if last_obj and not last_obj.isdigit():
                self.show_code_completion(automatic=True)


    #------ Paste
    def paste(self):
        """Reimplemented slot to handle multiline paste action"""
        text = to_text_string(QApplication.clipboard().text())
        if len(text.splitlines()) > 1:
            # Multiline paste
            if self.new_input_line:
                self.on_new_line()
            self.remove_selected_text() # Remove selection, eventually
            end = self.get_current_line_from_cursor()
            lines = self.get_current_line_to_cursor() + text + end
            self.clear_line()
            self.execute_lines(lines)
            self.move_cursor(-len(end))
        else:
            # Standard paste
            ShellBaseWidget.paste(self)
    
  
    #------ Code Completion / Calltips        
    # Methods implemented in child class:
    # (e.g. InternalShell)
    def get_dir(self, objtxt):
        """Return dir(object)"""
        raise NotImplementedError
    def get_module_completion(self, objtxt):
        """Return module completion list associated to object name"""
        pass
    def get_globals_keys(self):
        """Return shell globals() keys"""
        raise NotImplementedError
    def get_cdlistdir(self):
        """Return shell current directory list dir"""
        raise NotImplementedError
    def iscallable(self, objtxt):
        """Is object callable?"""
        raise NotImplementedError
    def get_arglist(self, objtxt):
        """Get func/method argument list"""
        raise NotImplementedError
    def get__doc__(self, objtxt):
        """Get object __doc__"""
        raise NotImplementedError
    def get_doc(self, objtxt):
        """Get object documentation dictionary"""
        raise NotImplementedError
    def get_source(self, objtxt):
        """Get object source"""
        raise NotImplementedError
    def is_defined(self, objtxt, force_import=False):
        """Return True if object is defined"""
        raise NotImplementedError
        
    def show_code_completion(self, automatic):
        """Display a completion list based on the current line"""
        # Note: unicode conversion is needed only for ExternalShellBase
        text = to_text_string(self.get_current_line_to_cursor())
        last_obj = self.get_last_obj()
        
        if not text:
            return

        if text.startswith('import '):
            obj_list = self.get_module_completion(text)
            words = text.split(' ')
            if ',' in words[-1]:
                words = words[-1].split(',')
            self.show_completion_list(obj_list, completion_text=words[-1],
                                      automatic=automatic)
            return
            
        elif text.startswith('from '):
            obj_list = self.get_module_completion(text)
            if obj_list is None:
                return
            words = text.split(' ')
            if '(' in words[-1]:
                words = words[:-2] + words[-1].split('(')
            if ',' in words[-1]:
                words = words[:-2] + words[-1].split(',')
            self.show_completion_list(obj_list, completion_text=words[-1],
                                      automatic=automatic)
            return
        
        obj_dir = self.get_dir(last_obj)
        if last_obj and obj_dir and text.endswith('.'):
            self.show_completion_list(obj_dir, automatic=automatic)
            return
        
        # Builtins and globals
        if not text.endswith('.') and last_obj \
           and re.match(r'[a-zA-Z_0-9]*$', last_obj):
            b_k_g = dir(builtins)+self.get_globals_keys()+keyword.kwlist
            for objname in b_k_g:
                if objname.startswith(last_obj) and objname != last_obj:
                    self.show_completion_list(b_k_g, completion_text=last_obj,
                                              automatic=automatic)
                    return
            else:
                return
        
        # Looking for an incomplete completion
        if last_obj is None:
            last_obj = text
        dot_pos = last_obj.rfind('.')
        if dot_pos != -1:
            if dot_pos == len(last_obj)-1:
                completion_text = ""
            else:
                completion_text = last_obj[dot_pos+1:]
                last_obj = last_obj[:dot_pos]
            completions = self.get_dir(last_obj)
            if completions is not None:
                self.show_completion_list(completions,
                                          completion_text=completion_text,
                                          automatic=automatic)
                return
        
        # Looking for ' or ": filename completion
        q_pos = max([text.rfind("'"), text.rfind('"')])
        if q_pos != -1:
            completions = self.get_cdlistdir()
            if completions:
                self.show_completion_list(completions,
                                          completion_text=text[q_pos+1:],
                                          automatic=automatic)
            return
            
    #------ Drag'n Drop
    def drop_pathlist(self, pathlist):
        """Drop path list"""
        if pathlist:
            files = ["r'%s'" % path for path in pathlist]
            if len(files) == 1:
                text = files[0]
            else:
                text = "[" + ", ".join(files) + "]"
            if self.new_input_line:
                self.on_new_line()
            self.insert_text(text)
            self.setFocus()
Example #46
0
 def __repr__(self):
     return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
         % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
            time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
#-----------
import time
ticks = time.time() # float seconds elapsed since the epoch (1970)
print ("Current timestamp:", ticks)

localtime = time.localtime(time.time())
print ("Local time:", localtime)

print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

# convert a formatted time string to a timestamp
a = "Sat Mar 28 22:24:24 2016"
print (time.mktime(time.strptime(a,"%a %b %d %H:%M:%S %Y")))


import calendar

cal = calendar.month(2016, 1) # Monday is the default first day of the week
print ("Calendar for January 2016:")
print (cal)

print("time.ctime() : %s" % time.ctime())
#	time.ctime([secs]) is equivalent to asctime(localtime(secs)); with no argument it is the same as asctime()
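
# A quick check of the equivalence stated above; both sides format the same instant:
print(time.ctime(0) == time.asctime(time.localtime(0)))   # True
print(time.ctime() == time.asctime(time.localtime()))     # normally True (may differ across a second boundary)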







Example #48
0
class TerminalWidget(ShellBaseWidget):
    """
    Terminal widget
    """
    COM = 'rem' if os.name == 'nt' else '#'
    INITHISTORY = ['%s *** Spyder Terminal History Log ***' % COM, COM,]
    SEPARATOR = '%s%s ---(%s)---' % (os.linesep*2, COM, time.ctime())
    go_to_error = Signal(str)
    
    def __init__(self, parent, history_filename, profile=False):
        ShellBaseWidget.__init__(self, parent, history_filename, profile)
        
    #------ Key handlers
    def _key_other(self, text):
        """1 character key"""
        pass
                
    def _key_backspace(self, cursor_position):
        """Action for Backspace key"""
        if self.has_selected_text():
            self.check_selection()
            self.remove_selected_text()
        elif self.current_prompt_pos == cursor_position:
            # Avoid deleting prompt
            return
        elif self.is_cursor_on_last_line():
            self.stdkey_backspace()
                
    def _key_tab(self):
        """Action for TAB key"""
        if self.is_cursor_on_last_line():
            self.stdkey_tab()
                
    def _key_ctrl_space(self):
        """Action for Ctrl+Space"""
        pass
                
    def _key_escape(self):
        """Action for ESCAPE key"""
        self.clear_line()
                
    def _key_question(self, text):
        """Action for '?'"""
        self.insert_text(text)
                
    def _key_parenleft(self, text):
        """Action for '('"""
        self.insert_text(text)
        
    def _key_period(self, text):
        """Action for '.'"""
        self.insert_text(text)
            
            
    #------ Drag'n Drop
    def drop_pathlist(self, pathlist):
        """Drop path list"""
        if pathlist:
            files = ['"%s"' % path for path in pathlist]
            if len(files) == 1:
                text = files[0]
            else:
                text = " ".join(files)
            if self.new_input_line:
                self.on_new_line()
            self.insert_text(text)
            self.setFocus()
def train(args):
    np.random.seed(args.seed)
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # dataloader
    transform = utils.Compose([utils.Scale(args.image_size),
                               utils.CenterCrop(args.image_size),
                               utils.ToTensor(ctx),
                               ])
    train_dataset = data.ImageFolder(args.dataset, transform)
    train_loader = gluon.data.DataLoader(train_dataset, batch_size=args.batch_size, last_batch='discard')
    style_loader = utils.StyleLoader(args.style_folder, args.style_size, ctx=ctx)
    print('len(style_loader):',style_loader.size())
    # models
    vgg = net.Vgg16()
    utils.init_vgg_params(vgg, 'models', ctx=ctx)
    style_model = net.Net(ngf=args.ngf)
    style_model.initialize(init=mx.initializer.MSRAPrelu(), ctx=ctx)
    if args.resume is not None:
        print('Resuming, initializing using weight from {}.'.format(args.resume))
        style_model.load_parameters(args.resume, ctx=ctx)
    print('style_model:',style_model)
    # optimizer and loss
    trainer = gluon.Trainer(style_model.collect_params(), 'adam',
                            {'learning_rate': args.lr})
    mse_loss = gluon.loss.L2Loss()

    for e in range(args.epochs):
        agg_content_loss = 0.
        agg_style_loss = 0.
        count = 0
        for batch_id, (x, _) in enumerate(train_loader):
            n_batch = len(x)
            count += n_batch
            # prepare data
            style_image = style_loader.get(batch_id)
            style_v = utils.subtract_imagenet_mean_preprocess_batch(style_image.copy())
            style_image = utils.preprocess_batch(style_image)

            features_style = vgg(style_v)
            gram_style = [net.gram_matrix(y) for y in features_style]

            xc = utils.subtract_imagenet_mean_preprocess_batch(x.copy())
            f_xc_c = vgg(xc)[1]
            with autograd.record():
                style_model.setTarget(style_image)
                y = style_model(x)

                y = utils.subtract_imagenet_mean_batch(y)
                features_y = vgg(y)

                content_loss = 2 * args.content_weight * mse_loss(features_y[1], f_xc_c)

                style_loss = 0.
                for m in range(len(features_y)):
                    gram_y = net.gram_matrix(features_y[m])
                    _, C, _ = gram_style[m].shape
                    gram_s = F.expand_dims(gram_style[m], 0).broadcast_to((args.batch_size, 1, C, C))
                    style_loss = style_loss + 2 * args.style_weight * mse_loss(gram_y, gram_s[:n_batch, :, :])

                total_loss = content_loss + style_loss
                total_loss.backward()

            trainer.step(args.batch_size)
            mx.nd.waitall()

            agg_content_loss += content_loss[0]
            agg_style_loss += style_loss[0]

            if (batch_id + 1) % args.log_interval == 0:
                mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.3f}\tstyle: {:.3f}\ttotal: {:.3f}".format(
                    time.ctime(), e + 1, count, len(train_dataset),
                                agg_content_loss.asnumpy()[0] / (batch_id + 1),
                                agg_style_loss.asnumpy()[0] / (batch_id + 1),
                                (agg_content_loss + agg_style_loss).asnumpy()[0] / (batch_id + 1)
                )
                print(mesg)


            if (batch_id + 1) % (4 * args.log_interval) == 0:
                # save model
                save_model_filename = "Epoch_" + str(e) + "iters_" + str(count) + "_" + str(time.ctime()).replace(' ', '_') + "_" + str(
                    args.content_weight) + "_" + str(args.style_weight) + ".params"
                save_model_path = os.path.join(args.save_model_dir, save_model_filename)
                style_model.save_parameters(save_model_path)
                print("\nCheckpoint, trained model saved at", save_model_path)

    # save model
    save_model_filename = "Final_epoch_" + str(args.epochs) + "_" + str(time.ctime()).replace(' ', '_') + "_" + str(
        args.content_weight) + "_" + str(args.style_weight) + ".params"
    save_model_path = os.path.join(args.save_model_dir, save_model_filename)
    style_model.save_parameters(save_model_path)
    print("\nDone, trained model saved at", save_model_path)
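
# Hedged side note (not from the original code): str(time.ctime()).replace(' ', '_') still
# leaves ':' characters in the checkpoint filename, which some filesystems (notably on
# Windows) reject. A strftime-based stamp such as the sketch below avoids that:
import time

def checkpoint_stamp():
    # e.g. '20240101_120000' instead of 'Mon_Jan__1_12:00:00_2024'
    return time.strftime("%Y%m%d_%H%M%S")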
    msg.attach(mime)


'''
This file takes the sender email address and the receiver email address as inputs and
sends a mail with a specific message using Gmail as the SMTP server.

It is useful for sending mail from the command line or programmatically, without opening a browser.
'''
import os
import smtplib
import ssl
import time
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

port = 465  # For SSL
smtp_server = "smtp.gmail.com"
sender_email = ""  # Enter sender's email
receiver_email = ""  # Enter receiver address
password = ""  #Enter sender's pass
date = time.ctime()
message = MIMEMultipart()
message['Subject'] = ""  # Enter subject; set headers on the multipart, not in the body
message['From'] = sender_email
message['To'] = receiver_email
text = MIMEText("""\
Current Time/Date: {date}
#Message here.
""".format(date=date))
message.attach(text)  #attaches text to the email
img_data = open('', 'rb').read()  #Enter image filename
image = MIMEImage(img_data, name=os.path.basename())  #Enter image filename
message.attach(image)  #attaches image to the email

context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
    server.login(sender_email, password)
    server.sendmail(sender_email, receiver_email, message.as_string())
Example #51
0
import webbrowser
import time
count = 0
music_videos = [
    "https://www.youtube.com/watch?v=M0Sloitb9Jg",
    "http://www.youtube.com/watch?v=dQw4w9WgXcQ",
    "https://www.youtube.com/watch?v=iPXKfGxeHIY",
    "https://www.youtube.com/watch?v=58Le1Ove61o"
]
breaks = len(music_videos)
print("This program started on " + time.ctime())
while count < breaks:
    time.sleep(60 * 60 * 2)
    webbrowser.open(music_videos.pop())
    count = count + 1
Example #52
0
def main():
#----------------------
    startTime = time.time()
    random.seed(args.seed)
    mgiJournalsNames = getMgiJournals(args.mgiJournalsFile)

    retainedSampleSet = None	# the final sample sets
    leftoverSampleSet = None	#   ...

    allJournals = set()		# all journal names seen in input

    sampleObjType = getattr(sampleDataLib, args.sampleObjTypeName)

    for fn in args.inputFiles:
        verbose("Reading %s\n" % fn)
        inputSampleSet = sampleDataLib.ClassifiedRefSampleSet( \
                                        sampleObjType=sampleObjType).read(fn)

        if not retainedSampleSet:	# processing 1st input file
            sampleObjType     = inputSampleSet.getSampleObjType()
            retainedSampleSet = sampleDataLib.ClassifiedRefSampleSet( \
                                                sampleObjType=sampleObjType)
            leftoverSampleSet = sampleDataLib.ClassifiedRefSampleSet( \
                                                sampleObjType=sampleObjType)
            verbose("Sample type: %s\n" % sampleObjType.__name__)
        else:
            if sampleObjType != inputSampleSet.getSampleObjType():
                sys.stderr.write( \
                    "Input files have inconsistent sample types: %s & %s\n" % \
                    (sampleObjType.__name__,
                    inputSampleSet.getSampleObjType().__name__) )
                exit(5)

        allJournals |= inputSampleSet.getJournals()

        for sample in inputSampleSet.sampleIterator():
            if args.onlyMgi and not cleanJournalName(sample.getJournal()) \
                                                        in mgiJournalsNames:
                retain = False
            else:
                retain = random.random() < float(args.fraction)

            if retain:
                retainedSampleSet.addSample(sample)
            else:
                leftoverSampleSet.addSample(sample)
    ### Write output files
    retainedSampleSet.write(args.retainedFile)
    leftoverSampleSet.write(args.leftoverFile)

    ### Write summary report
    summary = "\nSummary:  "
    summary += "Retaining random set of articles. Fraction: %5.3f\n" \
                                                            % args.fraction
    summary += time.ctime() + '\n'
    summary += "Seed:  %d\n" % args.seed
    if args.onlyMgi:
        summary += "Only samples from MGI monitored journals are eligible\n"
    else:
        summary += "Samples from any journal are eligible\n"
    summary += str(args.inputFiles) + '\n'

    summary += "Input Totals:\n"
    totRefs = retainedSampleSet.getNumSamples() + \
                                            leftoverSampleSet.getNumSamples()
    totPos  = retainedSampleSet.getNumPositives() + \
                                            leftoverSampleSet.getNumPositives()
    totNeg  = retainedSampleSet.getNumNegatives() + \
                                            leftoverSampleSet.getNumNegatives()
    summary += formatSummary(totRefs, totPos, totNeg, len(allJournals))
    summary += '\n'

    summary += "Retained Set Totals:\n"
    summary += formatSummary(retainedSampleSet.getNumSamples(),
                            retainedSampleSet.getNumPositives(),
                            retainedSampleSet.getNumNegatives(),
                            len(retainedSampleSet.getJournals()),
                            )
    summary += "(%5.3f%% of inputs)\n" %  \
                            (100.0 * retainedSampleSet.getNumSamples()/totRefs)
    summary += '\n'
    summary += "Leftover Set Totals:\n"
    summary += formatSummary(leftoverSampleSet.getNumSamples(),
                            leftoverSampleSet.getNumPositives(),
                            leftoverSampleSet.getNumNegatives(),
                            len(leftoverSampleSet.getJournals()),
                            )
    sys.stdout.write(summary + '\n')
    return
Example #53
0
 def decode(self):
     nla.decode(self)
     self['latest handshake'] = ctime(self['tv_sec'])
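# e.g. (illustrative) ctime(1700000000) -> 'Tue Nov 14 22:13:20 2023' when the local timezone is UTC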
def main():
    run_time = time.ctime().replace(' ', '_')[:-8] 
    directory = 'progress/' + run_time
    if not os.path.exists(directory):
        os.makedirs(directory)
    f = open(directory + '/logs.txt', 'w')
    global args, best_prec1, best_prec1_ps
    print ("GPU processing available : ", torch.cuda.is_available())
    print ("Number of GPU units available :", torch.cuda.device_count())
    args = parser.parse_args()

    ## READ DATA
    #cudnn.benchmark = True

    #TODO
    # Data loading code
    #traindir = os.path.join(args.data, 'train')
    #valdir = os.path.join(args.data, 'val')
    filter_h = [4,6,8] #[1, 3, 5]
    
    train_sampler = None 
    train_dataset = TwitterDataset(
        csv_file='DATA/txt/bamman_clean.txt', 
        folds_file='DATA/folds/fold_0.csv', 
        word_embedding_file='DATA/embeddings/filtered_embs.txt', 
        user_embedding_file='DATA/embeddings/usr2vec.txt', 
        set_type='train', 
        pad = max(filter_h) - 1
    )

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True)

    val_dataset = TwitterDataset(
        csv_file='DATA/txt/bamman_clean.txt', 
        folds_file='DATA/folds/fold_0.csv', 
        word_embedding_file='DATA/embeddings/filtered_embs.txt', 
        user_embedding_file='DATA/embeddings/usr2vec.txt', 
        set_type='val', 
        pad = max(filter_h) - 1,
        w2v = train_dataset.w2v
    )

    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True)
    
    test_dataset = TwitterDataset(
        csv_file='DATA/txt/bamman_clean.txt', 
        folds_file='DATA/folds/fold_0.csv', 
        word_embedding_file='DATA/embeddings/filtered_embs.txt', 
        user_embedding_file='DATA/embeddings/usr2vec.txt', 
        set_type='test', 
        pad = max(filter_h) - 1,
        w2v = train_dataset.w2v
    )

    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True)
    # epochs
    # print final acc with config
    # print best till date
    for lr in [0.000001, 0.00001]:
        for wd in [1, 0.1, 0.01, 0.001]:
            for oc in [50, 300]:
                for hu in [128, 256]:
                    for dp in [0, 0.1, 0.3]:
                        for fs in [(1,2,3), (1,3,5)]:
                            best_prec1 = 0
                            best_prec1_index = -1
                            parameters = {"filters": fs,
                                          "out_channels": oc,                  
                                          "max_length": train_dataset.max_l,
                                          "hidden_units": hu,
                                          "drop_prob": dp,
                                          "user_size": 400,
                                          "epochs":args.epochs}

                            #device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
                            model = CUE_CNN(parameters['filters'], parameters['out_channels'], parameters['max_length'], parameters['hidden_units'], 
                                            parameters['drop_prob'], parameters['user_size'])#.to(device)
                            model = torch.nn.DataParallel(model).cuda()

                            # define loss function (criterion) and optimizer
                            criterion = nn.CrossEntropyLoss().cuda()

                            optimizer = torch.optim.Adam(model.parameters(), lr = lr, weight_decay=wd)
                        #     optimizer = torch.optim.SGD(model.parameters(), lr = args.lr,
                        #                                      momentum=args.momentum,
                        #                                      weight_decay=args.weight_decay)
                        #     torch.optim.Adadelta(model.parameters(), 
                        #                                      rho=args.momentum,
                        #                                      weight_decay=args.weight_decay)

                        #     optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
                            scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=10, threshold=0.005)
                            # optionally resume from a checkpoint
                            train_prec1_plot = []
                            train_loss_plot = []
                            val_prec1_plot = []
                            val_loss_plot = []
                            test_prec1_plot = []
                            test_loss_plot = []
                            if args.resume:
                                if os.path.isfile(args.resume):
                                    print("=> loading checkpoint '{}'".format(args.resume))
                                    checkpoint = torch.load(args.resume)
                                    args.start_epoch = checkpoint['epoch']
                                    best_prec1 = checkpoint['best_prec1']
                                    model.load_state_dict(checkpoint['state_dict'])
                                    optimizer.load_state_dict(checkpoint['optimizer'])
                                    train_prec1_plot = train_prec1_plot + checkpoint['train_prec1_plot']
                                    train_loss_plot = train_loss_plot + checkpoint['train_loss_plot']
                                    val_prec1_plot = val_prec1_plot + checkpoint['val_prec1_plot']
                                    val_loss_plot = val_loss_plot + checkpoint['val_loss_plot']
                                    test_prec1_plot = test_prec1_plot + checkpoint['test_prec1_plot']
                                    test_loss_plot = test_loss_plot + checkpoint['test_loss_plot']
                                    print("=> loaded checkpoint '{}' (epoch {})"
                                          .format(args.resume, checkpoint['epoch']))
                                else:
                                    print("=> no checkpoint found at '{}'".format(args.resume))

                            if args.evaluate:
                                validate(test_loader, model, criterion, f)
                                return

                            for epoch in range(args.start_epoch, args.epochs + args.start_epoch):
                                #TODO
                                #adjust_learning_rate(optimizer, epoch)

                                # train for one epoch
                                train_prec1, train_loss  = train(train_loader, model, criterion, optimizer, epoch, f)
                                train_prec1_plot.append(train_prec1)
                                train_loss_plot.append(train_loss)

                                # evaluate on validation set
                                val_prec1, val_loss = validate(val_loader, model, criterion, f)
                                val_prec1_plot.append(val_prec1)
                                val_loss_plot.append(val_loss)
                                scheduler.step(val_loss)
                                # evaluate on test set
                                test_prec1,test_loss = validate(test_loader, model, criterion, f, is_val=False)
                                test_prec1_plot.append(test_prec1)
                                test_loss_plot.append(test_loss)

                                # remember best prec@1 and save checkpoint
                                is_best = val_prec1 > best_prec1
                                best_prec1 = max(val_prec1, best_prec1)
                                best_prec1_index = epoch if is_best else best_prec1_index
                                save_checkpoint({
                                    'train_prec1_plot':train_prec1_plot,
                                    'train_loss_plot':train_loss_plot,
                                    'val_prec1_plot':val_prec1_plot,
                                    'val_loss_plot':val_loss_plot,
                                    'test_prec1_plot':test_prec1_plot,
                                    'test_loss_plot':test_loss_plot,
                                    'epoch': epoch + 1,
                                    'state_dict': model.state_dict(),
                                    'best_prec1': best_prec1,
                                    'optimizer' : optimizer.state_dict(),
                                }, is_best)

                                #plot data
                                #plt.figure(figsize=(12,12))
                                #plt.subplot(2,1,1)
                                #plot_stats(epoch+1, train_loss_plot, val_loss_plot, test_loss_plot, 'train_loss', 'val_loss', 'test_loss', plt)
                                #plt.subplot(2,1,2)
                                #plot_stats(epoch+1, train_prec1_plot, val_prec1_plot, test_prec1_plot, 'train_acc', 'val_acc', 'test_acc', plt)
                                #plt.savefig('progress/' + run_time + '/stats.jpg')
                                #plt.clf()
                                #print "Learning rate is : ", optimizer.param_groups[0]['lr']
                            print(" $$ ", lr, wd, oc, hu, dp, fs, " $$ ")
                            print(train_prec1_plot[best_prec1_index], best_prec1, test_prec1_plot[best_prec1_index])
                            best_prec1_ps = max(best_prec1, best_prec1_ps)
                            f.write('configuration {0} {1} {2} {3} {4} {5} \n'.format(lr, wd, oc, hu, dp, fs))
                            f.write('train: {0} val: {1} test: {2} \n'.format(train_prec1_plot[best_prec1_index], best_prec1, test_prec1_plot[best_prec1_index]))
                            f.write('best val performance is : ' + str(best_prec1_ps) + '\n')
                            f.flush()
                            #best_prec1_ps = max(best_prec1, best_prec1_ps)
    print("final best performance is for val ", best_prec1_ps)
    f.close()
def getXClusters(filename, trainIDs, testIDs, mean_centered, clusterIDs0, clusterIDs1, clusterIDs2):
    #filename="res/101_trajectories/aug_trajectories-0750am-0805am.txt"
    path = os.getcwd()+'/'
    compressed = 'compressed' in filename
    frameDict = futil.LoadDictFromTxt(path+filename, 'frame')
    print("Gotten frameDict",time.ctime())
    dictOfGrids = futil.GetGridsFromFrameDict(frameDict, mean_centered, compressed)
    print("Gotten dictOfGrids",time.ctime())
    #filepath = makePathMR(filename, '-mergerMinRanges')
    filepath = makeFullPath(filename, '-mergerRanges.txt')
    MR = np.loadtxt(filepath, dtype='int')
    '''MR=MergeRanges. MR[:,0]=merge ids, MR[:,1]=start frame, MR[:,2] = end'''
    print ("Done loading in getX", time.ctime())
    start = getStartVals(filename)    
    Xtrain1 = np.array([])
    Xtrain2 = np.array([])
    Xtrain0 = np.array([])   #will have numTrain*numFrames rows and size(grid)+1 columns
    Xtest1 = np.array([])
    Xtest2 = np.array([])
    Xtest0 = np.array([])
    trainEmpty = [True]*3
    testEmpty = [True]*3
    if numUsing != 0:
        MR = MR[:numUsing]
    for it, row in enumerate(MR):
        thisStart = start[it]
        XVID = sparse.csr_matrix(np.ascontiguousarray(getXInner(row, dictOfGrids, thisStart, frameDict, compressed)))
        if row[0] in trainIDs:
            if row[0] in clusterIDs0:
                if trainEmpty[0]:
                    Xtrain0 = XVID
                    trainEmpty[0] = False
                else:
                    Xtrain0 = sparse.vstack((Xtrain0, XVID))
            elif row[0] in clusterIDs1:
                if trainEmpty[1]:
                    Xtrain1 = XVID
                    trainEmpty[1] = False
                else:
                    Xtrain1 = sparse.vstack((Xtrain1, XVID))
            elif row[0] in clusterIDs2:
                if trainEmpty[2]:
                    Xtrain2 = XVID
                    trainEmpty[2] = False
                else:
                    Xtrain2 = sparse.vstack((Xtrain2, XVID))
            print("Finished getting X data for Merger with VID:", row[0], " and it is a training example", time.ctime())
        else:
            if row[0] in clusterIDs0:
                if testEmpty[0]:
                    Xtest0 = XVID
                    testEmpty[0] = False
                else:
                    Xtest0 = sparse.vstack((Xtest0, XVID))
            elif row[0] in clusterIDs1:
                if testEmpty[1]:
                    Xtest1 = XVID
                    testEmpty[1] = False
                else:
                    Xtest1 = sparse.vstack((Xtest1, XVID))
            elif row[0] in clusterIDs2:
                if testEmpty[2]:
                    Xtest2 = XVID
                    testEmpty[2] = False
                else:
                    Xtest2 = sparse.vstack((Xtest2, XVID))
            print("Finished getting X data for Merger with VID:", row[0], " and it is a test example")
    return Xtrain0, Xtrain1, Xtrain2, Xtest0, Xtest1, Xtest2
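The empty-flag bookkeeping above works, but calling sparse.vstack on every append copies the accumulated matrix each time. A hedged alternative sketch (not part of the original code; the helper name and grouping are illustrative) collects the per-cluster blocks in plain lists and stacks each group once at the end:

from scipy import sparse

def stack_groups(blocks_by_group):
    # blocks_by_group maps a cluster id (e.g. 0, 1, 2) to a list of csr_matrix blocks.
    # sparse.vstack accepts a list, so each group is stacked in a single call.
    return {group: sparse.vstack(blocks)
            for group, blocks in blocks_by_group.items() if blocks}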
Example #56
0
	def makeInactive(self,name):
		with self.lock:
			self.active.remove(name)
			logging.debug('Running {0} at {1}'.format(self.active,time.ctime()))
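makeInactive only makes sense inside a pool object that owns both the lock and the shared active list. A minimal sketch of such a class, assuming names like ActivePool and makeActive that do not appear in the original snippet:

import logging
import threading
import time

class ActivePool(object):
    def __init__(self):
        self.active = []              # names of the jobs currently running
        self.lock = threading.Lock()  # guards every access to self.active

    def makeActive(self, name):
        with self.lock:
            self.active.append(name)
            logging.debug('Running {0} at {1}'.format(self.active, time.ctime()))

    def makeInactive(self, name):
        with self.lock:
            self.active.remove(name)
            logging.debug('Running {0} at {1}'.format(self.active, time.ctime()))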
Example #57
0
def run_model(config,
         seed=0,
         data_dir='./data',
         genotype_class='PCDARTS',
         num_epochs=20,
         batch_size=get('batch_size'),
         init_channels=get('init_channels'),
         train_criterion=torch.nn.CrossEntropyLoss,
         data_augmentations=None,
         save_model_str=None, **kwargs):
    """
    Training loop for configurableNet.
    :param config: network/optimizer configuration (dict); the learning rate, optimizer and
        regularisation settings are read from here rather than passed as separate arguments
    :param seed: random seed (int)
    :param data_dir: dataset path (str)
    :param genotype_class: name of the genotype in `genotypes` to build the network from (str)
    :param num_epochs: (int)
    :param batch_size: (int)
    :param init_channels: number of initial channels of the network (int)
    :param train_criterion: which loss to use during training (torch.nn._Loss)
    :param data_augmentations: list of data augmentations to apply, such as rescaling
        (list[transformations], transforms.Compose[list[transformations]], or None);
        if None, only ToTensor is used
    :param save_model_str: path under which to save the model checkpoint (str or None)
    :return: test accuracy after the final epoch
    """


    # Set up the device, seeds and cuDNN so the run is reproducible on the GPU
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    gpu = 'cuda:0'
    np.random.seed(seed)
    torch.cuda.set_device(gpu)
    cudnn.benchmark = True
    torch.manual_seed(seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(seed)
    logging.info('gpu device = %s' % gpu)
    logging.info("config = %s", config)

    if data_augmentations is None:
        # You can add any preprocessing/data augmentation you want here
        data_augmentations = transforms.ToTensor()
    elif isinstance(data_augmentations, list):
        data_augmentations = transforms.Compose(data_augmentations)
    elif not isinstance(data_augmentations, transforms.Compose):
        raise NotImplementedError

    train_dataset = K49(data_dir, True, data_augmentations)
    test_dataset = K49(data_dir, False, data_augmentations)
    # train_dataset = KMNIST(data_dir, True, data_augmentations)
    # test_dataset = KMNIST(data_dir, False, data_augmentations)
    # Make data batch iterable
    # Could modify the sampler to not uniformly random sample
    
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=batch_size,
                              shuffle=True)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             shuffle=False)

    genotype = getattr(genotypes, genotype_class)
    model = Network(init_channels, train_dataset.n_classes, config['n_conv_layers'], genotype)
    model = model.cuda()
    
    total_model_params = sum(p.numel() for p in model.parameters())

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = train_criterion()   # instantiate the loss (the default passes the class, not an instance)
    criterion = criterion.cuda()
    
    if config['optimizer'] == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), 
                                    lr=config['initial_lr'], 
                                    momentum=config['sgd_momentum'], 
                                    weight_decay=config['weight_decay'], 
                                    nesterov=config['nesterov'])
    else:
        optimizer = get('opti_dict')[config['optimizer']](model.parameters(), lr=config['initial_lr'], weight_decay=config['weight_decay'])
    
    if config['lr_scheduler'] == 'Cosine':
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)
    elif config['lr_scheduler'] == 'Exponential':
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.1)
    else:
        raise ValueError("Unknown lr_scheduler: %s" % config['lr_scheduler'])

    logging.info('Generated Network:')
    summary(model, (train_dataset.channels,
                    train_dataset.img_rows,
                    train_dataset.img_cols),
            device='cuda' if torch.cuda.is_available() else 'cpu')
    for epoch in range(num_epochs):
        lr_scheduler.step()
        logging.info('epoch %d lr %e', epoch, lr_scheduler.get_lr()[0])
        model.drop_path_prob = config['drop_path_prob'] * epoch / num_epochs

        train_acc, train_obj = train(train_loader, model, criterion, optimizer, grad_clip=config['grad_clip_value'])
        logging.info('train_acc %f', train_acc)

        test_acc, test_obj = infer(test_loader, model, criterion)
        logging.info('test_acc %f', test_acc)


    if save_model_str:
        # Save the model checkpoint; restore later via model.load_state_dict(torch.load(save_model_str))
        if os.path.exists(save_model_str):
            # Avoid overwriting an existing checkpoint by appending a timestamp
            save_model_str += '_' + time.ctime().replace(' ', '_')
        torch.save(model.state_dict(), save_model_str)
    
    return test_acc
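A hedged usage sketch for run_model: the dictionary keys below are exactly the ones the function body reads from config, while the concrete values are illustrative assumptions rather than tuned settings, and the call assumes the K49 data is available under data_dir.

example_config = {
    'n_conv_layers': 3,
    'optimizer': 'sgd',
    'initial_lr': 0.025,
    'sgd_momentum': 0.9,
    'nesterov': True,
    'weight_decay': 3e-4,
    'lr_scheduler': 'Cosine',
    'drop_path_prob': 0.2,
    'grad_clip_value': 5,
}
test_acc = run_model(example_config,
                     seed=0,
                     data_dir='./data',
                     num_epochs=20,
                     save_model_str='./run_model_checkpoint')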
Example #58
0
    def testStartupHandler(self):
        # Clean the client records.
        aff4.FACTORY.Delete(self.client_id, token=self.token)

        client_mock = action_mocks.ActionMock("SendStartupInfo")
        for _ in test_lib.TestFlowHelper("ClientActionRunner",
                                         client_mock,
                                         client_id=self.client_id,
                                         action="SendStartupInfo",
                                         token=self.token):
            pass

        # Check the client's boot time and info.
        fd = aff4.FACTORY.Open(self.client_id, token=self.token)
        client_info = fd.Get(fd.Schema.CLIENT_INFO)
        boot_time = fd.Get(fd.Schema.LAST_BOOT_TIME)

        self.assertEqual(client_info.client_name,
                         config_lib.CONFIG["Client.name"])
        self.assertEqual(client_info.client_description,
                         config_lib.CONFIG["Client.description"])

        # Check that the boot time is accurate.
        self.assertAlmostEqual(psutil.boot_time(),
                               boot_time.AsSecondsFromEpoch())

        # Run it again - this should not update any record.
        for _ in test_lib.TestFlowHelper("ClientActionRunner",
                                         client_mock,
                                         client_id=self.client_id,
                                         action="SendStartupInfo",
                                         token=self.token):
            pass

        fd = aff4.FACTORY.Open(self.client_id, token=self.token)
        self.assertEqual(boot_time.age, fd.Get(fd.Schema.LAST_BOOT_TIME).age)
        self.assertEqual(client_info.age, fd.Get(fd.Schema.CLIENT_INFO).age)

        # Simulate a reboot in 10 minutes.
        current_boot_time = psutil.boot_time()
        psutil.boot_time = lambda: current_boot_time + 600

        # Run it again - this should now update the boot time.
        for _ in test_lib.TestFlowHelper("ClientActionRunner",
                                         client_mock,
                                         client_id=self.client_id,
                                         action="SendStartupInfo",
                                         token=self.token):
            pass

        # Ensure only this attribute is updated.
        fd = aff4.FACTORY.Open(self.client_id, token=self.token)
        self.assertNotEqual(int(boot_time.age),
                            int(fd.Get(fd.Schema.LAST_BOOT_TIME).age))

        self.assertEqual(int(client_info.age),
                         int(fd.Get(fd.Schema.CLIENT_INFO).age))

        # Now set a new client build time.
        with test_lib.ConfigOverrider({"Client.build_time": time.ctime()}):

            # Run it again - this should now update the client info.
            for _ in test_lib.TestFlowHelper("ClientActionRunner",
                                             client_mock,
                                             client_id=self.client_id,
                                             action="SendStartupInfo",
                                             token=self.token):
                pass

            # Ensure the client info attribute is updated.
            fd = aff4.FACTORY.Open(self.client_id, token=self.token)
            self.assertNotEqual(int(client_info.age),
                                int(fd.Get(fd.Schema.CLIENT_INFO).age))
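The reboot above is simulated by overwriting psutil.boot_time in place, which leaks the patch into later tests unless it is restored. A hedged sketch of the same idea with unittest.mock, which undoes the patch automatically (an alternative, not what the original test does):

import unittest.mock

import psutil

real_boot_time = psutil.boot_time()
with unittest.mock.patch.object(psutil, 'boot_time', return_value=real_boot_time + 600):
    # Inside the block psutil.boot_time() reports a boot 10 minutes later.
    assert psutil.boot_time() == real_boot_time + 600
# Outside the block the real boot time is back.
assert psutil.boot_time() == real_boot_time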
Example #59
0
def create_run_ensemble(model_state_list,
                        n_layers,
                        grad_clip_value=5,
                        seed=0,
                        num_epochs=20,
                        learning_rate=0.001,
                        data_dir='./data',
                        data_augmentations=None,
                        save_model_str=None,
                        init_channels=get('init_channels'),
                        batch_size=get('batch_size'),
                        genotype_class='PCDARTS'):
    
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    gpu = 'cuda:0'
    np.random.seed(seed)
    torch.cuda.set_device(gpu)
    cudnn.benchmark = True
    torch.manual_seed(seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(seed)
    logging.info('gpu device = %s' % gpu)
    logging.info('genotype_class = %s', genotype_class)

    if data_augmentations is None:
        # You can add any preprocessing/data augmentation you want here
        data_augmentations = transforms.ToTensor()
    elif isinstance(data_augmentations, list):
        data_augmentations = transforms.Compose(data_augmentations)
    elif not isinstance(data_augmentations, transforms.Compose):
        raise NotImplementedError

    train_dataset = K49(data_dir, True, data_augmentations)
    test_dataset = K49(data_dir, False, data_augmentations)
    # train_dataset = KMNIST(data_dir, True, data_augmentations)
    # test_dataset = KMNIST(data_dir, False, data_augmentations)
    # Make data batch iterable
    # Could modify the sampler to not uniformly random sample
    
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=batch_size,
                              shuffle=True)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             shuffle=False)

    genotype = getattr(genotypes, genotype_class)
    dataset = {'trn_features': [], 'test_features': []}   # per-model feature lists, filled in the loop below
    dims = []
    for i, model_state in enumerate(model_state_list):
        model = Network(init_channels, train_dataset.n_classes, n_layers, genotype)
        model.load_state_dict(torch.load(model_state))
        model.cuda()
        for p in model.parameters():
            p.requires_grad = False
        trn_labels = []
        trn_features = []
        if i == 0:
            for d,la in train_loader:
                o = model(Variable(d.cuda()))
                o = o.view(o.size(0),-1)
                trn_labels.extend(la)
                trn_features.extend(o.cpu().data)
            test_labels = []
            test_features = []
            for d,la in test_loader:
                o = model(Variable(d.cuda()))
                o = o.view(o.size(0),-1)
                test_labels.extend(la)
                test_features.extend(o.cpu().data)
            dataset['trn_labels'] = trn_labels
            dataset['test_labels'] = test_labels

        else:
            for d,la in train_loader:
                o = model(Variable(d.cuda()))
                o = o.view(o.size(0),-1)
                trn_features.extend(o.cpu().data)
            test_labels = []
            test_features = []
            for d,la in test_loader:
                o = model(Variable(d.cuda()))
                o = o.view(o.size(0),-1)
                test_features.extend(o.cpu().data)            
        dataset['trn_features'].append(trn_features)
        dims.append(dataset['trn_features'][i][0].size(0))
        dataset['test_features'].append(test_features)
    

    trn_feat_dset = FeaturesDataset(dataset['trn_features'][0], dataset['trn_features'][1], dataset['trn_features'][2], dataset['trn_labels'])
    test_feat_dset = FeaturesDataset(dataset['test_features'][0], dataset['test_features'][1], dataset['test_features'][2], dataset['test_labels'])
    trn_feat_loader = DataLoader(trn_feat_dset, batch_size=64, shuffle=True)
    test_feat_loader = DataLoader(test_feat_dset, batch_size=64)
    model = EnsembleModel(dims, out_size=train_dataset.n_classes)
    criterion = torch.nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=learning_rate,
                                momentum=0.9)
    
    for epoch in range(num_epochs):
        epoch_loss, epoch_accuracy = fit(epoch, model, trn_feat_loader, criterion, training=True)
        val_epoch_loss, val_epoch_accuracy = fit(epoch, model, test_feat_loader, criterion, training=False)


    if save_model_str:
        # Save the ensemble checkpoint; restore later via model.load_state_dict(torch.load(path))
        if not os.path.exists(save_model_str):
            os.mkdir(save_model_str)
        torch.save(model.state_dict(), os.path.join(save_model_str, time.ctime().replace(' ', '_')))
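create_run_ensemble relies on FeaturesDataset and EnsembleModel, neither of which appears in this snippet. A minimal sketch of what they could look like, under the assumption that the ensemble head is a single linear layer over the concatenated per-model features:

import torch
from torch.utils.data import Dataset

class FeaturesDataset(Dataset):
    # One item = the three per-model feature vectors of a sample plus its label.
    def __init__(self, feat0, feat1, feat2, labels):
        self.feat0, self.feat1, self.feat2 = feat0, feat1, feat2
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.feat0[idx], self.feat1[idx], self.feat2[idx], self.labels[idx]

class EnsembleModel(torch.nn.Module):
    # Linear classifier trained on the concatenated features of the frozen base models.
    def __init__(self, dims, out_size):
        super().__init__()
        self.fc = torch.nn.Linear(sum(dims), out_size)

    def forward(self, f0, f1, f2):
        return self.fc(torch.cat([f0, f1, f2], dim=1))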
Example #60
0
    params = parse_args()
    write_args(logger,params)
    use_numeric,upsampling_rate,target_col = params
    
    data = data_reader(data_path,air_path)
    X,Y = splitXY(data,['delay_time','is_claim','label','cancelled'])
    x_train,x_test,y_train,y_test = train_test_split(
        X,Y,test_size=0.1, random_state=SEED
    )
    #upsampling
    if upsampling_rate>0:
        indice = upsampling(y_train,upsampling_rate)
        x_train = x_train.iloc[indice]
        y_train = y_train.iloc[indice]
    #feature engineering:
    logger.writelines('{}: start feature engineering...\n'.format(time.ctime()))
    x_train = add_features(x_train)
    x_test = add_features(x_test)
    # get numeric columns
    if use_numeric:
        x_train, x_train_num = numeric_filter(x_train)
        x_test, x_test_num = numeric_filter(x_test)

    fitted_dfs,fitted_encoders,encoder_names = categ_encoder(
        x_train, y_train[target_col],cols=x_train.columns, encoders=('target','count')
    )    
    if use_numeric:
        fitted_dfs.append(x_train_num)

    train_feat = pd.concat(fitted_dfs,axis=1) 
    train_label = y_train[target_col]
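The helpers upsampling and categ_encoder are not shown in this snippet. A hedged sketch of the upsampling step only: the function name matches the call above, but the body and the assumed binary 'label' column are illustrative. It returns row positions with the positive class repeated, so that .iloc can materialise the oversampled training set.

import numpy as np

def upsampling(y, rate, label_col='label'):
    # y is the training label frame; rows where label_col == 1 are repeated `rate` extra times.
    labels = np.asarray(y[label_col])
    base = np.arange(len(labels))
    minority = base[labels == 1]
    return np.concatenate([base] + [minority] * int(rate))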