Example #1
def frameCount2timeCode (frames, fps = DEFAULT_FPS_VALUE):
    """
    Convert a frame count to a timecode string. Provide the frame rate in 'fps'; the default is 25 fps.
    This function is the inverse of timeCode2frameCount.
    
    >>> import Timecode
    
    >>> Timecode.frameCount2timeCode (0, 25)
    '00:00:00:00'
    
    >>> Timecode.frameCount2timeCode (1, 25)
    '00:00:00:01'
    
    >>> Timecode.frameCount2timeCode (25, 25)
    '00:00:01:00'
    
    >>> Timecode.frameCount2timeCode (24, 24)
    '00:00:01:00'
    
    >>> Timecode.frameCount2timeCode (2159999, 25)
    '23:59:59:24'
    
    >>> Timecode.frameCount2timeCode (2073599, 24)
    '23:59:59:23'
    
    """
    HH, MM, SS, FF = frameCount2timeCodeElements (frames, fps)
    return (string.zfill (HH, 2) + ":" + string.zfill (MM, 2) + ":"\
             + string.zfill (SS, 2) + ":" + string.zfill (FF, 2))
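The helper it calls is not shown here; a minimal sketch of what frameCount2timeCodeElements plausibly does (my own reconstruction, written only to satisfy the doctests above):

def frameCount2timeCodeElements (frames, fps = 25):
    # split the frame count into whole seconds and leftover frames,
    # then split the seconds into hours, minutes and seconds
    total_seconds, FF = divmod (frames, fps)
    total_minutes, SS = divmod (total_seconds, 60)
    HH, MM = divmod (total_minutes, 60)
    return HH, MM, SS, FF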
Example #2
File: cddb.py Project: Cinnz/python
 def _get_id(self, tracklist):
     # fill in self.id and self.toc.
     # if the argument is a string ending in .rdb, the part
     # up to the suffix is taken as the id.
     if type(tracklist) == type(""):
         if tracklist[-4:] == ".rdb":
             self.id = tracklist[:-4]
             self.toc = ""
             return
         t = []
         for i in range(2, len(tracklist), 4):
             t.append((None, (int(tracklist[i : i + 2]), int(tracklist[i + 2 : i + 4]))))
         tracklist = t
     ntracks = len(tracklist)
     self.id = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
     if ntracks <= _DB_ID_NTRACKS:
         nidtracks = ntracks
     else:
         nidtracks = _DB_ID_NTRACKS - 1
         min = 0
         sec = 0
         for track in tracklist:
             start, length = track
             min = min + length[0]
             sec = sec + length[1]
         min = min + sec / 60
         sec = sec % 60
         self.id = self.id + _dbid(min) + _dbid(sec)
     for i in range(nidtracks):
         start, length = tracklist[i]
         self.id = self.id + _dbid(length[0]) + _dbid(length[1])
     self.toc = string.zfill(ntracks, 2)
     for track in tracklist:
         start, length = track
         self.toc = self.toc + string.zfill(length[0], 2) + string.zfill(length[1], 2)
Example #3
 def display_time(self):
     length = len(self.TITLE)
     while True:
          if self.q == 1: # quit
             break
         if self.song_time >= 0 and self.douban.playingsong:
             minute = int(self.song_time) / 60
             sec = int(self.song_time) % 60
             show_time = string.zfill(str(minute), 2) + ':' + string.zfill(str(sec), 2)
              self.get_volume() # get the current volume
             self.TITLE = self.TITLE[:length - 1] + '  ' + self.douban.playingsong['kbps'] + 'kbps  ' + colored(show_time, 'cyan') + '  rate: ' + colored(self.rate[int(round(self.douban.playingsong['rating_avg'])) - 1], 'red') + '  vol: '
             if self.is_muted:
                 self.TITLE += '✖'
             else:
                 self.TITLE += self.volume.strip() + '%'
             if self.loop:
                 self.TITLE += '  ' + colored('↺', 'red')
             else:
                 self.TITLE += '  ' + colored('→', 'red')
             self.TITLE += '\r'
             self.display()
             if not self.pause:
                 self.song_time -= 1
         else:
             self.TITLE = self.TITLE[:length]
         time.sleep(1)
Example #4
def proc_meta(IPADDR, VIDEOID, DB):
    PROC = {"transfered": 0}
    metadata = DB[IPADDR][VIDEOID]
    PROC["status"] = get_value(metadata, "recMovieAiring.jsonForClient.video.state", "unknown")
    PROC["airdate"] = get_value(metadata, "recMovieAiring.jsonForClient.airDate", "")
    PROC["desc"] = get_value(metadata, "recMovie.jsonForClient.plot", "")
    PROC["title"] = get_value(metadata, "recMovie.jsonForClient.title", "")
    PROC["date"] = get_value(metadata, "recMovie.jsonForClient.releaseYear", "")
    PROC["series"] = get_value(metadata, "recSeries.jsonForClient.title", PROC["title"])
    PROC["season"] = get_value(metadata, "recEpisode.jsonForClient.seasonNumber", "0")
    PROC["episode"] = get_value(metadata, "recEpisode.jsonForClient.episodeNumber", "0")
    PROC["title"] = get_value(metadata, "recEpisode.jsonForClient.title", PROC["title"])
    PROC["desc"] = get_value(metadata, "recEpisode.jsonForClient.description", PROC["desc"])
    PROC["airdate"] = get_value(metadata, "recEpisode.jsonForClient.originalAirDate", PROC["airdate"])
    PROC["date"] = get_value(metadata, "recEpisode.jsonForClient.airDate", PROC["date"])
    PROC["status"] = get_value(metadata, "recEpisode.jsonForClient.video.state", PROC["status"])
    if metadata.has_key("recSeason"):  # is a TV show!!!!
        PROC["type"] = "tv"
        if string.zfill(PROC["episode"], 2) == "00":
            PROC["name"] = PROC["series"] + " - " + PROC["date"][:10]
        else:
            PROC["name"] = (
                PROC["series"] + " - S" + string.zfill(PROC["season"], 2) + "E" + string.zfill(PROC["episode"], 2)
            )
        if PROC["title"] != "":
            PROC["name"] = PROC["name"] + " - " + PROC["title"]
    else:  # is a Movie!!
        PROC["type"] = "movie"
        PROC["name"] = PROC["title"] + " (" + str(PROC["date"]) + ")"
    PROC["clean"] = clean(PROC["name"])
    return PROC
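To illustrate the naming branch above (hypothetical values, not from the source): a TV record with series "Show", season "3", episode "7" and title "Pilot" gives

# PROC["name"] == "Show - S03E07 - Pilot"
# while episode "00" falls back to the series name plus air date, e.g. "Show - 2016-09-01"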
Example #5
def main():
	# fix UnicodeDecodeError: 'ascii' 
	#mm1 = sys.getdefaultencoding()
	#mm2 = sys.stdin.encoding
	#mm3 = sys.stdout.encoding
	#print "python: %s, sys stdin: %s, sys stdout: %s" % (mm1, mm2, mm3)
	#reload(sys)
	#sys.setdefaultencoding('utf-8')
	try:
		opts,args = getopt.getopt(sys.argv[1:], "t:h", ["test", "help"]) 
	except getopt.GetoptError:
		usage()
	if (len(args) != 3):
		usage()
	firstTag = args[0]
	secondTag = args[1]
	ip = args[2]	

	testVal = validateOpts(opts)
	# Make Baseline DiffLog
	print 'Get changes between tags...'
	diffLogData = diffBetweenTag(firstTag, secondTag)
	if len(diffLogData) == 0:
		print "Maybe differ data Noe. do opertion with other baseline tag"
		sys.exit(-1)
		
	# Convert DiffLog to CSV Data
	print 'Building spreadsheet...'
	csvData = diffLog2csv(diffLogData, ip)
	if len(csvData) == 0:
		print "Maybe differ data Noe. do opertion with other baseline tag"
		sys.exit(-1)
		
	# Make Temporary CSV FILE
	csvTempFileName = "diffData.csv"
	try:
		wFile = open(csvTempFileName, 'w')
		wFile.write(csvData)
		wFile.close()
	except IOError:
		print "File operation failed:", csvTempFileName, "...aborting"
		sys.exit(-1)
		
	# Convert CSV to Excel
	print 'Saving file...'
	#time setting
	now = time.localtime()
	date = str(now[0])+ string.zfill(now[1], 2) + string.zfill(now[2], 2) + "_" + string.zfill(now[3], 2) + string.zfill(now[4], 2) + string.zfill(now[5], 2)

#	outputFileName = "Tag_Compare_Result.xls"# % (firstTag, secondTag)
	outputFileName = "Tag_Compare_Result_%s.xls" % date
#	outputFileName = "Tag_Compare_Result_%s_%s_%s.xls" % (firstTag.replace('/', '_'), secondTag.replace('/', '_'), date)
	csvData2xls(outputFileName, firstTag, secondTag)
		
	# Remove Temporary CSV FILE
	os.remove(csvTempFileName)
	
	#close
	mssqlCursor.close()
	mssqlDB.close()
Example #6
def makelogic(logic, startdate, enddate):

   #set up the logic for different dates
   date_logic=''
   if startdate: 
      d,m,y=salttime.breakdate(startdate)
      y=str(y)
      m=string.zfill(m, 2)
      d=string.zfill(d, 2)
      date_logic += "UTStart > '%s-%s-%s 12:00:01'" % (y,m,d)
   if startdate and enddate:
      date_logic += " and "
   if enddate:
      edate=salttime.getnextdate(enddate)
      d,m,y=salttime.breakdate(str(edate))
      y=str(y)
      m=string.zfill(m, 2)
      d=string.zfill(d, 2)
      date_logic += "UTStart < '%s-%s-%s 11:59:59'"  % (y,m,d)

   if logic and date_logic:
      logic = '('+logic+')' + ' and ' + date_logic
   else:
      # concatenation covers the cases where either part is empty
      logic = logic + date_logic
   return logic
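For illustration, assuming salttime.breakdate('20160201') returns day, month, year as (1, 2, 2016):

# makelogic("ObsMode='IMAGING'", '20160201', None)
# -> "(ObsMode='IMAGING') and UTStart > '2016-02-01 12:00:01'"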
Example #7
def calctfidf(fenci_list) :
    corpus = []  # holds the word-segmented text of each of the 100 documents
    for f_fc in fenci_list:
        f = open(f_fc,'r')
        content = f.read()
        f.close()
        corpus.append(content)

    vectorizer = CountVectorizer()    
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(vectorizer.fit_transform(corpus))
    
    word = vectorizer.get_feature_names() # the vocabulary across all documents
    weight = tfidf.toarray()              # the corresponding TF-IDF matrix

    if not os.path.exists(tfidfpath) :
        os.mkdir(tfidfpath)
 
    # write each document's per-word TF-IDF into the tfidf output folder
    tfidf_list=[]
    for i in range(len(weight)) :
        print u"Writing all the tf-idf into the",i,u" file into ",tfidfpath+'\\'+string.zfill(i,5)+'.txt',"--------"
        tfidffile=os.path.abspath(tfidfpath)+'\\'+string.zfill(i,5)+'.txt'
        tfidf_list.append(tfidffile)
        f = open(tfidffile,'w')
        for j in range(len(word)) :
            f.write(word[j]+"    "+str(weight[i][j])+"\n")
        f.close()
Example #8
    def savePictures(self, albumPath, pictures, comments=False):
        """
        Save a list of pictures.

        Args:
            albumPath: the path to the album in the directory tree.
            pictures: a list of pictures, where the first element is the url
                      and the second is a list of comments.
            comments: indicates whether to fetch the picture's comments or not.
        """
        myCounter = 1
        for pic in pictures:
            picName = string.zfill(myCounter, CONSTANT_FILL) + '_' + pic[1] + JPG
            fileName = os.path.join(albumPath, picName)
            picInfo = self.tnt.getPicture(pic[0], comments)
            if not os.path.exists(fileName):
                if self.console:
                    print '| Downloading photo ' + picName + '...'
                urllib.urlretrieve(picInfo[0], fileName)

            commentsFileName = string.zfill(myCounter, CONSTANT_FILL) + '_' + pic[1] + TXT
            if comments and not os.path.exists(commentsFileName) and picInfo[1] != []:
                if self.console:
                    print '| Downloading its comments...'
                file2write = open(commentsFileName, 'w')
                for comment in picInfo[1]:
                    file2write.write('******************\r\n')
                    file2write.write(comment[0].encode('utf-8') + ' (' + comment[1].encode('utf-8') + '):\r\n')
                    file2write.write(comment[2].encode('utf-8') + '\r\n')
                file2write.close()

            myCounter += 1
            sleep(0.5)
Example #9
File: coord.py Project: OSSOS/MOP
    def __segs__(self, ang, precision=1):
        """given an angle, convert it to a segisdecimal string"""
        sign = "+"
        if ang < 0:
            sign = "-"
            ang = ang * -1.0000000000000
        ang += 1E-11

        _d = math.floor(ang)
        _mf = (ang - _d) * 60.00
        _m = math.floor(_mf)
        _s = (_mf - _m) * 60.00
        _is = math.floor(_s)
        _fs = math.floor((_s - _is) * 10.00 ** precision)  # 10**precision keeps 'precision' fractional digits

        _d = "%.0f" % ( _d)
        _m = "%.0f" % ( _m)
        _is = "%.0f" % (_is)
        _fs = int(_fs)

        _sd = string.zfill(_d, 2)
        _sm = string.zfill(_m, 2)
        _ss = string.zfill(_is, 2)
        _sf = string.zfill(_fs, precision)

        s = sign + "%s:%s:%s.%s" % ( _sd, _sm, _ss, _sf)
        return s
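A quick worked example (my own, not from the project): 1.5 degrees at the default precision splits into 1 degree, 30 minutes, 0.0 seconds:

# self.__segs__(1.5)   -> '+01:30:00.0'
# self.__segs__(-1.5)  -> '-01:30:00.0'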
Example #10
def plot_skin_temp(file_name, cntr_lvl=None, save_frames=False):
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon'].get_value()
    lat = vars['lat'].get_value()
    skin_temp_var = vars['skin_temp']
    skin_temp = skin_temp_var.get_value()[0]
    valid_date = str(vars['valid_date'].get_value()[0])
    valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
    valid_when = valid_date[6:] + ' ' \
      + cardinal_2_month(int(valid_date[4:6])) + ' ' \
      + valid_date[0:4] \
      + ' ' + valid_time[:2] + ':' \
      + valid_time[2:] + ' UTC'
    m = set_default_basemap(lon,lat)
    # must plot using 2d lon and lat
    LON, LAT = p.meshgrid(lon,lat)
    p.figure()
    if cntr_lvl is not None:
        m.contourf(LON,LAT,skin_temp, cntr_lvl)
    else:
        m.contourf(LON,LAT,skin_temp)
    m.drawcoastlines()
    m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
    m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
    p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=70)
    title_string = 'Skin temperature (K) valid at' \
      + '\n' + valid_when + ' ' \
      + ' from LAPS'
    p.title(title_string)
    if save_frames:
        p.savefig('frames/frame_' + zfill(str(frame_number),3) +'_skin_temp_' + str(int(lvl[lvl_idx])) + '.png')
    return 
Example #11
def RGBtoHex(color):
	"""\
	Convert float (R, G, B) tuple to RRGGBB hex value (without #).
	"""
	import string
	
	return (string.zfill(str(hex(int(color[0] * 255))[2:]), 2)
	        + string.zfill(str(hex(int(color[1] * 255))[2:]), 2)
	        + string.zfill(str(hex(int(color[2] * 255))[2:]), 2))
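For example (illustration only), full red, half green, no blue:

# RGBtoHex((1.0, 0.5, 0.0)) -> 'ff7f00'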
Example #12
 def resnum(self,uniqueid):
     """Given a uniqueid this function returns the residue number"""
     import string
     if len(string.split(uniqueid,','))>1:
         return string.split(uniqueid,':')[0]+':'+string.zfill(string.split(uniqueid,':')[1],self.length_of_residue_numbers)+','+string.split(uniqueid,',')[1]
     else:
         return string.split(uniqueid,':')[0]+':'+string.zfill(string.split(uniqueid,':')[1],self.length_of_residue_numbers)
Example #13
    def Save(self):
        # Make a backup of the file being opened, if it exists
        fn = os.path.split(self.dso_filename)
        t=time.localtime(time.time())
        fnn=str(t[0])+" "+string.zfill(str(t[1]),2)+" "+string.zfill(str(t[2]),2)\
                +" "+string.zfill(str(t[3]),2)+string.zfill(str(t[4]),2)\
                +string.zfill(str(t[5]),2)+" "+fn[1]
        backfn = os.path.join(fn[0],fnn)
        #print "backfn=",backfn
        if os.path.exists(self.dso_filename):
            shutil.copyfile(self.dso_filename,backfn)
            print u"Была создана резервная копия файла ",self.dso_filename, u"под именем ", backfn
        else:
            sys.exit(app.exec_())

        f_out = open(self.dso_filename,'wb')
        # Loop over the records and write them out
        current=self.Buffer.first
        while 1:
            if current == None:
                break  # leave the loop so f_out gets closed below
            #print current.value
            dso_tools.WriteRecord2(f_out, current.value, self.ini_data)
            current=current.next
        f_out.close()
Example #14
def advance(ntstep):
    for i in range(0,ntstep):
        print 'timestep = ',i+1
      
        esbgk1.advance(numIter)
        print 'converged';
        esbgk1.updateTime()
        
        if ((i+1)%output_Coeff == 0) :
            if(fgamma>0):
                coeff=macroFields.coeff[cellSites].asNumPyArray()
                print 'BGK:',coeff[cellno,0],'cx^2',coeff[cellno,1],'cx',coeff[cellno,2]            
                
            if(fgamma==2):
                coeffg=macroFields.coeffg[cellSites].asNumPyArray()
                print 'ESBGK:',coeffg[cellno,0],'cx^2',coeffg[cellno,1],'cx',coeffg[cellno,2]
                print '     :','cy^2',coeffg[cellno,3],'cy',coeffg[cellno,4],'cz^2',coeffg[cellno,5],'cz',coeffg[cellno,6]
                print 'cxcy',coeffg[cellno,7],'cxcz',coeffg[cellno,8],'cycz',coeffg[cellno,9]
                
        
        if ((i+1)%output_interval == 0) :
            """
            dens=macroFields.density[cellSites].asNumPyArray()
            print 'density',dens[105],dens[115],dens[125],dens[135]
            press=macroFields.pressure[cellSites].asNumPyArray()
            print 'pressure',press[105],press[115],press[125],press[135]
            """
            dsfname = "output_"+string.zfill(str(i+1),5)+".dat"
           #esbgk1.OutputDsfBLOCK(dsfname)
            filename = "macro_"+string.zfill(str(i+1),5)+".dat"
            tecplotESBGK.esbgkTecplotFile(meshes,macroFields,filename)
Example #15
def main():
    print "Recuperation des IPs en cours"    
    #GetIP
    f=open("IPList.txt","r")
    lIP=f.readlines()
    lIP.sort()
    #print lIP
    f.close()
    #lIP=["212.95.68.69"]
    #Check IP one by one
    # wget --header="accept: application/json" http://arte.tv/artews/services/geolocation?ip=84.154.153.58 to change header (json/xml)
    #javascript http://www.arte.tv/artews/js/geolocation.js

    inc=0
    for i in range(10):
        for IP in lIP:
            inc=inc+1
            IP=string.replace(IP,"\n","")
            print string.zfill(inc,0)+"/ "+IP
            opener=urllib2.build_opener()
            opener.addheaders=[('accept','application/json')] # FORMAT JSON
            #opener.addheaders=[('accept','application/xml')] # FORMAT XML 
            #os.system("wget http://degas.preprod.arte.tv/artews/services/geolocation?ip="+IP+" --output-document="+IP+".xml")

            #response=opener.open("http://arte.tv/artews/services/geolocation?ip="+IP).read()
            response=opener.open("http://degas.preprod.arte.tv/artews/services/geolocation?ip="+IP).read()
            #response=opener.open("http://degas.preprod.arte.tv/artews/services/geolocation").read()
            print response+"\n"
            opener.close()    
Example #16
    def __init__(self):
        if not pathExists('/etc/enigma2/lamedb'):
            return
        file = open('/etc/enigma2/lamedb')
        readlines = file.readlines()
        f_service = False
        i = 0
        for n in xrange(0, len(readlines)):
            if readlines[n] == 'services\n':
                f_service = True
                continue
            if not f_service:
                continue
            if readlines[n] == 'end\n':
                break
            if i == 0:
                referens = [ x.upper() for x in readlines[n].split(':') ]
                if referens[0] == 'S':
                    serviceid = zfill(referens[4], 4) + ':' + zfill(referens[7], 8) + ':' + zfill(referens[5], 4) + ':' + zfill(referens[6], 4)
                else:
                    serviceid = referens[0] + ':' + referens[1] + ':' + referens[2] + ':' + referens[3]
            if i == 2:
                provider = readlines[n].split(':')[1].split(',')[0].rstrip('\n')
            i += 1
            if i == 3:
                i = 0
                self.CashServiceList[serviceid] = provider

        file.close()
Example #17
def addPerson(first_name=None, last_name=None,
              start_date=None, default_birthplace_address_city=None,
              default_address_text=None, description=None, 
              function=None, **kw):
  """
    This creates a single temporary person with all appropriate parameters
  """
  global result_list
  global uid
  if not (first_name or last_name):
    return
  uid_string = 'new_%s' % zfill(uid, 3)
  if listbox is not None:
    # Use input parameters instead of default
    # if available in listbox
    line = listbox[zfill(uid, 3)]
    if line.has_key('last_name') and line.has_key('first_name') :
      first_name = line['first_name']
      last_name = line['last_name']
  person = context.newContent(
    portal_type='Person',
    uid=uid_string,
    first_name=first_name,
    last_name=last_name,
    start_date=start_date,
    default_birthplace_address_city = default_birthplace_address_city,
    default_address_text=default_address_text,
    function=function,
    description=description,
    temp_object=1,
    is_indexable=0,
  )
  result_list.append(person)
  uid += 1
Example #18
def Tfidf(filelist) :
    path = 'D:\\anaconda project\TEST1\\'
    corpus = []  # holds the word-segmented text of each of the 100 documents
    for ff in filelist :
        fname = path + ff+"-seg.txt"
        f = open(fname,'r+')
        content = f.read()
        f.close()
        corpus.append(content)

    vectorizer = CountVectorizer()
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(vectorizer.fit_transform(corpus))

    word = vectorizer.get_feature_names() # the vocabulary across all documents
    weight = tfidf.toarray()              # the corresponding TF-IDF matrix

    sFilePath = 'D:\\anaconda project\TEST2\\'
    if not os.path.exists(sFilePath) :
        os.mkdir(sFilePath)

    # write each document's per-word TF-IDF into the output folder
    for i in range(len(weight)) :
        print u"--Writing all the tf-idf in the",i,u" file into ",sFilePath+'\\'+string.zfill(i,5)+'.txt',"--"
        f = open(sFilePath+'/'+string.zfill(i,5)+'.txt','w+')
        for j in range(len(word)) :
            f.write(word[j]+"    "+str(weight[i][j])+"\n")
        f.close()
Example #19
 def __init__(self, parent_window = None, date = None, entry=None):
     Dialog.__init__(self, "Calendar", parent_window, 0,)
     self.set_position(gtk.WIN_POS_MOUSE)
     hbox = gtk.HBox(False, 8)
     hbox.set_border_width(8)
     self.vbox.pack_start(hbox, 1, False, 0)
     self.date = date
     calendar = gtk.Calendar()
     calendar.connect('day_selected_double_click', self.on_calendar_double_click)
      if date != None and date != "":
         calendar.select_day(int(date[0:2]))
         calendar.select_month(int(date[3:5])-1,int(date[6:10]))
     hbox.pack_start(calendar, True, True, 0)
     self.set_default_response(gtk.RESPONSE_CANCEL)
     if entry is not None:
         self.set_decorated(False)
         rect = entry.get_allocation()
         wx, wy = entry.get_size_request()
         win = entry.get_parent_window()
         tx, ty = win.get_position()
         self.move(rect.x + tx, rect.y + ty + wy)
         parent = entry
         while not isinstance(parent, gtk.Window):
             parent = parent.get_parent()
         self.set_transient_for(parent)
     self.show_all()
     calendar.grab_focus()
     response = self.run()
     if response == gtk.RESPONSE_OK:
         self.destroy()
         self.date = calendar.get_date()
         self.date = str(zfill(self.date[2],2)) +"/"+ str(zfill(self.date[1] +1,2))+"/"+ str(zfill(self.date[0],4))
     else:
         self.destroy()
Example #20
def getTimeAsString(spec,time):

    if spec in ('%G','%eG'):
        imo = time.month
        specstr = _monthListUpper[imo-1]
    elif spec in ('%H','%eH'):
        specstr = str(time.hour)
    elif spec in ('%M','%eM'):
        specstr = str(time.minute)
    elif spec in ('%S','%eS'):
        specstr = str(int(time.second))
    elif spec in ('%Y','%eY'):
        specstr = string.zfill(str(time.year),4)
    elif spec in ('%d','%ed'):
        specstr = str(time.day)
    elif spec in ('%f','%ef'):
        specstr = string.zfill(str(time.day),2)
    elif spec in ('%g','%eg'):
        imo = time.month
        specstr = _monthListLower[imo-1]
    elif spec in ('%h','%eh'):
        specstr = string.zfill(str(time.hour),2)
    elif spec in ('%m','%em'):
        specstr = str(time.month)
    elif spec in ('%n','%en'):
        specstr = string.zfill(str(time.month),2)
    elif spec in ('%y','%ey'):
        specstr = string.zfill(str(time.year%100),2)
    elif spec in ('%z','%ez'):
        specstr = getTimeAsString('%H',time)+'Z'+getTimeAsString('%Y',time)+getTimeAsString('%n',time)+getTimeAsString('%d',time)
    return specstr
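For illustration, assuming time is a component-time object with integer fields (hour 12, year 2001, month 10, day 15), the '%z' spec composes the unpadded hour, a literal 'Z', the zero-filled year and month, and the unpadded day:

# getTimeAsString('%z', time) -> '12Z20011015'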
Example #21
def main():

    initial    = 10     # Size of the smallest graph
    final      = 10**5  # Size of the largest graph
    resolution = 10     # Number of graphs per 10 fold increase in vertex size 
    mult       = math.pow(initial, 1.0/resolution) # Per-step growth factor (initial is 10, so 10**(1/resolution))

    for filename in sys.argv[1:]:
        n = initial
        if(filename.endswith(".edges")):
            fname = filename[:-6]
            print fname
        else:
            continue
        
        # Read all the nodes
        nodefile = open(fname + ".nodes")
        in_nodes = nodefile.readlines()
        nodefile.close()

        # Read all the edges
        edgefile = open(fname + ".edges")
        in_edges = edgefile.readlines()
        edgefile.close()
    
        for count in range(1 + resolution * int(math.log(final/initial, resolution))):
            if( len(in_nodes)  < int(round(n))):
                print >>sys.stderr, "Not enough nodes", len(in_nodes), "<" ,int(round(n)) 
                break
            out_nodefile = "subgraph/"+fname+string.zfill(count, 2)+".nodes"
            out_edgefile = "subgraph/"+fname+string.zfill(count, 2)+".edges"
            print string.zfill(int(n), 6)
            print out_nodefile, out_edgefile
            generate_subgraph(int(round(n)), in_nodes, in_edges, out_nodefile, out_edgefile)
            n *= mult
Example #22
def MakeDancingShoes(glyphnames):
	
	# Your features, in the order you want them in the font
	features = ('aalt', 'locl', 'numr', 'dnom', 'frac', 'tnum', 'smcp', 'case', 'calt', 'liga', 'ss01', 'ss02', 'ss03')
	
	# Initialize DancingShoes object, hand over glyph names and default features
	shoes = DancingShoes(glyphnames, features)
	
	
	# Stylistic Sets
	for i in range(20):
		shoes.AddSimpleSubstitutionFeature('ss' + str(string.zfill(i, 2)), '.ss' + str(string.zfill(i, 2)))
	
	# Add direct substitutions
	directsubstitutions = (
		('smcp', '.sc'),
		('case', '.case'),
		('tnum', '.tf'),
		('ss01', '.ss01'),
		('ss02', '.ss02'),
		('ss03', '.ss03'),
		)
	for feature, ending in directsubstitutions:
		shoes.AddSimpleSubstitutionFeature(feature, ending)

	# Arabic
	if shoes.HasGroups(['.init']):
		shoes.AddEndingToBothClasses('init', '.init')
		shoes.AddSubstitution('init', "@init_source", "@init_target", 'arab', '', 'RightToLeft')


	# You can write contextual code for your script fonts using your own glyph name endings
	if shoes.HasGroups(['.initial', '.final']):
		# Add contextual substitution magic here
		for target in shoes.GlyphsInGroup('.initial'):
			shoes.AddGlyphsToClass('@initialcontext', ('a', 'b', 'c'))
			shoes.AddSubstitution('calt', "@initialcontext %s'" % (shoes.SourceGlyphFromTarget(target)), target)
	
	# You can theoretically write your own kern feature (which FontLab can also do for you upon font generation):
	shoes.AddPairPositioning('kern', 'T A', -30)
	shoes.AddPairPositioning('kern', 'uniFEAD uniFEEB', (-30, 0, -60, 0), 'arab', '', 'RightToLeft')

	# From CSV file
	csvfile = "../substitutions.csv"
	for feature, source, target, script, language, lookupflag, comment in SubstitutionsFromCSV(csvfile):
		shoes.AddSubstitution(feature, source, target, script, language, lookupflag, comment)
	
	# Uppercase Spacing
	uppercaseletters = ['A', 'B', 'C', 'D', 'E']
	for uppercaseletter in uppercaseletters:
		if shoes.HasGlyphs(uppercaseletter):
			shoes.AddGlyphsToClass('@uppercaseLetters', uppercaseletter)
	if shoes.HasClasses('@uppercaseLetters'):
		shoes.AddSinglePositioning('cpsp', '@uppercaseLetters', (5, 0, 10, 0))
	

	shoes.DuplicateFeature('hist', 'ss20')
	
	return shoes
Example #23
	def OnPlannerDateChange(self, widget=None):
		''' Date is changed - reload the planner '''

		year, month, date = widget.get_date()
		mydate = str(year)+"-"+string.zfill(str(month+1),2)+"-"+string.zfill(str(date),2)
		print "Selected "+mydate
		self.planner_date = mydate
		self.OnLoadPlanner()
Example #24
def sag_renderSetup_timer( startTime ):
        endTime = time.clock()	
        secs = int( (endTime - startTime) % 60 )
        hours = int( (endTime - startTime - secs) / 3600 )
        mins = int( (endTime - startTime - secs - hours * 3600) / 60 )
        duration = zfill( str( hours ), 2 ) + ':' + zfill( str( mins ), 2 ) + ':' + zfill( str( secs ), 2 )

        return duration
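A worked check (my own): a span of 3725 seconds gives secs = 5, hours = 1, mins = 2, so the function returns '01:02:05'.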
Example #25
def iso_time(sep = ":"):
	"""Return the current time in ISO-standard format
	"""
	year, month, day, hour, minute, second, weekday, julianday, dst = time.localtime(time.time())
	hour_text_AsPaddedText   = string.zfill(hour  , 2)
	minute_text_AsPaddedText  = string.zfill(minute, 2)
	second_text_AsPaddedText  = string.zfill(second, 2)
	return  hour_text_AsPaddedText +sep +minute_text_AsPaddedText +sep +second_text_AsPaddedText
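An equivalent sketch using time.strftime (my own alternative for comparison, not part of the source), for the default ':' separator:

import time

def iso_time_strftime():
	# same output as iso_time() with the default separator
	return time.strftime("%H:%M:%S", time.localtime())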
Example #26
	def str_date(d):
		"""Make a sad date string.
		Converts a date object into a string vaguely similar to the one 
		returned by the twitter API, which is then reconverted with the
		date_str method. sigh... all a bit futile really.
		"""
		return "xxx %s %s %s:%s:00 +0000 %s" % (HappyDate.months[d.month], 
			zfill(d.day, 2), zfill(d.hour, 2), zfill(d.minute, 2), d.year)
Example #27
def bd(url,PostBegin,PostEnd):
    for i in range(PostBegin, PostEnd ,50):
        sName = string.zfill(i,6) + '.html'
        print 'working..... ' + sName
        f = open(sName, 'w+')  # sName is already zero-padded
        m = urllib.urlopen(url + str(i)).read()
        f.write(m)
        f.close()
Example #28
def iso_date(sep = "-"):
	"""Return the current date in ISO-standard format
	"""
	year, month, day, hour, minute, second, weekday, julianday, dst = time.localtime(time.time())
	year_AsPaddedText  = string.zfill(year , 4)
	month_AsPaddedText  = string.zfill(month, 2)
	day_AsPaddedText   = string.zfill(day  , 2)
	return year_AsPaddedText +sep +month_AsPaddedText +sep +day_AsPaddedText
Example #29
def jd2gd(jd, verbose=False):

    """Task to convert a list of julian dates to gregorian dates
    description at http://mathforum.org/library/drmath/view/51907.html
    Original algorithm in Jean Meeus, "Astronomical Formulae for
    Calculators"

    2009-02-15 13:36 IJC: Converted to importable, callable function
    """
   
    jd=jd+0.5
    Z=int(jd)
    F=jd-Z
    alpha=int((Z-1867216.25)/36524.25)
    A=Z + 1 + alpha - int(alpha/4)

    B = A + 1524
    C = int( (B-122.1)/365.25)
    D = int( 365.25*C )
    E = int( (B-D)/30.6001 )

    dd = B - D - int(30.6001*E) + F

    if E<13.5: mm=E-1

    if E>13.5: mm=E-13

    if mm>2.5: yyyy=C-4716

    if mm<2.5: yyyy=C-4715

    months=["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
    daylist=[31,28,31,30,31,30,31,31,30,31,30,31]
    daylist2=[31,29,31,30,31,30,31,31,30,31,30,31]

    h=int((dd-int(dd))*24)
    min=int((((dd-int(dd))*24)-h)*60)
    sec=86400*(dd-int(dd))-h*3600-min*60

    # Now calculate the fractional year. Do we have a leap year?
    if (yyyy%4 != 0): days=daylist
    elif (yyyy%400 == 0): days=daylist2
    elif (yyyy%100 == 0): days=daylist
    else: days=daylist2

    hh = 24.0*(dd % 1.0)
    min = 60.0*(hh % 1.0)
    sec = 60.0*(min % 1.0)

    dd =  dd-(dd%1.0)
    hh =  hh-(hh%1.0)
    min =  min-(min%1.0)

    if verbose:
        print(str(jd)+" = "+str(months[mm-1])+ ',' + str(dd) +',' +str(yyyy))
        print(string.zfill(h,2)+":"+string.zfill(min,2)+":"+string.zfill(sec,2)+" UTC")

    return (yyyy, mm, dd, hh, min, sec)
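A worked check against the algorithm above: the J2000 epoch, JD 2451545.0, lands on noon of 1 January 2000:

# jd2gd(2451545.0) -> (2000, 1, 1.0, 12.0, 0.0, 0.0)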
Example #30
def date_to_age(day, month, year, fix_date="today"):
    """Convert a date into an age (relative to a fix date)

  USAGE:
    age = date_to_age(day, month, year)
    age = date_to_age(day, month, year, fix_date)

  ARGUMENTS:
    day       Day value (integer number)
    month     Month value (integer number)
    year      Year value (integer number)
    fix_date  The date relative for which the age is computed. Can be a date
              tuple, the string 'today' (which is the default), or an integer
              (epoch day number)

  DESCRIPTION:
    Returns the age in years as a positive floating-point number.
    If the date is after the fix date a negative age is returned.
  """

    # Check if fix date is given, otherwise calculate it  - - - - - - - - - - - -
    #
    if fix_date == "today":
        sys_time = time.localtime(time.time())  # Get current system date
        fix_day = string.zfill(str(sys_time[2]), 2)
        fix_month = string.zfill(str(sys_time[1]), 2)
        fix_year = str(sys_time[0])

    elif isinstance(fix_date, list) and (len(fix_date) == 3):
        fix_day = string.zfill(str(fix_date[0]), 2)
        fix_month = string.zfill(str(fix_date[1]), 2)
        fix_year = str(fix_date[2])

    elif isinstance(fix_date, int):
        fix_epoch = fix_date

    else:
        print 'error:"fix_date" is not in a valid form: %s' % (str(fix_date))
        raise Exception

    # Get epoch number for input date and fix date  - - - - - - - - - - - - - - -
    #
    date_epoch = date_to_epoch(day, month, year)

    if not isinstance(fix_date, int):
        fix_epoch = date_to_epoch(fix_day, fix_month, fix_year)

    day_diff = fix_epoch - date_epoch  # Get day difference

    # Compute approximate age - - - - - - - - - - - - - - - - - - - - - - - - - -
    #
    age = float(day_diff) / 365.25  # Can be positive or negative

    # A log message for high volume log output (level 3)  - - - - - - - - - - - -
    #
    print "3:    Date: %s with fix date: %s -> Age: %.3f" % (str([day, month, year]), str(fix_date), age)

    return age
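A quick illustration (assuming date_to_epoch returns an epoch day number): 1 January 2000 against a fix date of 1 January 2010 spans 3653 days, so

# date_to_age(1, 1, 2000, [1, 1, 2010]) -> 3653 / 365.25 ~= 10.0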
Example #31
#========================================
# author: changlong.zang
#   mail: [email protected]
#   date: Thu, 01 Sep 2016, 14:14:10
#========================================
import os, re, string, itertools
#--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
VERSION_PRECISION = 3

DEFAULT_FILE_EXT = 'ma'

VERSION_MATCH_PATTERN = '(?<=v)\d{{{0}}}(?=\.)'.format(VERSION_PRECISION)

FIRST_VERSION_FILE = 'Name_v{0}.{1}'.format(string.zfill(1, VERSION_PRECISION),
                                            DEFAULT_FILE_EXT)


def get_file_version(filePath):
    '''
    Get file's version...
    E.g.:
        D:/test/name_v001.ma - 001
        D:/test/name_v002.ma - 002
        ...
    '''
    version = re.search(VERSION_MATCH_PATTERN, os.path.basename(filePath))
    if version:
        return version.group()


def get_file_list(path, kWords=None, ext=None):
Example #32
request = context.REQUEST
num = 0            # running counter used for the generated uids below
listbox_lines = [] # temp objects collected and returned at the end

# Get spreadsheet data
try:
    spreadsheets = request['ooo_import_spreadsheet_data']
except KeyError:
    return []

for spreadsheet in spreadsheets.keys():
    # In the case of empty spreadsheet do nothing
    if spreadsheets[spreadsheet] not in (None, []):
        column_name_list = spreadsheets[spreadsheet][0]

        for column in column_name_list:
            safe_id = context.Base_getSafeIdFromString('%s%s' %
                                                       (spreadsheet, column))
            num += 1
            # int_len is used to fill the uid of the created object like 0000001
            int_len = 7
            o = newTempBase(context, safe_id)
            o.setUid('new_%s' %
                     zfill(num, int_len))  # XXX There is a security issue here
            o.edit(uid='new_%s' %
                   zfill(num, int_len))  # XXX There is a security issue here
            o.edit(id=safe_id,
                   spreadsheet_name=spreadsheet,
                   spreadsheet_column=column)
            listbox_lines.append(o)

return listbox_lines
Example #33
    def _get_pehash(self, exe):
        #image characteristics
        img_chars = bitstring.BitArray(hex(exe.FILE_HEADER.Characteristics))
        #pad to 16 bits
        img_chars = bitstring.BitArray(bytes=img_chars.tobytes())
        img_chars_xor = img_chars[0:7] ^ img_chars[8:15]

        #start to build pehash
        pehash_bin = bitstring.BitArray(img_chars_xor)

        #subsystem -
        sub_chars = bitstring.BitArray(hex(exe.FILE_HEADER.Machine))
        #pad to 16 bits
        sub_chars = bitstring.BitArray(bytes=sub_chars.tobytes())
        sub_chars_xor = sub_chars[0:7] ^ sub_chars[8:15]
        pehash_bin.append(sub_chars_xor)

        #Stack Commit Size
        stk_size = bitstring.BitArray(hex(exe.OPTIONAL_HEADER.SizeOfStackCommit))
        stk_size_bits = string.zfill(stk_size.bin, 32)
        #now xor the bits
        stk_size = bitstring.BitArray(bin=stk_size_bits)
        stk_size_xor = stk_size[8:15] ^ stk_size[16:23] ^ stk_size[24:31]
        #pad to 8 bits
        stk_size_xor = bitstring.BitArray(bytes=stk_size_xor.tobytes())
        pehash_bin.append(stk_size_xor)

        #Heap Commit Size
        hp_size = bitstring.BitArray(hex(exe.OPTIONAL_HEADER.SizeOfHeapCommit))
        hp_size_bits = string.zfill(hp_size.bin, 32)
        #now xor the bits
        hp_size = bitstring.BitArray(bin=hp_size_bits)
        hp_size_xor = hp_size[8:15] ^ hp_size[16:23] ^ hp_size[24:31]
        #pad to 8 bits
        hp_size_xor = bitstring.BitArray(bytes=hp_size_xor.tobytes())
        pehash_bin.append(hp_size_xor)

        #Section chars
        for section in exe.sections:
            # virtual address
            sect_va =  bitstring.BitArray(hex(section.VirtualAddress))
            sect_va = bitstring.BitArray(bytes=sect_va.tobytes())
            pehash_bin.append(sect_va)

            #rawsize
            sect_rs =  bitstring.BitArray(hex(section.SizeOfRawData))
            sect_rs = bitstring.BitArray(bytes=sect_rs.tobytes())
            sect_rs_bits = string.zfill(sect_rs.bin, 32)
            sect_rs = bitstring.BitArray(bin=sect_rs_bits)
            sect_rs = bitstring.BitArray(bytes=sect_rs.tobytes())
            sect_rs_bits = sect_rs[8:31]
            pehash_bin.append(sect_rs_bits)

            #section chars
            sect_chars =  bitstring.BitArray(hex(section.Characteristics))
            sect_chars = bitstring.BitArray(bytes=sect_chars.tobytes())
            sect_chars_xor = sect_chars[16:23] ^ sect_chars[24:31]
            pehash_bin.append(sect_chars_xor)

            # entropy calculation
            address = section.VirtualAddress
            size = section.SizeOfRawData
            raw = exe.write()[address+size:]
            if size == 0:
                kolmog = bitstring.BitArray(float=1, length=32)
                pehash_bin.append(kolmog[0:7])
                continue
            bz2_raw = bz2.compress(raw)
            bz2_size = len(bz2_raw)
            #k = round(bz2_size / size, 5)
            k = bz2_size / size
            kolmog = bitstring.BitArray(float=k, length=32)
            pehash_bin.append(kolmog[0:7])

        m = hashlib.sha1()
        m.update(pehash_bin.tobytes())
        output = m.hexdigest()
        self._add_result('PEhash value', "%s" % output, {'Value': output})
Example #34
    img_chars = bitstring.BitArray(bytes=img_chars.tobytes())
    img_chars_xor = img_chars[0:7] ^ img_chars[8:15]

    #start to build pehash
    pehash_bin = bitstring.BitArray(img_chars_xor)

    #subsystem -
    sub_chars = bitstring.BitArray(hex(exe.FILE_HEADER.Machine))
    #pad to 16 bits
    sub_chars = bitstring.BitArray(bytes=sub_chars.tobytes())
    sub_chars_xor = sub_chars[0:7] ^ sub_chars[8:15]
    pehash_bin.append(sub_chars_xor)

    #Stack Commit Size
    stk_size = bitstring.BitArray(hex(exe.OPTIONAL_HEADER.SizeOfStackCommit))
    stk_size_bits = string.zfill(stk_size.bin, 32)
    #now xor the bits
    stk_size = bitstring.BitArray(bin=stk_size_bits)
    stk_size_xor = stk_size[8:15] ^ stk_size[16:23] ^ stk_size[24:31]
    #pad to 8 bits
    stk_size_xor = bitstring.BitArray(bytes=stk_size_xor.tobytes())
    pehash_bin.append(stk_size_xor)

    #Heap Commit Size
    hp_size = bitstring.BitArray(hex(exe.OPTIONAL_HEADER.SizeOfHeapCommit))
    hp_size_bits = string.zfill(hp_size.bin, 32)
    #now xor the bits
    hp_size = bitstring.BitArray(bin=hp_size_bits)
    hp_size_xor = hp_size[8:15] ^ hp_size[16:23] ^ hp_size[24:31]
    #pad to 8 bits
    hp_size_xor = bitstring.BitArray(bytes=hp_size_xor.tobytes())
Example #35
def notzero(file):
    if os.stat(file)[stat.ST_SIZE]:
        return 1
    else:
        if verbose: print 'size is zero ...',
        return 0

print 'Everytime this program is started it begins counting from 0.'
i = 0
oldlocalfile = ''
while 1:
    recenttime = time.time()
    head, tail = os.path.split(camurl)
    root, ext = os.path.splitext(tail)
    localfile = root + '_' + string.zfill(i, 5) + ext
    try:
        if verbose: print time.asctime(time.localtime(time.time()))[11:19], 'retrieving image ...',
        urllib.urlretrieve(camurl, localfile)
        if notzero(localfile) and valid(localfile) and diff(oldlocalfile, localfile):
            oldlocalfile = localfile
            i = i + 1
            if verbose: print 'got a new one!  Saved as', localfile
        else:
            os.remove(localfile)
            if verbose: print 'image deleted'
    except:
        import sys
        type, value = sys.exc_info()[:2]
        print 'webcam-saver.py: ' + str(type) + ':', value
    while time.time() - recenttime < interval:
        pass  # idle until the next capture interval
Example #36
def Display_Players_Stats ( Min_Total_Games, Mean_Power_Ratings, Powers, HTML_Background_Color ) :
#
# Displays the Players' Overall Ratings and Stats.  A Minimum Total # of Games
# is required to actually be considered "Rated"
#
     Index = { }
     for Player in All_Players.keys() :
          if All_Players[ Player ][ 'Overall Rating' ] == 'NR' :
               Index[ '---- ' + Player ] = Player
          elif All_Players[ Player ][ 'Total Games' ] < Min_Total_Games :
               Index[ '-' + string.zfill( int( round( All_Players[ Player ][ 'Overall Rating' ], 0 ) ), 4 ) + ' ' + Player ] = Player
          else :
               Index[ string.zfill( int( round( All_Players[ Player ][ 'Overall Rating' ], 0 ) ), 4 ) + ' ' + Player ] = Player
     Sort_Index = Index.keys()
     Sort_Index.sort()
     Sort_Index.reverse()
     print
     print '<a NAME = HoF><p><hr><p><h2>Old Salts of the Sea (the Sail Ho! Hall of Fame)</h2></p>'
     print
     print
     print '<p><center><table BORDER = 10 BGCOLOR = %s>' % HTML_Background_Color
     print '     <tr>  <th ROWSPAN = 2> Rank'
     print '           <th ROWSPAN = 2> Overall<br>Rating'
     print '           <th ROWSPAN = 2> Last<br>Rated<br>Game'
     print '           <th ROWSPAN = 2> Player'
     print '           <th COLSPAN = %d> Rating by Power' % len( Powers.keys() )
     print '           <th ROWSPAN = 2> Total<br>Wins'
     print '           <th ROWSPAN = 2> Total<br>Rated<br>Games'
     print '           <th ROWSPAN = 2> About these Stats'
     print '     <tr>'
     for Power in Powers.keys() :
          print '           <th BGCOLOR = %s> %s' % ( Powers[ Power ][ 'HTML_Color'], Power )
     print
     for Count in range( len( Sort_Index ) ) :
          Player = All_Players[ Index[ Sort_Index[ Count ] ] ]
          if Count == 0 :
               print '     <tr><td ALIGN = RIGHT> <IMG SRC = MEDALS.GIF BORDER = 0>'
               print '                                ',
          else :
               print '     <tr><td ALIGN = RIGHT> %3d ' % eval( 'Count + 1' ),
          if Player[ 'Overall Rating' ] == 'NR' :
               print '<td ALIGN = RIGHT><font color = red>   NR </font>',
               print '<td ALIGN = RIGHT> &nbsp;    ',
          else :
               if Sort_Index[ Count ][ 0 ] == '-' :
                    print '<td ALIGN = RIGHT><font color = red> %4d </font>' % eval( 'round( Player[ \'Overall Rating\' ], 0 )' ),
               else :
                    print '<td ALIGN = RIGHT>                   %4d        ' % eval( 'round( Player[ \'Overall Rating\' ], 0 )' ),
               print '<td ALIGN = RIGHT> %2s-%3s-%2s ' % ( eval( 'string.zfill( Player[ \'Overall Rating Date\' ][ \'Day\' ], 2 )' ), Player[ 'Overall Rating Date' ][ 'Month' ], eval( 'str( Player[ \'Overall Rating Date\' ][ \'Year\' ] )[ -2: ] ' ) ),
          print '<td> %-20s' % Index[ Sort_Index[ Count ] ],
          for Power in Powers.keys() :
               print '<td BGCOLOR = %s ALIGN = RIGHT>' % Powers[ Power ][ 'HTML_Color' ],
               if Player[ Power ][ 'Num Games' ] > 0.0 :
                    print '%4d ' % eval( 'round( Player[ Power ][ \'Rating\' ], 0 )' ),
               else :
                    print '%4s ' % '  NR',
          print '<td ALIGN = RIGHT> %5.1f ' % Player[ 'Total Wins' ],
          print '<td ALIGN = RIGHT> %5.1f ' % Player[ 'Total Games' ],
          print
     print
     print '     <tr><th COLSPAN = 3> &nbsp; <th ALIGN = LEFT> AVERAGE ',
     for Power in Powers.keys() :
          print '<th BGCOLOR = %s ALIGN = RIGHT> %4d' % ( Powers[ Power ][ 'HTML_Color' ], eval( 'round( Mean_Power_Ratings[ Power ], 0 )' ) ),
     print '<th> &nbsp;  <th> &nbsp;'
     print '</table></center></p>'
     print
     print
Example #37
            sql="INSERT INTO processing (triple, status, comment, ccd, process) VALUES ( %d, %d, '%s', %d, '%s' ) " % ( triple, -1, started, ccd, process)
            cfeps.execute(sql)
            mysql.commit()
	    cfeps.execute("UNLOCK TABLES")
            sql="SELECT e.expnum,e.object FROM triple_members m JOIN bucket.exposure e ON  m.expnum=e.expnum WHERE triple=%d ORDER BY expnum " % ( triple,)
            cfeps.execute(sql)
            exps=cfeps.fetchall()
            mysql.close()
            
            if len(file_ids)==0:
                for exp in exps:
                    file_ids.append(exp[0])
            if opt.verbose:
                sys.stderr.write("Running find on the files "+str(file_ids)+"\n")
	    cwd=os.getcwd()
	    ccdPath=os.path.join("chip"+string.zfill(str(ccd),2),str(exps[0][1]))
            wdir=os.path.join(cwd,os.path.join("real",ccdPath))
            sdir=os.path.join(cwd,os.path.join("scramble",ccdPath))
	    ndir=os.path.join(cwd,os.path.join("nailing",ccdPath))
            if opt.verbose:
                print wdir,cwd
	    for dirName in [wdir, sdir, ndir]:
	      if not os.path.exists(dirName):
	        os.makedirs(dirName)
            os.chdir(wdir)
            
	    result=-2
            opt.raw=fext
            try:
                if opt.verbose :
                    sys.stderr.write("Doing CCD: %s, of files: %s, PLANT: %s\n" %( str(ccd),str(file_ids),str(opt.plant)))
Example #38
def _dbid(v):
    if v >= len(_dbid_map):
        return string.zfill(v, 2)
    else:
        return _dbid_map[v]
Example #39
    def calculate_contact_order(self):
        xs = []
        ys = []
        RCOdir = os.path.join(self.rundir, 'RCOs')
        if self.options.init:
            if os.path.isdir(RCOdir):
                import shutil
                shutil.rmtree(RCOdir)
        if not os.path.isdir(RCOdir):
            os.mkdir(RCOdir)
        #
        for test in self.datasets:
            calc_spec = self.get_calculation_spec(
                os.path.join(self.expdir, test))
            datapoints = self.setup_calculation(calc_spec)
            for dp in datapoints['Calculations']:
                if calc_spec['Calctype'] == 'dpKa':
                    #
                    # Get the contact order if we don't have it already
                    #
                    print dp.keys()
                    self.struct_key = 'PDBchain'
                    if dp.has_key('PDBchain'):
                        pdbfilename = dp['PDBchain']
                    else:
                        pdbfilename = dp['PDB']
                        self.struct_key = 'PDB'
                    RCOfile = os.path.join(RCOdir, pdbfilename)
                    if os.path.isfile(RCOfile):
                        fd = open(RCOfile)
                        import pickle
                        RCO = pickle.load(fd)
                        fd.close()
                    else:
                        pdbfile = calc_spec['Files'][pdbfilename]
                        CO = pKa_contact_order([pdbfile])
                        RCO = CO.RCO
                        fd = open(RCOfile, 'w')
                        import pickle
                        pickle.dump(RCO.copy(), fd)
                        fd.close()
                    #
                    # Get the dpKa
                    #
                    import pKaTool.pKadata
                    titgroup = dp['titgroup'].upper()
                    if not pKaTool.pKadata.modelpKas.has_key(titgroup):
                        print 'Could not parse %s' % titgroup
                        continue
                    modelpKa = pKaTool.pKadata.modelpKas[titgroup]
                    acibas = pKaTool.pKadata.acidbase[titgroup]
                    dpKa = float(dp['expval']) - modelpKa
                    #
                    # Find the chain ID
                    #
                    if dp.has_key('ChainID'):
                        ChainID = dp['ChainID']
                    else:
                        ChainID = self.find_chainID(pdbfilename, dp['resnum'])
                    #
                    # Add the data
                    #
                    import string
                    resid = '%s:%s' % (ChainID,
                                       string.zfill(int(dp['resnum']), 4))
                    print pdbfilename, resid
                    if RCO.has_key(resid):
                        xs.append(RCO[resid])
                        ys.append(dpKa)
        import pylab
        pylab.plot(xs, ys, 'ro')
        pylab.xlabel('Contact order')
        pylab.ylabel('dpKa')
        pylab.show()

        return
Example #40
 def hexdigest(self):
     """Like digest(), but returns a string of hexadecimal digits instead.
     """
     return "".join([string.zfill(hex(ord(x))[2:], 2)
                     for x in tuple(self.digest())])
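A worked check (my own): a two-byte digest "\x0f\xa2" maps to zfill(hex(15)[2:], 2) == '0f' and zfill(hex(162)[2:], 2) == 'a2', giving '0fa2'.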
Example #41
                logger.error(("Mulitple input images needs one output "
                              "but --output option not set? [Logic Error]"))
                sys.exit(-1)
            subs = "."
            if opt.dist:
                subs = opt.dist
                object = hdu.header.get('OBJECT')
                nccd = hdu.header.get('EXTNAME')
                for dirs in [nccd, object]:
                    subs = subs + "/" + dirs
                    if not os.access(subs, os.F_OK):
                        os.makedirs(subs)
            subs = subs + "/"
            if opt.split:
                nccd = hdu.header.get('EXTVER')
                outfile = outfile + string.zfill(str(nccd), 2)
            outfile = subs + outfile + ".fits"
            ### exit if the file exist and this is the ccd or
            ### were splitting so every file should only have one
            ### extension
            if os.access(outfile, os.W_OK) and (ccd == 0 or
                                                opt.split) and not opt.combine:
                sys.exit("Output file " + outfile + " already exists")

            ### do the overscan for each file
            logger.info("Processing " + file_id)

            if opt.overscan:
                logger.info("Overscan subtracting")
                overscan(hdu)
            if opt.bias:
Example #42
# Do the first timestep to set up the sizing of the axes for tight_layout.
# It will be repeated in the loop.
xi = walks['X'][0]
yi = walks['Y'][0]
zi = walks['Z'][0]
snapshot(0,axXY,axXZ,axPDF,axAvSk,xi,yi,zi,AvVar,AvSk,Pe,aratio,t,mmap)

pyplot.tight_layout()

print "Constructing animation..."
for i in range(tsteps):

     axAvSk.plot(t,AvSk)

     xi = walks['X'][i]
     yi = walks['Y'][i]
     zi = walks['Z'][i]
     
     snapshot(i,axXY,axXZ,axPDF,axAvSk,xi,yi,zi,AvVar,AvSk,Pe,aratio,t,mmap)

     outfile = 'movie_%s.png'%string.zfill(i,4)     
     print outfile
     pyplot.savefig(outfile,dpi=150)

     
# end for

walks.close()

Example #43
if myid == 0:
    tmp_pt_source_filename_list = []
    tmp_pt_source_list = []
    # Now combine into one file
    for j in range(0, len(pt_list), 1):
        tmp_pt_filename = geom_filtered_pt_sources_sublist = geom_pt_sources_filename.rstrip('.xml') + \
            '_%03d.xml' % j
        tmp_pt_source_filename_list.append(tmp_pt_filename)
    for tmp_pt_source_file in tmp_pt_source_filename_list:
        tmp_pt_source = read_pt_source(tmp_pt_source_file)
        tmp_pt_source_list.append(tmp_pt_source)
    merged_filename = geom_pt_sources_filename.rstrip(
        '.xml') + '_merged_parallel.xml'
    model_name = geom_pt_sources_filename.rstrip('.xml')
    combine_pt_sources(tmp_pt_source_list,
                       merged_filename,
                       model_name,
                       nrml_version='04',
                       id_location_flag=None)
    #
    #if myid == 0:
    ss = int(pypar.time() - t0)
    h = ss / 3600
    m = (ss % 3600) / 60
    s = (ss % 3600) % 60
    print "--------------------------------------------------------"
    print 'P0: Total time (%i seconds): %s:%s:%s (hh:mm:ss)' % (
        ss, string.zfill(h, 2), string.zfill(m, 2), string.zfill(s, 2))
    print "--------------------------------------------------------"
pypar.finalize()
Example #44
 def bi2int(self, bytArr):
     #converts a 4 byte bytearray to an integer
     return int(''.join([string.zfill(s,8) for s in map(lambda n : n[2:], map(bin, bytArr))]), 2)
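A worked check (my own) of the big-endian interpretation:

# bi2int(bytearray([0, 0, 1, 2]))
# -> int('00000000' + '00000000' + '00000001' + '00000010', 2) == 258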
Example #45
from string import zfill
request = context.REQUEST


for k in kw.keys():
  v = kw[k]
  if k.endswith('listbox'):
    listbox = {}
    listbox_key = "%s_key" % k
    if v is not None:
      i = 1
      for line in v:
        if line.has_key(listbox_key):
          key = '%s' % line[listbox_key]
        else:
          key = str(zfill(i,3))
        listbox[key] = line
        i+=1
      request.set(k,listbox)
  else:
    request.set('your_%s' % k, v)
    request.set('%s' % k, v)
    # for backward compatibility, we keep my_ for dialog
    # using old naming conventions
    request.set('my_%s' % k, v)
Example #46
    print '%s capitalize=%s' % (s,s.capitalize()) # prints: python String function capitalize=Python string function
    print '%s title=%s' % (s,s.title()) # prints: python String function title=Python String Function
    import string; print string.capitalize(s) # prints: Python string function


  3. Formatting
    Fixed width, right-aligned, padded with spaces on the left: str.rjust(width)
    Fixed width, left-aligned, padded with spaces on the right: str.ljust(width)
    Fixed width, centered, padded with spaces on both sides: str.center(width)
    Fixed width, right-aligned, padded with zeros on the left: str.zfill(width)

    print '%s ljust="%s"' % (s,s.ljust(40)) # prints: python String function ljust="python String function                  "
    print '%s rjust="%s"' % (s,s.rjust(40)) # prints: python String function rjust="                  python String function"
    print '%s center="%s"' % (s,s.center(40)) # prints: python String function center="         python String function         "
    print '%s zfill="%s"' % (s,s.zfill(40)) # prints: python String function zfill="000000000000000000python String function"
    import string; print string.zfill(s, 40) # prints: 000000000000000000python String function


  4. String searching
    Search for a substring, returning -1 if not found: str.find('t')
    Search from a given start position: str.find('t',start)
    Search between given start and end positions: str.find('t',start,end)
    Search from the right: str.rfind('t')
    Count how many times a substring occurs: str.count('t')
    Every method above can be replaced by index; the difference is that index raises an exception when the substring is not found, while find returns -1

    print '%s find nono=%d' % (s,s.find('nono')) # prints: python String function find nono=-1
    print '%s find t=%d' % (s,s.find('t')) # prints: python String function find t=2
    print '%s find t from %d=%d' % (s,3,s.find('t',3)) # prints: python String function find t from 3=8
    print '%s find t from %d to %d=%d' % (s,1,2,s.find('t',1,2)) # prints: python String function find t from 1 to 2=-1
    #print '%s index nono ' % (s,s.index('nono',1,2))
Example #47
 def makeOperations(self, sequence, operations):
     """Perform the specified operations on the sequence
     Sequence must be in the [[A:0001:ALA],[A:0002:GLU],['A:0003:THR'], ..] format
     Operations is a list of the following types:
     Mutations: A:0001:ALA:ASP
     Deletions: delete:A:0002:GLU
     Insertions: insert:1:A:0003:THR:ALA, insert:2:A:0003:THR:TRP (insert THR,TRP after A:0003:THR)
     Operations are always performed in sequence numbering order
     """
     if operations==[]:
         return sequence
     ops_sorted={}
     insertions=[]
     for operation in operations:
         s_op=operation.split(':')
         if s_op[0]=='insert':
             resid='%s:%s' %(s_op[2],s_op[3])
             if ops_sorted.has_key(resid):
                 ok=False
                 if type(ops_sorted[resid]) is list:
                     if ops_sorted[resid][0]=='insert':
                         ok=True
                 if not ok:
                     raise Exception('More than one operation on the same residue: %s' %resid)
             else:
                 ops_sorted[resid]=['insert',{}]
             #
             # Add the residue to be inserted
             #
             ins_num=s_op[1]
             org_typ=s_op[4]
             ins_typ=s_op[5]
             ops_sorted[resid][1][ins_num]=[org_typ,ins_typ]
         elif s_op[0]=='delete':
             resid='%s:%s' %(s_op[1],s_op[2])
             if ops_sorted.has_key(resid):
                 raise Exception('More than one operation on the same residue: %s' %resid)
             restyp=s_op[3]
             ops_sorted[resid]=['delete',restyp]
         else:
             # Normal mutation
             import pKa.pKD_tools as pKD_tools
             resid=pKD_tools.get_resid_from_mut(operation)
             if ops_sorted.has_key(resid):
                 raise Exception('More than one operation on the same residue: %s' %resid)
             ops_sorted[resid]=['mutate',operation]
     #
     # Perform the operations, one after one
     #
     new_seq=[]
     new_count=None
     new_chain=None
     for resid,restyp in sequence:
         # Make sure that the chain hasn't changed or if we are at the beginning then init
         if resid.split(':')[0]!=new_chain:
             #Initialise
             sp_resid=resid.split(':')
             new_chain=sp_resid[0]
             new_count=int(sp_resid[1])
             newresid='%s:%s' %(new_chain,string.zfill(new_count,4))
         # Does this residue have an operation?
         if ops_sorted.has_key(resid):
             op=ops_sorted[resid]
             if op[0]=='delete':
                 # Deletion
                 if op[1]==restyp:
                     pass # This deletes the residue
                 else:
                     raise Exception('Incorrect org residue in deletion: %s' %op)
             elif op[0]=='insert':
                 # Insertion: op[1] maps insert number -> [org_typ,ins_typ]
                 inserts=op[1].keys()
                 inserts.sort()
                 for i in inserts:
                     org_typ,ins_typ=op[1][i]
                     if org_typ==restyp:
                         new_seq.append([newresid,ins_typ])
                         new_count=new_count+1
                         newresid='%s:%s' %(new_chain,string.zfill(new_count,4))
             elif op[0]=='mutate':
                 # Mutation
                 import pKa.pKD_tools as pKD_tools
                 orgres=pKD_tools.get_oldrestyp_from_mut(op[1])
                 if orgres==restyp:
                     new_seq.append([newresid,pKD_tools.get_newrestyp_from_mut(op[1])])
                     new_count=new_count+1
                     newresid='%s:%s' %(new_chain,string.zfill(new_count,4))
             else:
                 raise Exception('Unknown mutations spec: %s' %op)
         else:
             new_seq.append([resid,restyp])
             new_count=new_count+1
             newresid='%s:%s' %(new_chain,string.zfill(new_count,4))
     return new_seq
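
A minimal usage sketch (hypothetical inputs; the deletion path avoids the
pKa.pKD_tools dependency, and resids use the chain:number form that the
string.zfill calls above maintain):

 # Hypothetical call on an object exposing makeOperations:
 seq = [['A:0001', 'ALA'], ['A:0002', 'GLU'], ['A:0003', 'THR']]
 ops = ['delete:A:0002:GLU']   # remove residue 2, verifying it is GLU
 # self.makeOperations(seq, ops) -> [['A:0001', 'ALA'], ['A:0003', 'THR']]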
Example #48
0
def calculate_pehash(file_path=None):
    if not HAVE_PEFILE:
        self.log('error',
                 "Missing dependency2, install pefile (`pip install pefile`)")
        return ''

    if not HAVE_BITSTRING:
        self.log(
            'error',
            "Missing dependency2, install bitstring (`pip install bitstring`)")
        return ''

    if not file_path:
        return ''

    try:
        exe = pefile.PE(file_path)

        #image characteristics
        img_chars = bitstring.BitArray(hex(exe.FILE_HEADER.Characteristics))
        #pad to 16 bits
        img_chars = bitstring.BitArray(bytes=img_chars.tobytes())
        if img_chars.len == 16:
            img_chars_xor = img_chars[0:7] ^ img_chars[8:15]
        else:
            img_chars_xor = img_chars[0:7]

        #start to build pehash
        pehash_bin = bitstring.BitArray(img_chars_xor)

        #subsystem -
        sub_chars = bitstring.BitArray(hex(exe.FILE_HEADER.Machine))
        #pad to 16 bits
        sub_chars = bitstring.BitArray(bytes=sub_chars.tobytes())
        sub_chars_xor = sub_chars[0:7] ^ sub_chars[8:15]
        pehash_bin.append(sub_chars_xor)

        #Stack Commit Size
        stk_size = bitstring.BitArray(
            hex(exe.OPTIONAL_HEADER.SizeOfStackCommit))
        stk_size_bits = string.zfill(stk_size.bin, 32)
        #now xor the bits
        stk_size = bitstring.BitArray(bin=stk_size_bits)
        stk_size_xor = stk_size[8:15] ^ stk_size[16:23] ^ stk_size[24:31]
        #pad to 8 bits
        stk_size_xor = bitstring.BitArray(bytes=stk_size_xor.tobytes())
        pehash_bin.append(stk_size_xor)

        #Heap Commit Size
        hp_size = bitstring.BitArray(hex(exe.OPTIONAL_HEADER.SizeOfHeapCommit))
        hp_size_bits = string.zfill(hp_size.bin, 32)
        #now xor the bits
        hp_size = bitstring.BitArray(bin=hp_size_bits)
        hp_size_xor = hp_size[8:15] ^ hp_size[16:23] ^ hp_size[24:31]
        #pad to 8 bits
        hp_size_xor = bitstring.BitArray(bytes=hp_size_xor.tobytes())
        pehash_bin.append(hp_size_xor)

        #Section chars
        for section in exe.sections:
            #virutal address
            sect_va = bitstring.BitArray(hex(section.VirtualAddress))
            sect_va = bitstring.BitArray(bytes=sect_va.tobytes())
            pehash_bin.append(sect_va)

            #rawsize
            sect_rs = bitstring.BitArray(hex(section.SizeOfRawData))
            sect_rs = bitstring.BitArray(bytes=sect_rs.tobytes())
            sect_rs_bits = string.zfill(sect_rs.bin, 32)
            sect_rs = bitstring.BitArray(bin=sect_rs_bits)
            sect_rs = bitstring.BitArray(bytes=sect_rs.tobytes())
            sect_rs_bits = sect_rs[8:31]
            pehash_bin.append(sect_rs_bits)

            #section chars
            sect_chars = bitstring.BitArray(hex(section.Characteristics))
            sect_chars = bitstring.BitArray(bytes=sect_chars.tobytes())
            sect_chars_xor = sect_chars[16:23] ^ sect_chars[24:31]
            pehash_bin.append(sect_chars_xor)

            #entropy calulation
            address = section.VirtualAddress
            size = section.SizeOfRawData
            raw = exe.write()[address + size:]
            if size == 0:
                kolmog = bitstring.BitArray(float=1, length=32)
                pehash_bin.append(kolmog[0:7])
                continue
            bz2_raw = bz2.compress(raw)
            bz2_size = len(bz2_raw)
            #k = round(bz2_size / size, 5)
            k = bz2_size / size
            kolmog = bitstring.BitArray(float=k, length=32)
            pehash_bin.append(kolmog[0:7])

        m = hashlib.sha1()
        m.update(pehash_bin.tobytes())
        return str(m.hexdigest())
    except:
        return ''
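
The string.zfill calls above left-pad bitstring's variable-length .bin output
to a fixed 32 characters so the slice positions stay stable; the idiom in
isolation (hypothetical value, plain stdlib):

    import string
    bits = bin(0x1800)[2:]                      # '1100000000000' (13 bits)
    bits = string.zfill(bits, 32)               # left-pad with '0' to 32 chars
    print bits[8:15], bits[16:23], bits[24:31]  # the fixed slices XORed above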
Example #49
0
def theTime(time):
    """Convert a duration in minutes to a zero-padded HH:MM string."""
    hrs=int(time/60)
    mins=int(time-hrs*60)
    return "%s:%s" %(string.zfill(str(hrs),2),string.zfill(str(mins),2))
Example #50
0
    def _event_to_xml_marc_21(self, event, includeSession=1, includeContribution=1, includeMaterial=1, out=None):
        if not out:
            out = self._XMLGen

        out.openTag("datafield",[["tag","245"],["ind1"," "],["ind2"," "]])
        out.writeTag("subfield", event.title, [["code", "a"]])
        out.closeTag("datafield")

        out.writeTag("leader", "00000nmm  2200000uu 4500")
        out.openTag("datafield",[["tag","111"],["ind1"," "],["ind2"," "]])
        out.writeTag("subfield", event.title, [["code", "a"]])
        event_location_info = []
        if event.venue_name:
            event_location_info.append(event.venue_name)
        if event.address:
            event_location_info.append(event.address)
        event_room = event.get_room_name(full=False)
        if event_room:
            event_location_info.append(event_room)
        out.writeTag("subfield", ', '.join(event_location_info), [["code", "c"]])

        sd = event.start_dt
        ed = event.end_dt
        out.writeTag("subfield","%d-%s-%sT%s:%s:00Z" %(sd.year, string.zfill(sd.month,2), string.zfill(sd.day,2), string.zfill(sd.hour,2), string.zfill(sd.minute,2)),[["code","9"]])
        out.writeTag("subfield","%d-%s-%sT%s:%s:00Z" %(ed.year, string.zfill(ed.month,2), string.zfill(ed.day,2), string.zfill(ed.hour,2), string.zfill(ed.minute,2)),[["code","z"]])

        out.writeTag("subfield", uniqueId(event), [["code", "g"]])
        out.closeTag("datafield")

        self._generate_category_path(event, out)

        sd = event.start_dt
        if sd is not None:
            out.openTag("datafield",[["tag","518"],["ind1"," "],["ind2"," "]])
            out.writeTag("subfield","%d-%s-%sT%s:%s:00Z" %(sd.year, string.zfill(sd.month,2), string.zfill(sd.day,2), string.zfill(sd.hour,2), string.zfill(sd.minute,2)),[["code","d"]])
            out.closeTag("datafield")

        out.openTag("datafield",[["tag","520"],["ind1"," "],["ind2"," "]])
        out.writeTag("subfield", event.description, [["code", "a"]])
        out.closeTag("datafield")

        self._generate_references(event, out)

        out.openTag("datafield",[["tag","653"],["ind1","1"],["ind2"," "]])
        for keyword in event.keywords:
            out.writeTag("subfield",keyword,[["code","a"]])
        out.closeTag("datafield")

        out.openTag("datafield",[["tag","650"],["ind1","2"],["ind2","7"]])
        out.writeTag("subfield", event.type.capitalize(), [["code", "a"]])
        out.closeTag("datafield")
        #### TODO

        #out.openTag("datafield",[["tag","650"],["ind1","3"],["ind2","7"]])
        #out.writeTag("subfield",,[["code","a"]])
        #out.closeTag("datafield")


        # tag 700 chair name
        for chair in event.person_links:
            out.openTag("datafield",[["tag","906"],["ind1"," "],["ind2"," "]])
            full_name = chair.get_full_name(last_name_first=True, last_name_upper=False, abbrev_first_name=False)
            out.writeTag("subfield", full_name, [["code", "p"]])
            out.writeTag("subfield", chair.affiliation, [["code", "u"]])
            out.closeTag("datafield")


        #out.openTag("datafield",[["tag","856"],["ind1","4"],["ind2"," "]])
        if includeMaterial:
            self.materialToXMLMarc21(event, out=out)
        #out.closeTag("datafield")

        if event.note and not event.note.is_deleted:
            self.noteToXMLMarc21(event.note, out=out)

        #if respEmail != "":
        #    out.openTag("datafield",[["tag","859"],["ind1"," "],["ind2"," "]])
        #    out.writeTag("subfield",respEmail,[["code","f"]])
        #    out.closeTag("datafield")
        # tag 859 email
        for chair in event.person_links:
            out.openTag("datafield", [["tag", "859"], ["ind1", " "], ["ind2", " "]])
            out.writeTag("subfield", chair.person.email, [["code", "f"]])
            out.closeTag("datafield")

        out.openTag("datafield", [["tag", "961"], ["ind1", " "], ["ind2", " "]])
        out.writeTag("subfield", event.created_dt.strftime('%Y-%m-%dT'), [["code", "x"]])
        out.closeTag("datafield")

        out.openTag("datafield", [["tag", "961"], ["ind1", " "], ["ind2", " "]])
        out.writeTag("subfield", datetime.now().strftime('%Y-%m-%dT'), [["code", "c"]])
        out.closeTag("datafield")

        out.openTag("datafield",[["tag","980"],["ind1"," "],["ind2"," "]])
        out.writeTag("subfield", self._getRecordCollection(event), [["code", "a"]])
        out.closeTag("datafield")

        out.openTag("datafield",[["tag","970"],["ind1"," "],["ind2"," "]])
        out.writeTag("subfield", "INDICO." + str(uniqueId(event)), [["code", "a"]])
        out.closeTag("datafield")

        self._generate_link_field(event.external_url, 'Event details', out)

        self._generateAccessList(event, out, objId=uniqueId(event))