Example No. 1
def startimport(daystr2):
    log.info("start startimport=======")
    
    jzdisp = {}
    jzdispdir = rootpath+"/data/jzdisphourmysql/"+daystr2
    jzclickdir = rootpath+"/data/jzclickhourmysql/"+daystr2
    jzdispfout = "jzdisphourmysql-"+daystr2
    jzclickfout = "jzclickhourmysql-"+daystr2
    merge(jzdispdir,jzdispfout)
    merge(jzclickdir,jzclickfout)
    jzdispfile = jzdispdir + "/" + jzdispfout
    jzclickfile = jzclickdir + "/" + jzclickfout
    if getsize(jzdispfile)==0:
        return
    if getsize(jzclickfile)==0:
        return

    jzdisp = util.readfiles2mapCpcNew(daystr2,jzdispfile,jzclickfile,{})
    
    log.info("jzdisp=%d"%(len(jzdisp.keys())))
    mysqlhelper = MySQLHelper()
    mysqlhelper.setstatconn()
    pagesize = 100
    
    #mysqlhelper.query("delete from t_jzcpcdisp_"+yyyymm+" where  dispday like '"+daystr2+"%'")
    #bench_insertdb(mysqlhelper,pagesize,jzdisp,sql,handlerValues_jzdisp)
    bench_insertdb(mysqlhelper,pagesize,jzdisp,daystr2)
    #mysqlhelper.close()
    if os.path.exists(jzdispfile):
        os.remove(jzdispfile)
    if os.path.exists(jzclickfile):
        os.remove(jzclickfile)
    print "over.time=%s"%(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())))
Example No. 2
 def prepare(self):
     if fileExists(self.srcfile, 'r') and fileExists(self.destfile, 'r'):
         fsize1 = path.getsize(self.srcfile)
         fsize2 = path.getsize(self.destfile)
         self.srcsize = fsize1 + fsize2
         self.ProgressTimer.start(7500, True)
     self.toolbox.ptsFrontpanelActions('start')
Example No. 3
    def test_002_checkwavcopy(self):
	infile  = g_in_file
	outfile = "test_out.wav"

	wf_in  = blocks.wavfile_source(infile)
	wf_out = blocks.wavfile_sink(outfile,
                                     wf_in.channels(),
                                     wf_in.sample_rate(),
                                     wf_in.bits_per_sample())
	self.tb.connect(wf_in, wf_out)
	self.tb.run()
	wf_out.close()

	# we're losing all extra header chunks
	self.assertEqual(getsize(infile) - g_extra_header_len, getsize(outfile))

	in_f  = file(infile,  'rb')
	out_f = file(outfile, 'rb')

	in_data  = in_f.read()
	out_data = out_f.read()
	in_f.close()
	out_f.close()
	os.remove(outfile)
	# cut extra header chunks from the input file
	self.assertEqual(in_data[:g_extra_header_offset] + \
	                 in_data[g_extra_header_offset + g_extra_header_len:], out_data)
Example No. 4
def dealFile(path):

    dict = {}
    if not os.path.exists(path):
        return None
    for root, dirs, files in os.walk(path):

        for file in  files :

            if file.startswith(".DS_Store"):
                pass

            elif file.endswith("_base.xml"):

                list = countItem(path + "/"+file)
                try:
                    dict.update({'baseSize':getsize(path + "/"+ file)})
                except Exception, e:
                    dict.update({'baseSize': '0'})
                dict.update({"base_count": list[0]})
                dict.update({"base_tag_count": list[1]})

            elif file.endswith("_ext.xml"):
                list = countItem(path + "/"+file)
                try:
                    dict.update({'extSize':getsize(path + "/"+ file)})
                except Exception, e:
                    dict.update({'extSize':'0'})
                dict.update({"ext_count": list[0]})
                dict.update({"ext_tag_count": list[1]})
Example No. 5
def compress(sources):
    """
        Compress a file/directory using 7zip.
        If the gain is not large enough, remove the archive, otherwise remove
        the source.
    """

    dest = "{0}.{1}".format(file_without_ext(sources[0]), extension)

    seven_zip(sources, dest)

    sources_size = 0
    for s in sources:
        sources_size += path.getsize(s)
        
    dest_size = path.getsize(dest)

    gain = sources_size - dest_size

    if gain < sources_size * min_ratio: # Not enought gain
        os.unlink(dest)
        return 0
    else:
        for s in sources:
            os.unlink(s)
        return gain
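Note: the docstring above describes a keep-whichever-side-is-smaller policy. Below is a minimal, self-contained sketch of the same size check using only the standard library; shutil.make_archive stands in for the snippet's seven_zip helper, and the function name and the min_ratio default are illustrative assumptions.

import os
import shutil

def compress_if_worthwhile(source_dir, min_ratio=0.1):
    # Archive the directory, then compare the total source size to the archive size.
    archive = shutil.make_archive(source_dir, 'zip', source_dir)
    src_size = sum(os.path.getsize(os.path.join(root, f))
                   for root, _, files in os.walk(source_dir) for f in files)
    gain = src_size - os.path.getsize(archive)
    if gain < src_size * min_ratio:
        os.unlink(archive)        # not enough gain: keep the source
        return 0
    shutil.rmtree(source_dir)     # enough gain: keep the archive instead
    return gain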
Example No. 6
def calcsize(file):
    if not isdir(file):
        return getsize(file)
    total = 0L
    for s in subfiles(abspath(file)):
        total += getsize(s[1])
    return total
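Note: a rough Python 3 equivalent of the snippet above, assuming the undocumented subfiles() helper simply yields every file underneath the directory.

import os
from os.path import getsize, isdir

def calcsize_py3(path):
    if not isdir(path):
        return getsize(path)
    return sum(getsize(os.path.join(root, name))
               for root, _dirs, names in os.walk(path)
               for name in names)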
Example No. 7
    def test_write_truncate(self):
        knowngood = {}
        filelist = list(self.pak2.listfiles())
        for f in filelist:
            self.pak2.open(f)
            data = self.pak2.read()
            knowngood[f] = [len(data), md5(data).hexdigest()]
        size = getsize(self.filename2)

        buf = "123456789"
        bufmd5 = md5(buf).hexdigest()
        for i in xrange(0, len(filelist), 2):
            self.pak2.open(filelist[i], "r")
            size -= len(self.pak2.read())
            self.pak2.close()
            self.pak2.open(filelist[i], "w")
            self.pak2.write(buf)
            self.pak2.close()
            size += len(buf)
            knowngood[filelist[i]][0] = len(buf)
            knowngood[filelist[i]][1] = bufmd5

        for f in filelist:
            self.assertEqual(filelist, list(self.pak2.listfiles()))
            self.pak2.open(f)
            data = self.pak2.read()
            self.assertEqual(len(data), knowngood[f][0])
            self.assertEqual(md5(data).hexdigest(), knowngood[f][1])

        del self.pak2
        self.assertEqual(getsize(self.filename2), size)
Example No. 8
    def test_run_summarize_taxa_through_plots(self):
        """ run_summarize_taxa_through_plots generates expected results
        """
        run_summarize_taxa_through_plots(
            self.test_data['biom'][0],
            self.test_data['map'][0],
            self.test_out,
            mapping_cat=None,
            sort=False,
            command_handler=call_commands_serially,
            params=self.params,
            qiime_config=self.qiime_config,
            status_update_callback=no_status_updates)

        # Check that summarized taxonomy files have non-zero size
        input_file_basename = splitext(split(self.test_data['biom'][0])[1])[0]
        for i in [2, 3, 4, 5, 6]:
            sum_taxa_file = join(self.test_out, input_file_basename + '_L%s.txt'
                                 % (str(i)))
            self.assertTrue(getsize(sum_taxa_file) > 0)

        # Check the html files are generated
        self.assertTrue(getsize(join(self.test_out, 'taxa_summary_plots',
                                     'area_charts.html')) > 0)

        self.assertTrue(getsize(join(self.test_out, 'taxa_summary_plots',
                                     'area_charts.html')) > 0)

        # Check that the log file is created and has size > 0
        log_fp = glob(join(self.test_out, 'log*.txt'))[0]
        self.assertTrue(getsize(log_fp) > 0)
Example No. 9
def down_book(down_url,url,book_name,local_dir):
    """
    Download a book. The file keeps a .tmp suffix while downloading; when the download finishes, check its size, and if it reaches roughly 70% of the expected size treat it as successful and drop the .tmp suffix.
    """
    book_name = validatename(book_name)
    # book_name = book_name.replace("/","")
    down_dir_tmp = str(local_dir) + str(book_name)+ ".pdf"+".tmp"
    down_dir = str(local_dir) + str(book_name)+ ".pdf"
    if os.path.exists(down_dir) == True and abs(round(float(getsize(down_dir))/1024/1024,1) - round(float(size.replace(' MB',"")),1)) < 1: # check whether the book was already downloaded; skip if the file already exists
        #sys.exit()

        print ("....<"+book_name+"> already exists...")
        logging.info("....Books already exists....")
    elif os.path.exists(down_dir_tmp) == True or os.path.exists(down_dir) == False:
        if os.path.exists(down_dir_tmp) == True:
            print "...ReDownloading <"+book_name+">..."
            print "Original Size: "+size
            os.remove(down_dir_tmp)
        else:
            print "...Downloading <"+book_name+">..."
            print "Original Size: "+size
        rp = requests.get(down_url,headers = {'Referer':url},allow_redirects = False)
        r = requests.get(rp.headers['location'])
        with open(down_dir_tmp, "wb") as code:
           code.write(r.content)

        print "Actual Size: "+str(round(float(getsize(down_dir_tmp))/1024/1024,1))+" MB"
        if abs(round(float(getsize(down_dir_tmp))/1024/1024,1) 
            - round(float(size.replace(' MB',"")),1))/round(float(size.replace(' MB',"")),1) < 0.3: # tunable: if less than ~70% of the expected size was downloaded, treat it as a failure
            os.rename(down_dir_tmp,down_dir)
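Note: a condensed sketch of the download-to-.tmp-then-rename pattern described in the docstring, with the expected size passed in explicitly; the function name, the expected_mb parameter and the tolerance default are illustrative assumptions.

import os
import requests
from os.path import getsize

def download_checked(url, dest, expected_mb, tolerance=0.3):
    tmp = dest + ".tmp"
    with open(tmp, "wb") as fh:
        fh.write(requests.get(url).content)
    actual_mb = getsize(tmp) / 1024.0 / 1024.0
    if abs(actual_mb - expected_mb) / expected_mb < tolerance:
        os.rename(tmp, dest)    # size looks plausible: keep the file
        return True
    os.remove(tmp)              # too far off the expected size: discard it
    return False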
Example No. 10
    def repackage(self, extracted_apk_dir, dex_dir, have_locators):
        shutil.move(join(dex_dir, 'classes.dex'), extracted_apk_dir)

        jar_meta_path = join(dex_dir, 'metadata.txt')
        with open(jar_meta_path, 'w') as jar_meta:
            if have_locators:
                jar_meta.write('.locators\n')
            for i in range(1, 100):
                oldpath = join(dex_dir, 'classes%d.dex' % (i + 1))
                dexpath = join(dex_dir, 'secondary-%d.dex' % i)
                if not isfile(oldpath):
                    break
                shutil.move(oldpath, dexpath)
                jarpath = dexpath + '.jar'
                create_dex_jar(jarpath, dexpath)
                dex_meta_base = 'secondary-%d.dex.jar.meta' % i
                dex_meta_path = join(dex_dir, dex_meta_base)
                with open(dex_meta_path, 'w') as dex_meta:
                    dex_meta.write('jar:%d dex:%d\n' %
                                   (getsize(jarpath), getsize(dexpath)))
                with open(jarpath, 'rb') as jar:
                    sha1hash = hashlib.sha1(jar.read()).hexdigest()
                shutil.move(dex_meta_path,
                            join(extracted_apk_dir, self._secondary_dir))
                shutil.move(jarpath, join(extracted_apk_dir,
                                          self._secondary_dir))
                jar_meta.write(
                    'secondary-%d.dex.jar %s secondary.dex%02d.Canary\n'
                    % (i, sha1hash, i))
        shutil.move(jar_meta_path, join(extracted_apk_dir, self._secondary_dir))
Example No. 11
def comp (source_patch, target_patch):
	from os import path, walk
	from filecmp import cmp
	# output message describing the differences that were found
	message = ''
	path_f = []
	tree = walk(source_patch)
	for d, dirs, files in tree:
		for f in files:
			patch = path.join(d,f) # build the file path
			path_f.append(patch)      # add the path to the list
		# iterate over the file paths collected in the list
		for patch in path_f:
			# compare the files and, where they differ, collect information about them
			# check that the target file exists
			if not path.exists(patch.replace(source_patch, target_patch)):
				message = message + 'Missing target file: '+ patch.replace(source_patch, target_patch)
			# compare the sizes of the source and target files
			elif path.getsize(patch.replace(source_patch, target_patch)) != path.getsize(patch):
				message = message + file_info(patch, patch.replace(source_patch, target_patch))
			# last modification time
			elif path.getmtime(patch.replace(source_patch, target_patch)) != path.getmtime(patch):
				message = message + file_info(patch, patch.replace(source_patch, target_patch))
			# compare the file contents
			elif not cmp(patch.replace(source_patch, target_patch), patch):
				message = message + file_info(patch, patch.replace(source_patch, target_patch))
		return message
Example No. 12
def getchunks(infile, n_cpus, scheduler = 'guided'):
    # Divide input data based on scheduler type
    if scheduler == 'static':
        size = getsize(infile) / n_cpus
    else:
        size = getsize(infile) / (n_cpus * 20)

    # Open input file    
    try:    
        ifile = open(infile)
    except:
        print >> stderr, 'Error: Not able to open ', infile, '. Exiting.'
        exit(-1)

    # Create chunk of data to be distributed to nodes    
    while 1:
        start = ifile.tell()
        ifile.seek(size, 1)
        s = ifile.readline()
        yield start, ifile.tell() - start
        if not s:
            break

    # Close the input file    
    try:    
        ifile.close()
    except:
        print >> stderr, 'Warning: Error closing the file ', ifile
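Note: a hypothetical consumer of getchunks(), only to show how the yielded (start, length) pairs are meant to be used; the file name and the process() callback are placeholders.

def process(block):
    pass  # stand-in for the real per-chunk work

for start, length in getchunks('input.txt', n_cpus=4, scheduler='static'):
    with open('input.txt') as fh:
        fh.seek(start)
        process(fh.read(length))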
Example No. 13
def can_overwrite_old_file(old, new):
    """Returns True if the old file does not exist, or if the new file is not significantly smaller than it."""
    if not PATH.exists(old):
        return True
    if not PATH.exists(new):
        return False
    return PATH.getsize(new) > PATH.getsize(old) * 0.7
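Note: a hypothetical call site for the helper above (PATH is assumed to be an alias for os.path in the original module; the file names are made up).

import os

if can_overwrite_old_file("data.db", "data.db.new"):
    os.replace("data.db.new", "data.db")  # only overwrite when the helper says it is safe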
Example No. 14
    def processfile(self, filename):
        """Renames the specified image to a backup path,
        and writes out the image again with optimal settings."""
        try:
            # Skip read-only files
            if not stat(filename)[0] & S_IWRITE:
                print 'Ignoring read-only file "' + filename + '".'
                return False

            # Create a backup
            backupname = filename + "." + self.backupextension

            if isfile(backupname):
                print 'Ignoring file "' + filename + '" for which existing backup file is present.'
                return False

            rename(filename, backupname)
        except Exception as e:
            stderr.write('Skipping file "' + filename + '" for which backup cannot be made: ' + str(e) + "\n")
            return False

        ok = False

        try:
            # Open the image
            with open(backupname, "rb") as file:
                img = Image.open(file)

                # Check that it's a supported format
                format = str(img.format)
                if format != "PNG" and format != "JPEG":
                    print 'Ignoring file "' + filename + '" with unsupported format ' + format
                    return False

                # This line avoids problems that can arise saving larger JPEG files with PIL
                ImageFile.MAXBLOCK = img.size[0] * img.size[1]

                # The 'quality' option is ignored for PNG files
                img.save(filename, quality=90, optimize=True)

            # Check that we've actually made it smaller
            origsize = getsize(backupname)
            newsize = getsize(filename)

            if newsize >= origsize:
                print 'Cannot further compress "' + filename + '".'
                return False

            # Successful compression
            ok = True
        except Exception as e:
            stderr.write('Failure whilst processing "' + filename + '": ' + str(e) + "\n")
        finally:
            if not ok:
                try:
                    move(backupname, filename)
                except Exception as e:
                    stderr.write('ERROR: could not restore backup file for "' + filename + '": ' + str(e) + "\n")

        return ok
Example No. 15
 def command_patch_list_data(self):
   requested_version = (self.reported_version + 1)
   while requested_version in self.patch_catalog.getCatalog()[self.reported_client]:
     p_file = path.join(self.patch_catalog.getCatalog()['path'],
       self.patch_catalog.getCatalog()[self.reported_client][requested_version] + '.pat')
     p_head, p_tail = path.split(p_file)
     r_file = path.join(self.patch_catalog.getCatalog()['path'],
       self.patch_catalog.getCatalog()[self.reported_client][requested_version] + '.rtp')
     r_head, r_tail = path.split(r_file)
     file_listing = {'patname':p_tail.encode('ascii'),
                     'patnamelen':pack('>i', 
                       len(p_tail.encode('ascii'))),
                     'patlen':pack('>i', 
                       path.getsize(p_file)),
                     'rtpname':r_tail.encode('ascii'),
                     'rtpnamelen':pack('>i', 
                       len(r_tail.encode('ascii'))),
                     'rtplen':pack('>i', 
                       path.getsize(r_file))
                    }
     self.connection.send(file_listing['patnamelen'])
     self.connection.send(file_listing['patname'])
     self.connection.send(file_listing['patlen'])
     self.connection.send(file_listing['rtpnamelen'])
     self.connection.send(file_listing['rtpname'])
     self.connection.send(file_listing['rtplen'])
     print("\nSERVER << sent %s PatchListData entry %s\n" % (self.address, file_listing))
     requested_version = requested_version + 1
   self.connection.send(b'\x00\x00\x00\x00')
   return
Example No. 16
 def add(self, mydata): 
    self.name = mydata 
    if os.path.isfile(mydata): 
       self.file = open(mydata, 'rb').read() 
       self.size = str(int(os.path.getsize(mydata)))  
       self.type = 'blob' 
    elif os.path.isdir(mydata):    
       self.file = [] 
       #  Collect a list of filenames and their sizes.
       for root, dirs, files in os.walk(mydata):     
          self.size = sum(int(getsize(join(root, name))) for name in files)           
          for name in files:                 
              self.file.append([root, name, getsize(join(root, name))]) 
          if '.git' in dirs:
              dirs.remove('.git')  # don't visit .git directories        
       self.type = 'tree'       
    self.mode = '10' + oct(os.stat(mydata)[0] & 0777)       
    # Header - different to Git    
    self.header = self.type + "*42*" + str(self.size) + "moose" + "\0"          
    
    #  Calculate the SHA1 hexdigest.  
    self.stuff = self.header + str(self.file) 
    self.sha1 = hashlib.sha1(self.stuff).hexdigest()    
    self.dirname = self.sha1[0:2]  
    self.blobname = self.sha1[2:41]  
          
    #  ( To do - calculate the SHA1s for a "tree" and a "commit". )      
    #  Save in our dict.  
    self.data.update({ self.blobname: [self.dirname, self.name, 
        self.size, self.mode ] })  
Example No. 17
def duplicate(original, new, other):

	if not exists(new):
		return False

	if new == original:
		del_file(original)
		return True

	try:
		if DadVision.args.force_rename or (other and other.lower() in ["proper"]):
			log.warn("Replacing Existing with repack or Force Rename Requested: {}".format(original))
			del_file(new)
			return False

		if getsize(original) > getsize(new):
			log.info("Replacing Existing with Larger Version: {}".format(new))
			del_file(new)
			return False

		log.trace("Comparing existing file to new, may run for some time.")
		if filecmp.cmp(original, new):
			log.info("Deleting New File, Same File already at destination!: {}".format(new))
			del_file(original)
			return True
		del_file(new)
		return False
	except (InvalidPath, OSError), e:
		log.error("Unable to remove File: {}".format(e))
		return True
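Note: a condensed, dependency-free sketch of the same keep-the-better-copy decision, without the project's del_file/log helpers; it returns True when the new copy should be discarded.

import filecmp
import os
from os.path import exists, getsize

def keep_existing(original, new):
    if not exists(original) or not exists(new):
        return False
    if getsize(original) > getsize(new):
        os.remove(new)          # the existing copy is larger: keep it
        return True
    if filecmp.cmp(original, new):
        os.remove(new)          # identical content: the new copy is redundant
        return True
    return False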
Example No. 18
def folderDigger(parNR, directory, name):
    global BadOnes
    BadOnes=""

    size = 0
    subNR = parNR+1
    
    for item in os.listdir(directory):
        if not os.path.isdir(directory+"/"+item):
            if os.path.isfile(directory+"/"+item):
                BadOnes= BadOnes+"* "+item+" ("+directory+")\n"

    for folder in os.listdir(directory):
        if os.path.isdir(directory+"/"+folder):
            try:
                size = size + folderDigger(subNR, (directory+"/"+folder), folder)
            except:
                pass
    
    
    for fil in os.listdir(directory):
        if os.path.isfile(directory+"/"+fil):
            size = size+getsize(directory+"/"+fil)
            if showFiles:
                pf(subNR+1, getsize(directory+"/"+fil), fil, directory)
    
    
    pf(subNR, size, name, directory)
    return size
Example No. 19
 def _get_file_details(self, file_name):
     label = None
     details = {
         'path': path.realpath(file_name),
         'sha1': self._get_sha1(file_name)
     }
     if path.isfile(file_name):
         label, ext = path.splitext(path.basename(file_name))
         if ext.lower() == '.mrxs':
             details.update({
                 'name': label,
                 'mimetype': self.INDEX_FILE_MT,
                 'size': path.getsize(file_name)
             })
         else:
             label, details = None, None
     elif path.isdir(file_name):
         label = path.basename(file_name)
         details.update({
             'name': label,
             'mimetype': self.DATA_FOLDER_MT,
             'size': sum([path.getsize(path.join(file_name, f))
                          for f in listdir(file_name)]),
             })
     self.logger.debug('Details for file %s: %r', file_name, details)
     return label, details
Example No. 20
def dir_cache_data(path):
    """
    Return the data to store in the cache for directory at *path*.
    """
    path = force_utf8(path)
    files = []
    directories = []
    for entry in os.listdir(path):
        if any(pattern.match(entry) for pattern in settings.EXCLUDE_FILES):
            # Skip entries matching an exclude pattern
            continue
        entry_path = op.join(path, entry)
        if not op.exists(entry_path):
            # File was deleted during directory listing
            continue
        timestamp = op.getmtime(entry_path)
        if op.isdir(entry_path):
            size = 0
            for dirpath, dirnames, filenames in os.walk(entry_path):
                for f in filenames:
                    fp = op.join(dirpath, f)
                    if op.exists(fp):
                        size += op.getsize(fp)
            directories.append((entry, size, timestamp))
        else:
            size = op.getsize(entry_path)
            files.append((entry, size, timestamp))
    return directories, files
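Note: a hypothetical call site, only to show the shape of the tuples the function returns (the path is made up).

directories, files = dir_cache_data('/srv/media')
for name, size, mtime in directories:
    print('dir  %s: %d bytes, mtime %s' % (name, size, mtime))
for name, size, mtime in files:
    print('file %s: %d bytes, mtime %s' % (name, size, mtime))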
Example No. 21
def createDict(path, root={}):
    pathList = listdir(path)
    for i, item in enumerate(pathList):
        file_path = path_join(path, item)
        if item not in ignore_dir and exists(file_path):
            if isdir(file_path):
                if not root.get(item, False):
                    root[item] = {"type": "dir", "files": {}}
                createDict(file_path, root[item]["files"])
            else:
                if not root.get(item, False):
                    log("new file " + file_path)
                    root[item] = {"type": "file",
                                  "file_size": getsize(file_path),
                                  "mtime": getmtime(file_path), 
                                  "ctime": getctime(file_path),
                                  "md5": md5(file_path),
                                  "sha256": sha256(file_path)}
                else:
                    if root[item]["mtime"] != getmtime(file_path):
                        log("rehashing " + file_path)
                        root[item] = {"type": "file",
                                      "file_size": getsize(file_path),
                                      "mtime": getmtime(file_path), 
                                      "ctime": getctime(file_path),
                                      "md5": md5(file_path),
                                      "sha256": sha256(file_path)}
                        
                                    
    return root
Example No. 22
    def disabled_test_xml_quiet(self):
        """ Tests the 'quiet' parameter of the MARC8ToUnicode class,
            passed in via the pymarc.record_to_xml() method
        """
        outfile = 'test/dummy_stderr.txt'
        # truncate outfile in case someone's fiddled with it
        open(outfile, 'wb').close()
        # redirect stderr
        sys.stderr = open(outfile, 'wb')
        # reload pymarc so it picks up the new sys.stderr
        reload(pymarc)
        # get problematic record
        record = next(pymarc.reader.MARCReader(open('test/utf8_errors.dat', 'rb')))
        # record_to_xml() with quiet set to False should generate errors
        #   and write them to sys.stderr
        xml = pymarc.record_to_xml(record, quiet=False)
        # close dummy stderr so we can accurately get its size
        sys.stderr.close()
        # file size should be greater than 0
        self.assertNotEqual(getsize(outfile), 0)

        # truncate file again
        open(outfile, 'wb').close()
        # be sure its truncated
        self.assertEqual(getsize(outfile), 0)
        # redirect stderr again
        sys.stderr = open(outfile, 'wb')
        reload(pymarc)
        # record_to_xml() with quiet set to True should not generate errors
        xml = pymarc.record_to_xml(record, quiet=True)
        # close dummy stderr
        sys.stderr.close()
        # no errors should have been written
        self.assertEqual(getsize(outfile), 0)
Example No. 23
    def repackage(self, extracted_apk_dir, dex_dir, have_locators):
        BaseDexMode.repackage(self, extracted_apk_dir, dex_dir, have_locators)

        metadata = DexMetadata(have_locators=have_locators,
                               store=self._store_id,
                               dependencies=self._dependencies)
        for i in range(1, 100):
            oldpath = join(dex_dir, self._dex_prefix + '%d.dex' % (i + 1))
            dexpath = join(dex_dir, self._store_name + '-%d.dex' % i)
            if not isfile(oldpath):
                break
            shutil.move(oldpath, dexpath)

            jarpath = dexpath + '.jar'
            create_dex_jar(jarpath, dexpath)
            metadata.add_dex(jarpath, BaseDexMode.get_canary(self, i))

            dex_meta_base = jarpath + '.meta'
            dex_meta_path = join(dex_dir, dex_meta_base)
            with open(dex_meta_path, 'w') as dex_meta:
                dex_meta.write('jar:%d dex:%d\n' %
                               (getsize(jarpath), getsize(dexpath)))

            shutil.move(dex_meta_path,
                        join(extracted_apk_dir, self._secondary_dir))
            shutil.move(jarpath, join(extracted_apk_dir,
                                      self._secondary_dir))
        jar_meta_path = join(dex_dir, 'metadata.txt')
        metadata.write(jar_meta_path)
        shutil.move(jar_meta_path, join(extracted_apk_dir, self._secondary_dir))
Example No. 24
def build_project(project, _zip, _url, _alias, _replace, _create, _option):
  """Build project."""
  if _option:
    project.properties = flatten(project.properties)
    # to make sure we properly override nested options, we flatten first
    project.properties.update(_parse_option(_option))
  if _zip:
    if osp.isdir(_zip):
      _zip = osp.join(_zip, '%s.zip' % (project.versioned_name, ))
    project.build(_zip, overwrite=_replace)
    sys.stdout.write(
      'Project %s successfully built and saved as %r (size: %s).\n'
      % (project, _zip, human_readable(osp.getsize(_zip)))
    )
  else:
    with temppath() as _zip:
      project.build(_zip)
      archive_name = '%s.zip' % (project.versioned_name, )
      session = _get_session(_url, _alias)
      res = _upload_zip(session, project.name, _zip, _create, archive_name)
      sys.stdout.write(
        'Project %s successfully built and uploaded '
        '(id: %s, size: %s, upload: %s).\n'
        'Details at %s/manager?project=%s\n'
        % (
          project,
          res['projectId'],
          human_readable(osp.getsize(_zip)),
          res['version'],
          session.url,
          project,
        )
      )
Example No. 25
def build_project(project, zip, url, alias, replace, create):
  """Build project."""
  if zip:
    project.build(zip, overwrite=replace)
    stdout.write(
      'Project successfully built and saved as %r (size: %s).\n'
      % (zip, human_readable(getsize(zip)))
    )
  else:
    with temppath() as zip:
      project.build(zip)
      session = Session(url, alias)
      while True:
        try:
          res = session.upload_project(project.name, zip)
        except AzkabanError as err:
          if create:
            session.create_project(project.name, project.name)
          else:
            raise err
        else:
          break
      stdout.write(
        'Project %s successfully built and uploaded '
        '(id: %s, size: %s, version: %s).\n'
        'Details at %s/manager?project=%s\n'
        % (
          project,
          res['projectId'],
          human_readable(getsize(zip)),
          res['version'],
          session.url,
          project,
        )
      )
Example No. 26
def _calcsize(file_to_torrent):
    if not isdir(file_to_torrent):
        return getsize(file_to_torrent)
    total = 0
    for s in _subfiles(abspath(file_to_torrent)):
        total += getsize(s[1])
    return total
Example No. 27
    def compressFile(self,fName):
        if self.verbose>1:
            print_("  Compressing",fName)
        zippedName=fName+".gz"
        if path.exists(zippedName):
            self.warning("Zipped file",zippedName,"already existing for",fName)
            return
        oldSize=path.getsize(fName)
        if oldSize<self.bigSize:
            if self.verbose>2:
                print_("   Skipping because it is too small")
            self.nrSkipped+=1
            return

        # use gzip because that way the responsibility of removing the old file is with a 'tried and tested' program
        ret=subprocess.call(["gzip",fName])
        if ret!=0 or not path.exists(zippedName) or path.exists(fName):
            self.warning("Problem compressing file",fName)
            self.nrProblems+=1
            return
        newSize=path.getsize(zippedName)
        if newSize>oldSize:
            self.warning("Compression of",fName,"increased the filesize. Old:",
                         humanReadableSize(oldSize),"New:",humanReadableSize(newSize))

        if self.verbose>2:
            print_("   Old size:",humanReadableSize(oldSize),"New size:",humanReadableSize(newSize))

        self.nrFiles+=1
        self.prevSize+=oldSize
        self.nowSize+=newSize
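Note: a rough equivalent of the size-comparison logic above using Python's gzip module instead of the external gzip binary; the function name and the size threshold are illustrative.

import gzip
import os
import shutil
from os.path import exists, getsize

def compress_file(fname, big_size=1024):
    zipped = fname + ".gz"
    if exists(zipped):
        return                  # a compressed copy already exists
    old_size = getsize(fname)
    if old_size < big_size:
        return                  # too small to be worth compressing
    with open(fname, "rb") as src, gzip.open(zipped, "wb") as dst:
        shutil.copyfileobj(src, dst)
    os.remove(fname)            # mimic gzip, which removes the original
    if getsize(zipped) > old_size:
        print("compression increased the size of %s" % fname)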
Example No. 28
    def run(self, filename, pipedata):
        if 'optimize' in pipedata and not pipedata['optimize']:
            return
        self.bakery.logging_raw('### Optimize TTF {}'.format(filename))
        # copied from https://code.google.com/p/noto/source/browse/nototools/subset.py
        from fontTools.subset import Options, Subsetter, load_font, save_font

        options = Options()
        options.layout_features = "*"
        options.name_IDs = "*"
        options.hinting = True
        options.notdef_outline = True

        font = load_font(op.join(self.builddir, filename), options)
        subsetter = Subsetter(options=options)
        subsetter.populate(glyphs=font.getGlyphOrder())
        subsetter.subset(font)
        save_font(font, op.join(self.builddir, filename + '.opt'), options)

        newsize = op.getsize(op.join(self.builddir, filename + '.opt'))
        origsize = op.getsize(op.join(self.builddir, filename))

        # compare filesizes TODO print analysis of this :)
        comment = "# look at the size savings of that subset process"
        self.bakery.logging_cmd("ls -l '%s'* %s" % (filename, comment))

        statusmessage = "{0}.opt: {1} bytes\n{0}: {2} bytes\n"
        self.bakery.logging_raw(statusmessage.format(filename, newsize, origsize))

        # move ttx files to src
        shutil.move(op.join(self.builddir, filename + '.opt'),
                    op.join(self.builddir, filename),
                    log=self.bakery.logger)
Example No. 29
def compare_dns_dirs():
	print(' Checking if there is need for an update .... ')
	
	#first check to see if .ip_tmp_path exists
	if ( path.exists('.ip_tmp_path') and (path.isdir('.ip_tmp_path')) ):
		print(' Give me just a few seconds more')
		sleep(2)
		
		if ( int(path.getsize('.ip_tmp')) <= int(path.getsize('.ip_tmp_path')) ):
			print(' \n Looks like new content is available ')
			
			# copying new content in .dns_tmp_path to .dns_tmp
			try:
				rmtree('.dns_tmp')
				copytree('.dns_tmp_path','.dns_tmp')
			except:
				print(' Failed to copy new data ... ')
				print(' Exiting ... ')
				exit(0)
			else:
				print(' Successfully moved new data')				
		else:
			print(' Nothing new was added ... ')
			print(' Exiting ... ')
			exit(0)
	else:
		print(' This is first run ... \n moving on ... ')
		sleep(2)
Example No. 30
def unpack_file_from_file(in_file, out_file):
    """ Uncompress a file from a file.

    Parameters
    ----------
    in_file : str
        the name of the input file
    out_file : str
        the name of the output file

    Returns
    -------
    metadata : bytes
        the metadata contained in the file if present

    Raises
    ------

    FormatVersionMismatch
        if the file has an unmatching format version number
    ChecksumMismatch
        if any of the chunks fail to produce the correct checksum
    """
    in_file_size = path.getsize(in_file)
    log.verbose('input file size: %s' % pretty_size(in_file_size))
    with open(in_file, 'rb') as input_fp, open(out_file, 'wb') as output_fp:
        source = CompressedFPSource(input_fp)
        sink = PlainFPSink(output_fp, source.nchunks)
        unpack(source, sink)
    out_file_size = path.getsize(out_file)
    log.verbose('output file size: %s' % pretty_size(out_file_size))
    log.verbose('decompression ratio: %f' % (out_file_size / in_file_size))
    return source.metadata
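Note: a hypothetical driver for the function above, reporting the same decompression ratio the log lines describe; the file names are made up.

from os import path

metadata = unpack_file_from_file('archive.blp', 'restored.bin')
ratio = float(path.getsize('restored.bin')) / path.getsize('archive.blp')
print('decompression ratio: %f, metadata: %r' % (ratio, metadata))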
Example No. 31
 def _get_size(self):
     try:
         return commatize(getsize(self.file_name)) + ' bytes'
     except:
         return ''
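Note: a self-contained variant of the helper above, using str.format's thousands separator in place of the external commatize() helper (assuming commatize only inserts digit-group commas).

from os.path import getsize

def get_size_label(file_name):
    try:
        return "{:,} bytes".format(getsize(file_name))
    except OSError:
        return ''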
Example No. 32
def filehash(filepath, blocksize=65536):
    # (def line reconstructed from the call below; the blocksize default is an assumption)
    sha = hashlib.sha256()
    with open(filepath, 'rb') as fp:
        while True:
            data = fp.read(blocksize)
            if not data:
                break
            sha.update(data)
    return sha.hexdigest()


# --- /helpers ---

write("""\
%%%% HASHDEEP-1.0
%%%% size,sha256,filename
##
## $ hashdeep.py
##""")

ROOT = '.'
for root, dirs, files in os.walk(ROOT):
    for fpath in [osp.join(root, f) for f in files]:
        size = osp.getsize(fpath)
        sha = filehash(fpath)
        name = osp.relpath(fpath, ROOT)
        write('%s,%s,%s' % (size, sha, name))

        #for ignored in ['.hg', '.svn', 'git']:
        #    if ignored in dirs:
        #        dirs.remove(ignored)
Example No. 33
            e.show(f).grid(column=0,row=r)
        return f

    def __repr__(self) :
        s = "Agenda: " + self.name + '\n\t'
        for e in self :
            s += str(e) + '\n'
        return s

def clear(container) :
    for w in container.winfo_children() :
        w.destroy()

#----------------------------------------------------MAIN----------------------------------------------------------------

if path.exists(pathfile) and path.getsize(pathfile) > 0 :
    try :
        with open(pathfile,"rb") as  readfile:
            calendar = [ a.depklize() for a in pickle.load(readfile) ]
        #wtfisgoinon("read 'STRICKYlist.pk' in 'listaEventi'")
    except Exception as e :
        raise e
else :
    #default
    calendar=[Agenda("test")]
    calendar[0].insertEvent(Agenda_Event())

#structure of the main window
row_messages = tkinter.Frame(rootwindow)
row_commands = tkinter.Frame(rootwindow)
row_display = tkinter.Frame(rootwindow)
Example No. 34
async def nem(context):
    proxies = {}
    proxynum = 0
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
        'Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063',
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
        "application/signed-exchange;v=b3;q=0.9",
        "X-Real-IP":
        "223.252.199.66"
    }
    proxy = [{
        'http': 'http://music.lolico.me:39000',
        'https': 'http://music.lolico.me:39000'
    }, {
        'http': 'http://netease.unlock.feiwuis.me:6958',
        'https': 'https://netease.unlock.feiwuis.me:6958'
    }]
    helptext = "**使用方法:** `-nem` `<指令>` `<关键词>`\n\n指令s为搜索,p为点歌,id为歌曲ID点歌,r为随机热歌(无关键词)\n搜索在s后添加数字如`-nem` `s8` " \
               "`<关键词>`调整结果数量\n搜索灰色歌曲请尽量**指定歌手**\n可回复搜索结果消息`-nem` `p` `<歌曲数字序号>`点歌 "
    apifailtext = "出错了呜呜呜 ~ 试了好多好多次都无法访问到 API 服务器 。"

    if len(context.parameter) < 2:
        if len(context.parameter) == 0:
            await context.edit(helptext)
            return
        elif context.parameter[0] == "r":  # 随机热歌
            await context.edit("随机中 . . .")
            for _ in range(20):  # retry at most 20 times
                apinum = random.randint(0, 1)
                apitype = random.randint(0, 1)
                if apitype == 0:
                    apitype = "飙升榜"
                else:
                    apitype = "热歌榜"
                if apinum == 0:
                    url = "https://api.vvhan.com/api/rand.music?type=json&sort=" + apitype
                else:
                    url = "https://api.uomg.com/api/rand.music?sort=" + apitype + "&format=json"
                status = False
                try:
                    randsong = requests.get(url, headers=headers)
                    if randsong.status_code == 200:
                        randsong = json.loads(randsong.content)
                        if apinum == 0 and randsong['success'] is True:
                            context.parameter[0] = "id"
                            context.parameter.append(
                                str(randsong['info']['id']))
                            status = True
                            break
                        elif apinum == 1 and randsong['code'] == 1:
                            context.parameter[0] = "id"
                            context.parameter.append(
                                str(randsong['data']['url'][45:]))
                            status = True
                            break
                except:
                    continue
            if status is False:
                await context.edit(apifailtext)
                sleep(3)
                await context.delete()
                return
        else:  # invalid input
            await context.edit(helptext)
            return
    # assemble the search keyword
    keyword = ''
    for i in range(1, len(context.parameter)):
        keyword += context.parameter[i] + " "
    keyword = keyword[:-1]
    idplay = False
    if context.parameter[0] == "id":  # ID点歌功能
        if len(context.parameter) > 2:
            await context.edit(helptext)
            return
        idplay = keyword
        context.parameter[0] = "p"
    if context.parameter[0][0] == "s":  # 搜索功能
        await context.edit(f"【{keyword}】搜索中 . . .")
        if len(context.parameter[0]) > 1:
            limit = str(context.parameter[0][1:])
        else:
            limit = "5"
        url = "http://music.163.com/api/search/pc?&s=" + \
            keyword + "&offset=0&limit=" + limit + "&type=1"
        for _ in range(20):  # try at most 20 times
            status = False
            req = requests.request("GET", url, headers=headers)
            if req.status_code == 200:
                req = json.loads(req.content)
                if req['code'] == 200:
                    result = req['result']
                    if req['result']['songCount'] == 0:
                        result = False
                else:
                    result = False
                if result:
                    info = defaultdict()
                    for i in range(len(req['result']['songs'])):
                        info[i] = {
                            'id': '',
                            'title': '',
                            'alias': '',
                            'album': '',
                            'albumpic': '',
                            'artist': ''
                        }
                        info[i]['id'] = req['result']['songs'][i]['id']
                        info[i]['title'] = req['result']['songs'][i]['name']
                        info[i]['alias'] = req['result']['songs'][i]['alias']
                        info[i]['album'] = req['result']['songs'][i]['album'][
                            'name']
                        info[i]['albumpic'] = req['result']['songs'][i][
                            'album']['picUrl']
                        for j in range(
                                len(req['result']['songs'][i]['artists'])):
                            info[i]['artist'] += req['result']['songs'][i][
                                'artists'][j]['name'] + " "
                    text = f"<strong>关于【{keyword}】的结果如下</strong> \n"
                    for i in range(len(info)):
                        text += f"#{i+1}: \n<strong>歌名</strong>: {info[i]['title']}\n"
                        if info[i]['alias']:
                            text += f"<strong>别名</strong>: <i>{info[i]['alias'][0]}</i>\n"
                        if info[i]['album']:
                            res = '<a href="' + \
                                info[i]['albumpic'] + '">' + \
                                info[i]['album'] + '</a>'
                            text += f"<strong>专辑</strong>: {res} \n"
                        text += f"<strong>作者</strong>: {info[i]['artist']}\n<strong>歌曲ID</strong>: <code>{info[i]['id']}</code>\n————————\n"
                    text += "\n<strong>回复此消息</strong><code>-nem p <歌曲序号></code><strong>即可点歌</strong>"
                    await context.edit(text,
                                       parse_mode='html',
                                       link_preview=True)
                    status = True
                    break
                else:
                    await context.edit("**未搜索到结果**")
                    sleep(3)
                    await context.delete()
                    status = True
                    break
            else:
                continue
        if status is False:
            await context.edit(apifailtext)
            sleep(3)
            await context.delete()
        return
    elif context.parameter[0] == "p":  # 点歌功能
        try:
            reply = await context.get_reply_message()
        except ValueError:
            await context.edit("出错了呜呜呜 ~ 无效的参数。")
            return
        search = ""
        title = ""
        if reply:
            msg = reply.message
            search = re.findall(".*【(.*)】.*", msg)
            if search:
                try:
                    start = "#" + context.parameter[1] + ":"
                    search = ".*" + start + "(.*?)" + '————————' + ".*"
                    msg = re.findall(search, msg, re.S)[0]
                    search = ".*歌曲ID: (.*)\n.*"
                    title = ".*歌名: (.*?)\n.*"
                    title = "【" + re.findall(title, msg, re.S)[0] + "】"
                    idplay = re.findall(search, msg, re.S)[0]
                    if reply.sender.is_self:
                        await reply.edit(f"{title}点歌完成")
                except:
                    await context.edit("出错了呜呜呜 ~ 无效的歌曲序号。")
                    return
            else:
                await context.edit("出错了呜呜呜 ~ 无效的参数。")
                return

        await context.edit("获取中 . . .")
        try:
            import eyed3
            imported = True
        except ImportError:
            imported = False
            await bot.send_message(
                context.chat_id,
                '(`eyeD3`支持库未安装,歌曲文件信息将无法导入\n请使用 `-sh` `pip3` `install` `eyed3` '
                '安装,或自行ssh安装)')
        url = "http://music.163.com/api/search/pc?&s=" + \
            keyword + "&offset=0&limit=1&type=1"
        for _ in range(20):  # try at most 20 times
            status = False
            if proxynum > (len(proxy) - 1):  # automatically rotate to the next proxy
                proxynum = 0
            proxies = proxy[proxynum]
            proxynum += 1
            if idplay:  # play the specified ID
                url = "http://music.163.com/api/song/detail?id=" + \
                    idplay + "&ids=[" + idplay + "]"
            # otherwise, play from the search result
            req = requests.request("GET", url, headers=headers)
            if req.status_code == 200:
                req = json.loads(req.content)
                if req['code'] == 200:
                    if idplay:
                        req['result'] = req
                    result = req['result']
                    if not idplay:
                        if req['result']['songCount'] == 0:
                            result = False
                else:
                    result = False
                if result:
                    info = {
                        'id': req['result']['songs'][0]['id'],
                        'title': req['result']['songs'][0]['name'],
                        'alias': req['result']['songs'][0]['alias'],
                        'album': req['result']['songs'][0]['album']['name'],
                        'albumpic':
                        req['result']['songs'][0]['album']['picUrl'],
                        'artist': '',
                        'br': ''
                    }
                    if req['result']['songs'][0]['hMusic']:
                        info['br'] = req['result']['songs'][0]['hMusic'][
                            'bitrate']
                    elif req['result']['songs'][0]['mMusic']:
                        info['br'] = req['result']['songs'][0]['mMusic'][
                            'bitrate']
                    elif req['result']['songs'][0]['lMusic']:
                        info['br'] = req['result']['songs'][0]['lMusic'][
                            'bitrate']
                    for j in range(len(req['result']['songs'][0]['artists'])):
                        info['artist'] += req['result']['songs'][0]['artists'][
                            j]['name'] + "; "
                    info['artist'] = info['artist'][:-2]
                    if title:
                        title = ""
                    else:
                        title = f"【{info['title']}】"
                    await context.edit(f"{title}下载中 . . .")
                    try:
                        from Crypto.Cipher import AES
                        AES.new("0CoJUm6Qyw8W8jud".encode('utf-8'),
                                AES.MODE_CBC,
                                "0102030405060708".encode('utf-8'))
                        ccimported = True
                    except ImportError:
                        ccimported = False
                        await bot.send_message(
                            context.chat_id,
                            '(`PyCryptodome`支持库未安装,音乐曲库/音质受限\n请使用 `-sh` `pip3` '
                            '`install` `pycryptodome` 安装,或自行ssh安装)')
                    name = info['title'].replace('/', " ") + ".mp3"
                    name = name.encode('utf-8').decode('utf-8')
                    if ccimported:  # try the high-quality download path
                        songid = str(info['id'])

                        class WangyiyunDownload(object):
                            def __init__(self):
                                self.key = '0CoJUm6Qyw8W8jud'
                                self.public_key = "010001"
                                self.modulus = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b72515' \
                                               '2b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92' \
                                               '557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d' \
                                               '3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7 '
                                # IV (offset)
                                self.iv = "0102030405060708"
                                # request headers
                                self.headers = {
                                    'User-Agent':
                                    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 ('
                                    'KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
                                    # pass the login cookie,
                                    'Cookie':
                                    'MUSIC_U=f52f220df171da480dbf33ce899479615'
                                    '85a7fdf08c89a2a4bdd6efebd86544233a649814e309366;',
                                    "X-Real-IP":
                                    "223.252.199.66",
                                }
                                # request URL
                                self.url = 'https://music.163.com/weapi/song/enhance/player/url/v1?csrf_token='

                            # generate a 16-character random string
                            def set_random_num(self):
                                random_num = ''
                                # pick 16 random characters
                                string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
                                for ___ in range(16):
                                    n = math.floor(
                                        random.uniform(0, 1) * len(string))
                                    random_num += string[n]
                                # return the 16-character random string
                                return random_num

                            # generate encSecKey
                            # RSA-encrypt random_num using public_key and modulus
                            def RSA_encrypt(self, random_num):
                                # reverse the 16-character string and encode it as UTF-8
                                random_num = random_num[::-1].encode('utf-8')
                                # then hex-encode it
                                random_num = codecs.encode(
                                    random_num, 'hex_codec')
                                # encrypt (all three values are converted from hex to decimal)
                                # int(n, 16) --> convert the hex string n to decimal
                                encryption = int(random_num, 16)**int(
                                    self.public_key, 16) % int(
                                        self.modulus, 16)
                                # convert the encrypted value to a hex string
                                encryption = format(encryption, 'x')
                                # return the encrypted string
                                return encryption

                            # generate params
                            # AES-encrypt msg with key and iv; this is called twice
                            # key:
                            #   first pass: key
                            #   second pass: random_num
                            # iv: the offset iv
                            def AES_encrypt(self, msg, key, iv):
                                # first pad msg to a multiple of 16 bytes
                                # number of padding bytes needed
                                pad = (16 - len(msg) % 16)
                                # pad
                                msg = msg + pad * chr(pad)
                                # encode key, iv and msg as UTF-8
                                key = key.encode('utf-8')
                                iv = iv.encode('utf-8')
                                msg = msg.encode('utf-8')
                                # build the cipher from key and iv, in CBC mode
                                encryptor = AES.new(key, AES.MODE_CBC, iv)
                                # encrypt
                                encrypt_aes = encryptor.encrypt(msg)
                                # base64-encode the encrypted value first
                                encrypt_text = base64.encodebytes(encrypt_aes)
                                # then convert it to a UTF-8 string
                                encrypt_text = str(encrypt_text, 'utf-8')
                                # return the encrypted string
                                return encrypt_text

                            # build the request data for the given song_id,
                            # consisting of params and encSecKey
                            def construct_data(self, song_id):
                                # generate a 16-character random string
                                random_num = self.set_random_num()
                                # generate encSecKey
                                encSecKey = self.RSA_encrypt(
                                    random_num=random_num)
                                # call AES encryption twice to produce params
                                # initialise the song_info payload
                                song_info = '{"ids":"[%s]","level":"exhigh","encodeType":"mp3",' \
                                            '"csrf_token":"477c1bd99fddedb3adc074f47fee2d35"}' % song_id
                                # first encryption pass, with encText, key and iv
                                first_encryption = self.AES_encrypt(
                                    msg=song_info, key=self.key, iv=self.iv)
                                # second encryption pass, with first_encryption, random_num and iv
                                encText = self.AES_encrypt(
                                    msg=first_encryption,
                                    key=random_num,
                                    iv=self.iv)
                                # assemble data
                                data = {
                                    'params': encText,
                                    'encSecKey': encSecKey
                                }
                                # return data
                                return data

                            # send the request and obtain the download link
                            def get_real_url(self):
                                # the song_id to request
                                self.song_id = songid
                                # build the request data
                                data = self.construct_data(
                                    song_id=self.song_id)
                                # send the request
                                request = requests.post(url=self.url,
                                                        headers=self.headers,
                                                        data=data,
                                                        proxies=proxies,
                                                        verify=False)
                                # initialise real_url
                                real_url = ''
                                # handle the response
                                try:
                                    js_text = json.loads(request.text)
                                    data = js_text['data']
                                    if len(data) != 0:
                                        code = data[0]['code']
                                        # fetch succeeded
                                        if code == 200:
                                            # the song's real URL
                                            real_url = data[0]['url']
                                        else:
                                            raise RetryError
                                except:
                                    print('生成的params和encSecKey有误!重试中!')
                                    raise RetryError
                                # return real_url
                                return real_url

                            def download(self):
                                # obtain the download link
                                real_url = self.get_real_url()
                                if real_url == '':
                                    print('链接获取失败!')
                                    raise RetryError
                                else:
                                    file = name
                                    # start downloading
                                    try:
                                        content = requests.get(
                                            url=real_url,
                                            headers=self.headers).content
                                        with open(file, 'wb') as fp:
                                            fp.write(content)
                                    except:
                                        print('服务器连接出错')
                                        raise RetryError

                        for __ in range(6):  # try at most 6 times
                            if proxynum > (len(proxy) - 1):  # automatically rotate to the next proxy
                                proxynum = 0
                            proxies = proxy[proxynum]
                            proxynum += 1
                            try:
                                WangyiyunDownload().download()
                                ccimported = True
                                break
                            except:
                                ccimported = False
                        if not exists(name):
                            ccimported = False
                    if ccimported is False:  # download (standard quality)
                        music = requests.request(
                            "GET",
                            "http://music.163.com/api/song/enhance/download/url?&br="
                            + str(info['br']) + "&id=" + str(info['id']),
                            headers=headers,
                            proxies=proxies,
                            verify=False)
                        if music.status_code == 200:
                            music = json.loads(music.content)
                            if not music['data']['url']:
                                music = requests.request(
                                    "GET",
                                    "https://music.163.com/song/media/outer/url?id="
                                    + str(info['id']) + ".mp3",
                                    headers=headers,
                                    verify=False)
                                if music.status_code != 200:
                                    continue
                            else:
                                music = requests.request("GET",
                                                         music['data']['url'],
                                                         headers=headers)
                        else:
                            continue
                    performers = info['artist'].replace(';', ',')
                    cap = performers + " - " + "**" + info['title'] + "**"

                    if ccimported is False:
                        with open(name, 'wb') as f:
                            f.write(music.content)
                    if (path.getsize(name) / 1024) < 100:
                        remove(name)
                        try:
                            if reply.sender.is_self:
                                await reply.delete()
                        except:
                            pass
                        await context.delete()
                        res = 'Or you can click<a href="https://music.163.com/#/song?id=' + \
                            str(info['id']) + '">' + \
                            ' <strong>here</strong> ' + '</a>' + 'to listen on the web version'
                        await bot.send_message(
                            context.chat_id,
                            f"<strong>【{info['title']}】</strong>\n" +
                            "歌曲获取失败,资源获取可能受限,你可以再次尝试。\n" + res,
                            parse_mode='html',
                            link_preview=True)
                        return
                    duration = 0
                    imagedata = requests.get(info['albumpic'],
                                             headers=headers).content
                    if imported is True:
                        await context.edit(f"{title}信息导入中 . . .")
                        tag = eyed3.load(name)
                        duration = int(tag.info.time_secs)
                        tag.initTag()
                        tag = tag.tag
                        tag.artist = info['artist']
                        tag.title = info['title']
                        tag.album = info['album']
                        tag.images.remove('')
                        tag.images.set(6, imagedata, "image/jpeg", u"Media")
                        tag.save(version=eyed3.id3.ID3_DEFAULT_VERSION,
                                 encoding='utf-8')
                    br = ""
                    if imported is True:
                        br = "#" + \
                            str(eyed3.mp3.Mp3AudioFile(
                                name).info.bit_rate[1]) + "kbps "
                    alias = ""
                    if info['alias']:
                        alias = "\n\n__" + info['alias'][0] + "__"
                    cap += "\n#NeteaseMusic " + br + alias
                    await context.edit(f"{title}上传中 . . .")
                    if not exists("plugins/NeteaseMusicExtra/FastTelethon.py"):
                        if not exists("plugins/NeteaseMusicExtra"):
                            mkdir("plugins/NeteaseMusicExtra")
                        for ____ in range(6):  # try at most 6 times
                            faster = requests.request(
                                "GET",
                                "https://gist.githubusercontent.com/TNTcraftHIM"
                                "/ca2e6066ed5892f67947eb2289dd6439/raw"
                                "/86244b02c7824a3ca32ce01b2649f5d9badd2e49/FastTelethon.py"
                            )
                            if faster.status_code == 200:
                                with open(
                                        "plugins/NeteaseMusicExtra/FastTelethon.py",
                                        "wb") as f:
                                    f.write(faster.content)
                                break
                            else:
                                if exists(
                                        "plugins/NeteaseMusicExtra/NoFastTelethon.txt"
                                ):
                                    break
                    try:
                        from NeteaseMusicExtra.FastTelethon import upload_file
                        file = await upload_file(context.client,
                                                 open(name, 'rb'), name)
                    except:
                        file = name
                        if not exists(
                                "plugins/NeteaseMusicExtra/NoFastTelethon.txt"
                        ):
                            with open(
                                    "plugins/NeteaseMusicExtra/NoFastTelethon.txt",
                                    "w") as f:
                                f.write(
                                    "此文件出现表示FastTelethon支持文件在首次运行NeteaseMusic插件时导入失败\n这可能是因为Github"
                                    "服务器暂时性的访问出错导致的\nFastTelethon可以提升低网络性能机型在上传文件时的效率,但是正常情况提升并不明显\n"
                                    "如想要手动导入,可以手动下载:\nhttps://gist.githubusercontent.com/TNTcraftHIM"
                                    "/ca2e6066ed5892f67947eb2289dd6439/raw"
                                    "/86244b02c7824a3ca32ce01b2649f5d9badd2e49/FastTelethon.py\n并放入当前文件夹"
                                )
                            await bot.send_message(
                                context.chat_id,
                                'Failed to import the `FastTelethon` helper; upload speed may be affected\n'
                                'This notice is shown only **once**; for manual import see:\n`' + getcwd() +
                                '/plugins/NeteaseMusicExtra/NoFastTelethon.txt`'
                            )

                    await context.client.send_file(
                        context.chat_id,
                        file,
                        caption=cap,
                        link_preview=False,
                        force_document=False,
                        thumb=imagedata,
                        attributes=(DocumentAttributeAudio(
                            duration, False, info['title'], performers), ))
                    try:
                        if reply.sender.is_self:
                            await reply.delete()
                    except:
                        pass
                    try:
                        remove(name)
                    except:
                        pass
                    await context.delete()
                    status = True
                    break
                else:
                    await context.edit("**未搜索到结果**")
                    sleep(3)
                    await context.delete()
                    status = True
                    break
            else:
                continue

        if status is False:
            await context.edit(apifailtext)
            sleep(3)
            await context.delete()
    else:  # invalid input
        await context.edit(helptext)
        return
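# A minimal, standalone sketch (not part of the snippet above) of the size guard it applies:
# any download smaller than 100 KiB is treated as failed and removed; the file name is illustrative.
from os import path, remove

def is_failed_download(file_name, min_kib=100):
    """Treat a missing file, or one smaller than min_kib KiB, as a failed download."""
    return not path.exists(file_name) or (path.getsize(file_name) / 1024) < min_kib

if is_failed_download("song.mp3") and path.exists("song.mp3"):
    remove("song.mp3")  # discard the stub so a retry starts from a clean state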
Esempio n. 35
0
while line:
    tk = line.split()
    if len(tk) > 1:
        t = float(tk[0])
        gid = int(tk[1])
        if gid in m:
            m[gid].append(t)
        else:
            m[gid] = [t]
    line = fi.readline()
fi.close()

fo1 = open(filespk + '.sbgh', 'wb')
fo = open(filespk + '.sbg', 'wb')
for gid, tspks in m.items():
    fo.write(pack('>LL', gid, len(tspks)))
    for x in tspks:
        fo1.write(pack('>f', x))
fo.close()
fo1.close()
from os import path, system
f = open(filespk + '.size', 'wb')
f.write(pack('>q', path.getsize(filespk + '.size')))
f.close()

system('cat %s.size %s.sbgh %s.sbg > %s.spk' %
       (filespk, filespk, filespk, filespk))
system('rm %s.size %s.sbgh %s.sbg' % (filespk, filespk, filespk))

quit()
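# A minimal reader sketch for the records written above, usable on the intermediate files
# (the '>LL' index in .sbg and the '>f' spike times in .sbgh) before the final cat/rm step.
# Not part of the original script; the paths are whatever filespk produced.
from struct import unpack, calcsize

def read_spikes(sbg_path, sbgh_path):
    """Rebuild {gid: [spike times]} from the index and data streams."""
    spikes = {}
    with open(sbg_path, 'rb') as fidx, open(sbgh_path, 'rb') as fdat:
        rec = fidx.read(calcsize('>LL'))
        while rec:
            gid, n = unpack('>LL', rec)
            spikes[gid] = list(unpack('>%df' % n, fdat.read(n * calcsize('>f'))))
            rec = fidx.read(calcsize('>LL'))
    return spikes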
Esempio n. 36
0
julian_day = []
j = 0
st = '_' + int2str(jn, 3)

txt_file = open(file_name + "_packet_info.txt", "w+")
txt_file.write("\t\t---Sampling_Frequency_Summary---")
txt_file.close()
txt_file = open(file_name + "_packet_info.txt", "a")

#stop.append(0)

while (jn < 999):

    print "Reading file " + file_name + st + ".hdr"

    size = getsize(file_name + st + ".hdr")
    i = 26
    q = 0
    fpga = np.empty(2)
    gps = np.empty(2)
    p_num = np.empty(2)
    g_blip = []
    p_blip = []
    f_g_blip = []
    f_p_blip = []

    file = open(file_name + st + ".hdr", "rb")
    while (i < size):
        file.read(24)
        fpga[q % 2] = int(str(file.read(2)).encode('hex'), 16)
        gps[q % 2] = int(str(file.read(2)).encode('hex'), 16)
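# The loop above uses Python 2's str.encode('hex') to turn two raw bytes into an integer.
# A Python 3 equivalent of that step (the file name is illustrative):
with open("data_000.hdr", "rb") as fh:
    fh.read(24)                                    # skip the 24-byte prefix, as above
    fpga_counter = int.from_bytes(fh.read(2), "big")
    gps_counter = int.from_bytes(fh.read(2), "big")
    print(fpga_counter, gps_counter)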
Esempio n. 37
0
    client = FileDeliverHost(host_info["host"], pwd=host_info["pwd"], pkey=host_info["pkey"])
    redis.hset("file_deliver", "%s_%s" % (host_info["host"], remote_path), value="upload")
    client.sftp.put(local_path, remote_path, callback=callback)
    print(host_info["host"] + " done")


if __name__ == '__main__':

    from multiprocessing.pool import Pool
    from multiprocessing import Process

    hosts = ["172.16.0.13", "172.18.0.21"]  # , "172.16.0.17", "172.16.0.16", "172.16.0.18"]
    pwd = "h@@T1onZ02Onew"
    path = "/root/test.png"
    local_path = "../dev/test.gif"
    file_size = getsize(local_path)
    processes = []
    start = time.time()
    for host in hosts:
        name = "%s_%s" % (host, path)
        p = Process(
            target=task,
            args=({
                      "host": host,
                      "pwd": pwd,
                      "pkey": None
                  },
                  local_path, path, partial(file_callback, host, path)),
            name=name
        )
        p.start()
Esempio n. 38
0
def finalize_redex(state):
    state.lib_manager.__exit__(*sys.exc_info())

    repack_start_time = timer()

    state.unpack_manager.__exit__(*sys.exc_info())
    state.zip_manager.__exit__(*sys.exc_info())

    align_and_sign_output_apk(
        state.zip_manager.output_apk,
        state.args.out,
        # In dev mode, reset timestamps.
        state.args.reset_zip_timestamps or state.args.dev,
        state.args.sign,
        state.args.keystore,
        state.args.keyalias,
        state.args.keypass,
        state.args.ignore_zipalign,
        state.args.page_align_libs,
    )

    logging.debug(
        "Creating output APK finished in {:.2f} seconds".format(
            timer() - repack_start_time
        )
    )

    meta_file_dir = join(state.dex_dir, "meta/")
    assert os.path.isdir(meta_file_dir), "meta dir %s does not exist" % meta_file_dir

    copy_all_file_to_out_dir(
        meta_file_dir, state.args.out, "*", "all redex generated artifacts"
    )

    if state.args.enable_instrument_pass:
        logging.debug("Creating redex-instrument-metadata.zip")
        zipfile_path = join(dirname(state.args.out), "redex-instrument-metadata.zip")

        FILES = [
            join(dirname(state.args.out), f)
            for f in (
                "redex-instrument-metadata.txt",
                "redex-source-block-method-dictionary.csv",
                "redex-source-blocks.csv",
            )
        ]

        # Write a checksum file.
        hash = hashlib.md5()
        for f in FILES:
            hash.update(open(f, "rb").read())
        checksum_path = join(dirname(state.args.out), "redex-instrument-checksum.txt")
        with open(checksum_path, "w") as f:
            f.write(f"{hash.hexdigest()}\n")

        with zipfile.ZipFile(zipfile_path, "w", compression=zipfile.ZIP_DEFLATED) as z:
            for f in [*FILES, checksum_path]:
                z.write(f, os.path.basename(f))

        for f in [*FILES, checksum_path]:
            os.remove(f)

    redex_stats_filename = state.config_dict.get("stats_output", "redex-stats.txt")
    redex_stats_file = join(dirname(meta_file_dir), redex_stats_filename)
    if isfile(redex_stats_file):
        with open(redex_stats_file, "r") as fr:
            apk_input_size = getsize(state.args.input_apk)
            apk_output_size = getsize(state.args.out)
            redex_stats_json = json.load(fr)
            redex_stats_json["input_stats"]["total_stats"][
                "num_compressed_apk_bytes"
            ] = apk_input_size
            redex_stats_json["output_stats"]["total_stats"][
                "num_compressed_apk_bytes"
            ] = apk_output_size
            update_redex_stats_file = join(
                dirname(state.args.out), redex_stats_filename
            )
            with open(update_redex_stats_file, "w") as fw:
                json.dump(redex_stats_json, fw)

    # Write invocation file
    with open(join(dirname(state.args.out), "redex.py-invocation.txt"), "w") as f:
        print("%s" % " ".join(map(shlex.quote, sys.argv)), file=f)

    copy_all_file_to_out_dir(
        state.dex_dir, state.args.out, "*.dot", "approximate shape graphs"
    )
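# The instrument-metadata step above hashes several files into a single MD5 before zipping
# them. A standalone sketch of that pattern (the list of paths is illustrative):
import hashlib

def combined_md5(paths):
    """MD5 of the concatenated contents of the given files, in order."""
    digest = hashlib.md5()
    for p in paths:
        with open(p, "rb") as fh:
            digest.update(fh.read())
    return digest.hexdigest()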
Esempio n. 39
0
    def _parseHeader(self):
        """Read the header information from a dcd file.
        Input: fd - a file struct opened for binary reading.
        Output: 0 on success, negative error code on failure.
        Side effects: *natoms set to number of atoms per frame
                      *nsets set to number of frames in dcd file
                      *istart set to starting timestep of dcd file
                      *nsavc set to timesteps between dcd saves
                      *delta set to value of trajectory timestep
                      *nfixed set to number of fixed atoms
                      *freeind may be set to heap-allocated space
                      *reverse set to one if reverse-endian, zero if not.
                      *charmm set to internal code for handling charmm data.
        """

        dcd = self._file
        endian = b''  #'=' # native endian
        rec_scale = RECSCALE32BIT
        charmm = None
        dcdcordmagic = unpack(endian + b'i', b'CORD')[0]
        # Check magic number in file header and determine byte order
        bits = dcd.read(calcsize('ii'))

        temp = unpack(endian + b'ii', bits)

        if temp[0] + temp[1] == 84:
            LOGGER.info('Detected CHARMM -i8 64-bit DCD file of native '
                        'endianness.')
            rec_scale = RECSCALE64BIT
        elif temp[0] == 84 and temp[1] == dcdcordmagic:
            pass
            #LOGGER.info('Detected standard 32-bit DCD file of native '
            #            'endianness.')
        else:
            if unpack(b'>ii', bits) == temp:
                endian = '>'
            else:
                endian = '<'
            temp = unpack(endian + b'ii', bits)
            if temp[0] + temp[1] == 84:
                rec_scale = RECSCALE64BIT
                LOGGER.info('Detected CHARMM -i8 64-bit DCD file of opposite '
                            'endianness.')
            else:
                endian = ''
                temp = unpack(endian + b'ii', bits)
                if temp[0] == 84 and temp[1] == dcdcordmagic:
                    LOGGER.info('Detected standard 32-bit DCD file of '
                                'opposite endianness.')
                else:
                    raise IOError('Unrecognized DCD header or unsupported '
                                  'DCD format.')

        # check for magic string, in case of long record markers
        if rec_scale == RECSCALE64BIT:
            raise IOError('CHARMM 64-bit DCD files are not yet supported.')
            temp = unpack(b'I', dcd.read(calcsize('I')))
            if temp[0] != dcdcordmagic:
                raise IOError('Failed to find CORD magic in CHARMM -i8 64-bit '
                              'DCD file.')

        # Buffer the entire header for random access
        bits = dcd.read(80)

        # CHARMm-generated DCD files set the last integer in the
        # header, which is unused by X-PLOR, to its version number.
        # Checking if this is nonzero tells us this is a CHARMm file
        # and to look for other CHARMm flags.
        temp = unpack(endian + b'i' * 20, bits)

        if temp[-1] != 0:
            charmm = True

        if charmm:
            #LOGGER.info('CHARMM format DCD file (also NAMD 2.1 and later).')
            temp = unpack(endian + b'i' * 9 + b'f' + b'i' * 10, bits)
        else:
            LOGGER.info('X-PLOR format DCD file (also NAMD 2.0 and earlier) '
                        'is not supported.')
            return None

        # Store the number of sets of coordinates (NSET)
        self._n_csets = temp[0]
        # Store ISTART, the starting timestep
        self._first_ts = temp[1]
        # Store NSAVC, the number of timesteps between dcd saves
        self._framefreq = temp[2]
        # Store NAMNF, the number of fixed atoms
        self._n_fixed = temp[8]

        if self._n_fixed > 0:
            raise IOError('DCD files with fixed atoms are not yet supported.')

        # Read in the timestep, DELTA
        # Note: DELTA is stored as double with X-PLOR but as float with CHARMm
        self._timestep = temp[9]
        self._unitcell = temp[10] == 1

        # Get the end size of the first block
        if unpack(endian + b'i', dcd.read(rec_scale * calcsize('i')))[0] != 84:
            raise IOError('Unrecognized DCD format.')

        # Read in the size of the next block
        temp = unpack(endian + b'i', dcd.read(rec_scale * calcsize('i')))

        if (temp[0] - 4) % 80 != 0:
            raise IOError('Unrecognized DCD format.')
        noremarks = temp[0] == 84

        # Read NTITLE, the number of 80 character title strings there are
        temp = unpack(endian + b'i', dcd.read(rec_scale * calcsize('i')))

        self._dcdtitle = dcd.read(80)

        if not noremarks:
            self._remarks = dcd.read(80)

        # Get the ending size for this block
        temp = unpack(endian + b'i', dcd.read(rec_scale * calcsize('i')))

        if (temp[0] - 4) % 80 != 0:
            raise IOError('Unrecognized DCD format.')

        # Read in an integer '4'
        if unpack(endian + b'i', dcd.read(rec_scale * calcsize('i')))[0] != 4:
            raise IOError('Unrecognized DCD format.')

        # Read in the number of atoms
        self._n_atoms = unpack(endian + b'i',
                               dcd.read(rec_scale * calcsize('i')))[0]
        # Read in an integer '4'
        if unpack(endian + b'i', dcd.read(rec_scale * calcsize('i')))[0] != 4:
            raise IOError('Bad DCD format.')

        self._is64bit = rec_scale == RECSCALE64BIT
        self._endian = endian
        self._n_floats = (self._n_atoms + 2) * 3

        if self._is64bit:
            if self._unitcell:
                self._bytes_per_frame = 56 + self._n_floats * 8
            else:
                self._bytes_per_frame = self._n_floats * 8
            LOGGER.warning('Reading of 64 bit DCD files has not been tested. '
                           'Please report any problems that you may find.')
            self._dtype = np.float64
            self._itemsize = 8
        else:
            if self._unitcell:
                self._bytes_per_frame = 56 + self._n_floats * 4
            else:
                self._bytes_per_frame = self._n_floats * 4
            self._dtype = np.float32
            self._itemsize = 4

        self._first_byte = self._file.tell()
        n_csets = (getsize(self._filename) -
                   self._first_byte) // self._bytes_per_frame
        if n_csets != self._n_csets:
            LOGGER.warning('DCD header claims {0} frames, file size '
                           'indicates there are actually {1} frames.'.format(
                               self._n_csets, n_csets))
            self._n_csets = n_csets

        self._coords = self.nextCoordset()
        self._file.seek(self._first_byte)
        self._nfi = 0
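# The final check above infers the real frame count from the file size. The same arithmetic
# in isolation, with hypothetical numbers (1000 atoms, 32-bit floats, unit cell present,
# 276-byte header):
n_atoms = 1000
n_floats = (n_atoms + 2) * 3                 # as computed in _parseHeader above
bytes_per_frame = 56 + n_floats * 4          # unit-cell case, 4 bytes per float
file_size, first_byte = 12_100_000, 276      # hypothetical getsize() result and header size
n_csets = (file_size - first_byte) // bytes_per_frame
print(n_csets)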
Esempio n. 40
0
def get_files_info(directory):
    yield from ((extract_extension(f), getsize(f))
                for f in get_files(directory))
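# get_files and extract_extension are not shown above; a self-contained usage sketch with
# plausible stand-ins for them (walk a tree and report an (extension, size) pair per file):
from os import walk
from os.path import getsize, join, splitext

def get_files(directory):
    for root, _dirs, files in walk(directory):
        for name in files:
            yield join(root, name)

def extract_extension(file_path):
    return splitext(file_path)[1]

def get_files_info(directory):
    yield from ((extract_extension(f), getsize(f))
                for f in get_files(directory))

for ext, size in get_files_info("."):
    print(ext, size)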
Esempio n. 41
0
def main():
    # python27 unrpyc.py [-c] [-d] [--python-screens|--ast-screens|--no-screens] file [file ...]
    cc_num = cpu_count()
    parser = argparse.ArgumentParser(
        description="Decompile .rpyc/.rpymc files")

    parser.add_argument('-c',
                        '--clobber',
                        dest='clobber',
                        action='store_true',
                        help="overwrites existing output files")

    parser.add_argument(
        '-d',
        '--dump',
        dest='dump',
        action='store_true',
        help="instead of decompiling, pretty print the ast to a file")

    parser.add_argument(
        '-p',
        '--processes',
        dest='processes',
        action='store',
        type=int,
        choices=range(1, cc_num),
        default=cc_num - 1 if cc_num > 2 else 1,
        help="use the specified number or processes to decompile."
        "Defaults to the amount of hw threads available minus one, disabled when muliprocessing is unavailable."
    )

    parser.add_argument(
        '-t',
        '--translation-file',
        dest='translation_file',
        action='store',
        default=None,
        help="use the specified file to translate during decompilation")

    parser.add_argument(
        '-T',
        '--write-translation-file',
        dest='write_translation_file',
        action='store',
        default=None,
        help="store translations in the specified file instead of decompiling")

    parser.add_argument(
        '-l',
        '--language',
        dest='language',
        action='store',
        default='english',
        help=
        "if writing a translation file, the language of the translations to write"
    )

    parser.add_argument(
        '--sl1-as-python',
        dest='decompile_python',
        action='store_true',
        help="Only dumping and for decompiling screen language 1 screens. "
        "Convert SL1 Python AST to Python code instead of dumping it or converting it to screenlang."
    )

    parser.add_argument(
        '--comparable',
        dest='comparable',
        action='store_true',
        help=
        "Only for dumping, remove several false differences when comparing dumps. "
        "This suppresses attributes that are different even when the code is identical, such as file modification times. "
    )

    parser.add_argument(
        '--no-pyexpr',
        dest='no_pyexpr',
        action='store_true',
        help=
        "Only for dumping, disable special handling of PyExpr objects, instead printing them as strings. "
        "This is useful when comparing dumps from different versions of Ren'Py. "
        "It should only be used if necessary, since it will cause loss of information such as line numbers."
    )

    parser.add_argument(
        '--tag-outside-block',
        dest='tag_outside_block',
        action='store_true',
        help=
        "Always put SL2 'tag's on the same line as 'screen' rather than inside the block. "
        "This will break compiling with Ren'Py 7.3 and above, but is needed to get correct line numbers "
        "from some files compiled with older Ren'Py versions.")

    parser.add_argument(
        '--init-offset',
        dest='init_offset',
        action='store_true',
        help=
        "Attempt to guess when init offset statements were used and insert them. "
        "This is always safe to enable if the game's Ren'Py version supports init offset statements, "
        "and the generated code is exactly equivalent, only less cluttered.")

    parser.add_argument(
        'file',
        type=str,
        nargs='+',
        help="The filenames to decompile. "
        "All .rpyc files in any directories passed or their subdirectories will also be decompiled."
    )

    parser.add_argument(
        '--try-harder',
        dest="try_harder",
        action="store_true",
        help=
        "Tries some workarounds against common obfuscation methods. This is a lot slower."
    )

    args = parser.parse_args()

    if args.write_translation_file and not args.clobber and path.exists(
            args.write_translation_file):
        # Fail early to avoid wasting time going through the files
        print(
            "Output translation file already exists. Pass --clobber to overwrite."
        )
        return

    if args.translation_file:
        with open(args.translation_file, 'rb') as in_file:
            args.translations = in_file.read()

    # Expand wildcards
    def glob_or_complain(s):
        retval = glob.glob(s)
        if not retval:
            print("File not found: " + s)
        return retval

    filesAndDirs = map(glob_or_complain, args.file)
    # Concatenate lists
    filesAndDirs = list(itertools.chain(*filesAndDirs))

    # Recursively add .rpyc files from any directories passed
    files = []
    for i in filesAndDirs:
        if path.isdir(i):
            for dirpath, dirnames, filenames in walk(i):
                files.extend(
                    path.join(dirpath, j) for j in filenames
                    if len(j) >= 5 and j.endswith(('.rpyc', '.rpymc')))
        else:
            files.append(i)

    # Check if we actually have files. Don't worry about
    # no parameters passed, since ArgumentParser catches that
    if len(files) == 0:
        print("No script files to decompile.")
        return

    files = map(lambda x: (args, x, path.getsize(x)), files)
    processes = int(args.processes)
    if processes > 1:
        # If a big file starts near the end, there could be a long time with
        # only one thread running, which is inefficient. Avoid this by starting
        # big files first.
        files.sort(key=itemgetter(2), reverse=True)
        results = Pool(int(args.processes), sharelock,
                       [printlock]).map(worker, files, 1)
    else:
        # Decompile in the order Ren'Py loads in
        files.sort(key=itemgetter(1))
        results = map(worker, files)

    if args.write_translation_file:
        print("Writing translations to %s..." % args.write_translation_file)
        translated_dialogue = {}
        translated_strings = {}
        good = 0
        bad = 0
        for result in results:
            if not result:
                bad += 1
                continue
            good += 1
            translated_dialogue.update(magic.loads(result[0], class_factory))
            translated_strings.update(result[1])
        with open(args.write_translation_file, 'wb') as out_file:
            magic.safe_dump(
                (args.language, translated_dialogue, translated_strings),
                out_file)

    else:
        # Check per file if everything went well and report back
        good = results.count(True)
        bad = results.count(False)

    if bad == 0:
        print("Decompilation of %d script file%s successful" %
              (good, 's' if good > 1 else ''))
    elif good == 0:
        print("Decompilation of %d file%s failed" %
              (bad, 's' if bad > 1 else ''))
    else:
        print(
            "Decompilation of %d file%s successful, but decompilation of %d file%s failed"
            % (good, 's' if good > 1 else '', bad, 's' if bad > 1 else ''))
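# The parallel branch above sorts the work items so the biggest files are decompiled first,
# which keeps the pool busy until the end. A standalone sketch of that scheduling trick
# (the worker, names and sizes are illustrative):
from multiprocessing import Pool
from operator import itemgetter

def fake_worker(item):
    name, size = item
    return (name, size)          # a real worker would decompile the file here

if __name__ == '__main__':
    work = [("a.rpyc", 4096), ("b.rpyc", 1200000), ("c.rpyc", 64000)]
    work.sort(key=itemgetter(1), reverse=True)   # biggest first
    with Pool(2) as pool:
        print(pool.map(fake_worker, work, 1))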
         if line[0] == ">":
             seq_name = line[1:]
             if seq_name not in locus_list:
                 locus_list.append(seq_name)
                 Allseq_dict[seq_name] = ''
             m = re.search("\|(.+)$", seq_name)
             sample = m.group(1)
             if sample not in sample_list:
                 sample_list.append(sample)
                 sample_seq_dict[sample] = []
         else:
             Allseq_dict[seq_name] += line
 for key in Allseq_dict.keys():
     m = re.search("\|(.+)$", key)
     sample_seq_dict[m.group(1)].append(Allseq_dict[key])
 filesize = getsize(fas_file)
 if filesize == 0 or locus_list == [] or len(locus_list) < 2:
     print("输入文件不合要求,请检查!")
     OUTPUT.write("#" + fas_file + ",-\n")
     os.exit(0)
 #开始计算Pi
 name = ""
 count = 0
 sum_pi = 0
 pair_num = 0
 processing_num = 1
 for i in range(len(sample_list)):
     #print("Sample:", sample_list[i])
     for j in range(i+1, len(sample_list)):
         total_pair = len(sample_list) * (len(sample_list)-1) / 2
         print("Process rate: %d / %d\r" % (processing_num, total_pair), end = '')
Esempio n. 43
0
source_folder_name = "source"
build_folder_name = "build"

driver = webdriver.Chrome(
    path_to_webdriver
)  # Change "Chrome" to the right type of browser driver you are using

# endregion

source_path = join(script_path, source_folder_name)
source_path_len = len(source_path) + len(pathsep)
build_path = join(script_path, build_folder_name, "html")
rst_files = glob(join(script_path, '**/*.rst'), recursive=True)
size_dict = {}
for rst_file in rst_files:
    size_dict[rst_file] = [getsize(rst_file), getmtime(rst_file), 0]

while True:
    for rst_file in rst_files:
        if isfile(rst_file):
            record = size_dict[rst_file]
            new_size = getsize(rst_file)
            new_mtime = getmtime(rst_file)

            if record[0] != new_size or record[
                    1] < new_mtime:  # Check if the file size or latest modify time changed
                html_file = join(build_path,
                                 rst_file[source_path_len:-4] + ".html")
                #                 import ipdb; ipdb.set_trace()
                os.system("sphinx-build -M html {} {}".format(
                    source_folder_name, build_folder_name))
Esempio n. 44
0
 def has_size(self, file_name, size):
     file_size = getsize(file_name)
     assert file_size >= size, f"{file_name} must have at least {size} bytes"
Esempio n. 45
0
    def __init__(self,
                 input_file=None,
                 output_file=None,
                 dfxml_file=None,
                 report_file=None,
                 commit=False,
                 ignore_patterns=[],
                 key=None,
                 rules=[]):
        #  Validate configuration
        from schema import Schema, Optional, Or, Use, And, SchemaError
        schema = Schema({
            'input_file':
            Use(lambda f: open(f, 'r'), error='Cannot read the input file'),
            Optional('output_file'):
            Or(
                None,
                Use(lambda f: open(f, 'w'),
                    error='Cannot write to the output file')),
            Optional('dfxml_file'):
            Or(None, Use(lambda f: open(f, 'r'),
                         error='Cannot read DFXML file')),
            Optional('report_file'):
            Or(None,
               lambda f: open(f, 'w'),
               error='Cannot write to the report file'),
            'commit':
            Or(True, False),
            'ignore_patterns':
            Use(lambda f: re.compile(convert_fileglob_to_re('|'.join(f))),
                error='Cannot compile unified ignore regex'),
            'key':
            Or(None, str),
            'rules':
            And([(redact_rule, redact_action)], lambda f: len(f) > 0)
        })
        try:
            kwargs = {
                'input_file': input_file,
                'output_file': output_file,
                'dfxml_file': dfxml_file,
                'report_file': report_file,
                'commit': commit,
                'ignore_patterns': ignore_patterns,
                'key': key,
                'rules': rules
            }
            self.conf = schema.validate(kwargs)
        except SchemaError as e:
            logging.warning('The redact configuration did not validate:')
            exit(e)
        if self.conf['commit'] and 'output_file' not in self.conf.keys():
            logging.error('An output file is required when COMMIT is on.')
            exit(1)
        # TODO Check input and output are not same file

        logging.debug('Configuration:\n%s' % self.conf)

        # Print rules
        logging.debug(
            json.dumps([(x.line, x.__class__.__name__, y.__class__.__name__,
                         x.lgpattern if hasattr(x, 'lgpattern') else '')
                        for x, y in self.conf['rules']],
                       indent=4))

        self.input_file = self.conf['input_file']
        from os import path
        self.image_size = path.getsize(self.input_file.name)
        self.output_file = self.conf['output_file']
        self.report_file = self.conf['report_file']
        self.dfxml_file = self.conf['dfxml_file']
        self.commit = self.conf['commit']
        self.configure_report_logger()
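# The constructor above validates its keyword arguments with the schema library. A much
# smaller sketch of the same pattern (the keys and values here are illustrative):
from schema import Schema, Optional, Use

conf_schema = Schema({
    'input_file': str,
    Optional('commit'): bool,
    'retries': Use(int),          # coerce "3" -> 3, or raise SchemaError
})
print(conf_schema.validate({'input_file': 'disk.img', 'commit': False, 'retries': '3'}))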
Esempio n. 46
0
def main():
    """
    Starts the parser on the file given by the filename as the first
    argument on the command line.
    """
    from optparse import OptionParser, OptionGroup
    from os import sep, linesep
    from os.path import basename, getsize
    from sys import argv, stderr, exit as sysExit
    from chardet import detect
    from codecs import BOM_UTF8, open as codecsOpen

    def optParse():
        """
        Parses command line options.

        Generally we're supporting all the command line options that doxypy.py
        supports in an analogous way to make it easy to switch back and forth.
        We additionally support a top-level namespace argument that is used
        to trim away excess path information.
        """

        parser = OptionParser(prog=basename(argv[0]))

        parser.set_usage("%prog [options] filename")
        parser.add_option(
            "-a",
            "--autobrief",
            action="store_true",
            dest="autobrief",
            help=
            "parse the docstring for @brief description and other information")
        parser.add_option("-c",
                          "--autocode",
                          action="store_true",
                          dest="autocode",
                          help="parse the docstring for code samples")
        parser.add_option(
            "-n",
            "--ns",
            action="store",
            type="string",
            dest="topLevelNamespace",
            help="specify a top-level namespace that will be used to trim paths"
        )
        parser.add_option(
            "-t",
            "--tablength",
            action="store",
            type="int",
            dest="tablength",
            default=4,
            help="specify a tab length in spaces; only needed if tabs are used"
        )
        parser.add_option("-s",
                          "--stripinit",
                          action="store_true",
                          dest="stripinit",
                          help="strip __init__ from namespace")
        parser.add_option(
            "-O",
            "--object-respect",
            action="store_true",
            dest="object_respect",
            help=
            "By default, doxypypy hides object class from class dependencies even if class inherits explictilty from objects (new-style class), this option disable this."
        )
        group = OptionGroup(parser, "Debug Options")
        group.add_option("-d",
                         "--debug",
                         action="store_true",
                         dest="debug",
                         help="enable debug output on stderr")
        parser.add_option_group(group)

        ## Parse options based on our definition.
        (options, filename) = parser.parse_args()

        # Just abort immediately if we don't have an input file.
        if not filename:
            stderr.write("No filename given." + linesep)
            sysExit(-1)

        # Turn the full path filename into a full path module location.
        fullPathNamespace = filename[0].replace(sep, '.')[:-3]
        # Use any provided top-level namespace argument to trim off excess.
        realNamespace = fullPathNamespace
        if options.topLevelNamespace:
            namespaceStart = fullPathNamespace.find(options.topLevelNamespace)
            if namespaceStart >= 0:
                realNamespace = fullPathNamespace[namespaceStart:]
        if options.stripinit:
            realNamespace = realNamespace.replace('.__init__', '')
        options.fullPathNamespace = realNamespace

        return options, filename[0]

    # Figure out what is being requested.
    (options, inFilename) = optParse()

    # Figure out encoding of input file.
    numOfSampleBytes = min(getsize(inFilename), 32)
    sampleBytes = open(inFilename, 'rb').read(numOfSampleBytes)
    sampleByteAnalysis = detect(sampleBytes)
    encoding = sampleByteAnalysis['encoding'] or 'ascii'

    # Switch to generic versions to strip the BOM automatically.
    if sampleBytes.startswith(BOM_UTF8):
        encoding = 'UTF-8-SIG'
    if encoding.startswith("UTF-16"):
        encoding = "UTF-16"
    elif encoding.startswith("UTF-32"):
        encoding = "UTF-32"

    # Read contents of input file.
    if encoding == 'ascii':
        inFile = open(inFilename)
    else:
        inFile = codecsOpen(inFilename, encoding=encoding)
    lines = inFile.readlines()
    inFile.close()
    # Create the abstract syntax tree for the input file.
    astWalker = AstWalker(lines, options, inFilename)
    astWalker.parseLines()
    # Output the modified source.

    # There is a "feature" in print on Windows. If linesep is
    # passed, it will generate 0x0D 0x0D 0x0A each line which
    # screws up Doxygen since it's expected 0x0D 0x0A line endings.
    for line in astWalker.getLines().split(linesep):
        print(line.rstrip())
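# The encoding detection above reads a small byte sample, runs chardet over it and
# special-cases the UTF-8 BOM. The same steps in isolation (the file name is illustrative):
from os.path import getsize
from codecs import BOM_UTF8
from chardet import detect

in_filename = "some_module.py"
with open(in_filename, "rb") as fh:
    sample = fh.read(min(getsize(in_filename), 32))
encoding = detect(sample)['encoding'] or 'ascii'
if sample.startswith(BOM_UTF8):
    encoding = 'UTF-8-SIG'        # lets codecs strip the BOM when the file is reopened
print(encoding)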
Esempio n. 47
0
    def test_run_jackknifed_beta_diversity_parallel(self):
        """ run_jackknifed_beta_diversity generates expected results """

        run_jackknifed_beta_diversity(self.test_data['biom'][0],
                                      self.test_data['tree'][0],
                                      20,
                                      self.test_out,
                                      call_commands_serially,
                                      self.params,
                                      self.qiime_config,
                                      self.test_data['map'][0],
                                      parallel=True,
                                      status_update_callback=no_status_updates)

        weighted_unifrac_upgma_tree_fp = join(self.test_out,
                                              'weighted_unifrac', 'upgma_cmp',
                                              'jackknife_named_nodes.tre')
        unweighted_unifrac_upgma_tree_fp = join(self.test_out,
                                                'unweighted_unifrac',
                                                'upgma_cmp',
                                                'jackknife_named_nodes.tre')
        weighted_unifrac_emperor_index_fp = join(self.test_out,
                                                 'weighted_unifrac',
                                                 'emperor_pcoa_plots',
                                                 'index.html')
        unweighted_unifrac_emperor_index_fp = join(self.test_out,
                                                   'unweighted_unifrac',
                                                   'emperor_pcoa_plots',
                                                   'index.html')

        input_file_basename = splitext(split(self.test_data['biom'][0])[1])[0]
        unweighted_unifrac_dm_fp = join(
            self.test_out, 'unrarefied_bdiv',
            'unweighted_unifrac_%s.txt' % input_file_basename)
        weighted_unifrac_dm_fp = join(
            self.test_out, 'unrarefied_bdiv',
            'weighted_unifrac_%s.txt' % input_file_basename)

        # check for expected relations between values in the unweighted unifrac
        # distance matrix
        dm = parse_distmat_to_dict(open(unweighted_unifrac_dm_fp))
        self.assertTrue(
            dm['f1']['f2'] < dm['f1']['p1'],
            "Distance between pair of fecal samples is larger than distance"
            " between fecal and palm sample (unweighted unifrac).")
        self.assertEqual(dm['f1']['f1'], 0)
        # check for expected relations between values in the weighted unifrac
        # distance matrix
        dm = parse_distmat_to_dict(open(weighted_unifrac_dm_fp))
        self.assertTrue(
            dm['f1']['f2'] < dm['f1']['p1'],
            "Distance between pair of fecal samples is larger than distance"
            " between fecal and palm sample (unweighted unifrac).")
        self.assertEqual(dm['f1']['f1'], 0)

        # check that final output files have non-zero size
        self.assertTrue(getsize(weighted_unifrac_upgma_tree_fp) > 0)
        self.assertTrue(getsize(unweighted_unifrac_upgma_tree_fp) > 0)
        self.assertTrue(getsize(weighted_unifrac_emperor_index_fp) > 0)
        self.assertTrue(getsize(unweighted_unifrac_emperor_index_fp) > 0)

        # Check that the log file is created and has size > 0
        log_fp = glob(join(self.test_out, 'log*.txt'))[0]
        self.assertTrue(getsize(log_fp) > 0)
Esempio n. 48
0
        except subprocess.CalledProcessError:
            logging.warning("CALLEDPROCESERROR in initial sort (sam)")
        except OSError:
            logging.warning("OSERROR in initial sort (sam)")
        logging.info(
            "Initial name sorting completed, starting SAM-file splitting at: {}"
            .format(str(datetime.datetime.now())))
        errchk()
        # sam_split(temp, outfile_perfect, outfile_secondary)
        sam_parse.sam_split_runner(temp, directory + "/", 2000000, threads)
        errchk()
        logging.info("Splitting of the SAM-file completed at: {}".format(
            str(datetime.datetime.now())))
        subprocess.call(["rm", temp])

    perfsize = path.getsize(outfile_perfect)
    secsize = path.getsize(outfile_secondary)
    logging.info(
        "After splitting: ByteSize of Perfect: {}. Bytesize of secondary: {}".
        format(perfsize, secsize))

    # Get original headers
    original_headers = directory + "/original_headers.sam"
    subprocess.call("{} view -H {} > {}".format(samtools_path, infile,
                                                original_headers),
                    shell=True)
    logging.info(
        "Headers extracted from original input file: {} to : {}".format(
            infile, original_headers))
    errchk()
Esempio n. 49
0
 def ping(self):
     ret = PING_SUCCESS
     counter = 0
     for i in [1]:
         
         try:                
             # default number of ping attempts
             # maximum duration changed to 30 days  modify by shenlige 2013-6-26
             number = 2592000       
             cmd_smg = " -l %s -n %s %s" % (self.size,number,self.node)
             cmd = "\"" + EXE_PATH + "\"" + cmd_smg
             
             with open(self.ping_tmp_file,"w") as self.file_handel:
                 self.popen = subprocess.Popen(cmd,shell=True,
                                  stdin=subprocess.PIPE,
                                  stdout=self.file_handel,
                                  stderr=subprocess.PIPE)
         
         except Exception, e:
             log_data = u"执行并行ping命令发生异常,message:%s" % e
             log.user_err(log_data)
             self.err_info = log_data
             ret = PING_FAIL
             break
                        
         # make sure the ping temp file is not empty; wait up to 20 seconds  add by shenlige 2013-4-26
         while counter < self.wait_time:
             content = getsize(self.ping_tmp_file)
             if content > 0L:
                 break
             else:
                 counter += 1
                 time.sleep(1)
                              
         time.sleep(self.total_time)    # run the ping command for the specified duration
         
         try:
             if self.popen.poll() == None:
                 # changed how the process is killed  modify by shenlige 2013-7-4
                 pro_ret,data = attcommonfun.get_process_children(self.popen.pid)
                 if pro_ret == attcommonfun.ATTCOMMONFUN_SUCCEED:
                     dict_process = data
                     for process_pid, process_name in dict_process.items():
                         # modify by shenlige 2013-11-2: changed how processes are killed
                         #if process_name not in ["ATT.exe","robot.exe","cmd.exe"]:
                         if process_name.lower() == 'ping.exe':
                             try:
                                 os.kill(process_pid, 9)
                             except:
                                 pass
             
                 try:
                     os.kill(self.popen.pid, 9)
                 except:
                     pass
             else:
                 pass
             
             time.sleep(0.5)    # zsj add: works around the windows32 error   2013-3-25
             
         except Exception, e:
             log_data = u"停止并行ping发生异常,message:%s" % e
             log.user_err(log_data)
             self.err_info = log_data
             ret = PING_FAIL
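# The snippet above polls getsize() on a temporary file to wait for the first line of output
# before reading it. The same idea as a small helper (names are illustrative):
import time
from os.path import exists, getsize

def wait_for_output(tmp_path, timeout_s=20):
    """Poll until tmp_path exists and is non-empty, or until the timeout expires."""
    for _ in range(timeout_s):
        if exists(tmp_path) and getsize(tmp_path) > 0:
            return True
        time.sleep(1)
    return False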
Esempio n. 50
0
import os
from os.path import getsize, splitext
import codecs
import json

base_url = "img/"
R = os.path.join("..", "R.js")

obj = []

for f in os.listdir(os.getcwd()):
    names = splitext(f)
    if names[1] != ".py" and names[1] != "":
        print(names[0])
        obj.append({
            "id": names[0],
            "src": base_url + f + "?121",
            "size": round(getsize(f) * .001, 2)
        })

fp = codecs.open(R, "w", "utf-8")
fp.write("R={};R.images=" + json.dumps(obj, ensure_ascii=False))
fp.close()

if os.name == "nt":
    os.system("pause")
Esempio n. 51
0
# -*- coding: utf-8 -*-

'''
Tip_010101 File and directory operations - 01
1. For each subdirectory under the search root, show the number of files and the total number of bytes
2. Skip subdirectories whose name contains CVS
3. https://docs.python.org/3.6/library/os.html
'''
import os
from os.path import join, getsize
for root, dirs, files in os.walk('..'):
    print(root, "consumes", end=" ")
    print(sum(getsize(join(root, name)) for name in files), end=" ")
    print("bytes in", len(files), "non-directory files")
    if 'CVS' in dirs:
        dirs.remove('CVS')  # don't visit CVS directories

'''
Tip_010102 File and directory operations - 02
1. Recursively walk all files and subdirectories under a directory
'''
import os
rootDir = r'..'
for root, dirs, files in os.walk(rootDir):
    print('{}{}'.format(root, os.sep))
    for i in files:
        print('{}{}{}'.format(root, os.sep, i))

'''
Tip_010103 File and directory operations - 03
1. Temporary files
Esempio n. 52
0
 def get_size(self):
     return getsize(self.path_str)
Esempio n. 53
0
start_dir = getcwd()
info = []
for root in argv[1:]:
    chdir(join(start_dir, root))
    for infof in glob('**/*.info', recursive=True):
        changed = False
        with open(infof, 'r') as f:
            d = json.load(f, object_pairs_hook=OrderedDict)
            filename = infof[:-5]
            location = d.get('url', '')
            if location and (not location.startswith(URL)
                             or basename(location) != basename(filename)):
                try:
                    urlretrieve(location, filename)
                    d['url'] = '{}/{}/{}'.format(URL, normpath(root), filename)
                    d['size'] = getsize(filename)
                    changed = True
                except:
                    print('failed to get file',
                          filename,
                          location,
                          file=stderr)
            ref = d.get('references', None)
            if isinstance(ref, str):
                d['references'] = [ref]
                changed = True
        if changed:
            with open(infof, 'w') as f:
                json.dump(d, f, indent=4)
        if root:
            file_path = [root, filename]
Esempio n. 54
0
     cmd = "\"" + EXE_PATH + "\"" + cmd_smg
     
     with open(PING_TMP_FILE,"w") as file_tmp:
         popen = subprocess.Popen(cmd,shell=True,
                          stdin=subprocess.PIPE,
                          stdout=file_tmp,
                          stderr=subprocess.PIPE)                   
 except Exception, e:
     log_data = u"执行ping.exe发生异常,message:%s" % e
     log.user_err(log_data)
     ret = PING_FAIL
     break
 
      
 while counter < 20:
     content = getsize(PING_TMP_FILE)
     if content > 0L:
         break
     else:
         counter += 1
         time.sleep(1)
 try:
     # modify by shenlige 2013-7-10: fix attcommonfun.get_process_children failing when domain name resolution fails
     if total_time:
         if popen.poll() == None:
             time.sleep(total_time)  # 等待指定的ping时间
             pro_ret,data = attcommonfun.get_process_children(popen.pid)
             if pro_ret == attcommonfun.ATTCOMMONFUN_SUCCEED:
                 dict_process = data
                 for process_pid, process_name in dict_process.items():
                     # modify by shenlige 2013-11-2: changed how processes are killed
Esempio n. 55
0
def loadASCII_data_header(filename, miss=None, fill=np.nan, *args, **kwargs):
    '''
    Open an ascii data file and load it into a python numpy array.

    Input:
    filename = CINDI data file name
    miss     = string or list denoting missing value options (default=None)
    fill     = fill value (default = NaN)

    Output:
    header = a list containing the header strings without the '#'
    out    = a dict containing the data in np.arrays, the dict keys are
             specified by the header data line
    '''

    func_name = string.join([module_name, "loadASCII_data_header"], " ")

    #-----------------------------------------------------------------------
    # Test to ensure the file is small enough to read in.  Python can only
    # allocate 2GB of data.  If you load something larger, python will crash

    fsize  = path.getsize(filename)
    header = list()
    out    = dict()

    if(fsize > 2.0e9):
        print func_name, "WARNING: File size [", (fsize * 1e-9), "GB > 2 GB]"
        return(header, out)
    elif(fsize == 0):
        print func_name, "WARNING: empty file [", filename, "]"
        return(header, out)

    #----------------------------------------------
    # Open the datafile and read the header rows

    f = open(filename, "r")

    if not f:
        print func_name, "ERROR: unable to open input file [", filename, "]"
        return header, out

    line   = f.readline()
    check  = 0

    while line.find("#") >= 0:
        hline  = string.strip(line.replace("#", ""))
        line   = f.readline()
        check += 1

        if(len(hline) > 0):
            header.append(hline)

    if(check > 0):
        hline = hline.split()
    else:
        print func_name, "ERROR: no header in this file [", filename, "]"
        return(header, out)

    #-------------------------------------------
    # Open the datafile and read the data rows

    temp = np.genfromtxt(filename, comments="#", missing_values=miss,
                         filling_values=fill)

    if len(temp) > 0:
        #------------------------------------------
        # Create the output dictionary

        for num,name in enumerate(hline):
            out[name] = temp[:,num]

    del temp
    return(header, out)
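# A minimal call sketch for the loader above, assuming a whitespace-delimited data file whose
# last '#' header line names the columns (the file name and missing-value marker are illustrative):
header, out = loadASCII_data_header("cindi_sample.dat", miss="-999")
print(len(header))           # number of '#' header lines found
print(sorted(out.keys()))    # column names taken from the last header line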
Esempio n. 56
0
        print('')
        print('------We Out!------')
        # Quit the program aka gtfo
        raise SystemExit

# ------------------------------------------------------------------------------
# --- Make a copy if I haven't already today

# Make the new folder
newFolder = join(lD, da)
os.mkdir(newFolder)

# The folder for Windows Spotlight photos
winLockF = (r'C:\Users\Max\AppData\Local\Packages\Microsoft.Windows.Content' +
            r'DeliveryManager_cw5n1h2txyewy\LocalState\Assets')

# Select the files bigger than 230 kB which is hopefully both all and only
# the wallpapers
tbc = [f for f in os.listdir(winLockF) if getsize(join(winLockF, f)) > 230000]
for i in range(len(tbc)):
    copy2(join(winLockF, tbc[i]), join(newFolder, tbc[i] + '.jpg'))

# Give some user feed back
print('1.', 'There were ' + str(len(tbc)) + ' files copied to:')
print('2.', '"' + newFolder + '"')

# ------------------------------------------------------------------------------

print('')
print('--------End--------')
Esempio n. 57
0
def loadASCII_data_hline(filename, hlines, miss=None, fill=np.nan, *args,
                         **kwargs):
    '''
    Open an ascii data file and load it into a python numpy array.  File
    header may be any number of lines and does not have to be preceded by a
    particular character.

    Input:
    filename = CINDI data file name
    hlines   = number of lines in header
    miss     = string or list denoting missing value options (default=None)
    fill     = fill value (default = NaN)

    Output:
    header = a list containing all specified header lines
    out = a dict containing the data in np.arrays, the dict keys are
          specified by the header data line
    '''

    func_name = string.join([module_name, "loadASCII_data_hline"])

    #-----------------------------------------------------------------------
    # Test to ensure the file is small enough to read in.  Python can only
    # allocate 2GB of data.  If you load something larger, python will crash

    fsize = path.getsize(filename)
    header = list()
    out = dict()

    if(fsize > 2.0e9):
        print func_name, "WARNING: File size [", (fsize * 1e-9), "GB > 2 GB]"
        return header, out
    elif(fsize == 0):
        print func_name, "WARNING: empty file [", filename, "]"
        return(header, out)

    #----------------------------------------------
    # Open the datafile and read the header rows

    f = open(filename, "r")

    if not f:
        print func_name, "ERROR: unable to open input file [", filename, "]"
        return header, out

    for h in range(hlines):
        header.append(f.readline())

    #-------------------------------------------
    # Open the datafile and read the data rows

    temp = np.genfromtxt(filename, skip_header=hlines, missing_values=miss,
                         filling_values=fill)

    if len(temp) > 0:
        #---------------------------------------------------------------------
        # Create the output dictionary, removing the point sign from any keys

        for num,name in enumerate(header[-1].split()):
            name = name.replace("#", "")

            if len(name) > 0:
                out[name] = temp[:,num]

    del temp
    return header, out
Esempio n. 58
0
 def getsize(self, fn):
     return getsize(fn)
Esempio n. 59
0
 def _get_source_filesize(self):
     if not hasattr(self, '_source_filesize'):
         self._source_filesize = getsize(self.source)
     return self._source_filesize
Esempio n. 60
0
def loadASCII_index_profile(filename, miss=None, fill=np.nan, *args, **kwargs):
    '''
    Open an ascii data file and load it into a python numpy array.  Assumes
    this file is seperated into index blocks, which should be maintained.

    Input:
    filename = CINDI data file name
    miss     = string or list denoting missing value options (default=None)
    fill     = fill value (default = NaN)

    Output:
    header  = a list containing the header strings without the '#'
    out     = a dict containing the data in np.arrays, the dict keys are
              specified by the header data line
    nblocks = number of indexed data blocks
    '''

    func_name = string.join([module_name, "loadASCII_index_profile"])

    #-----------------------------------------------------------------------
    # Test to ensure the file is small enough to read in.  Python can only
    # allocate 2GB of data.  If you load something larger, python will crash

    fsize = path.getsize(filename)
    header = list()
    out = dict()
    nblocks = 0

    if(fsize > 2.0e9):
        print func_name, "WARNING: File size [", (fsize * 1e-9), "GB > 2 GB]"
        return(header, out, nblocks)
    elif(fsize == 0):
        print func_name, "WARNING: empty file [", filename, "]"
        return(header, out, nblocks)

    #----------------------------------------------
    # Open the datafile and read the header rows

    f = open(filename, "r")

    if not f:
        print func_name, "ERROR: unable to open input file [", filename, "]"
        return header, out, nblocks

    line = f.readline()
    check = 0

    while line.find("#") >= 0:
        hline = string.strip(line.replace("#", ""))
        line = f.readline()
        check += 1

        if(len(hline) > 0):
            header.append(hline)

    if(check > 0):
        hline = hline.split()
    else:
        print func_name, "ERROR: no header in this file [", filename, "]"
        return(header, out, nblocks)

    #-------------------------------------------------
    # Cycle through the data rows, identifying blocks

    while len(line) > 0:
        if (len(line) == 1 and line.find("\n") == 0):
            # A new block has been found.  Only incriment if data has been read
            if len(out) > 0:
                nblocks += 1
            print "TEST: at block", nblocks, len(line)

            # Cycle to new dataline
            while len(line) == 1 and line.find("\n") == 0:
                line = f.readline()

        # Load the dataline into the output structure
        dline = line.split()

        for num,name in enumerate(hline):
            if out.has_key(name):
                if len(out[name]) > nblocks:
                    out[name][nblocks].append(dline[num])
                else:
                    out[name].append([dline[num]])
            else:
                out[name] = [[dline[num]]]

        # advance to the next data line so the loop terminates at end of file
        line = f.readline()

    return(header, out, nblocks)