def test_periodadmin_has_access(self):
        with self.settings(DEVILRY_COMPRESSED_ARCHIVES_DIRECTORY=self.backend_path):
            testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                               short_name='learn-python-basics',
                                               first_deadline=timezone.now() + timezone.timedelta(days=1))

            # Period admin
            periodpermissiongroup = mommy.make('devilry_account.PeriodPermissionGroup',
                                               period=testassignment.period)
            testuser = mommy.make(settings.AUTH_USER_MODEL)
            mommy.make('devilry_account.PermissionGroupUser',
                       user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)

            self.__make_simple_setup(assignment=testassignment)

            # run actiongroup
            self._run_actiongroup(name='batchframework_assignment',
                                  task=tasks.AssignmentCompressAction,
                                  context_object=testassignment,
                                  started_by=testuser)

            archive_meta = archivemodels.CompressedArchiveMeta.objects.get(content_object_id=testassignment.id)
            zipfileobject = ZipFile(archive_meta.archive_path)
            self.assertEqual(1, len(zipfileobject.namelist()))
            self.assertTrue(zipfileobject.namelist()[0].startswith('april'))
Example 2
def get_shp_from_zip(zip_file):
    """
    extract components file parts of a shapefile from a zip file

    zip_file -- zip file
    """
    try:
        zip_f = ZipFile(zip_file, 'r')
    except BadZipfile:
        return None
    list_names = zip_f.namelist()
    d = {}
    for elem in list_names:
        t = elem.split('.')
        d[t[1].lower()] = t[0]
    ll = d.values()
    # shp name validation (all parts must share the same base name)
    if all(x == ll[0] for x in ll):
        k = d.keys()
        # shp file type validation
        if len(k) == 4 and (
                'shp' in k and 'dbf' in k and 'shx' in k and 'prj' in k):
            res = {}
            for name in zip_f.namelist():
                io = StringIO.StringIO()
                zo = zip_f.open(name, 'r')
                io.write(zo.read())  # .decode('ISO8859-1').encode('utf-8'))
                zo.close()
                res_file = InMemoryUploadedFile(
                    io, None, name.lower(), 'text', io.len, None)
                res_file.seek(0)
                res[name.split('.')[1].lower()] = res_file
            return res
        # wrong number or kind of component files
        return None
    else:
        return None
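
A minimal usage sketch for the helper above (Python 2, matching its StringIO use); the archive name is hypothetical, and each value in the result is a Django InMemoryUploadedFile:

parts = get_shp_from_zip(open('parcels.zip', 'rb'))  # hypothetical archive
if parts is None:
    print 'not a single, complete zipped shapefile'
else:
    shp_part = parts['shp']  # InMemoryUploadedFile, ready for a form/save
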
Example 3
 def test_tarball_aware_of_branches(self):
     rev = '19'
     branch_content = sorted(['test-svn-tags-19-branches-aaa/',
                              'test-svn-tags-19-branches-aaa/aaa.txt',
                              'test-svn-tags-19-branches-aaa/svn-commit.tmp',
                              'test-svn-tags-19-branches-aaa/README'])
     h.set_context('test', 'svn-tags', neighborhood='Projects')
     tmpdir = tg.config['scm.repos.tarball.root']
     tarball_path = os.path.join(tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')
     fn = tarball_path + 'test-svn-tags-19-branches-aaa.zip'
     self.svn_tags.tarball(rev, '/branches/aaa/')
     assert os.path.isfile(fn), fn
     snapshot = ZipFile(fn, 'r')
     assert_equal(sorted(snapshot.namelist()), branch_content)
     os.remove(fn)
     self.svn_tags.tarball(rev, '/branches/aaa/some/path/')
     assert os.path.isfile(fn), fn
     snapshot = ZipFile(fn, 'r')
     assert_equal(sorted(snapshot.namelist()), branch_content)
     os.remove(fn)
     # if inside of branches, but no branch is specified
     # expect snapshot of trunk
     fn = tarball_path + 'test-svn-tags-19-trunk.zip'
     self.svn_tags.tarball(rev, '/branches/')
     assert os.path.isfile(fn), fn
     snapshot = ZipFile(fn, 'r')
     assert_equal(sorted(snapshot.namelist()),
                  sorted(['test-svn-tags-19-trunk/',
                          'test-svn-tags-19-trunk/aaa.txt',
                          'test-svn-tags-19-trunk/bbb.txt',
                          'test-svn-tags-19-trunk/ccc.txt',
                          'test-svn-tags-19-trunk/README']))
     shutil.rmtree(tarball_path, ignore_errors=True)
Example 4
def find_plugin_yaml(dataobj):
    """
        """
    yml = False
    try:
        # The first thing we are going to try to do is create a ZipFile
        # object with the StringIO data that we have.
        zfile = ZipFile(dataobj)
    except:
        print "[DEBUG] ZipFile Library Failed to Parse DataObject"
    else:
        # Before we start recursively jumping through hoops, lets first
        # check to see if the plugin.yml exists at this level.  If so, then
        # just set the yaml variable.  Otherwise we are gonna look for more
        # zip and jar files and dig into them.
        if "plugin.yml" in zfile.namelist():
            try:
                yml = yaml.load(zfile.read("plugin.yml"))
            except:
                return False
        else:
            for filename in zfile.namelist():
                if not yml and filename[-3:].lower() in ["zip", "jar"]:
                    print "[DEBUG] Found Zip/Jar file " + filename
                    data = StringIO()
                    data.write(zfile.read(filename))
                    yml = find_plugin_yaml(data)
                    data.close()
            zfile.close()
    return yml
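
Because the function accepts any file-like object that ZipFile can read, a caller can pass an open file directly; a short sketch (Python 2, matching the prints above; the jar name is hypothetical):

with open('SomePlugin.jar', 'rb') as fobj:  # hypothetical plugin archive
    info = find_plugin_yaml(fobj)
if info:
    print '[DEBUG] plugin: ' + str(info.get('name'))
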
Example 5
    def open(zipname):
        zf = ZipFile(zipname, 'r')
        m = zf.read('META-INF/manifest.xml')
        manifest = Manifest.parse(m)

        def warn(resource):
            print(u"Warning: bundle {} does not contain resource {}, which is referred in its manifest.".format(zipname, resource).encode('utf-8'))

        result = Bundle()
        result.presets_data = []
        for preset in manifest.get_resources('paintoppresets'):
            if preset in zf.namelist():
                result.presets.append(preset)
                data = zf.read(preset)
                kpp = KPP(preset, data)
                result.presets_data.append(kpp)
            else:
                warn(preset)

        result.meta_string = zf.read("meta.xml")
        result.preview_data = zf.read("preview.png")

        for brush in manifest.get_resources('brushes'):
            if brush in zf.namelist():
                result.brushes.append(brush)
            else:
                warn(brush)
        for pattern in manifest.get_resources('patterns'):
            if pattern in zf.namelist():
                result.patterns.append(pattern)
            else:
                warn(pattern)
            
        zf.close()
        return result
Example 6
def get_rasters(url, downloaddir='./Inputs'):
    """Download file and handle nonexist file.
       If not exist, an empty list returned. Hack...
       TODO: more elegent solution
    """
    try:
        fname = site.saveFile(url, dir=downloaddir)
        zipfname = '%s/%s' % (downloaddir, fname)
        print "***%s found, downloading as %s..." % (fname, zipfname)
        z = ZipFile(zipfname)
    except BadZipfile:
        print "***bad zip file"
        return [fname, ]
    # TODO fix error handling ???
    except:
        print "***empty zip file"
        return []

    rasters = []
    print "     zipped file namelist: ", z.namelist()
    for fname in z.namelist():
        if fname.endswith('/'):
            continue
        else:
            fname = os.path.basename(fname)
            rasters.append(fname)
            outfname = '%s/%s' % (downloaddir, fname)
            print "***get_raster: %s" % outfname
            with open(outfname, 'wb') as f:
                f.write(z.read(fname))
    os.remove(zipfname)
    print "***remove %s" % zipfname
    return rasters
Example 7
 def _find_plugin_yaml(self, dataobj):
     '''
     Recursively search a zip/jar data object for a plugin.yml file.
     '''
     yml = False
     try:
         # The first thing we are going to try to do is create a ZipFile
         # object with the StringIO data that we have.
         zfile = ZipFile(dataobj)
     except:
         pass
     else:
         # Before we start recursively jumping through hoops, lets first
         # check to see if the plugin.yml exists at this level.  If so, then
         # just set the yaml variable.  Otherwise we are gonna look for more
         # zip and jar files and dig into them.
         if 'plugin.yml' in zfile.namelist():
             try:
                 yml = yaml.load(zfile.read('plugin.yml'))
             except:
                 return False
         else:
             for filename in zfile.namelist():
                 if not yml and filename[-3:].lower() in ['zip', 'jar']:
                     data = StringIO()
                     data.write(zfile.read(filename))
                     yml = self._find_plugin_yaml(data)
                     data.close()
             zfile.close()
     return yml
Example 8
def run(file_name):
    config_dict = False
    jar = ZipFile(file_name, 'r')
    # Version A
    if 'a.txt' in jar.namelist() and 'b.txt' in jar.namelist():
        pre_key = jar.read('a.txt')
        enckey = ['{0}{1}{0}{1}a'.format('plowkmsssssPosq34r', pre_key),
                  '{0}{1}{0}{1}a'.format('kevthehermitisaGAYXD', pre_key)
                  ]
        coded_jar = jar.read('b.txt')
        config_dict = version_a(enckey, coded_jar)

    # Version B
    if 'ID' in jar.namelist() and 'MANIFEST.MF' in jar.namelist():
        pre_key = jar.read('ID')
        enckey = ['{0}H3SUW7E82IKQK2J2J2IISIS'.format(pre_key)]
        coded_jar = jar.read('MANIFEST.MF')
        config_dict = version_b(enckey, coded_jar)

    # Version C
    if 'resource/password.txt' in jar.namelist() and 'resource/server.dll' in jar.namelist():
        pre_key = jar.read('resource/password.txt')
        enckey = ['CJDKSIWKSJDKEIUSYEIDWE{0}'.format(pre_key)]
        coded_jar = jar.read('resource/server.dll')
        config_dict = version_c(enckey, coded_jar)

    # Version D
    if 'java/stubcito.opp' in jar.namelist() and 'java/textito.isn' in jar.namelist():
        pre_key = jar.read('java/textito.isn')
        enckey = ['TVDKSIWKSJDKEIUSYEIDWE{0}'.format(pre_key)]
        coded_jar = jar.read('java/stubcito.opp')
        config_dict = version_c(enckey, coded_jar)

    return config_dict
Example 9
class ZipfileReader:
    """ Reads files from an imported zip file. """

    def __init__(self, files):
        self.files = ZipFile(files)
        self.fullpath = ''


    def readManifest(self):
        """ Get the maifest file if it exists. """
        for x in self.files.namelist():
            index = x.find('imsmanifest.xml')
            if index != -1:
                self.fullpath = x[:index]
                return self.files.read(x)
        return None
    

    def readFile(self, path):
        """ Get file data from the zip file. """
        fn = '%s%s' %(self.fullpath, str(path))
        if fn not in self.files.namelist():
            fn = fn.replace('/', '\\')
            if fn not in self.files.namelist():
                return None
        return self.files.read(fn)

    def listFiles(self):
        """ List files in the package. """
        return self.files.namelist()
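
A brief usage sketch for the reader above; the package name and inner path are hypothetical:

reader = ZipfileReader(open('course_package.zip', 'rb'))
manifest = reader.readManifest()  # None when imsmanifest.xml is absent
if manifest is not None:
    print(reader.listFiles())                     # every entry in the package
    page = reader.readFile('content/page1.html')  # resolved against fullpath
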
Example 10
File: db.py Project: ajm/glutton
    def _read(self) :
        global MANIFEST_FNAME

        z = ZipFile(self.fname, 'r', compression=self.compression)
    
        def _err(msg) :
            z.close()
            raise GluttonImportantFileNotFoundError(msg)
    
        # without the manifest all is lost
        # we need this to get the names of the other
        # XML files
        if MANIFEST_FNAME not in z.namelist() :
            _err('manifest not found in %s' % self.fname)

        self.metadata = json.load(z.open(MANIFEST_FNAME))
        
        self.log.info("read manifest - created on %s using glutton version %.1f" % \
            (time.strftime('%d/%m/%y at %H:%M:%S', time.localtime(self.download_time)), \
             self.version))

        # the data file is the raw data grouped into gene families
        # when we do a local alignment we need to get the gene id
        # of the best hit and find out which gene family it belongs to 
        if self.metadata['data-file'] not in z.namelist() :
            _err('data file (%s) not found in %s' % (self.metadata['data-file'], self.fname))

        self.data = json_to_glutton(json.load(z.open(self.metadata['data-file'])))
        self.seq2famid = self._create_lookup_table(self.data)

        self.log.info("read %d gene families (%d genes)" % (len(self.data), len(self.seq2famid)))

        z.close()
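
The pattern here, checking namelist() first and then calling json.load straight on ZipFile.open() without extracting, also works standalone; a minimal sketch with hypothetical names:

import json
from zipfile import ZipFile

def read_zipped_json(archive_path, member='manifest.json'):
    # Validate the member exists, then parse it directly from the archive.
    with ZipFile(archive_path, 'r') as z:
        if member not in z.namelist():
            raise IOError('%s not found in %s' % (member, archive_path))
        return json.load(z.open(member))
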
Example 11
 def test_graph_export_csv(self):
     create_graph(self)
     create_schema(self)
     create_type(self)
     create_data(self)
     self.browser.find_by_id('toolsMenu').first.click()
     cookies = {self.browser.cookies.all()[0]["name"]: self.browser.cookies.all()[0]["value"], self.browser.cookies.all()[1]["name"]: self.browser.cookies.all()[1]["value"]}
     result = requests.get(self.live_server_url + '/tools/bobs-graph/export/csv/', cookies=cookies)
     spin_assert(lambda: self.assertEqual(
         result.headers['content-type'], 'application/zip'))
     spin_assert(lambda: self.assertEqual(
         self.browser.status_code.is_success(), True))
     test_file = StringIO(result.content)
     csv_zip = ZipFile(test_file)
     for name in csv_zip.namelist():
         fw = open('sylva/sylva/tests/files/' + name, 'w')
         fw.write(csv_zip.read(name))
         fw.close()
     for name in csv_zip.namelist():
         f = open('sylva/sylva/tests/files/' + name)
         csvFile = ""
         for line in f:
             csvFile += line
         f.close()
         spin_assert(lambda: self.assertEqual(csv_zip.read(name), csvFile))
     Graph.objects.get(name="Bob's graph").destroy()
Example 12
def test_multi_layer_dataset(multi_layer_app, temp_file):
    req = Request.blank("/")
    resp = req.get_response(multi_layer_app)
    assert resp.status == "200 OK"

    for chunk in resp.app_iter:
        temp_file.write(chunk)
    temp_file.flush()

    z = ZipFile(temp_file.name, "r", ZIP_DEFLATED)
    assert z

    # Should be 2 files for each layer
    assert len(z.namelist()) == 2 * 4
    assert "my_grid_0.asc" in z.namelist()

    # find the first asc file
    asc_filename = [x for x in z.namelist() if x.endswith(".asc")][0]

    with z.open(asc_filename, "r") as f:
        data = f.read()

    assert (
        data
        == """ncols        3
nrows        2
xllcorner    -122.500000000000
yllcorner    53.000000000000
dx           -0.500000000000
dy           1.000000000000
NODATA_value  -9999
 0 1 2
 3 4 5
"""
    )
Example 13
def unzip(filename, match_dir=False, destdir=None):
    """
    Extract all files from a zip archive
    filename: The path to the zip file
    match_dir: If True all files in the zip must be contained in a subdirectory
      named after the archive file with extension removed
    destdir: Extract the zip into this directory, default current directory

    return: If match_dir is True then returns the subdirectory (including
      destdir), otherwise returns destdir or '.'
    """
    if not destdir:
        destdir = '.'

    z = ZipFile(filename)
    unzipped = '.'

    if match_dir:
        if not filename.endswith('.zip'):
            raise FileException('Expected .zip file extension', filename)
        unzipped = os.path.basename(filename)[:-4]
        check_extracted_paths(z.namelist(), unzipped)
    else:
        check_extracted_paths(z.namelist())

    # File permissions, see
    # http://stackoverflow.com/a/6297838
    # http://stackoverflow.com/a/3015466
    for info in z.infolist():
        log.debug('Extracting %s to %s', info.filename, destdir)
        z.extract(info, destdir)
        os.chmod(os.path.join(destdir, info.filename),
                 info.external_attr >> 16 & 4095)

    return os.path.join(destdir, unzipped)
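
A usage sketch of the two modes; the paths are hypothetical:

# match_dir=True: every entry must live under a 'release-1.2/' prefix,
# and the returned path includes it.
dest = unzip('/tmp/release-1.2.zip', match_dir=True, destdir='/opt/apps')
# expected: dest == '/opt/apps/release-1.2'

# Default mode: extract into the current directory.
unzip('/tmp/assets.zip')
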
Example 14
def fetch(arch,version,path,save=True):
    version = normalize_ver(version)
    path = path.lstrip('/')
    key = 'zip%s%s' % (version,path)
    base_url = get_base_url(arch, 0)
    base_url2 = get_base_url(arch, 1)
    #zdata = memcache.get(key)
    path = path.replace(' ','%20')
    zdata = get_from_storage(key,arch)
    if zdata is None:
        result = urlfetch.fetch('%s/%s/%s.zip' % (base_url,version,path),deadline=10)
        #print result.status_code
        if result.status_code != 200:
            result = urlfetch.fetch('%s/%s/%s.zip' % (base_url2,version,path))
        if result.status_code == 200:
            zdata = result.content
            #memcache.set(key,zdata)
            if save:
                try:
                    put_into_storage(key,zdata,arch)
                except:pass
            zdata = StringIO(zdata)
    if zdata is None:
        return None
    #zfp = ZipFile(StringIO(zdata), "r")
    #data = zfp.read(zfp.namelist()[0])#.decode("cp1251")
    #del zfp
    #return data
    zfp = ZipFile(zdata)
    try :
        #python 2.6+
        return zfp.open(zfp.namelist()[0])
    except:
        return StringIO(zfp.read(zfp.namelist()[0]))#.decode("cp1251")
Example 15
    def _create_resource(self, data_set, site, file_):
        """ Creates a new resource or file associated with its data set
        :param data_set:
        :param site:
        :param file_:
        """
        #content of the zip file
        zip_file_name = file_.filename
        zip_path = os.path.join(config.DATA_SETS_DIR, zip_file_name)
        file_.save(zip_path)
        sourcezip = ZipFile(zip_path)
        i = 0
        while i < len(sourcezip.namelist()):
            zip_entry_path = os.path.join(os.path.abspath(os.path.dirname(zip_path)), sourcezip.namelist()[i])
            sourcezip.extract(sourcezip.namelist()[i], config.DATA_SETS_DIR)
            url = self.parser.get_file_name().replace(".zip","") + "_" + str(i)
            site.action.resource_create(package_id=data_set, upload=open(zip_entry_path),
                                        name=sourcezip.namelist()[i], url=url)
            i += 1

        #xml content
        xml_file_name = self.parser.get_dataset().id + ".xml"
        path = os.path.join(config.DATA_SETS_DIR, xml_file_name)
        with open(path, "w") as ttmp:
            ttmp.write(self._content.encode(encoding="utf-8"))
        url = xml_file_name
        site.action.resource_create(package_id=data_set, upload=open(path),
                                    name=xml_file_name, url=url)
Example 16
	def load_and_save_scopes(self):
		scopes = set()
		for x in os.walk(sublime.packages_path() + '/..'):
			for f in glob.glob(os.path.join(x[0], '*.tmLanguage')):
				for s in self.get_scopes_from(plistlib.readPlist(f)):
					scopes.add(s.strip())

		for x in os.walk(os.path.dirname(sublime.executable_path())):
			for f in glob.glob(os.path.join(x[0], '*.sublime-package')):
				input_zip = ZipFile(f)
				for name in input_zip.namelist():
					if name.endswith('.tmLanguage'):
						for s in self.get_scopes_from(plistlib.readPlistFromBytes(input_zip.read(name))):
							scopes.add(s.strip())

		for x in os.walk(sublime.packages_path() + '/..'):
			for f in glob.glob(os.path.join(x[0], '*.sublime-package')):
				input_zip = ZipFile(f)
				for name in input_zip.namelist():
					if name.endswith('.tmLanguage'):
						for s in self.get_scopes_from(plistlib.readPlistFromBytes(input_zip.read(name))):
							scopes.add(s.strip())
		names = list(scopes)
		scopes = dict()
		for name in names:
			value = name
			if value.startswith('source.'):
				value = value[7:]
			elif value.startswith('text.'):
				value = value[5:]
			scopes[name] = value
		self.settings.set('scopes', scopes)
		sublime.save_settings('smart-pieces.sublime-settings')
Example 17
class Capture(object):
    def __init__(self, filename):
        if not os.path.exists(filename):
            raise CaptureException("Capture file '%s' does not exist!" %
                                   filename)
        try:
            self.archive = ZipFile(filename, 'r')
        except BadZipfile:
            raise BadCapture("Capture file '%s' not a .zip file" % filename)
        self.metadata = json.loads(self.archive.open('metadata.json').read())
        # A cache file for storing hard-to-generate data about the capture
        self.cache_filename = filename + '.cache'
        if not self.metadata or not self.metadata['version']:
            raise BadCapture("Capture file '%s' does not appear to be an "
                                   "Eideticker capture file" % filename)

        self.num_frames = max(0, len(filter(lambda s: s[0:7] == "images/" and len(s) > 8,
                                            self.archive.namelist())) - 2)
        if self.num_frames > 0:
            im = self.get_frame_image(0)
            self.dimensions = im.size

        # Name of capture filename (in case we need to modify it)
        self.filename = filename

    @property
    def length(self):
        return self.num_frames / 60.0

    def get_video(self):
        buf = StringIO.StringIO()
        buf.write(self.archive.read('movie.webm'))
        buf.seek(0)
        return buf

    def get_frame_image(self, framenum, grayscale=False):
        if int(framenum) > self.num_frames:
            raise CaptureException("Frame number '%s' is greater than the number of frames " \
                                   "(%s)" % (framenum, self.num_frames))

        filename = 'images/%s.png' % framenum
        if filename not in self.archive.namelist():
            raise BadCapture("Frame image '%s' not in capture" % filename)

        return self._get_frame_image(filename, grayscale)

    def _get_frame_image(self, filename, grayscale=False):
        buf = StringIO.StringIO()
        buf.write(self.archive.read(filename))
        buf.seek(0)
        im = Image.open(buf)
        if grayscale:
            im = im.convert("L")

        return im

    def get_frame(self, framenum, grayscale=False, type=numpy.float):
        return numpy.array(self.get_frame_image(framenum, grayscale), dtype=type)
Example 18
def load_data():
    urlopener = urllib.URLopener()
    translationTablePerGeneFileList = []
    for genename,url in translationtableUrls.iteritems():
        urlopener.retrieve(url,genename+'.xlsx')
        print 'downloading: ',genename+'.xlsx',' from: ',url[:50],' ...'
        translationTablePerGeneFileList.append(genename+'.xlsx')

    urlopener.retrieve(jsonfilesUrl,'dosingGuidelines_json.zip')
    dosingGuidelines_json_zipfile=ZipFile(open('dosingGuidelines_json.zip','rb'))

    pharmgkbJsonFileList=[]
    if not os.path.exists(os.getcwd()+'/dosingGuidelines_json'):
        os.makedirs(os.getcwd()+'/dosingGuidelines_json')
    for name in dosingGuidelines_json_zipfile.namelist():
        dosingGuidelines_json_zipfile.extract(name,os.getcwd()+'/dosingGuidelines_json')
        pharmgkbJsonFileList.append(os.getcwd()+'/dosingGuidelines_json/'+name)
    print 'downloading:  json files  from Pharmgkb.  ','Downloaded ',len(dosingGuidelines_json_zipfile.namelist()),' json files from ',jsonfilesUrl[:40],' ...'
    # print pharmgkbJsonFileList
    # fileOut = open('/Users/admin/Dropbox/Privat/00_Masterthesis/MITTELasdf.txt','w')

    rsidList = fillRsidList(translationTablePerGeneFileList)
    for rs in rsidList:
        print json.dumps(getDosingGuidelineFromRsid(rs,translationTablePerGeneFileList,pharmgkbJsonFileList), indent=4)
    # fileOut.write(json.dumps(getDosingGuidelineFromRsid('rs4244285',translationTablePerGeneFileList,pharmgkbJsonFileList), indent=4, sort_keys=True))
    # print json.dumps(getDosingGuidelineFromRsid('rs1801265'), indent=4)
    # fileOut.close()
    # getDosingGuidelineFromRsid('rs1801265')

def getHaplotypesFromTranslationtable(rsid,translationTablePerGeneFileList):
    for translationTablePerGene in translationTablePerGeneFileList:
        if translationTablePerGene.endswith('.xlsx') and not translationTablePerGene.startswith('~'):
            haplottypeListTemp=[]
            translationTablePerGeneWorkbook = load_workbook(translationTablePerGene,read_only=True)
            worksheetTranslationTablePerGene = translationTablePerGeneWorkbook.active
            coordinatesOfRsid = ''
            for row in worksheetTranslationTablePerGene.rows:
                for cell in row:
                    if isinstance(cell.value,unicode): #dont parse datetime objects
                        if str(cell.value.encode('utf8','ignore')).strip()==rsid: #encoding due to unicode characters, str(unicode) gives unicodeencdoerror
                            coordinatesOfRsid = cell.coordinate
            letterOfRsIdCell = ''
            if coordinatesOfRsid!='':
                letterOfRsIdCell = re.search('[A-Z]{1,2}', coordinatesOfRsid).group() #gives the letter of the coordinate
                rowCount = worksheetTranslationTablePerGene.get_highest_row()
                if not letterOfRsIdCell=='':
                    for i in range (1,rowCount+1):
                        try:
                            if worksheetTranslationTablePerGene[letterOfRsIdCell+str(i)].value: #take only non-empty cells
                                 if bool(re.search('\*\d',str(worksheetTranslationTablePerGene['B'+str(i)].value))): # pattern is star plus a digit then stop because we only want the basic star allels. We search in the B column because it contains the star alleles
                                    haplottypeListTemp.append(worksheetTranslationTablePerGene['B'+str(i)].value)
                        except IndexError, e:
                            print e
                        except:
                            pass
                print 'star alleles list:',haplottypeListTemp
                return haplottypeListTemp
Example 19
 def testZipExport(self):
     self.login('god')
     view = self._get_view()
     fn = view.zip_export(download=False)
     zf = ZipFile(fn, 'r')
     self.assertEqual('foo/index.html' in zf.namelist(), True)
     self.assertEqual('foo/index.xml' in zf.namelist(), True)
     zf.close()
     os.unlink(fn)
Example 20
 def test_zipped_excluded_directory(self):
     zippedRoot = self.get_zipped_root('1/')
     expected_entries = ['2/2-1/2-1.txt']
     zf = ZipFile(zippedRoot)
     self.assertTrue(zf.testzip() is None)
     for elem in expected_entries:
         self.assertTrue(elem in zf.namelist())
     self.assertEqual(len(expected_entries), len(zf.namelist()))
     zf.close()
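
ZipFile.testzip(), used above, CRC-checks every member and returns the name of the first bad file, or None when all pass; a minimal standalone sketch:

from zipfile import ZipFile

def verify_archive(path):
    # True when every member passes its CRC check.
    with ZipFile(path) as zf:
        return zf.testzip() is None
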
Example 21
    def test_tarball_paths(self):
        rev = '19'
        h.set_context('test', 'svn-tags', neighborhood='Projects')
        tmpdir = tg.config['scm.repos.tarball.root']
        tarball_path = os.path.join(tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')

        # a tag
        self.svn_tags.tarball(rev, '/tags/tag-1.0/')
        fn = tarball_path + 'test-svn-tags-r19-tags-tag-1.0.zip'
        assert os.path.isfile(fn), fn
        snapshot = ZipFile(fn, 'r')
        tag_content = sorted(['test-svn-tags-r19-tags-tag-1.0/',
                              'test-svn-tags-r19-tags-tag-1.0/svn-commit.tmp',
                              'test-svn-tags-r19-tags-tag-1.0/README'])
        assert_equal(sorted(snapshot.namelist()), tag_content)
        os.remove(fn)

        # a directory (of tags)
        self.svn_tags.tarball(rev, '/tags/')
        fn = tarball_path + 'test-svn-tags-r19-tags.zip'
        assert os.path.isfile(fn), fn
        snapshot = ZipFile(fn, 'r')
        tags_content = sorted(['test-svn-tags-r19-tags/',
                               'test-svn-tags-r19-tags/tag-1.0/',
                               'test-svn-tags-r19-tags/tag-1.0/svn-commit.tmp',
                               'test-svn-tags-r19-tags/tag-1.0/README'])
        assert_equal(sorted(snapshot.namelist()), tags_content)
        os.remove(fn)

        # no path, but there are trunk in the repo
        # expect snapshot of trunk
        self.svn_tags.tarball(rev)
        fn = tarball_path + 'test-svn-tags-r19-trunk.zip'
        assert os.path.isfile(fn), fn
        snapshot = ZipFile(fn, 'r')
        trunk_content = sorted(['test-svn-tags-r19-trunk/',
                                'test-svn-tags-r19-trunk/aaa.txt',
                                'test-svn-tags-r19-trunk/bbb.txt',
                                'test-svn-tags-r19-trunk/ccc.txt',
                                'test-svn-tags-r19-trunk/README'])
        assert_equal(sorted(snapshot.namelist()), trunk_content)
        os.remove(fn)

        # no path, and no trunk dir
        # expect snapshot of repo root
        h.set_context('test', 'src', neighborhood='Projects')
        fn = os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-r1.zip')
        self.repo.tarball('1')
        assert os.path.isfile(fn), fn
        snapshot = ZipFile(fn, 'r')
        assert_equal(snapshot.namelist(), ['test-src-r1/', 'test-src-r1/README'])
        shutil.rmtree(os.path.join(tmpdir, 'svn/t/te/test/testsvn/'),
                      ignore_errors=True)
        shutil.rmtree(tarball_path, ignore_errors=True)
Example 22
def merge_wikitext(ui_, repo, base_dir, tmp_file, in_stream):
    """ Merge changes from a submission zip file into the
        repository. """

    # HgFileOverlay to read bundle files with.
    prev_overlay = HgFileOverlay(ui_, repo, base_dir, tmp_file)

    # Direct overlay to write updates into the repo.
    head_overlay = DirectFiles(os.path.join(repo.root, base_dir))

    arch = ZipFile(in_stream, 'r')
    try:
        base_ver, dummy = unpack_info(arch.read('__INFO__'))
        if not has_version(repo, base_ver):
            # REDFLAG: Think. What about 000000000000?
            #          It is always legal. hmmmm...
            raise SubmitError("Base version: %s not in repository." %
                              base_ver[:12], True)

        # Still need to check for illegal submissions.
        prev_overlay.version = base_ver

        # REDFLAG: revisit.
        # just assert in forking_extract_wikitext and
        # get rid of extra checking / exception raising?
        check_base_shas(arch, prev_overlay)
        # Hmmmm... checking against a version of readonly.txt
        # which may be later than the one that the submitter
        # used.
        check_writable(head_overlay, arch)
        check_merges([name for name in arch.namelist()
                      if name != '__INFO__'],
                     # pylint gives spurious E1101 here ???
                     #pylint: disable-msg=E1101
                     prev_overlay.list_pages(os.path.join(prev_overlay.
                                                          base_path,
                                                          'wikitext')),
                     ArchiveHasher(arch).hexdigest)

        # created, modified, removed, skipped, forked
        op_lut = (set([]), set([]), set([]), set([]), set([]))

        for name in arch.namelist():
            # check_base_sha validates wikinames.
            if name == "__INFO__":
                continue
            action, versioned_name = forking_extract_wikitext(arch,
                                                              prev_overlay,
                                                              head_overlay,
                                                              name)
            op_lut[action].add(versioned_name)
        return op_lut
    finally:
        arch.close()
Example 23
def extract(dir_name, zip_path):
    zipfile = ZipFile(zip_path, "r")
    files = []
    for name in zipfile.namelist():
        sanitize_path(dir_name, name, "zip file")
    zipfile.extractall(dir_name)
    zipfile.close()

    for name in zipfile.namelist():
        if path.isfile(path.join(dir_name, name)):
            files.append(path.normpath(name))
    return files
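
sanitize_path() is not shown above; a minimal sketch of such a zip-slip guard, assuming it is expected to raise on entries that would escape the target directory:

import os.path

def sanitize_path(dir_name, name, desc):
    # Reject absolute entries and '..' traversal that escape dir_name.
    dest = os.path.normpath(os.path.join(dir_name, name))
    if not dest.startswith(os.path.normpath(dir_name) + os.sep):
        raise ValueError("unsafe path %r in %s" % (name, desc))
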
Example 24
 def extract_zip_file(self, zip_file_name):
     '''
     Extracts the content of a zip file in the path zip_file_name and put it
     content in the place specified by the configuration parameter ["FAOSTAT", "datapath"]
     '''
     self.log.info("Extracting data from zip file...")
     sourceZip = ZipFile(zip_file_name)  # open the zip File
     if len(sourceZip.namelist()) != 1:  # The zip file should contain a single element
         raise RuntimeError("Unexpected zip file. Content will not be extracted")
     else:
         sourceZip.extract(sourceZip.namelist()[0], self.config.get("FAOSTAT", "data_file_path"))
         self.log.info("Data extracted to {0}".format(self.config.get("FAOSTAT", "data_file_path")))
Example 25
    def update_from_archive(self, archive, obj):
        old_pks = list(Page.objects.filter(book=obj).values_list("id", flat=True))
        archive = ZipFile(archive)

        toc = archive.read("toc.py")
        toc = toc.replace("(", "[").replace(")", "]").replace("'", '"')
        obj.toc = toc
        obj.save()

        pics = [name for name in archive.namelist() if name.startswith("pics/") and not name == "pics/"]
        archive.extractall(settings.MEDIA_ROOT, pics)

        appendix_pattern = re.compile(r"^ap(?P<section>[a-z])\.html$")
        ch_pattern = re.compile(r"^ch(?P<ch>\d+)\.html$")
        chs_pattern = re.compile(r"^ch(?P<ch>\d+)s(?P<s>\d+)\.html$")

        for filename in archive.namelist():
            if not filename.split(".")[-1] == "html":
                continue

            slug = filename[:-5]

            try:
                page = Page.objects.get(slug=slug, book=obj)
                old_pks.remove(page.pk)
            except Page.DoesNotExist:
                page = Page(slug=slug, book=obj)
                # create name if page is new
                if filename == "index.html":
                    name = u"Первая страница"
                elif chs_pattern.match(filename):
                    r = chs_pattern.match(filename)
                    name = u"Глава %s, раздел %s" % (int(r.group("ch")), int(r.group("s")))
                    page.chapter = r.group("ch")
                    page.section = r.group("s")
                elif ch_pattern.match(filename):
                    r = ch_pattern.match(filename)
                    name = u"Глава %s" % int(r.group("ch"))
                    page.chapter = r.group("ch")
                elif appendix_pattern.match(filename):
                    r = appendix_pattern.match(filename)
                    name = u"Приложение %s" % r.group("section").upper()
                    page.chapter = u"ap"
                    page.section = r.group("section")
                else:
                    name = filename
                page.name = name

            page.content = archive.read(filename)
            page.save()
        Page.objects.filter(pk__in=old_pks).delete()
        archive.close()
Example 26
    def update_from_archive(self, archive, obj):
        old_pks = list(models.Page.objects.filter(book=obj).values_list('id', flat=True))
        archive = ZipFile(archive)

        toc = archive.read('toc.py')
        toc = toc.replace('(', '[').replace(')', ']').replace("'", '"')
        obj.toc = toc
        obj.save()

        pics = [name for name in archive.namelist() if name.startswith('pics/') and not name == 'pics/']
        archive.extractall(settings.MEDIA_ROOT, pics)

        appendix_pattern = re.compile(r'^ap(?P<section>[a-z])\.html$')
        ch_pattern = re.compile(r'^ch(?P<ch>\d+)\.html$')
        chs_pattern = re.compile(r'^ch(?P<ch>\d+)s(?P<s>\d+)\.html$')

        for filename in archive.namelist():
            if not filename.split('.')[-1] == 'html':
                continue

            slug = filename[:-5]

            try:
                page = models.Page.objects.get(slug=slug, book=obj)
                old_pks.remove(page.pk)
            except models.Page.DoesNotExist:
                page = models.Page(slug=slug, book=obj)
                #create name if page is new
                if filename == 'index.html':
                    name = u'Первая страница'
                elif chs_pattern.match(filename):
                    r = chs_pattern.match(filename)
                    name = u'Глава %s, раздел %s' % (int(r.group('ch')), int(r.group('s')))
                    page.chapter = r.group('ch')
                    page.section = r.group('s')
                elif ch_pattern.match(filename):
                    r = ch_pattern.match(filename)
                    name = u'Глава %s' % int(r.group('ch'))
                    page.chapter = r.group('ch')
                elif appendix_pattern.match(filename):
                    r = appendix_pattern.match(filename)
                    name = u'Приложение %s' % r.group('section').upper()
                    page.chapter = u'ap'
                    page.section = r.group('section')
                else:
                    name = filename
                page.name = name

            page.content = archive.read(filename)
            page.save()
        models.Page.objects.filter(pk__in=old_pks).delete()
        archive.close()
Example 27
def configure_search_replace(request):
    if request.method == 'GET':
        zf_in = ZipFile(request.session['stored_archive_filename'], mode='r')
        all_filenames_lst = zf_in.namelist()
        all_filenames = set(all_filenames_lst)
        assert len(all_filenames) == len(all_filenames_lst), "Duplicate filenames in the input file?!"
        zf_in.close()
        return render_to_response('docx_search_replace/configure_search_replace.html',
                                  {'filenames': sorted(all_filenames)})
    elif request.method == 'POST':
        replacements = []
        for i in range(1, 6):  # We have input fields "from1", "to1"... "from5", "to5"
            if request.POST['from%d' % i]:
                replacements.append((request.POST['from%d' % i], request.POST['to%d' % i]))
        logging.info('replacements: %s' % replacements)

        selected_filenames = [k for k in request.POST if request.POST[k] == 'on']
        logging.info('selected_filenames: %s' % selected_filenames)

        zf_in = ZipFile(request.session['stored_archive_filename'], mode='r')
        all_filenames = zf_in.namelist()
        stored_output_file = tempfile.NamedTemporaryFile(delete=False)
        zf_out = ZipFile(stored_output_file.name, mode='w', compression=zf_in.compression)

        for fname in selected_filenames:
            file_contents = zf_in.open(fname).read().decode('utf-8')
            for r in replacements:
                file_contents = file_contents.replace(*r)
            zf_out.writestr(fname, file_contents.encode('utf-8'))

        filenames_to_copy_unchanged = set(all_filenames) - set(selected_filenames)
        for fname in filenames_to_copy_unchanged:
            zf_out.writestr(fname, zf_in.open(fname).read(), compress_type=ZIP_DEFLATED)

        zf_in.close()
        zf_out.close()

        orig_uploaded_filename = request.session['uploaded_filename']
        if orig_uploaded_filename.endswith('.docx'):
            downloading_filename = re.sub('.docx$', '_EDITED.docx', orig_uploaded_filename)
        else:
            downloading_filename = orig_uploaded_filename + '_EDITED'

        ret_file = open(stored_output_file.name, 'rb')
        resp = HttpResponse(status=200,
                content=ret_file.read(),
                mimetype='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
        resp['Content-Disposition'] = 'attachment; filename="%s"' % downloading_filename
        return resp

    else:
        return HttpResponseBadRequest('Invalid method: %s' % request.method)
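
The core trick in the view above is that a .docx is just a zip archive, so editing means rewriting every member into a fresh archive; a minimal standalone sketch with hypothetical file names:

from zipfile import ZipFile, ZIP_DEFLATED

def docx_replace(src, dst, old, new):
    # Copy every member, rewriting only the main document part.
    with ZipFile(src) as zin, ZipFile(dst, 'w', ZIP_DEFLATED) as zout:
        for name in zin.namelist():
            data = zin.read(name)
            if name == 'word/document.xml':
                data = data.replace(old.encode('utf-8'), new.encode('utf-8'))
            zout.writestr(name, data)

docx_replace('report.docx', 'report_EDITED.docx', 'DRAFT', 'FINAL')
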
Example 28
def __unpackZip(verzip, rodir, verbose):
    zipfile = ZipFile(verzip)

    if verbose:
        for l in zipfile.namelist():
            print os.path.join(rodir, l)

    if not os.path.exists(rodir) or not os.path.isdir(rodir):
        os.mkdir(rodir)
    zipfile.extractall(rodir)

    print "%d files checked out" % len(zipfile.namelist())
    return 0
Example 29
 def extract_zip(self,todir,filename):
     try:
         z = ZipFile("downloads/"+filename,"r")
     except:
         import traceback
         traceback.print_exc()
         return "Corrupt"
     
     if self.mode == "engine":
         root = "./"
         block = None
     #Extract folder from zip to todir
     elif filename+"/" in z.namelist():
         root = todir+"/"
         block = filename+"/"
     #Create folder from filename, extract contents of zip to there
     else:
         root = todir+"/"+filename+"/"
         try:
             os.makedirs(root)
         except:
             pass
         block = None
     for name in z.namelist():
         if hasattr(self,"progress"):
             self.progress.text = "extracting:"+name[-30:]
         print "extract:",name
         try:
             txt = z.read(name)
         except:
             return "Corrupt download"
         if block:
             if not name.startswith(block):
                 continue
         if "/" in name and not os.path.exists(root+name.rsplit("/",1)[0]):
             os.makedirs(root+name.rsplit("/",1)[0])
         if not name.endswith("/"):
             f = open(root+name,"wb")
             f.write(txt)
             f.close()
     z.close()
     os.remove("downloads/"+filename)
     try:
         os.remove("downloads/last")
     except:
         pass
     if self.mode == "engine":
         self.root.children[:] = [editbox(None,"In order to complete upgrade you must restart.")]
         self.need_restart = True
     return "FINISHED"
Example 30
    def test_multiple_queries_are_zip_file(self):
        expected_csv = 'two\r\n2\r\n'

        q = SimpleQueryFactory()
        q2 = SimpleQueryFactory()
        fn = generate_report_action()

        res = fn(None, None, [q, q2])
        z = ZipFile(io.BytesIO(res.content))
        got_csv = z.read(z.namelist()[0])

        self.assertEqual(len(z.namelist()), 2)
        self.assertEqual(z.namelist()[0], '%s.csv' % q.title)
        self.assertEqual(got_csv.lower().decode('utf-8'), expected_csv)
Example 31
def make_runtime(capsule, output, licfile=None, platforms=None, package=False,
                 suffix='', restrict=True):
    if package:
        output = os.path.join(output, 'pytransform' + suffix)
        if not os.path.exists(output):
            os.makedirs(output)
    logging.info('Generating runtime files to %s', relpath(output))

    myzip = ZipFile(capsule, 'r')
    if 'pytransform.key' in myzip.namelist():
        logging.info('Extract pytransform.key')
        myzip.extract('pytransform.key', output)
    else:
        logging.info('Extract pyshield.key, pyshield.lic, product.key')
        myzip.extract('pyshield.key', output)
        myzip.extract('pyshield.lic', output)
        myzip.extract('product.key', output)

    if licfile is None:
        logging.info('Extract license.lic')
        myzip.extract('license.lic', output)
    else:
        logging.info('Copying %s as license file', relpath(licfile))
        shutil.copy2(licfile, os.path.join(output, 'license.lic'))

    def copy3(src, dst):
        if suffix:
            x = os.path.basename(src).replace('.', ''.join([suffix, '.']))
            shutil.copy2(src, os.path.join(dst, x))
        else:
            shutil.copy2(src, dst)

    if not platforms:
        libfile = pytransform._pytransform._name
        if not os.path.exists(libfile):
            libname = dll_name + dll_ext
            libfile = os.path.join(PYARMOR_PATH, libname)
            if not os.path.exists(libfile):
                pname = pytransform.format_platform()
                libpath = os.path.join(PYARMOR_PATH, 'platforms')
                libfile = os.path.join(libpath, pname, libname)
        logging.info('Copying %s', libfile)
        copy3(libfile, output)
    elif len(platforms) == 1:
        filename = _build_platforms(platforms, restrict)[0]
        logging.info('Copying %s', filename)
        copy3(filename, output)
    else:
        libpath = os.path.join(output, pytransform.plat_path)
        logging.info('Create library path to support multiple platforms: %s',
                     libpath)
        if not os.path.exists(libpath):
            os.mkdir(libpath)

        filenames = _build_platforms(platforms, restrict)
        for platid, filename in list(zip(platforms, filenames)):
            logging.info('Copying %s', filename)
            path = os.path.join(libpath, *platid.split('.')[:2])
            logging.info('To %s', path)
            if not os.path.exists(path):
                os.makedirs(path)
            copy3(filename, path)

    filename = os.path.join(PYARMOR_PATH, 'pytransform.py')
    if package:
        shutil.copy2(filename, os.path.join(output, '__init__.py'))
    else:
        copy3(filename, output)

    logging.info('Generate runtime files OK')
Example 32
def _get_metadata(wheel_file):
    archive = ZipFile(wheel_file)
    for f in archive.namelist():
        if f.endswith("METADATA"):
            return archive.open(f).read().decode("utf-8")
    raise Exception("Metadata file not found in %s" % wheel_file)
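
Usage sketch; the wheel file name is hypothetical:

metadata = _get_metadata('example-1.0-py3-none-any.whl')
for line in metadata.splitlines():
    if line.startswith(('Name:', 'Version:')):
        print(line)
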
Example 33
class WheelFile(DistInfoProvider, FileProvider):
    def __init__(self, path):
        self.path = Path(path)
        self.parsed_filename = parse_wheel_filename(self.path)
        self.fp = None
        self.zipfile = None
        self._dist_info = None

    def __enter__(self):
        self.fp = self.path.open("rb")
        self.zipfile = ZipFile(self.fp)
        return self

    def __exit__(self, _exc_type, _exc_value, _traceback):
        self.zipfile.close()
        self.fp.close()
        self.fp = None
        self.zipfile = None
        return False

    @property
    def dist_info(self):
        if self._dist_info is None:
            if self.zipfile is None:
                raise RuntimeError(
                    "WheelFile.dist_info cannot be determined when WheelFile"
                    " is not open in context"
                )
            self._dist_info = find_dist_info_dir(
                self.zipfile.namelist(),
                self.parsed_filename.project,
                self.parsed_filename.version,
            )
        return self._dist_info

    def basic_metadata(self):
        namebits = self.parsed_filename
        about = {
            "filename": self.path.name,
            "project": namebits.project,
            "version": namebits.version,
            "buildver": namebits.build,
            "pyver": namebits.python_tags,
            "abi": namebits.abi_tags,
            "arch": namebits.platform_tags,
            "file": {
                "size": self.path.stat().st_size,
            },
        }
        self.fp.seek(0)
        about["file"]["digests"] = digest_file(self.fp, ["md5", "sha256"])
        return about

    def open_dist_info_file(self, path):
        # returns a binary IO handle; raises MissingDistInfoFileError if file
        # does not exist
        try:
            zi = self.zipfile.getinfo(self.dist_info + "/" + path)
        except KeyError:
            raise errors.MissingDistInfoFileError(path)
        else:
            return self.zipfile.open(zi)

    def has_dist_info_file(self, path):  # -> bool
        try:
            self.zipfile.getinfo(self.dist_info + "/" + path)
        except KeyError:
            return False
        else:
            return True

    def list_files(self):
        return [name for name in self.zipfile.namelist() if not name.endswith("/")]

    def has_directory(self, path):
        return any(name.startswith(path) for name in self.zipfile.namelist())

    def get_file_size(self, path):
        return self.zipfile.getinfo(path).file_size

    def get_file_hash(self, path, algorithm):
        with self.zipfile.open(path) as fp:
            return digest_file(fp, [algorithm])[algorithm]
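
Since __enter__/__exit__ manage the underlying zip handle, the class is meant to be used as a context manager; a sketch with a hypothetical wheel path:

with WheelFile('example-1.0-py3-none-any.whl') as wf:
    print(wf.dist_info)       # e.g. 'example-1.0.dist-info'
    print(wf.list_files())    # all non-directory members
    with wf.open_dist_info_file('METADATA') as fp:
        print(fp.read().decode('utf-8'))
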
Example 34
def unzip(zip_uri, is_url, clone_to_dir=".", no_input=False, password=None):
    """Download and unpack a zipfile at a given URI.

    This will download the zipfile to the cookiecutter repository,
    and unpack into a temporary directory.

    :param zip_uri: The URI for the zipfile.
    :param is_url: Is the zip URI a URL or a file?
    :param clone_to_dir: The cookiecutter repository directory
        to put the archive into.
    :param no_input: Suppress any prompts
    :param password: The password to use when unpacking the repository.
    """
    # Ensure that clone_to_dir exists
    clone_to_dir = os.path.expanduser(clone_to_dir)
    make_sure_path_exists(clone_to_dir)

    if is_url:
        # Build the name of the cached zipfile,
        # and prompt to delete if it already exists.
        identifier = zip_uri.rsplit("/", 1)[1]
        zip_path = os.path.join(clone_to_dir, identifier)

        if os.path.exists(zip_path):
            download = prompt_and_delete(zip_path, no_input=no_input)
        else:
            download = True

        if download:
            # (Re) download the zipfile
            r = requests.get(zip_uri, stream=True)
            with open(zip_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
    else:
        # Just use the local zipfile as-is.
        zip_path = os.path.abspath(zip_uri)

    # Now unpack the repository. The zipfile will be unpacked
    # into a temporary directory
    try:
        zip_file = ZipFile(zip_path)

        if len(zip_file.namelist()) == 0:
            raise InvalidZipRepository(
                "Zip repository {} is empty".format(zip_uri))

        # The first record in the zipfile should be the directory entry for
        # the archive. If it isn't a directory, there's a problem.
        first_filename = zip_file.namelist()[0]
        if not first_filename.endswith("/"):
            raise InvalidZipRepository("Zip repository {} does not include "
                                       "a top-level directory".format(zip_uri))

        # Construct the final target directory
        project_name = first_filename[:-1]
        unzip_base = tempfile.mkdtemp()
        unzip_path = os.path.join(unzip_base, project_name)

        # Extract the zip file into the temporary directory
        try:
            zip_file.extractall(path=unzip_base)
        except RuntimeError:
            # File is password protected; try to get a password from the
            # environment; if that doesn't work, ask the user.
            if password is not None:
                try:
                    zip_file.extractall(path=unzip_base,
                                        pwd=password.encode("utf-8"))
                except RuntimeError:
                    raise InvalidZipRepository(
                        "Invalid password provided for protected repository")
            elif no_input:
                raise InvalidZipRepository(
                    "Unable to unlock password protected repository")
            else:
                retry = 0
                while retry is not None:
                    try:
                        password = read_repo_password("Repo password")
                        zip_file.extractall(path=unzip_base,
                                            pwd=password.encode("utf-8"))
                        retry = None
                    except RuntimeError:
                        retry += 1
                        if retry == 3:
                            raise InvalidZipRepository(
                                "Invalid password provided "
                                "for protected repository")

    except BadZipFile:
        raise InvalidZipRepository(
            "Zip repository {} is not a valid zip archive:".format(zip_uri))

    return unzip_path
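
A usage sketch of both URI modes; the URL and paths are hypothetical:

# From a URL: the zip is cached under clone_to_dir before unpacking.
template_dir = unzip('https://example.com/templates/demo.zip', is_url=True,
                     clone_to_dir='~/.cookiecutters')

# From a local archive, suppressing prompts:
template_dir = unzip('/tmp/demo.zip', is_url=False, no_input=True)
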
Example 35
def extract_zip(input_zip):
    input_zip = ZipFile(input_zip)
    return {name: input_zip.read(name) for name in input_zip.namelist()}
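
The comprehension returns a {name: bytes} map of the whole archive; a self-contained round-trip sketch:

import io
from zipfile import ZipFile

buf = io.BytesIO()
with ZipFile(buf, 'w') as zf:
    zf.writestr('hello.txt', b'hello world')
assert extract_zip(buf) == {'hello.txt': b'hello world'}
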
Example 36
def download_unzip(url, path):
	request = get(url)
	zip_file = ZipFile(BytesIO(request.content))
	#zip_file.extractall(
	files = zip_file.namelist()
	print(files)
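
The example above stops at listing names (extractall is commented out); a completed variant under the same assumed imports (requests' get, io.BytesIO), with the path parameter actually used:

def download_unzip_all(url, path):
	# Fetch the archive into memory and extract everything under `path`.
	zip_file = ZipFile(BytesIO(get(url).content))
	zip_file.extractall(path)
	return zip_file.namelist()
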
Example 37
def test_download_ic_file(app, ACCOUNTS):
    app.login(ACCOUNTS['SUPERUSER'])
    vid_pid, col_pid = create_video_in_collection(app)

    # now make a collection-based annotation
    collection_based = {
        "media": [{
            "id":
            vid_pid,
            "name":
            "Media0",
            "url": [
                "https://milo.byu.edu///movies/50aba99cbe3e2dadd67872da44b0da94/54131f93/0033467.mp4",
                "https://milo.byu.edu///movies/b4861e89ca5c8adf5ae37281743206cd/54131f93/0033467.webm"
            ],
            "target":
            "hum-video",
            "duration":
            300.011,
            "popcornOptions": {
                "frameAnimation": True
            },
            "controls":
            False,
            "tracks": [{
                "name":
                "Layer 0",
                "id":
                "0",
                "trackEvents": [{
                    "id": "TrackEvent0",
                    "type": "skip",
                    "popcornOptions": {
                        "start": 0,
                        "end": 5,
                        "target": "target-0",
                        "__humrequired": False,
                        "id": "TrackEvent0"
                    },
                    "track": "0",
                    "name": "TrackEvent0"
                }]
            }],
            "clipData": {}
        }]
    }
    col_result = app.post('/annotation?client=popcorn&collection=' + col_pid,
                          data=json.dumps(collection_based),
                          headers={'Content-Type': 'application/json'})
    assert col_result.status_code == 200, "Superuser could not create collection-based annotation"

    # maketh a required annotation
    required = {
        "media": [{
            "id":
            vid_pid,
            "name":
            "Media0",
            "url": [
                "https://milo.byu.edu///movies/50aba99cbe3e2dadd67872da44b0da94/54131f93/0033467.mp4",
                "https://milo.byu.edu///movies/b4861e89ca5c8adf5ae37281743206cd/54131f93/0033467.webm"
            ],
            "target":
            "hum-video",
            "duration":
            300.011,
            "popcornOptions": {
                "frameAnimation": True
            },
            "controls":
            False,
            "tracks": [{
                "name":
                "Layer 0",
                "id":
                "0",
                "trackEvents": [{
                    "id": "TrackEvent0",
                    "type": "skip",
                    "popcornOptions": {
                        "start": 10,
                        "end": 25,
                        "target": "target-0",
                        "__humrequired": False,
                        "id": "TrackEvent0"
                    },
                    "track": "0",
                    "name": "TrackEvent0"
                }]
            }],
            "clipData": {}
        }]
    }
    req_result = app.post('/annotation?client=popcorn',
                          data=json.dumps(required),
                          headers={'Content-Type': 'application/json'})
    assert req_result.status_code == 200, "Superuser could not create required annotation"

    annotation_result = app.get('/annotation?client=ic&collection=' + col_pid +
                                '&dc:relation=' + vid_pid)
    assert annotation_result.headers.get('Content-Type') == 'application/zip'

    z = ZipFile(StringIO(annotation_result.data))
    items = z.namelist()

    assert len(filter(lambda fname: fname.endswith('.json'),
                      items)) == 1, 'No annotations in archive'
    assert len(filter(lambda fname: fname.endswith('.icf'),
                      items)) == 1, 'No ICF file in archive'

    a_filename = filter(lambda fname: fname.endswith('.json'), items)[0]

    filedata = z.read(a_filename)
    a = json.loads(filedata)

    assert len(
        a) == 2, 'There are not two annotation sets in the annotation file.'
    assert a[0]['media'][0]['tracks'][0]['trackEvents'][0]['popcornOptions'][
        'start'] == '10'
    assert a[1]['media'][0]['tracks'][0]['trackEvents'][0]['popcornOptions'][
        'start'] == '0'
Example 38
def unpack_zip(zipfile='', path_from_local=''):
    filepath = path_from_local + zipfile
    extract_path = OutputFilepath
    parent_archive = ZipFile(filepath)

    for info in parent_archive.infolist():
        if "/vizqlserver_" in info.filename:
            print(info.filename)
            if info.filename[-1] == '/':
                continue
            info.filename = os.path.basename(info.filename)
            parent_archive.extract(info, extract_path)
        if "worker" in info.filename:
            print(info.filename)
            info.filename = os.path.basename(info.filename)
            parent_archive.extract(info, extract_path)

    namelist = parent_archive.namelist()
    parent_archive.close()

    for name in namelist:
        try:
            if name[-4:] == '.zip':
                filepath = './Log Dump/' + name
                sub_archive = ZipFile(filepath)

                for info in sub_archive.infolist():
                    # print info.filename
                    if "/vizqlserver_" in info.filename:
                        print(info.filename)
                        if info.filename[-1] == '/':
                            continue
                        info.filename = os.path.basename(info.filename)
                        path = './Log Dump/' + os.path.splitext(
                            os.path.basename(filepath))[0] + '/'
                        sub_archive.extract(info, path)

                rdir = os.path.join(os.getcwd(), 'Log Dump')
                filelist = []
                for tree, fol, fils in os.walk(rdir):
                    filelist.extend([
                        os.path.join(tree, fil) for fil in fils
                        if fil.endswith('.txt')
                    ])
                for cnt, fil in enumerate(filelist):
                    os.rename(
                        fil,
                        os.path.join(
                            rdir,
                            str(cnt + 1).zfill(2) + '_' + os.path.basename(fil)))

                print("Successfully extracted " + name + "!")

        except Exception as e:

            print('failed on', name)
            print(e)
            pass

    return extract_path
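
# A minimal usage sketch (paths are hypothetical; OutputFilepath must be
# defined by the surrounding script before calling unpack_zip):
OutputFilepath = './Log Dump/'  # assumed global consumed by unpack_zip
dest = unpack_zip(zipfile='logs.zip', path_from_local='./downloads/')
print('vizqlserver/worker logs extracted to', dest)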
Example 39
def parseM3U(sUrl=None, infile=None):  # Parse local M3U playlists
    oGui = cGui()
    oInputParameterHandler = cInputParameterHandler()
    sUrl = oInputParameterHandler.getValue('siteUrl')

    if infile is None:
        if 'iptv4sat' in sUrl or '.zip' in sUrl:
            sHtmlContent = getHtml(sUrl)
            zip_files = ZipFile(io.BytesIO(sHtmlContent))
            files = zip_files.namelist()

            for Title in files:
                oOutputParameterHandler = cOutputParameterHandler()
                oOutputParameterHandler.addParameter('sMovieTitle', Title)
                oOutputParameterHandler.addParameter('siteUrl', sUrl)

                oGui.addDir(SITE_IDENTIFIER, 'unZip', Title, 'tv.png', oOutputParameterHandler)

            oGui.setEndOfDirectory()
            return

        elif '#EXTM3U' not in sUrl:
            oRequestHandler = cRequestHandler(sUrl)
            oRequestHandler.addHeaderEntry('User-Agent', UA)
            inf = oRequestHandler.request()

            if 'drive.google' in inf:
                inf = unGoogleDrive(inf)

            inf = inf.split('\n')
        else:
            inf = infile

    else:
        inf = infile

    try:
        # When inf is a file object this skips the leading '#EXTM3U' header;
        # when it is a list of lines, the AttributeError is swallowed.
        line = inf.readline()
    except:
        pass

    playlist = []
    song = track(None, None, None, None)
    ValidEntry = False

    for line in inf:
        line = line.strip()
        if line.startswith('#EXTINF:'):
            length, title = line.split('#EXTINF:')[1].split(',', 1)
            try:
                licon = line.split('#EXTINF:')[1].partition('tvg-logo=')[2]
                icon = licon.split('"')[1]
            except:
                icon = 'tv.png'
            ValidEntry = True
            song = track(length, title, None, icon)

        elif (len(line) != 0):
            if (ValidEntry) and (not (line.startswith('!') or line.startswith('#'))):
                ValidEntry = False
                song.path = line
                playlist.append(song)
                #VSlog(playlist)
                song = track(None, None, None, None)

    try:
        inf.close()
    except:
        pass

    return playlist
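
# The track helper used above is not shown; a minimal sketch consistent with
# how parseM3U constructs it (track(length, title, path, icon)) and later
# assigns song.path:
class track:
    """Minimal playlist-entry container assumed by parseM3U."""
    def __init__(self, length, title, path, icon):
        self.length = length
        self.title = title
        self.path = path
        self.icon = icon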
Example 40
from zipfile import ZipFile

zf = ZipFile("test.zip", 'r')

for f_name in zf.namelist():
    print(f_name)
    #zf.extract(f_name, '../')
    data = zf.read(f_name)
    # NOTE: each member overwrites the previous one in haha.txt
    with open('haha.txt', 'wb') as out:
        out.write(data)

zf.close()
Example 41
class ExcelReader:
    def __init__(self, filename):
        self.filename = filename
        self._zip_archive = ZipFile(self.filename, 'r')
        self._zip_files = self._zip_archive.namelist()
        self._shared_strings = []
        self._shared_values = {}
        self._rels = []
        self.workbook = Workbook(self)
        self.read()

    def read(self):
        self.read_relationship()
        self.read_shared_string()
        self.read_workbook()

    def read_relationship(self):
        relationship_path = find_file_path(self._zip_files,
                                           'workbook.xml.rels')
        if not relationship_path:
            return
        relationship_raw = self._zip_archive.read(relationship_path)
        root = etree.fromstring(relationship_raw)
        nsmap = {
            k if k is not None else 'rel': v
            for k, v in root.nsmap.items()
        }
        for node in root.xpath('/rel:Relationships/rel:Relationship',
                               namespaces=nsmap):
            rel = {}
            for key, value in node.attrib.items():
                rel[key] = value
            self._rels.append(rel)

    def read_shared_string(self):
        shared_strings_path = find_file_path(self._zip_files,
                                             'sharedStrings.xml')
        self._shared_strings.append(shared_strings_path)
        shared_strings_raw = self._zip_archive.read(shared_strings_path)
        root = etree.fromstring(shared_strings_raw)
        nsmap = {
            k if k is not None else 'sh': v
            for k, v in root.nsmap.items()
        }
        for index, string in enumerate(
                root.xpath('/sh:sst/sh:si/sh:t', namespaces=nsmap)):
            self._shared_values[str(index)] = string.text

    def read_workbook(self):
        workbook_raw = self._zip_archive.read(self.workbook.path)
        root = etree.fromstring(workbook_raw)
        nsmap = {
            k if k is not None else 'wb': v
            for k, v in root.nsmap.items()
        }
        worksheets = {}
        for sheet in root.xpath('/wb:workbook/wb:sheets/wb:sheet',
                                namespaces=nsmap):
            sheet_id = sheet.xpath('@r:id', namespaces=nsmap)[0]
            sheet_name = sheet.attrib['name']
            rel = self.find_relationship(sheet_id)
            sheet_filename = rel['Target']
            sheet_file_path = find_file_path(self._zip_files, sheet_filename)
            worksheets[sheet_name] = sheet_file_path
        self.read_worksheets(worksheets)

    def read_worksheets(self, worksheets):
        """

        :param dict worksheets:
        :return:
        """
        for ws_name, ws_path in worksheets.items():
            worksheet = Worksheet(self.workbook, ws_name, ws_path)

            worksheet_raw = self._zip_archive.read(ws_path)
            root = etree.fromstring(worksheet_raw)
            nsmap = {
                k if k is not None else 'ws': v
                for k, v in root.nsmap.items()
            }
            for row_node in root.xpath('./ws:sheetData/ws:row',
                                       namespaces=nsmap):

                row = Row(row_node.attrib['r'])
                for cell_node in row_node.xpath('ws:c', namespaces=nsmap):
                    column_latter = re.fullmatch(
                        COORDINATE_regex,
                        cell_node.attrib['r'])['column_latter']
                    row_num = re.fullmatch(COORDINATE_regex,
                                           cell_node.attrib['r'])['row']
                    column_num = get_column_number(column_latter)
                    cell = Cell(int(row_num), int(column_num))
                    cell.type = cell_node.attrib.get('t')

                    value_node = cell_node.find('ws:v', namespaces=nsmap)
                    if value_node is None:
                        value_node = cell_node.find(
                            'ws:is', namespaces=nsmap).find('ws:t',
                                                            namespaces=nsmap)
                    if cell_node.attrib.get('t', False) == 's':
                        cell.value = self._shared_values.get(value_node.text)
                    else:
                        cell.value = value_node.text

                    if cell_node.find('ws:f', namespaces=nsmap) is not None:
                        cell.formulae = cell_node.find('ws:f',
                                                       namespaces=nsmap).text

                    row.add_cell(cell)
                worksheet.add_row(row)
            self.workbook.add_worksheet(worksheet)

    def find_relationship(self, rel_id):
        for rel in self._rels:
            if rel['Id'] == rel_id:
                return rel
        return None
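
# find_file_path is used throughout ExcelReader but is not shown; a minimal
# sketch consistent with how it is called (signature and behaviour are
# assumptions, not the original helper):
def find_file_path(zip_files, filename):
    # Return the first archive member whose path ends with `filename`,
    # or None when the archive does not contain it.
    for path in zip_files:
        if path.endswith(filename):
            return path
    return None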
Example 42
from collections import Counter
import os
import pickle
import shutil
from glob import glob
from zipfile import ZipFile

import scipy
from pandas import Series

total = []
cl_clf = pickle.load(open('/home/ym1495/cl_clf.p', 'rb'))
os.chdir('/home/ym1495/1990')
zipfiles = glob('*zip')

for zfname in zipfiles:
    print(zfname)
    zfile = ZipFile(zfname)
    year = zfname.split('/')[-1][:-4]

    members = zfile.namelist()
    threshold = len(members) / 200
    c = 0
    for fname in members:
        if not fname.endswith('.p'):
            continue
        elif fname.endswith('dis/.p'):
            continue
        else:
            docid = fname.split('/')[-1][:-2]
            caseid = docid.split('-')[0]
            para_count = pickle.load(zfile.open(fname, 'r'))
            if para_count.shape[0] != 0:
                topics = cl_clf.predict(para_count)
                probability = cl_clf.predict_proba(para_count)
                for i in range(len(topics)):
Example 43
import requests
import pandas as pd
from pathlib import Path
from io import BytesIO
from zipfile import ZipFile
import datetime

url = 'https://ihmecovid19storage.blob.core.windows.net/latest/ihme-covid19.zip'
content = requests.get(url)

# unzip the content (download once, reuse the archive for every CSV member)
f = ZipFile(BytesIO(content.content))

cleaned_list = [i for i in f.namelist() if '.csv' in i]
for i in cleaned_list:
    myzip = f.extract(i)
    df = pd.read_csv(myzip, dtype='unicode')
    p = Path(i)
    df['Path_Update_Dt'] = p.parts[0]
    df['current_dttm'] = datetime.datetime.today()
    # copy_to_sql and params are assumed to come from the surrounding (teradataml) script

    if 'reference_hospitalization_all_locs' in i.lower():
        copy_to_sql(df=df,
                    table_name='STG_Hospitalization_all_locs',
                    schema_name=params.SchemaName,
                    index=False,
                    if_exists="replace")
Example 44
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 19 12:02:21 2018

@author: Angel.Herrera
"""
import os
from zipfile import ZipFile

os.chdir("")

# Only root directories:
root_dirs = []
zip_ref = ZipFile("NAME.zip")
for f in zip_ref.namelist():
    zinfo = zip_ref.getinfo(f)
    if zinfo.is_dir():
        r_dir = f.split('/')[0]
        if r_dir not in root_dirs:
            root_dirs.append(r_dir)
for d in root_dirs:
    print(d)
Example 45
__email__ = "*****@*****.**"

# The following script maps the biogrid_genes to their biogrid_id

import pandas as pd
from urllib.request import urlopen
from zipfile import ZipFile
from io import BytesIO
from collections import defaultdict

thefile = urlopen(
    'https://downloads.thebiogrid.org/Download/BioGRID/Latest-Release/BIOGRID-ORGANISM-LATEST.tab2.zip'
)
extracted_files = ZipFile(BytesIO(thefile.read()))
dataset = [
    i for i in extracted_files.namelist()
    if "BIOGRID-ORGANISM-Homo_sapiens" in i
][0]
version = dataset.split('-')[-1].replace(".tab2.txt", "")
print("done downloading, started importing")
data = pd.read_csv(extracted_files.open(dataset),
                   low_memory=False,
                   delimiter='\t')

dct = defaultdict(list)

col = [
    'BioGRID ID Interactor A', 'BioGRID ID Interactor B',
    'Official Symbol Interactor A', 'Official Symbol Interactor B'
]
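
# The example is cut off before the mapping loop; a hedged sketch of how dct
# and col are typically combined to map official symbols to BioGRID IDs (the
# loop body is an assumption, not the original code):
for _, row in data[col].iterrows():
    dct[row['Official Symbol Interactor A']].append(row['BioGRID ID Interactor A'])
    dct[row['Official Symbol Interactor B']].append(row['BioGRID ID Interactor B'])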
Example 46
class ZipArchive(ZipPath):
    """
    I am a L{FilePath}-like object which can wrap a zip archive as if it were a
    directory.

    It works similarly to L{FilePath} in L{bytes} and L{unicode} handling --
    instantiating with a L{bytes} will return a "bytes mode" L{ZipArchive},
    and instantiating with a L{unicode} will return a "text mode"
    L{ZipArchive}. Methods that return new L{ZipArchive} or L{ZipPath}
    instances will be in the mode of the argument to the creator method,
    converting if required.
    """
    @property
    def archive(self):
        return self

    def __init__(self, archivePathname):
        """
        Create a ZipArchive, treating the archive at archivePathname as a zip
        file.

        @param archivePathname: a L{bytes} or L{unicode}, naming a path in the
            filesystem.
        """
        self.path = archivePathname
        self.zipfile = ZipFile(_coerceToFilesystemEncoding(
            "", archivePathname))
        self.pathInArchive = _coerceToFilesystemEncoding(archivePathname, "")
        # zipfile is already wasting O(N) memory on cached ZipInfo instances,
        # so there's no sense in trying to do this lazily or intelligently
        self.childmap = {}  # type: Dict[str, Dict[str, int]]

        for name in self.zipfile.namelist():
            name = _coerceToFilesystemEncoding(self.path, name).split(self.sep)
            for x in range(len(name)):
                child = name[-x]
                parent = self.sep.join(name[:-x])
                if parent not in self.childmap:
                    self.childmap[parent] = {}
                self.childmap[parent][child] = 1
            parent = _coerceToFilesystemEncoding(archivePathname, "")

    def child(self, path):
        """
        Create a ZipPath pointing at a path within the archive.

        @param path: a L{bytes} or L{unicode} with no path separators in it
            (either '/' or the system path separator, if it's different).
        """
        return ZipPath(self, path)

    def exists(self):
        """
        Returns C{True} if the underlying archive exists.
        """
        return FilePath(self.zipfile.filename).exists()

    def getAccessTime(self):
        """
        Return the archive file's last access time.
        """
        return FilePath(self.zipfile.filename).getAccessTime()

    def getModificationTime(self):
        """
        Return the archive file's modification time.
        """
        return FilePath(self.zipfile.filename).getModificationTime()

    def getStatusChangeTime(self):
        """
        Return the archive file's status change time.
        """
        return FilePath(self.zipfile.filename).getStatusChangeTime()

    def __repr__(self) -> str:
        return "ZipArchive(%r)" % (os.path.abspath(self.path), )
Example 47
 def installClick(self, event):
     # Browse for the package and install it
     # The package is an IRM file, but it is basically just a zip file
     dlg = wx.FileDialog(
         self,
         message="Choose a File",
         defaultDir=self.seqWin.cwd,
         defaultFile="",
         wildcard="InteractiveROSETTA Modules (*.irm)|*.irm",
         style=wx.OPEN | wx.CHANGE_DIR)
     if (dlg.ShowModal() != wx.ID_OK):
         return
     module = dlg.GetPath()
     # Prevent the user from trying to package up the template
     if (module.split(".irm")[0].endswith("template")):
         dlg = wx.MessageDialog(
             self,
             "The template module is reserved, please do not attempt to overwrite it.",
             "Operation Forbidden", wx.OK | wx.ICON_ERROR | wx.CENTRE)
         dlg.ShowModal()
         dlg.Destroy()
         return
     if (platform.system() == "Windows"):
         packagecode = module[module.rfind("\\") + 1:].split(".irm")[0]
     else:
         packagecode = module[module.rfind("/") + 1:].split(".irm")[0]
     # Let's see if this module is already installed
     home = os.path.expanduser("~")
     if (platform.system() == "Windows"):
         if (os.path.exists(home + "/InteractiveROSETTA/modules/" +
                            packagecode)):
             dlg = wx.MessageDialog(
                 self,
                 "This module is already installed.  Do you want to overwrite it?",
                 "Module Already Installed",
                 wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
             if (dlg.ShowModal() == wx.ID_NO):
                 return
             dlg.Destroy()
             # Delete it
             shutil.rmtree(home + "/InteractiveROSETTA/modules/" +
                           packagecode,
                           ignore_errors=True)
         os.mkdir(home + "/InteractiveROSETTA/modules/" + packagecode)
     else:
         if (os.path.exists(home + "/.InteractiveROSETTA/modules/" +
                            packagecode)):
             dlg = wx.MessageDialog(
                 self,
                 "This module is already installed.  Do you want to overwrite it?",
                 "Module Already Installed",
                 wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
             if (dlg.ShowModal() == wx.ID_NO):
                 return
             dlg.Destroy()
             # Delete it
             shutil.rmtree(home + "/.InteractiveROSETTA/modules/" +
                           packagecode,
                           ignore_errors=True)
         os.mkdir(home + "/.InteractiveROSETTA/modules/" + packagecode)
     # Unpack the irm package to the ~/InteractiveROSETTA/modules directory
     fin = open(module, 'rb')
     z = ZipFile(fin)
     for name in z.namelist():
         if (platform.system() == "Windows"):
             outpath = home + "/InteractiveROSETTA/modules/" + packagecode
         else:
             outpath = home + "/.InteractiveROSETTA/modules/" + packagecode
         z.extract(name, outpath)
     fin.close()
     # Remove the "server" directory if it exists since that belongs on the server, not the client
     try:
         if (platform.system() == "Windows"):
             shutil.rmtree(home + "/InteractiveROSETTA/modules/" +
                           packagecode + "/server",
                           ignore_errors=True)
         else:
             shutil.rmtree(home + "/.InteractiveROSETTA/modules/" +
                           packagecode + "/server",
                           ignore_errors=True)
     except:
         pass
     # Is there a license?  If so, display it.
     if (platform.system() == "Windows"):
         if (os.path.isfile(home + "/InteractiveROSETTA/modules/" +
                            packagecode + "/license")):
             fin = open(home + "/InteractiveROSETTA/modules/" +
                        packagecode + "/license")
             licensetext = ""
             for aline in fin:
                 licensetext += aline
             fin.close()
             dlg1 = wx.lib.dialogs.ScrolledMessageDialog(
                 None, licensetext, packagecode + " License")
             dlg1.ShowModal()
             dlg1.Destroy()
             dlg = wx.MessageDialog(
                 None, "Do you accept the license agreement?",
                 packagecode + " License",
                 wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
             if (dlg.ShowModal() == wx.ID_NO):
                 # Delete everything
                 shutil.rmtree(home + "/InteractiveROSETTA/modules/" +
                               packagecode)
                 return
     else:
         if (os.path.isfile(home + "/.InteractiveROSETTA/modules/" +
                            packagecode + "/license")):
             fin = open(home + "/.InteractiveROSETTA/modules/" +
                        packagecode + "/license")
             licensetext = ""
             for aline in fin:
                 licensetext += aline
             fin.close()
             dlg1 = wx.lib.dialogs.ScrolledMessageDialog(
                 None, licensetext, packagecode + " License")
             dlg1.ShowModal()
             dlg1.Destroy()
             dlg = wx.MessageDialog(
                 None, "Do you accept the license agreement?",
                 packagecode + " License",
                 wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
             if (dlg.ShowModal() == wx.ID_NO):
                 # Delete everything
                 shutil.rmtree(home + "/.InteractiveROSETTA/modules/" +
                               packagecode)
                 return
     # Is there a message from the developer?  If so, display it
     if (platform.system() == "Windows"):
         if (os.path.isfile(home + "/InteractiveROSETTA/modules/" +
                            packagecode + "/message")):
             fin = open(home + "/InteractiveROSETTA/modules/" +
                        packagecode + "/message")
             msgtext = ""
             for aline in fin:
                 msgtext += aline
             fin.close()
             dlg = wx.MessageDialog(self, msgtext,
                                    "Message From the Developer",
                                    wx.OK | wx.ICON_ERROR | wx.CENTRE)
             dlg.ShowModal()
             dlg.Destroy()
     else:
         if (os.path.isfile(home + "/.InteractiveROSETTA/modules/" +
                            packagecode + "/message")):
             fin = open(home + "/.InteractiveROSETTA/modules/" +
                        packagecode + "/message")
             msgtext = ""
             for aline in fin:
                 msgtext += aline
             fin.close()
             dlg = wx.MessageDialog(self, msgtext,
                                    "Message From the Developer",
                                    wx.OK | wx.ICON_ERROR | wx.CENTRE)
             dlg.ShowModal()
             dlg.Destroy()
     # Reload the modules to get the updated list of protocols
     self.parent.readModules()
Example 48
    def scan(self, codeURL, runtime):
        zippath = self.downloads.joinpath('lambda.zip')
        zippath.write_bytes(requests.get(codeURL).content)
        if not is_zipfile(zippath):
            return  # invalid zip
        zf = ZipFile(zippath)

        # Unzip Lambda source code
        zf.extractall(self.downloads)

        # Configure sonar-project.properties
        if runtime.startswith('python'):
            language = 'py'
        elif runtime.startswith('node'):
            language = 'js'
        elif runtime.startswith('java'):
            language = 'java'
        else:
            return  # unsupported language
        Path(self.downloads, 'sonar-project.properties').write_text(
            SONAR_PROJECT_PROPERTIES.format(self.config['url'],
                                            self.config['login'],
                                            self.config['password'], language,
                                            self.config['projectKey'],
                                            self.config['projectName']))

        # Run sonar-scanner
        cwd = Path('.').resolve()
        cd(self.downloads)
        sh(shsplit(self.config['command']), stdout=DEVNULL, stderr=DEVNULL)
        cd(cwd)
        rmtree(self.downloads, ignore_errors=True)
        self.downloads.mkdir(parents=True, exist_ok=True)

        # Get results
        curl = requests.Session()
        curl.auth = (self.config['login'], self.config['password'])

        while True:
            sleep(3)
            task = json.loads(
                curl.get(
                    f'{self.config["url"]}/api/ce/activity').text)['tasks'][0]
            if task['status'] in ['SUCCESS', 'FAIL']:
                break

        issues = json.loads(
            curl.get(
                f'{self.config["url"]}/api/issues/search?project=lambdaguard').
            text)['issues']
        curl.post(f'{self.config["url"]}/api/projects/delete',
                  data={'project': 'lambdaguard'})

        for issue in issues:
            if issue['status'] != 'OPEN':
                continue
            where = issue['component'].split(':', 1)[1]
            yield {
                'level':
                'high',
                'text':
                f'{issue["message"]}\n{where} on line {issue["textRange"]["startLine"]}.'
            }
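
# SONAR_PROJECT_PROPERTIES is referenced above but not shown; a plausible
# template consistent with the six positional .format() arguments (the exact
# property names are an assumption):
SONAR_PROJECT_PROPERTIES = '''
sonar.host.url={0}
sonar.login={1}
sonar.password={2}
sonar.language={3}
sonar.projectKey={4}
sonar.projectName={5}
sonar.sources=.
'''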
Example 49
def run(**kwargs):
    '''
    function varargout = readhgt(varargin)
    %READHGT Import/download NASA SRTM data files (.HGT).
    %	READHGT(area=) where AREA is a 4-element vector [LAT1,lAT2,LON1,LON2]
    %	downloads the SRTM data and plots a map corresponding to the geographic
    %	area defined by latitude and longitude limits (in decimal degrees). If 
    %	the needed SRTM .hgt files are not found in the current directory (or 
    %	in the path), they are downloaded from the USGS data server (needs an 
    %	Internet connection and a companion file "readhgt_srtm_index.txt"). For
    %	better plot results, it is recommended to install DEM personal function
    %	available at author's Matlab page. 
    %
    %	READHGT(lat=,lon=) reads or downloads the SRTM tiles corresponding to LAT
    %	and LON (in decimal degrees) coordinates (lower-left corner).
    %
    %	LAT and/or LON can be vectors: in that case, tiles corresponding to all
    %	possible combinations of LAT and LON values will be downloaded, and
    %	optional output structure X will have as many elements as tiles.
    %
    %	READHGT(lat=,lon=,outdir=) specifies output directory OUTDIR to write
    %	downloaded files.
    %
    %	READHGT(lat=,lon=,outdir=,url=) specifies the URL address to find HGT 
    %	files (default is USGS).
    %
    %	READHGT(filename=) reads HGT data file FILENAME, must be in the form
    %	"[N|S]yy[E|W]xxx.hgt[.zip]", as downloaded from SRTM data servers.
    %
    %
    %	X=READHGT(...) returns a structure X containing: 
    %		lat: coordinate vector of latitudes (decimal degree)
    %		lon: coordinate vector of longitudes (decimal degree)
    %		  z: matrix of elevations (meters, INT16 class)
    %		hgt: downloaded filename(s)
    %
    %
    % X=READHGT(...,crop=) if value is left blank, crops the resulting map
    % around existing land (reduces any sea or novalue areas at the borders).
    % If crop value is [LAT1,LAT2,LON1,LON2], crops the map using latitude/lon
    % limits. Prefer the new syntax READHGT(AREA=).
    %
    %
    %	--- Additional options ---
    %
    % 'plot'
    %    Also plots the tile(s).
    %
    %	'tiles'
    %	   Imports and plots individual tiles instead of merging them (default 
    %	   behavior if adjoining values of LAT and LON).
    %
    %	'interp'
    %	   Linearly interpolates missing data.
    %
    %	'crop'
    %	   crops the resulting map around existing land (reduces any sea or 
    %	   novalue areas at the borders).
    %
    %	'crop',[lat1,lat2,lon1,lon2]
    %	   Former syntax that crops the map using latitude/longitude limits. 
    %	   Prefer the new syntax READHGT(AREA).
    %
    %	'srtm1'
    %	   Downloads SRTM1 tiles which are 9 times bigger than default SRTM3 
    %	   ! EXPERIMENTAL ! since the used URL from USGS seems temporary.
    %
    %	'srtm3'
    %	   Forces SRTM3 download (by default, SRTM1 tile is downloaded only for
    %	   USA, if exists).
    %
    %
    %	--- Examples ---
    %
    %	- to plot a map of the Paris region, France (single tile):
    %		readhgt(48,2)
    %
    %	- to plot a map of Flores volcanic island, Indonesia (5 tiles):
    %		readhgt(-9,119:123)
    %
    %	- to plot a map of the Ubinas volcano, Peru (SRTM1 cropped tile):
    %	   readhgt([-16.4,-16.2,-71.5,-71.3],'srtm1','interp')
    %
    %	- to download SRTM1 data of Cascade Range (27 individual tiles):
    %		X=readhgt(40:48,-123:-121,'tiles');
    %
    %
    %	--- Information ---
    %
    %	- each file corresponds to a tile of 1x1 degree of a square grid
    %	  1201x1201 of elevation values (SRTM3 = 3 arc-seconds), and for USA  
    %	  territory or when using the 'srtm1' option, at higher resolution 
    %	  3601x3601 grid (SRTM1 = 1 arc-second). Note that SRTM1 and SRTM3 
    %	  files have the same syntax names; only the size differs.
    %
    %	- elevations are of class INT16: sea level values are 0, unknown values
    %	  equal -32768 (there is no NaN for INT class), use 'interp' option to
    %	  fill the gaps.
    %
    %	- note that borders are included in each tile, so to concatenate tiles
    %	  you must remove one row/column in the corresponding direction (this
    %	  is made automatically by READHGT when merging tiles).
    %
    %	- downloaded file is written in the current directory or optional  
    %	  OUTDIR directory, and it remains there. Take care that mixed SRTM1
    %	  and SRTM3 files may lead to fail to merge.
    %
    %	- NASA Shuttle Radar Topography Mission [February 11 to 22, 2000] 
    %	  produced a near-global covering on Earth land, but still limited to 
    %	  latitudes from 60S to 60N. Offshore tiles will be output as flat 0
    %	  value grid.
    %
    %	- if you look for other global topographic data, take a look to ASTER
    %	  GDEM, worldwide 1 arc-second resolution (from 83S to 83N): 
    %	  http://gdex.cr.usgs.gov/gdex/ (free registration required)
    %
    %
    %	Author: François Beauducel <*****@*****.**>
    %		Institut de Physique du Globe de Paris
    %
    %	References:
    %		http://dds.cr.usgs.gov/srtm/version2_1
    %
    %	Acknowledgments: Yves Gaudemer, Jinkui Zhu, Greg
    %
    %	Created: 2012-04-22 in Paris, France
    %	Updated: 2016-05-06
    
    %	Copyright (c) 2016, François Beauducel, covered by BSD License.
    %	All rights reserved.
    %
    %	Redistribution and use in source and binary forms, with or without 
    %	modification, are permitted provided that the following conditions are 
    %	met:
    %
    %	   * Redistributions of source code must retain the above copyright 
    %	     notice, this list of conditions and the following disclaimer.
    %	   * Redistributions in binary form must reproduce the above copyright 
    %	     notice, this list of conditions and the following disclaimer in 
    %	     the documentation and/or other materials provided with the distribution
    %	                           
    %	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
    %	AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
    %	IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
    %	ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
    %	LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
    %	CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
    %	SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
    %	INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
    %	CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
    %	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
    %	POSSIBILITY OF SUCH DAMAGE.
    '''

    fidx = 'readhgt_srtm_index.txt'
    # ATTENTION: this file must exist in the current path to use default SRTM3 tiles
    # since USGS delivers data continent-by-continent with nominative directories,
    # this index file is needed to know the full path name of each tile.
    sz1 = 3601  # SRTM1 tile size
    sz3 = 1201  # SRTM3 tile size
    novalue = -32768
    n = 1

    options = kwargs.get('options', [])
    makeplot = 'plot' in options
    merge = 'merge' in options
    tiles = 'tiles' in options
    inter = 'interp' in options

    if 'srtm1' in options:
        # EXPERIMENTAL: SRTM1 full resolution tiles available here (2016):
        url = 'http://e4ftl01.cr.usgs.gov/SRTM/SRTMGL1.003/2000.02.11'
        srtm1 = True
        srtm3 = False
        sz = 3601
    else:
        # official USGS SRTM3 tiles (and SRTM1 for USA):
        url = 'http://dds.cr.usgs.gov/srtm/version2_1'
        srtm3 = True
        srtm1 = False
        sz = 1201

    if 'crop' in kwargs:
        if kwargs['crop'] == '':
            cropflag = 1
        else:
            crop = kwargs['crop']
            lat = np.floor(
                np.arange(min(crop[0], crop[1]), max(crop[0], crop[1])))
            lon = np.floor(
                np.arange(min(crop[2], crop[3]), max(crop[2], crop[3])))
            cropflag = 2
    else:
        cropflag = 0

    # syntax READHGT without argument: opens the GUI to select a file
    if not kwargs:
        # Make a top-level instance and hide since it is ugly and big.
        root = Tkinter.Tk()
        root.withdraw()

        # Make it almost invisible - no decorations, 0 size, top left corner.
        root.overrideredirect(True)
        root.geometry('0x0+0+0')

        # Show window again and lift it to top so it can get focus,
        # otherwise dialogs will end up behind the terminal.
        root.deiconify()
        root.lift()
        root.focus_force()

        options = {}
        options['filetypes'] = [('HGT files', '.hgt'), ('HGT zip', '.hgt.zip')]
        f = tkFileDialog.askopenfilenames(parent=root,
                                          **options)  # Or some other dialog

        # Get rid of the top-level instance once to make it actually invisible.
        root.destroy()

    # syntax READHGT(FILENAME, ...)
    if 'filename' in kwargs:
        f = kwargs['filename']
        try:
            test = open(f, 'r')
            test.close()
        except IOError:
            print('Cannot open file: ' + f)

    if 'lat' in kwargs and 'lon' in kwargs:
        lat = kwargs['lat']
        lon = kwargs['lon']

    if 'url' in kwargs:
        url = kwargs['url']

    try:
        vl = len(lat)**2
    except TypeError:
        vl = 1

#        if ~isnumeric(lon) || ~isnumeric(lat) || any(abs(lat) > 60) || any(lon < -180) || any(lon > 179) || isempty(lat) || isempty(lon)
#            error('LAT and LON must be numeric and in valid SRTM interval (abs(LAT)<60).');
#        end
#        if ~tiles && (any(diff(lat) ~= 1) || any(diff(lon) ~= 1))
#            fprintf('READHGT: Warning! LAT and LON vectors do not define adjoining tiles. Cannot merge and force TILES option.');
#            tiles = 1;
#        end

    if 'outdir' in kwargs:
        outdir = kwargs['outdir']
        if not os.path.isdir(outdir):
            print('Out directory is invalid. Using default')
            outdir = '.'
    else:
        outdir = '.'

    lon = int(np.floor(lon))
    lat = int(np.floor(lat))

    lat, lon = np.meshgrid(lat, lon)
    f = []

    for i in range(vl):
        for j in range(vl):
            if lat[i][j] < 0:
                slat = 'S%02d' % -lat[i][j]
            else:
                slat = 'N%02d' % lat[i][j]

            if lon[i][j] < 0:
                slon = 'W%03d' % -lon[i][j]
            else:
                slon = 'E%03d' % lon[i][j]

            f.append('%s/%s%s.hgt' % (outdir, slat, slon))

            try:
                ofile = open(f[i + j])
                ofile.close()

            except IOError:
                if srtm1:
                    ff = '/%s%s.SRTMGL1.hgt.zip' % (slat, slon)
                else:
                    fsrtm = fidx
                    try:
                        fid = open(fsrtm, 'rU')
                        k = []
                        for lines in fid:
                            if slat + slon in lines:
                                k.append(lines)
                        if not k:
                            print(
                                'READHGT: Warning! Cannot find %s%s tile in SRTM database. Consider it offshore...\n'
                                % (slat, slon))
                        else:
                            if srtm3:
                                ff = k[0]  # first matching index line
                            else:
                                ff = k[-1]  # last matching index line
                    except IOError:
                        print(
                            'Cannot find "%s" index file to parse SRTM database. Please download HGT file manually.'
                            % fsrtm)
                    print('Download %s%s ... ' % (url, ff))

                zipurl = url + ff
                zipresp = urlopen(zipurl)
                zfile = ZipFile(BytesIO(zipresp.read()))
                f[i + j] = zfile.namelist()
                zfile.extractall(outdir)
                #                with urlopen(zipurl) as zipresp:
                #                    with ZipFile(BytesIO(zipresp.read())) as zfile:
                #                        f[i+j] = zfile.namelist()
                #                        zfile.extractall(outdir)
                #                zipresp = urlopen(zipurl)
                #                tempzip = open('tzip.zip','wb')
                #                tempzip = write(zipresp.read())
                #                tempzip.close()
                #                zf = ZipFile('tzip.zip')
                #                zf.extractall(path=out)
                #                zf.close()

                print('done.\n')

    # pre-allocate the X structure (one row per file/tile)
    X = pd.DataFrame(index=np.arange(vl), columns=['hgt', 'lat', 'lon', 'z'])

    tiles = vl != 1

    for i in np.arange(vl):
        #unzip HGT file if needed
        if '.zip' in f[i]:
            with ZipFile(f[i]) as zfile:
                X.loc[i]['hgt'] = zfile.namelist()
                zfile.extractall(outdir)
        else:
            X.loc[i]['hgt'] = f[i]

        if f[i] == '':
            #offshore empty tile...
            X.loc[i]['z'] = []
        else:

            #loads data from HGT file
            fl = open(X.loc[i]['hgt'], 'rb')
            data = np.fromfile(fl, np.dtype('>i2'))
            numel = int(np.sqrt(len(data)))

            if numel == sz1:
                if srtm3:
                    z = data.reshape(sz1, sz1)
                    X.loc[i]['z'] = z[::3, ::3]  # keep every 3rd row and column

                    X.loc[i]['lon'] = np.linspace(lon[i], lon[i] + 1, sz3)
                    X.loc[i]['lat'] = np.linspace(lat[i], lat[i] + 1, sz3)
                elif srtm1:
                    X.loc[i]['z'] = data.reshape(sz1, sz1)

                    X.loc[i]['lon'] = np.linspace(lon[i], lon[i] + 1, sz1)
                    X.loc[i]['lat'] = np.linspace(lat[i], lat[i] + 1, sz1)
            elif numel == sz3:
                X.loc[i]['z'] = data.reshape(sz3, sz3)

                X.loc[i]['lon'] = np.linspace(lon[i], lon[i] + 1, sz3)
                X.loc[i]['lat'] = np.linspace(lat[i], lat[i] + 1, sz3)

    return X
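
# A minimal usage sketch (assumes network access and the companion
# readhgt_srtm_index.txt file next to the script):
X = run(lat=48, lon=2, outdir='.')
print(X.loc[0]['z'].shape)  # (1201, 1201) for an SRTM3 tile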
Example 50
if sys.argv[1] == '--all':
    jars = System.getProperty("java.class.path").split(File.pathSeparator)
else:
    jars = sys.argv[1:]

classLoader = FijiClassLoader()
args = array([Object.getClass(zeros(0, String))], Class)


def hasMainMethod(name):
    try:
        c = Class.forName(name, False, classLoader)
        return c.getMethod('main', args) is not None
    except:
        return False


for jar in jars:
    try:
        classLoader.addPath(jar)
        zip = ZipFile(jar, 'r')
        for file in zip.namelist():
            if not file.endswith('.class'):
                continue
            name = file[:-6].replace('/', '.')
            if hasMainMethod(name):
                print 'main class', name, 'found in jar', jar
    except:
        pass  # do nothing
Example 51
    def find_plugins(self, dir):
        """
        Find readers in a given directory. This method
        can be used to inspect user plug-in directories to
        find new readers/writers.

        :param dir: directory to search into
        :return: number of readers found
        """
        readers_found = 0
        temp_path = os.path.abspath(dir)
        if not os.path.isdir(temp_path):
            temp_path = os.path.join(os.getcwd(), dir)
        if not os.path.isdir(temp_path):
            temp_path = os.path.join(os.path.dirname(__file__), dir)
        if not os.path.isdir(temp_path):
            temp_path = os.path.join(os.path.dirname(sys.path[0]), dir)

        dir = temp_path
        # Check whether the directory exists
        if not os.path.isdir(dir):
            msg = "DataLoader couldn't locate DataLoader plugin folder."
            msg += """ "%s" does not exist""" % dir
            logger.warning(msg)
            return readers_found

        for item in os.listdir(dir):
            full_path = os.path.join(dir, item)
            if os.path.isfile(full_path):

                # Process python files
                if item.endswith('.py'):
                    toks = os.path.splitext(os.path.basename(item))
                    try:
                        sys.path.insert(0, os.path.abspath(dir))
                        module = __import__(toks[0], globals(), locals())
                        if self._identify_plugin(module):
                            readers_found += 1
                    except:
                        msg = "Loader: Error importing "
                        msg += "%s\n  %s" % (item, sys.exc_info()[1])
                        logger.error(msg)

                # Process zip files
                elif item.endswith('.zip'):
                    try:
                        # Find the modules in the zip file
                        zfile = ZipFile(item)
                        nlist = zfile.namelist()

                        sys.path.insert(0, item)
                        for mfile in nlist:
                            try:
                                # Change OS path to python path
                                fullname = mfile.replace('/', '.')
                                fullname = os.path.splitext(fullname)[0]
                                module = __import__(fullname, globals(),
                                                    locals(), [""])
                                if self._identify_plugin(module):
                                    readers_found += 1
                            except:
                                msg = "Loader: Error importing"
                                msg += " %s\n  %s" % (mfile, sys.exc_info()[1])
                                logger.error(msg)

                    except:
                        msg = "Loader: Error importing "
                        msg += " %s\n  %s" % (item, sys.exc_info()[1])
                        logger.error(msg)

        return readers_found
Example 52
def read_file_data_into_string(filepath):
    # Read one member of the global zip_file (opened below) into a string.
    buffer = ""
    with zip_file.open(filepath) as f:
        for line in f:
            buffer += line.decode('utf-8')
    return buffer

def print_all_files(files):
    for filename in files:
        if not os.path.isdir(filename):
            with zip_file.open(filename) as f:
                for line in f:
                    print(line)

zip_file = ZipFile('Sample.xlsx')
files = zip_file.namelist()

workbook_xml_string = read_file_data_into_string('xl/workbook.xml')
data_sheet_id = find_sheet_id(workbook_xml_string, "Data")

workbook_rels_xml_string = read_file_data_into_string('xl/_rels/workbook.xml.rels')
data_sheet_file = find_sheet_file(workbook_rels_xml_string, data_sheet_id)
data_sheet_file_path = f'''xl/{data_sheet_file}'''

data_sheet_xml_string = read_file_data_into_string(data_sheet_file_path)

table_rid = find_table_rid_of_sheet(data_sheet_xml_string)

print(data_sheet_file_path)
print(table_rid)
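
# find_sheet_id, find_sheet_file and find_table_rid_of_sheet are not shown; a
# regex-based sketch of the first, matching how it is called above (the
# implementation is an assumption, not the original helper):
import re

def find_sheet_id(workbook_xml, sheet_name):
    # Pull the r:id attribute of the <sheet> element with the given name.
    pattern = r'<sheet[^>]*name="{}"[^>]*r:id="([^"]+)"'.format(re.escape(sheet_name))
    match = re.search(pattern, workbook_xml)
    return match.group(1) if match else None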
Example 53
    def process_item(self, item, spider):
        if hasattr(spider, 'sample') and spider.sample == 'true':
            is_sample = True
        else:
            is_sample = False
        data_version = self._get_start_time(spider).strftime(
            '%Y-%m-%d %H:%M:%S')
        for completed in item:

            if completed['success']:

                data = {
                    'collection_source': spider.name,
                    'collection_data_version': data_version,
                    'collection_sample': is_sample,
                    'file_name': completed['file_name'],
                    'url': completed['url'],
                    'data_type': completed['data_type'],
                    'encoding': completed.get('encoding', 'utf-8')
                }

                if hasattr(spider, 'note') and spider.note:
                    data['collection_note'] = spider.note

                files = {}

                zipfile = None
                if hasattr(spider, 'ext') and spider.ext == '.zip':
                    zipfile = ZipFile(completed['local_path'])

                    files = {
                        'file': (completed['file_name'],
                                 zipfile.open(zipfile.namelist()[0]),
                                 'application/json')
                    }
                else:
                    if self.api_local_directory:

                        full_local_filename = os.path.join(
                            self.api_local_directory,
                            completed['local_path_inside_files_store'])
                        # At this point, we could test if the file path exists locally.
                        # But we aren't going to: it's possible the file path is different on the machine running scrape
                        # and the machine running process. (eg a network share mounted in different dirs)
                        data['local_file_name'] = full_local_filename

                    else:
                        files = {
                            'file': (completed['file_name'],
                                     open(completed['local_path'],
                                          'rb'), 'application/json')
                        }

                response = requests.post(self.api_url + '/api/v1/submit/file/',
                                         data=data,
                                         files=files,
                                         headers=self.api_headers)

                if response.ok:
                    raise DropItem('Response from [{}] posted to API.'.format(
                        completed.get('url')))
                else:
                    spider.logger.warning(
                        'Failed to post [{}]. API status code: {}'.format(
                            completed.get('url'), response.status_code))
                if zipfile is not None:
                    zipfile.close()

            else:

                data = {
                    'collection_source': spider.name,
                    'collection_data_version': data_version,
                    'collection_sample': is_sample,
                    'file_name': completed['file_name'],
                    'url': completed['url'],
                    'errors': json.dumps([completed['error_message']]),
                }

                response = requests.post(self.api_url +
                                         '/api/v1/submit/file_errors/',
                                         data=data,
                                         headers=self.api_headers)
                if response.ok:
                    raise DropItem(
                        'Response from [{}] posted to File Errors API.'.format(
                            completed.get('url')))
                else:
                    spider.logger.warning(
                        'Failed to post [{}]. File Errors API status code: {}'.
                        format(completed.get('url'), response.status_code))
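
# For context, a pipeline like this is enabled through Scrapy's ITEM_PIPELINES
# setting; the module path below is a hypothetical example:
# settings.py
ITEM_PIPELINES = {
    'myproject.pipelines.ApiSubmitPipeline': 300,
}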
Example 54
def _compare_xlsx_files(got_file, exp_file, ignore_files, ignore_elements):
    # Compare two XLSX files by extracting the XML files from each
    # zip archive and comparing them.
    #
    # This is used to compare an "expected" file produced by Excel
    # with a "got" file produced by XlsxWriter.
    #
    # In order to compare the XLSX files we convert the data in each
    # XML file into an list of XML elements.
    try:
        # Open the XlsxWriter as a zip file for testing.
        got_zip = ZipFile(got_file, 'r')
    except IOError:
        # For Python 2.5+ compatibility.
        e = sys.exc_info()[1]
        error = "XlsxWriter file error: " + str(e)
        return error, ''
    except (BadZipfile, LargeZipFile):
        e = sys.exc_info()[1]
        error = "XlsxWriter zipfile error, '" + exp_file + "': " + str(e)
        return error, ''

    try:
        # Open the Excel as a zip file for testing.
        exp_zip = ZipFile(exp_file, 'r')
    except IOError:
        e = sys.exc_info()[1]
        error = "Excel file error: " + str(e)
        return error, ''
    except (BadZipfile, LargeZipFile):
        e = sys.exc_info()[1]
        error = "Excel zipfile error, '" + exp_file + "': " + str(e)
        return error, ''

    # Get the filenames from the zip files.
    got_files = sorted(got_zip.namelist())
    exp_files = sorted(exp_zip.namelist())

    # Ignore some test specific filenames.
    got_files = [name for name in got_files if name not in ignore_files]
    exp_files = [name for name in exp_files if name not in ignore_files]

    # Check that each XLSX container has the same files.
    if got_files != exp_files:
        return got_files, exp_files

    # Compare each file in the XLSX containers.
    for filename in exp_files:

        got_xml_str = got_zip.read(filename)
        exp_xml_str = exp_zip.read(filename)

        # Compare binary files with string comparison based on extension.
        extension = os.path.splitext(filename)[1]
        if extension in ('.png', '.jpeg', '.bmp', '.bin'):
            if got_xml_str != exp_xml_str:
                return 'got: %s' % filename, 'exp: %s' % filename
            continue

        if sys.version_info >= (3, 0, 0):
            got_xml_str = got_xml_str.decode('utf-8')
            exp_xml_str = exp_xml_str.decode('utf-8')

        # Remove dates and user specific data from the core.xml data.
        if filename == 'docProps/core.xml':
            exp_xml_str = re.sub(r' ?John', '', exp_xml_str)
            exp_xml_str = re.sub(r'\d\d\d\d-\d\d-\d\dT\d\d\:\d\d:\d\dZ',
                                 '', exp_xml_str)
            got_xml_str = re.sub(r'\d\d\d\d-\d\d-\d\dT\d\d\:\d\d:\d\dZ',
                                 '', got_xml_str)

        # Remove workbookView dimensions which are almost always different
        # and calcPr which can have different Excel version ids.
        if filename == 'xl/workbook.xml':
            exp_xml_str = re.sub(r'<workbookView[^>]*>',
                                 '<workbookView/>', exp_xml_str)
            got_xml_str = re.sub(r'<workbookView[^>]*>',
                                 '<workbookView/>', got_xml_str)
            exp_xml_str = re.sub(r'<calcPr[^>]*>',
                                 '<calcPr/>', exp_xml_str)
            got_xml_str = re.sub(r'<calcPr[^>]*>',
                                 '<calcPr/>', got_xml_str)

        # Remove printer specific settings from Worksheet pageSetup elements.
        if re.match(r'xl/worksheets/sheet\d.xml', filename):
            exp_xml_str = re.sub(r'horizontalDpi="200" ', '', exp_xml_str)
            exp_xml_str = re.sub(r'verticalDpi="200" ', '', exp_xml_str)
            exp_xml_str = re.sub(r'(<pageSetup[^>]*) r:id="rId1"',
                                 r'\1', exp_xml_str)

        # Remove Chart pageMargin dimensions which are almost always different.
        if re.match(r'xl/charts/chart\d.xml', filename):
            exp_xml_str = re.sub(r'<c:pageMargins[^>]*>',
                                 '<c:pageMargins/>', exp_xml_str)
            got_xml_str = re.sub(r'<c:pageMargins[^>]*>',
                                 '<c:pageMargins/>', got_xml_str)

        # Convert the XML string to lists for comparison.
        if re.search(r'\.vml$', filename):
            got_xml = _xml_to_list(got_xml_str)
            exp_xml = _vml_to_list(exp_xml_str)
        else:
            got_xml = _xml_to_list(got_xml_str)
            exp_xml = _xml_to_list(exp_xml_str)

        # Ignore test specific XML elements for defined filenames.
        if filename in ignore_elements:
            patterns = ignore_elements[filename]

            for pattern in patterns:
                exp_xml = [tag for tag in exp_xml if not re.match(pattern, tag)]
                got_xml = [tag for tag in got_xml if not re.match(pattern, tag)]

        # Reorder the XML elements in the XLSX relationship files.
        if filename == '[Content_Types].xml' or re.search(r'\.rels$', filename):
            got_xml = _sort_rel_file_data(got_xml)
            exp_xml = _sort_rel_file_data(exp_xml)

        # Compared the XML elements in each file.
        if got_xml != exp_xml:
            got_xml.insert(0, filename)
            exp_xml.insert(0, filename)
            return got_xml, exp_xml

    # If we got here the files are the same.
    return 'Ok', 'Ok'
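
# A minimal usage sketch of the comparison helper (file names and the ignored
# file are hypothetical):
got, exp = _compare_xlsx_files('got.xlsx', 'expected.xlsx',
                               ignore_files=['docProps/app.xml'],
                               ignore_elements={})
assert got == exp, 'XLSX files differ'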
Example 55
class BundleProjectStore(ProjectStore):
    """Represents a translate project bundle (zip archive)."""

    # INITIALIZERS #
    def __init__(self, fname):
        super().__init__()
        self._tempfiles = {}
        if fname and os.path.isfile(fname):
            self.load(fname)
        else:
            self.zip = ZipFile(fname, "w")
            self.save()
            self.zip.close()
            self.zip = ZipFile(fname, "a")

    # CLASS METHODS #
    @classmethod
    def from_project(cls, proj, fname=None):
        if fname is None:
            fname = "bundle.zip"

        bundle = BundleProjectStore(fname)
        for fn in proj.sourcefiles:
            bundle.append_sourcefile(proj.get_file(fn))
        for fn in proj.transfiles:
            bundle.append_transfile(proj.get_file(fn))
        for fn in proj.targetfiles:
            bundle.append_targetfile(proj.get_file(fn))
        bundle.settings = proj.settings.copy()
        bundle.save()
        return bundle

    # METHODS #
    def append_file(self, afile, fname, ftype="trans", delete_orig=False):
        """Append the given file to the project with the given filename, marked
        to be of type ``ftype`` ('src', 'trans', 'tgt').

        :param delete_orig: If ``True``, as set by
                            :meth:`~translate.storage.Project.convert_forward`,
                            ``afile`` is deleted after appending, if
                            possible.

        .. note:: For this implementation, the appended file will be deleted
                  from disk if ``delete_orig`` is ``True``.
        """
        if fname and fname in self.zip.namelist():
            raise ValueError("File already in bundle archive: %s" % (fname))
        if not fname and isinstance(afile,
                                    str) and afile in self.zip.namelist():
            raise ValueError("File already in bundle archive: %s" % (afile))

        afile, fname = super().append_file(afile, fname, ftype)
        self._zip_add(fname, afile)

        if delete_orig and hasattr(
                afile, "name") and afile.name not in self._tempfiles:
            try:
                os.unlink(afile.name)
            except Exception:
                pass

        return self.get_file(fname), fname

    def remove_file(self, fname, ftype=None):
        """Remove the file with the given project name from the project."""
        super().remove_file(fname, ftype)
        self._zip_delete([fname])
        tempfiles = [
            tmpf for tmpf, prjf in self._tempfiles.items() if prjf == fname
        ]
        if tempfiles:
            for tmpf in tempfiles:
                try:
                    os.unlink(tmpf)
                except Exception:
                    pass
                del self._tempfiles[tmpf]

    def close(self):
        super().close()
        self.cleanup()
        self.zip.close()

    def cleanup(self):
        """Clean up our mess: remove temporary files."""
        for tempfname in self._tempfiles:
            if os.path.isfile(tempfname):
                os.unlink(tempfname)
        self._tempfiles = {}

    def get_file(self, fname):
        """Retrieve a project file (source, translation or target file) from
        the project archive.
        """
        retfile = None
        if fname in self._files or fname in self.zip.namelist():
            # Check if the file has not already been extracted to a temp file
            tempfname = [
                tfn for tfn in self._tempfiles if self._tempfiles[tfn] == fname
            ]
            if tempfname and os.path.isfile(tempfname[0]):
                tempfname = tempfname[0]
            else:
                tempfname = ""
            if not tempfname:
                # Extract the file to a temporary file
                zfile = self.zip.open(fname)
                tempfname = os.path.split(fname)[-1]
                tempfd, tempfname = tempfile.mkstemp(suffix="_" + tempfname)
                os.close(tempfd)
                # Zip members are bytes, so write (and later reopen) in binary mode
                with open(tempfname, "wb") as tempf:
                    tempf.write(zfile.read())
            retfile = open(tempfname, "rb")
            self._tempfiles[tempfname] = fname

        if not retfile:
            raise FileNotInProjectError(fname)
        return retfile

    def get_proj_filename(self, realfname):
        """Try and find a project file name for the given real file name."""
        try:
            fname = super().get_proj_filename(realfname)
        except ValueError:
            fname = None
        if fname:
            return fname
        if realfname in self._tempfiles:
            return self._tempfiles[realfname]
        raise ValueError("Real file not in project store: %s" % (realfname))

    def load(self, zipname):
        """Load the bundle project from the zip file of the given name."""
        self.zip = ZipFile(zipname, mode="a")
        self._load_settings()

        append_section = {
            "sources": self._sourcefiles.append,
            "targets": self._targetfiles.append,
            "transfiles": self._transfiles.append,
        }
        for section in ("sources", "targets", "transfiles"):
            if section in self.settings:
                for fname in self.settings[section]:
                    append_section[section](fname)
                    self._files[fname] = None

    def save(self, filename=None):
        """Save all project files to the bundle zip file."""
        self._update_from_tempfiles()

        if filename:
            newzip = ZipFile(filename, "w")
        else:
            newzip = self._create_temp_zipfile()

        # Write project file for the new zip bundle
        newzip.writestr("project.xtp", self._generate_settings())
        # Copy project files from project to the new zip file
        project_files = self._sourcefiles + self._transfiles + self._targetfiles
        for fname in project_files:
            newzip.writestr(fname, self.get_file(fname).read())
        # Copy any extra (non-project) files from the current zip
        for fname in self.zip.namelist():
            if fname in project_files or fname == "project.xtp":
                continue
            newzip.writestr(fname, self.zip.read(fname))

        self._replace_project_zip(newzip)

    def update_file(self, pfname, infile):
        """Updates the file with the given project file name with the contents
        of ``infile``.

        :returns: the results from :meth:`BundleProjectStore.append_file`.
        """
        if pfname not in self._files:
            raise FileNotInProjectError(pfname)

        if pfname not in self.zip.namelist():
            return super().update_file(pfname, infile)

        self._zip_delete([pfname])
        self._zip_add(pfname, infile)

    def _load_settings(self):
        """Grab the project.xtp file from the zip file and load it."""
        if "project.xtp" not in self.zip.namelist():
            raise InvalidBundleError("Not a translate project bundle")
        super()._load_settings(self.zip.open("project.xtp").read())

    def _create_temp_zipfile(self):
        """Create a new zip file with a temporary file name (with mode 'w')."""
        newzipfd, newzipfname = tempfile.mkstemp(prefix="translate_bundle",
                                                 suffix=".zip")
        os.close(newzipfd)
        return ZipFile(newzipfname, "w")

    def _replace_project_zip(self, zfile):
        """Replace the currently used zip file (``self.zip``) with the given
        zip file. Basically, ``os.rename(zfile.filename,
        self.zip.filename)``.
        """
        if not zfile.fp.closed:
            zfile.close()
        if not self.zip.fp.closed:
            self.zip.close()
        shutil.move(zfile.filename, self.zip.filename)
        self.zip = ZipFile(self.zip.filename, mode="a")

    def _update_from_tempfiles(self):
        """Update project files from temporary files."""
        for tempfname in self._tempfiles:
            with open(tempfname, "rb") as tmp:
                self.update_file(self._tempfiles[tempfname], tmp)

    def _zip_add(self, pfname, infile):
        """Add the contents of ``infile`` to the zip with file name ``pfname``."""
        if hasattr(infile, "seek"):
            infile.seek(0)
        self.zip.writestr(pfname, infile.read())
        # Clear the cached file object to force the file to be read from the
        # zip file.
        self._files[pfname] = None

    def _zip_delete(self, fnames):
        """Delete the files with the given names from the zip file (``self.zip``)."""
        # Sanity checking
        if not isinstance(fnames, (list, tuple)):
            raise ValueError("fnames must be list or tuple: %s" % (fnames))
        if not self.zip:
            raise ValueError("No zip file to work on")
        zippedfiles = self.zip.namelist()
        for fn in fnames:
            if fn not in zippedfiles:
                raise KeyError("File not in zip archive: %s" % (fn))

        newzip = self._create_temp_zipfile()
        newzip.writestr("project.xtp", self._generate_settings())

        for fname in zippedfiles:
            # Copy all files from self.zip that are not project.xtp (already
            # in the new zip file) or in fnames (they are to be removed,
            # after all).
            if fname in fnames or fname == "project.xtp":
                continue
            newzip.writestr(fname, self.zip.read(fname))

        self._replace_project_zip(newzip)
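
# A minimal usage sketch for the class above; the bundle name and source file
# are hypothetical, and append_sourcefile comes from the ProjectStore base
# class, as used in from_project above:
#
#   bundle = BundleProjectStore("example_bundle.zip")
#   with open("messages.pot", "rb") as potfile:
#       bundle.append_sourcefile(potfile)
#   bundle.save()
#   bundle.close()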
Example n. 56
import re
from pathlib import Path
from zipfile import ZipFile

from easygui import fileopenbox  # fileopenbox is provided by the easygui package

# Ask the user to pick the exported Notion zip file
NotionZip = Path(fileopenbox())


# Load zip file
notionsData = ZipFile(NotionZip, 'r')

ObsidianPathRaw = []



# Generate a list of file paths for all zip content
NotionPathRaw = [line.rstrip() for line in notionsData.namelist()]



# Clean paths for the Obsidian destination: Notion appends a 32-character
# hex UID to page names, which this regex matches (note the raw string).
regexUID = re.compile(r"\s+\w{32}")

for line in NotionPathRaw:
    ObsidianPathRaw.append(regexUID.sub("", line))


### PATHS IN PROPER OS FORM BY PATHLIB ###
NotionPaths = [Path(line) for line in NotionPathRaw]
ObsidianPaths = [Path(line) for line in ObsidianPathRaw]
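
# A quick self-check of the UID-stripping regex above; the path and its
# 32-character hex suffix are made up for illustration:
sample = "My Notes 0a1b2c3d4e5f60718293a4b5c6d7e8f9/Page.md"
assert regexUID.sub("", sample) == "My Notes/Page.md"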

Example n. 57
import sys
import os
import pathlib
import shutil
from zipfile import ZipFile
import subprocess
import re
# Remove the modify-password ("modifyVerifier") protection from the .pptx
# file given as the first command-line argument.
encr_file = sys.argv[1]
new_file_inter = str(pathlib.Path(encr_file).stem)
new_file = new_file_inter + ".zip"
new_file_copy = new_file_inter + "_cp.zip"
os.rename(encr_file, new_file)
shutil.copy(new_file, new_file_copy)
archive = ZipFile(new_file_copy)
for file in archive.namelist():
    if file.startswith('ppt/'):
        archive.extract(file)
file_to_change = "ppt/presentation.xml"
# Strip the <p:modifyVerifier> element that enforces the edit password
with open(file_to_change, "r") as fh:
    s = fh.read()
new_s = re.sub(r"<p:modifyVerifier[^>]+>", '', s)
with open(file_to_change, "w") as fh:
    fh.write(new_s)
subprocess.call(["zip", "-r", new_file, file_to_change])
os.rename(new_file, encr_file)
os.remove(new_file_copy)
folder_to_remove = str(pathlib.Path(file_to_change).parent)
shutil.rmtree(folder_to_remove)
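
# Usage sketch: this relies on the external `zip` CLI invoked by the
# subprocess call above, so it is POSIX-only as written; the script name
# is hypothetical:
#
#   python remove_modify_verifier.py protected.pptx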
Example n. 58
                time.sleep(random.randint(1, 10))

        # read the zipped folder into memory
        buf = web_sock.read()
        GEEZippedGeotiff = io.BytesIO(buf)  # flush() would be a no-op on BytesIO

        # pretend we've got a .zip folder (it's just in memory instead of on disk) and read the tif inside
        zipdir = ZipFile(GEEZippedGeotiff)

        # Debug: unzip both files into a folder so I can look at the geotiff and world file
        #zipdir.extractall("DEM_from_GEE")

        # get the entry for the tif file from the zip (there's usually also world file in the zip folder)
        nl = zipdir.namelist()
        tifl = [f for f in nl if f.endswith(".tif")]
        assert tifl, "zip from ee didn't contain a tif: " + str(nl)

        # ETOPO will have bedrock and ice_surface tifs
        if DEM_name == "NOAA/NGDC/ETOPO1":
            # get the DEM tif that has the ice surface
            tif = [f for f in tifl if "ice_surface" in f][0]
        else:
            # for non-ETOPO datasets there's just one DEM tif in the list
            tif = tifl[0]

        # Debug: print out the data from the world file
        #worldfile = zipdir.read(zipdir.namelist()[0]) # world file as textfile
        #raster_info = [float(l) for l in worldfile.splitlines()]  # https://en.wikipedia.org/wiki/World_file
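
# A standalone sketch of parsing a world file like the one the commented-out
# debug code above refers to; the six values here are made up:
demo_worldfile = "30.0\n0.0\n0.0\n-30.0\n419985.0\n4527615.0\n"
x_size, x_rot, y_rot, y_size, x_origin, y_origin = (
    float(line) for line in demo_worldfile.splitlines())
# x_size/y_size are the pixel dimensions (y is negative for north-up rasters);
# the last two values are the map coordinates of the upper-left pixel centre.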
Example n. 59
def verify(certificate, jar_file, key_alias):
    """
    Verifies signature of a JAR file.

    Limitations:
    - diagnostics are less verbose than jarsigner's
    :return: None on success, or an error message string describing the failure

    Reference:
    http://docs.oracle.com/javase/7/docs/technotes/guides/jar/jar.html#Signature_Validation
    Note that the validation is done in three steps. Failure at any step is a
    failure of the whole validation.
    """

    from .crypto import verify_signature_block
    from tempfile import mkstemp
    import os

    zip_file = ZipFile(jar_file)
    sf_data = zip_file.read("META-INF/%s.SF" % key_alias)

    # Step 1: check the crypto part.
    sf_fd, sf_file = mkstemp()
    os.close(sf_fd)
    # zip_file.read() returns bytes, so the temp file must be binary
    with open(sf_file, "wb") as tmp_buf:
        tmp_buf.write(sf_data)

    file_list = zip_file.namelist()
    sig_block_filename = None
    # JAR specification mentions only RSA and DSA; jarsigner also has EC
    signature_extensions = ("RSA", "DSA", "EC")
    for extension in signature_extensions:
        candidate_filename = "META-INF/%s.%s" % (key_alias, extension)
        if candidate_filename in file_list:
            sig_block_filename = candidate_filename
            break
    if sig_block_filename is None:
        os.unlink(sf_file)
        return "None of %s found in JAR" % \
               ", ".join(key_alias + "." + x for x in signature_extensions)

    sig_block_data = zip_file.read(sig_block_filename)
    error = verify_signature_block(certificate, sf_file, sig_block_data)
    os.unlink(sf_file)
    if error is not None:
        return error

    # KEYALIAS.SF is correctly signed.
    # Step 2: Check that it contains correct checksum of the manifest.
    signature_manifest = SignatureManifest()
    signature_manifest.parse(sf_data)

    jar_manifest = Manifest()
    jar_manifest.parse(zip_file.read("META-INF/MANIFEST.MF"))

    error = signature_manifest.verify_manifest_checksums(jar_manifest)
    if error is not None:
        return error

    # Checksums of MANIFEST.MF itself are correct.
    # Step 3: Check that it contains valid checksums for each file from the JAR.
    error = jar_manifest.verify_jar_checksums(jar_file)
    if error is not None:
        return error

    return None
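
# A usage sketch, assuming this function sits in a package providing the
# crypto and manifest helpers it uses; all file names are hypothetical:
#
#   error = verify("signer-cert.pem", "app.jar", "SIGNKEY")
#   if error is None:
#       print("JAR signature OK")
#   else:
#       print("verification failed:", error)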
Example n. 60
def read_single_sheet(path, name=None):
    """ Read an xlsx, csv or tsv from a zipfile or directory
    """
    from zipfile import ZipFile
    from . import xlreader

    if name is None:
        root, ext = os.path.splitext(path)

        if ext == '.xlsx':
            # xlsx is a zip container, so it has to be opened in binary mode
            return read_xl(open(path, 'rb'))

        stream = open(path, 'r')

        if ext == '.tsv':
            return read_csv(stream, dialect='excel-tab')

        if ext == '.csv':
            return read_csv(stream)

        if ext == '.json':
            return read_json(stream)

        raise ValueError('Unknown file extension for %r' % path)

    if path.endswith('.xlsx'):
        return cast_row_values(xlreader.DictReader(open(path, 'rb'), sheetname=name))

    if path.endswith('.zip'):
        zf = ZipFile(path)
        names = zf.namelist()

        if (name + '.xlsx') in names:
            stream = zf.open(name + '.xlsx', 'r')
            return read_xl(stream)

        if (name + '.tsv') in names:
            stream = io.TextIOWrapper(zf.open(name + '.tsv'), encoding='utf-8')
            return read_csv(stream, dialect='excel-tab')

        if (name + '.csv') in names:
            stream = io.TextIOWrapper(zf.open(name + '.csv'), encoding='utf-8')
            return read_csv(stream)

        if (name + '.json') in names:
            stream = io.TextIOWrapper(zf.open(name + '.json'), encoding='utf-8')
            return read_json(stream)

    if os.path.isdir(path):
        root = os.path.join(path, name)

        if os.path.exists(root + '.xlsx'):
            stream = open(root + '.xlsx', 'rb')
            return read_xl(stream)

        if os.path.exists(root + '.tsv'):
            # 'rU' mode was removed in Python 3.11; newline='' suits the csv module
            stream = open(root + '.tsv', 'r', newline='')
            return read_csv(stream, dialect='excel-tab')

        if os.path.exists(root + '.csv'):
            stream = open(root + '.csv', 'r', newline='')
            return read_csv(stream)

        if os.path.exists(root + '.json'):
            stream = open(root + '.json', 'r')
            return read_json(stream)

    return []
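
# A usage sketch; the zip name and sheet name are hypothetical:
#
#   rows = read_single_sheet('submission.zip', name='experiments')
#   # looks for experiments.xlsx / .tsv / .csv / .json inside the zip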