Example #1
def importzip(conn, filename, zipfile):
    print 'Import ' + filename
    files = filelist(zipfile)
    cur = conn.cursor()
    meta = metadata(zipfile.read(files['OPERDAY']))
    if datetime.strptime(meta['ValidThru'].replace('-', ''),
                         '%Y%m%d') < (datetime.now() - timedelta(days=1)):
        return meta
    header = (zipfile.read(files['DEST']).split('\r\n')[0].split('|')[1]
              in versionheaders)
    encoding = encodingof(meta['DataOwnerCode'])
    for table in importorder:
        if table in files:
            f = zipfile.open(files[table])
            table = table + '_delta'
            if header:
                cur.copy_expert(
                    "COPY %s FROM STDIN WITH DELIMITER AS '|' NULL AS '' CSV HEADER ENCODING '%s'"
                    % (table, encoding), f)
            else:
                cur.copy_expert(
                    "COPY %s FROM STDIN WITH DELIMITER AS '|' NULL AS '' CSV ENCODING '%s'"
                    % (table, encoding), f)
    conn.commit()
    cur.close()
    return meta
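The example above streams pipe-delimited zip members straight into PostgreSQL with cursor.copy_expert (a psycopg2 API). A minimal sketch of that pattern follows; the archive name, member name and the dest_delta table are hypothetical placeholders.

# Minimal sketch: stream one zip member into PostgreSQL via COPY.
# 'kv1_export.zip', 'DEST.TMI', the 'transit' database and dest_delta are hypothetical.
import zipfile
import psycopg2

with zipfile.ZipFile('kv1_export.zip') as archive, \
        psycopg2.connect(dbname='transit') as conn:
    cur = conn.cursor()
    with archive.open('DEST.TMI') as member:
        cur.copy_expert(
            "COPY dest_delta FROM STDIN WITH DELIMITER AS '|' NULL AS '' CSV",
            member)
    conn.commit()
    cur.close()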
Example #2
def extract_zip(zipfile):
    """Функция распаковки zip архива
    Извлекает все файлы и папки архива в текущую папку.

    """
    name_list = zipfile.namelist()
    new_files_path = {}
    directory_tree = {}
    print("Получаю имена файлов и директорий...")
    for name in name_list:
        if name.split('/')[-1] != "":
            new_files_path.update({name.split('/')[-1]: name.split('/')})
        elif len(name.split('/')) > 2:
            directory_tree.update({name.split('/')[-2]: name.split('/')})
    print("Создаю дерево директорий...")
    for name, path in directory_tree.items():
        temp_path = ""
        for i in range(1, len(path)):
            temp_path += path[i] + "\\"
        if not os.path.exists(temp_path):
            os.makedirs(temp_path)
    print("Создаю файлы...")
    for name, path in new_files_path.items():
        temp_path = ""
        for i in range(1, len(path) - 1):
            temp_path += path[i] + "\\"
        temp_path += path[-1]
        with open(temp_path, "wb") as file:
            f_path_temp = ""
            for i in range(0, len(path) - 1):
                f_path_temp += path[i] + "/"
            f_path_temp += path[-1]
            file.write(zipfile.read(f_path_temp))
    print("Файлы успешно созданы")
Example #3
def extract_member(zipfile, member, dstdir):
    """Copied and adjusted from Python 2.6 stdlib zipfile.py module.

       Extract the ZipInfo object 'member' to a physical
       file on the path targetpath.
    """

    assert dstdir.endswith(os.path.sep), "/ missing at end"

    fn = member.filename
    if isinstance(fn, str):
        fn = unicode(fn, "utf-8")
    targetpath = os.path.normpath(os.path.join(dstdir, fn))

    if not targetpath.startswith(dstdir):
        raise RuntimeError("bad filename in zipfile %r" % (targetpath,))

    # Create all upper directories if necessary.
    if member.filename.endswith("/"):
        upperdirs = targetpath
    else:
        upperdirs = os.path.dirname(targetpath)

    if not os.path.isdir(upperdirs):
        os.makedirs(upperdirs)

    if not member.filename.endswith("/"):
        open(targetpath, "wb").write(zipfile.read(member.filename))
Example #4
def read_info_file(zipfile, path, section):
    """Return a dictionary matching the contents of the config file at
    path in zipfile"""
    cp = SafeConfigParser()
    info = StringIO(zipfile.read(path))
    cp.readfp(info)
    return dict(cp.items(section))
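A hedged usage sketch for read_info_file above; the archive name, member path and section name are made up for illustration, and SafeConfigParser/StringIO are assumed to be imported in the defining module.

from zipfile import ZipFile

archive = ZipFile('addon.zip')
print(read_info_file(archive, 'info/addon.cfg', 'general'))
archive.close()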
Example #5
    def test_requested_template_does_not_exists_raises_an_error(self):
        # Arrange
        self.fs.add_real_file(ALTERNATIVE_STANDARDS_PATH)
        self.fs.add_real_file(ALTERNATIVE_TEMPLATES_PATH)

        template_compiler = Mock()
        zipfile = mock_template_zip_file()

        httpretty.register_uri(
            httpretty.GET,
            "https://api.github.com/repos/QualiSystems/shellfoundry-tosca-networking-template/zipball/5.0.0",
            body=zipfile.read(),
            content_type='application/zip',
            content_disposition=
            "attachment; filename=quali-resource-test-dd2ba19.zip",
            stream=True)

        # Act
        with \
            patch.object(Standards, '_fetch_from_cloudshell', side_effect=FeatureUnavailable()), \
            patch.object(TempDirContext, '__enter__', return_value=self.fs.CreateDirectory('mock_temp').name):
            cmd = NewCommandExecutor(
                template_retriever=TemplateRetriever(),
                repository_downloader=RepositoryDownloader(),
                template_compiler=template_compiler,
                standards=Standards(),
                standard_versions=StandardVersionsFactory())
            # Assert
            output_msg = "Template gen2/doesnot/exists does not exist. Supported templates are: gen1/resource, " \
                         "gen1/resource-clean, gen1/deployed-app, gen1/networking/switch, gen1/networking/router," \
                         " gen1/pdu, gen1/firewall, gen1/compute, layer-1-switch, gen2/networking/switch, " \
                         "gen2/networking/router, gen2/networking/wireless-controller, gen2/compute, " \
                         "gen2/deployed-app, gen2/pdu, gen2/resource, gen2/firewall"
            self.assertRaisesRegexp(BadParameter, output_msg, cmd.new,
                                    'new_shell', 'gen2/doesnot/exists')
Example #6
def read_info_file(zipfile, path, section):
    """Return a dictionary matching the contents of the config file at
    path in zipfile"""
    cp = SafeConfigParser()
    info = StringIO(zipfile.read(path))
    cp.readfp(info)
    return dict(cp.items(section))
Example #7
 def save_extracted_file(self, zipfile, filename):
     "Extract the file to a temp directory for viewing"
     try:
         filebytes = zipfile.read(filename)
     except BadZipfile, err:
         print 'Error opening the zip file: %s' % (err)
         return False
Example #8
    def test_new_cmd_creates_gen2_in_latest_version_that_matches_the_standard_version_on_cs(
            self):
        # Arrange
        templates = """templates:
    - name : gen1/resource
      description : 1st generation shell template for basic inventory resources
      repository : https://github.com/QualiSystems/shell-resource-standard
      params:
        project_name :
      min_cs_ver: 7.0
    - name : gen2/networking/switch
      params:
        project_name :
        family_name: Switch
      description : 2nd generation shell template for a standard switch
      repository : https://github.com/QualiSystems/shellfoundry-tosca-networking-template
      min_cs_ver: 8.0"""

        template_compiler = Mock()

        standards = Mock()
        standards.fetch.return_value = [{
            'StandardName': "cloudshell_networking_standard",
            'Versions': ['5.0.0', '5.0.1']
        }]

        zipfile = mock_template_zip_file()

        httpretty.register_uri(httpretty.GET, TEMPLATES_YML, body=templates)
        httpretty.register_uri(
            httpretty.GET,
            "https://api.github.com/repos/QualiSystems/shellfoundry-tosca-networking-template/zipball/5.0.1",
            body=zipfile.read(),
            content_type='application/zip',
            content_disposition=
            "attachment; filename=quali-resource-test-dd2ba19.zip",
            stream=True)

        # Act
        with \
            patch.object(TemplateRetriever, '_get_templates_from_github', return_value=templates), \
            patch.object(TempDirContext, '__enter__', return_value=self.fs.CreateDirectory('mock_temp').name):
            cmd = NewCommandExecutor(
                template_retriever=TemplateRetriever(),
                repository_downloader=RepositoryDownloader(),
                template_compiler=template_compiler,
                standards=standards,
                standard_versions=StandardVersionsFactory())
            cmd.new('new_shell', 'gen2/networking/switch')

            # Assert
            template_compiler.compile_template.smarter_assert_called_once_with(
                CookiecutterTemplateCompiler.compile_template,
                shell_name='new_shell',
                template_path=os.path.join('mock_temp', 'root'),
                extra_context={
                    'family_name': "Switch",
                    "project_name": None
                },
                running_on_same_folder=False)
Example #9
def __raw_zip_data_to_str(data):
    fp = BytesIO(data)
    print(type(fp))
    import zipfile
    zipfile = zipfile.ZipFile(fp)
    name = zipfile.filelist[0].filename
    return zipfile.read(name)
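A hedged round-trip sketch for the helper above: build a zip archive in memory, then hand its raw bytes to __raw_zip_data_to_str (which assumes BytesIO is imported in its module); the member name and payload are illustrative.

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as archive:
    archive.writestr('hello.txt', b'hello from memory')

print(__raw_zip_data_to_str(buf.getvalue()))  # b'hello from memory'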
Example #10
	def __init__(self):
		
		#this is not a must dependency
		import dlib #19.20.0
	
		self.layers = [DlibMetaData()]
		
		#---------------------
		
		home = str(Path.home())
		weight_file = home+'/.deepface/weights/dlib_face_recognition_resnet_model_v1.dat'
		
		#---------------------
		
		#download pre-trained model if it does not exist
		if os.path.isfile(weight_file) != True:
			print("dlib_face_recognition_resnet_model_v1.dat is going to be downloaded")  
			
			url = "http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2"
			output = home+'/.deepface/weights/'+url.split("/")[-1]
			gdown.download(url, output, quiet=False)
			
			zipfile = bz2.BZ2File(output)
			data = zipfile.read()
			newfilepath = output[:-4] #discard .bz2 extension
			open(newfilepath, 'wb').write(data)
			
		#---------------------
		
		model = dlib.face_recognition_model_v1(weight_file)
		self.__model = model
		
		#---------------------
		
		return None #classes must return None
Example #11
def extract_member(zipfile, member, dstdir):
    """Copied and adjusted from Python 2.6 stdlib zipfile.py module.

       Extract the ZipInfo object 'member' to a physical
       file on the path targetpath.
    """

    assert dstdir.endswith(os.path.sep), "/ missing at end"

    fn = member.filename
    if isinstance(fn, str):
        fn = unicode(fn, 'utf-8')
    targetpath = os.path.normpath(os.path.join(dstdir, fn))

    if not targetpath.startswith(dstdir):
        raise RuntimeError("bad filename in zipfile %r" % (targetpath, ))

    # Create all upper directories if necessary.
    if member.filename.endswith("/"):
        upperdirs = targetpath
    else:
        upperdirs = os.path.dirname(targetpath)

    if not os.path.isdir(upperdirs):
        os.makedirs(upperdirs)

    if not member.filename.endswith("/"):
        open(targetpath, 'wb').write(zipfile.read(member.filename))
Example #12
    def test_integration_latest_version_is_default_when_version_was_not_specified(self, verification):
        # Arrange
        templates = {'tosca/resource/test': [ShellTemplate('test-resource', '', 'url', '8.1', 'resource')]}
        repo_info = ('quali', 'resource-test')

        zipfile = mock_template_zip_file()
        httpretty.register_uri(httpretty.GET, "https://api.github.com/repos/quali/resource-test/zipball/2.0.1",
                               body=zipfile.read(), content_type='application/zip',
                               content_disposition="attachment; filename=quali-resource-test-dd2ba19.zip", stream=True)
        template_compiler = Mock()

        standards = Mock()
        standards.fetch.return_value = {"resource": ['2.0.0', '2.0.1']}

        # Act
        with patch.object(TemplateRetriever, 'get_templates', return_value=templates), \
             patch('shellfoundry.utilities.template_url._parse_repo_url', return_value=repo_info), \
             patch.object(TempDirContext, '__enter__', return_value=self.fs.CreateDirectory('mock_temp').name):
            command_executor = NewCommandExecutor(template_retriever=TemplateRetriever(),
                                                  repository_downloader=RepositoryDownloader(),
                                                  template_compiler=template_compiler,
                                                  standards=standards,
                                                  standard_versions=StandardVersionsFactory())
            command_executor._get_template_params = Mock(return_value={})
            command_executor.new('new_shell', 'tosca/resource/test')

        # Assert
        template_compiler.compile_template.smarter_assert_called_once_with(
            CookiecutterTemplateCompiler.compile_template,
            shell_name='new_shell',
            template_path=os.path.join('mock_temp', 'root'),
            extra_context={},
            running_on_same_folder=False)
Example #13
def read_metadata(zipfile):
    for info in zipfile.infolist():
        basename = os.path.basename(info.filename)
        if basename == METADATA_FILENAME:
            meta_file = zipfile.read(info.filename)
            return kbjson.loads(meta_file)
    return {}
Example #14
 def save_extracted_file(self, zipfile, filename):
     "Extract the file to a temp directory for viewing"
     try:
         filebytes = zipfile.read(filename)
     except zipfile.BadZipfile, err:
         print 'Error opening the zip file: %s' % (err)
         return False
Example #15
    def zip_files_stock(self, cr, uid, context=None):
        ## Check that the folder exists and create it if it does not ##
        import pdb
        pdb.set_trace()
        self.cek_folder_stock(cr, uid, context)
        # Find the root files
        for root, dirs, files in os.walk('%s/stock' % self.homedir):
            for file in files:
                # Create the zip archive
                my_archive = make_archive(file, 'zip',
                                          "%s/stock" % self.homedir)
                _logger.info('CRON --> Zipping file %s done in root=%s' %
                             (file, self.homedir))

                zipfile = open('%s/stock.csv.zip' % self.homedir, 'r')
                attachment_pool = self.pool.get('ir.attachment')

                # Store in ir.attachment
                attachment_id = attachment_pool.create(
                    cr, uid, {
                        'name': "stock.csv.zip",
                        'datas': base64.encodestring(zipfile.read()),
                        'datas_fname': "stock.csv.zip",
                        'res_model': 'stock.move',
                    })
                # import pdb;pdb.set_trace()
                thread_pool = self.pool.get('mail.thread')

                # Find the partner id for the e-mail [email protected]
                partner_obj = self.pool.get('res.partner')
                partner_id_server = partner_obj.search(
                    cr, uid, [('name', '=', '*****@*****.**')])

                # Build a time string for the label
                t = datetime.datetime.now()
                date_str = t.strftime('%m/%d/%Y')
                subject = t.strftime('%m/%d/%Y %X')

                # Send the variables with message_post()
                # post_vars = {'subject': "Client move.csv.zip per %s" % subject,'body': "Ini adalah Pesan dari Clien per tanggal %s" % subject,'partner_ids': partner_id_server,'attachment_ids':[attachment_id],}
                post_vars = {
                    'subject': "stock.csv.zip",
                    'body':
                    "This is a message from the client dated %s" % subject,
                    'partner_ids': partner_id_server,
                    'attachment_ids': [attachment_id],
                }

                thread_pool.message_post(cr,
                                         uid,
                                         False,
                                         type="comment",
                                         subtype="mt_comment",
                                         context=context,
                                         **post_vars)
                _logger.info(
                    'CRON --> Sending stock.zip to [email protected] .. Done!! '
                )

                return attachment_id
Example #16
    def test_integration_can_generate_shell_from_specific_version(self):
        # Arrange
        templates = {
            'tosca/resource/test': ShellTemplate('test-resource', '', 'url',
                                                 '7.0')
        }
        repo_info = ('quali', 'resource-test')

        zipfile = mock_template_zip_file()
        httpretty.register_uri(
            httpretty.GET,
            "https://api.github.com/repos/quali/resource-test/zipball/1.1",
            body=zipfile.read(),
            content_type='application/zip',
            content_disposition=
            "attachment; filename=quali-resource-test-dd2ba19.zip",
            stream=True)
        template_compiler = Mock()

        # Act
        with patch.object(TemplateRetriever, 'get_templates', return_value=templates),\
            patch.object(RepositoryDownloader, '_parse_repo_url', return_value=repo_info),\
            patch.object(TempDirContext, '__enter__', return_value=self.fs.CreateDirectory('mock_temp').name):
            NewCommandExecutor(template_retriever=TemplateRetriever(),
                               repository_downloader=RepositoryDownloader(),
                               template_compiler=template_compiler)\
                .new('new_shell', 'tosca/resource/test', '1.1')

        # Assert
        template_compiler.compile_template.smarter_assert_called_once_with(
            CookiecutterTemplateCompiler.compile_template,
            shell_name='new_shell',
            template_path=os.path.join('mock_temp', 'root'),
            extra_context={},
            running_on_same_folder=False)
Example #17
    def _get_app_zip(self, abs_nb_path):
        '''
        Creates a zip file containing a dashboard application bundle.

        :param abs_nb_path:
        '''
        md = self._create_app_bundle(abs_nb_path)
        converter.add_cf_manifest(
            md['bundle_dir'],
            md['kernel_server'],
            md['notebook_basename'],
            md['tmpnb_mode']
        )
        converter.add_dockerfile(
            md['bundle_dir'],
            md['kernel_server'],
            md['tmpnb_mode']
        )
        # Make the zip Archive
        converter.to_zip(md['bundle_dir'], md['bundle_dir'])
        self.set_header('Content-Disposition', 'attachment; filename={}.zip'.format(md['notebook_basename']))
        self.set_header('Content-Type', 'application/zip')
        with open(md['bundle_dir'] + '.zip', 'rb') as zipfile:
            self.write(zipfile.read())
        self.flush()
        self.finish()
Example #18
def read_metadata(zipfile):
    for info in zipfile.infolist():
        basename = os.path.basename(info.filename)
        if basename == METADATA_FILENAME:
            meta_file = zipfile.read(info.filename)
            return kbjson.loads(meta_file)
    return {}
Example #19
def read_metadata(zipfile):
    """Reads and returns metadata from a backup zipfile."""
    for info in zipfile.infolist():
        basename = os.path.basename(info.filename)
        if basename == METADATA_FILENAME:
            meta_file = zipfile.read(info.filename)
            return kbjson.loads(meta_file)
    return {}
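A hedged usage sketch for read_metadata above; METADATA_FILENAME and kbjson come from the surrounding project, and 'backup.zip' plus the 'created_time' key are placeholders.

import zipfile

with zipfile.ZipFile('backup.zip') as archive:
    meta = read_metadata(archive)
    print(meta.get('created_time'))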
Example #20
def getzflo(zipfile, member_path):
    # GET a Zipfile File-Like Object for passing to
    # an XML parser
    try:
        return zipfile.open(member_path) # CPython 2.6 onwards
    except AttributeError:
        # old way
        return BYTES_IO(zipfile.read(member_path))
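A hedged usage sketch for getzflo above, feeding a zipped XML member to ElementTree (which accepts a file-like object); the .xlsx archive and member path are placeholders.

import zipfile
import xml.etree.ElementTree as ET

archive = zipfile.ZipFile('workbook.xlsx')
tree = ET.parse(getzflo(archive, 'xl/workbook.xml'))
print(tree.getroot().tag)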
Example #21
def read_metadata(zipfile):
    """Reads and returns metadata from a backup zipfile."""
    for info in zipfile.infolist():
        basename = os.path.basename(info.filename)
        if basename == METADATA_FILENAME:
            meta_file = zipfile.read(info.filename)
            return kbjson.loads(meta_file)
    return {}
Example #22
 def save_extracted_file(self, zipfile, filename):
     "Extract the file to a temp directory for viewing"
     filebytes = zipfile.read(filename)
     f = open("/tmp/" + filename, 'w')
     try:
         f.write(filebytes)
     finally:
         f.close()
Example #23
 def ziptodict(self,zipfile):
     contentdict ={}
     list = zipfile.namelist()
     for name in list:
         if not name[-1] == '/':
             contentdict[name]=zipfile.read(name)
             print name
     return contentdict
Example #24
 def save_extracted_file(self, zipfile, filename):
     "Extract the file to a temp directory for viewing"
     filebytes = zipfile.read(filename)
     f = open("/tmp/" + filename, 'w')
     try:
         f.write(filebytes)
     finally:
         f.close()
Example #25
def getzflo(zipfile, member_path):
    # GET a Zipfile File-Like Object for passing to
    # an XML parser
    try:
        return zipfile.open(member_path)  # CPython 2.6 onwards
    except AttributeError:
        # old way
        return BYTES_IO(zipfile.read(member_path))
Example #26
def find_merged_file(zipfile, part, zipfilem, prefix):
    print('find_merged_file')
    print('prefix:' + prefix)
    try:
        data = zipfile.read(part)
        result = hashlib.md5(data)
        rr = result.digest()
        print("The byte equivalent of hash is : ", end="")
        print('---')
        print(result.digest())
        print('---')
    except KeyError:
        print('ERROR: Did not find %s in zip file' % part)

    # try reading the file from the normal filename
    rrm = ""
    try:
        data = zipfilem.read(part)
        resultm = hashlib.md5(data)
        rrm = resultm.digest()
        print("The byte equivalent of hash is : ", end="")
        print(result.digest())
    except KeyError:
        print('ERROR: Did not find %s in zip file' % part)

    if (rr == rrm):
        print('matches normal file')
        return part

    try:
        data = zipfilem.read(prefix + '/' + part)
        resultm = hashlib.md5(data)
        rrm = resultm.digest()
        print("The byte equivalent of hash is : ", end="")
        print(result.digest())
    except KeyError:
        print('ERROR: Did not find %s in zip file' % part)

    if (rr == rrm):
        print('matches prefix')
        return prefix + '/' + part

    #
    #  HERE?
    #

    print('AJS HERE AJS HERE')
    for info in zipfilem.infolist():
        print(info)
        print(info.filename)
        data = zipfilem.read(info.filename)
        resultm = hashlib.md5(data)
        rrm = resultm.digest()
        if (rrm == rr):
            print('SUPERMATCH:' + info.filename)
            return info.filename

    return part
Example #27
    def verify(self, zipfile=None):
        """Configure the VerifyingZipFile `zipfile` by verifying its signature
        and setting expected hashes for every hash in RECORD.
        Caller must complete the verification process by completely reading
        every file in the archive (e.g. with extractall)."""
        sig = None
        if zipfile is None:
            zipfile = self.zipfile
        zipfile.strict = True

        record_name = '/'.join((self.distinfo_name, 'RECORD'))
        sig_name = '/'.join((self.distinfo_name, 'RECORD.jws'))
        # tolerate s/mime signatures:
        smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s'))
        zipfile.set_expected_hash(record_name, None)
        zipfile.set_expected_hash(sig_name, None)
        zipfile.set_expected_hash(smime_sig_name, None)
        record = zipfile.read(record_name)

        record_digest = urlsafe_b64encode(hashlib.sha256(record).digest())
        try:
            sig = from_json(native(zipfile.read(sig_name)))
        except KeyError:  # no signature
            pass
        if sig:
            headers, payload = signatures.verify(sig)
            if payload['hash'] != "sha256=" + native(record_digest):
                msg = "RECORD.jws claimed RECORD hash {} != computed hash {}."
                raise BadWheelFile(
                    msg.format(payload['hash'], native(record_digest)))

        reader = csv.reader((native(r, 'utf-8') for r in record.splitlines()))

        for row in reader:
            filename = row[0]
            hash = row[1]
            if not hash:
                if filename not in (record_name, sig_name):
                    print("%s has no hash!" % filename, file=sys.stderr)
                continue

            algo, data = row[1].split('=', 1)
            assert algo == "sha256", "Unsupported hash algorithm"
            zipfile.set_expected_hash(filename,
                                      urlsafe_b64decode(binary(data)))
Example #28
	def get_zip_data( self, zipfile ):
		#data = data.replace( b'\xb5', b'\xc2\xb5' )
		#data = data.replace( b'\xe9', b'\xc3\xa9' )
		data = zipfile.read( self.fileName )
		data = data.decode( encoding='ISO-8859-1' )
		#data = data.encode( 'UTF-8' ).decode( 'UTF-8' )
		#print( data )
		data = data.splitlines()
		return data
Example #29
    def verify(self, zipfile=None):
        """Configure the VerifyingZipFile `zipfile` by verifying its signature
        and setting expected hashes for every hash in RECORD.
        Caller must complete the verification process by completely reading
        every file in the archive (e.g. with extractall)."""
        sig = None
        if zipfile is None:
            zipfile = self.zipfile
        zipfile.strict = True

        record_name = '/'.join((self.distinfo_name, 'RECORD'))
        sig_name = '/'.join((self.distinfo_name, 'RECORD.jws'))
        # tolerate s/mime signatures:
        smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s'))
        zipfile.set_expected_hash(record_name, None)
        zipfile.set_expected_hash(sig_name, None)
        zipfile.set_expected_hash(smime_sig_name, None)
        record = zipfile.read(record_name)

        record_digest = urlsafe_b64encode(hashlib.sha256(record).digest())
        try:
            sig = from_json(native(zipfile.read(sig_name)))
        except KeyError:  # no signature
            pass
        if sig:
            headers, payload = signatures.verify(sig)
            if payload['hash'] != "sha256=" + native(record_digest):
                msg = "RECORD.jws claimed RECORD hash {} != computed hash {}."
                raise BadWheelFile(msg.format(payload['hash'],
                                              native(record_digest)))

        reader = csv.reader((native(r, 'utf-8') for r in record.splitlines()))

        for row in reader:
            filename = row[0]
            hash = row[1]
            if not hash:
                if filename not in (record_name, sig_name):
                    print("%s has no hash!" % filename, file=sys.stderr)
                continue

            algo, data = row[1].split('=', 1)
            assert algo == "sha256", "Unsupported hash algorithm"
            zipfile.set_expected_hash(filename, urlsafe_b64decode(binary(data)))
Example #30
 def load(self, srcpath):
     """Reads the metadata from an ebook file.
     """
     content_xml = self._load_metadata(srcpath)
     if content_xml is not None:
         book = self._load_ops_data(content_xml)
         if self._configuration['import']['hash']:
             with open(srcpath, 'rb') as zipfile:
                 book['_sha_hash'] = sha1(zipfile.read()).hexdigest()
         return book
Example #31
 def load(self, srcpath):
     """Reads the metadata from an ebook file.
     """
     content_xml = self._load_metadata(srcpath)
     if content_xml is not None:
         book = self._load_ops_data(content_xml)
         if self._configuration['import']['hash']:
             with open(srcpath, 'rb') as zipfile:
                 book['_sha_hash'] = sha1(zipfile.read()).hexdigest()
         return book
Example #32
def importzip(conn,filename,zipfile):
    print 'Import ' + filename
    files = filelist(zipfile)
    cur = conn.cursor()
    meta = metadata(zipfile.read(files['OPERDAY']))
    if datetime.strptime(meta['ValidThru'].replace('-',''),'%Y%m%d') < (datetime.now() - timedelta(days=1)):
        return meta
    header = (zipfile.read(files['DEST']).split('\n')[0].split('|')[1] in versionheaders)
    encoding = encodingof(meta['DataOwnerCode'])
    for table in importorder:
        if table in files:
            f = zipfile.open(files[table])
            table = table+'_delta'
            if header:
                cur.copy_expert("COPY %s FROM STDIN WITH DELIMITER AS '|' NULL AS '' CSV HEADER ENCODING '%s'" % (table,encoding),f)
            else:
                cur.copy_expert("COPY %s FROM STDIN WITH DELIMITER AS '|' NULL AS '' CSV ENCODING '%s'" % (table,encoding),f)
    conn.commit()
    cur.close()
    return meta
Example #33
 def save_extracted_file(self, zipfile, filename):
     "Extract the file to a temp directory for viewing"
     filebytes = zipfile.read(filename)
     outfn = self.make_new_filename(filename)
     if (outfn == ''):
         return False
     f = open(os.path.join(self.get_activity_root(), 'tmp',  outfn),  'w')
     try:
         f.write(filebytes)
     finally:
         f.close()
Example #34
def importzip(conn,zipfile):
    files = filelist(zipfile)
    cur = conn.cursor()
    if 'OPERDAY' in files:
        meta = metadata(zipfile.read(files['OPERDAY']))
    elif 'PUJO' in files:
        meta = metadata(zipfile.read(files['PUJO']))
    else:
        raise Exception('OPERDAY missing')
    header = (zipfile.read(files['DEST']).split('\r\n')[0].split('|')[1] in versionheaders)
    encoding = encodingof(meta['dataownercode'])
    del(meta['dataownercode'])
    for table in importorder:
        if table in files:
            f = zipfile.open(files[table])
            if header:
                cur.copy_expert("COPY %s FROM STDIN WITH DELIMITER AS '|' NULL AS '' CSV HEADER ENCODING '%s'" % (table,encoding),f)
            else:
                cur.copy_expert("COPY %s FROM STDIN WITH DELIMITER AS '|' NULL AS '' CSV ENCODING '%s'" % (table,encoding),f)
    cur.close()
    return meta
Example #35
def open(filename, mode='r', signaturetag='ipodder'): 
    "Open a file either from the update or the run directory."
    assert mode in ('r', 'rt', 'rb')
    updateinfo = loadedUpdates.get(signaturetag)
    if updateinfo is not None: 
        try: 
            zipfile = updateinfo['object']
            fileinfo = zipfile.getinfo(filename)
            return StringIO.StringIO(zipfile.read(filename))
        except KeyError: 
            pass
    return file(os.path.join(getrundir(), filename), mode)
Example #36
    def download_data(self, year, month):
        syear = str(year)
        smonth = str(month)
        if month < 10:
            smonth = "0" + str(month)
        base_url = "http://pogoda.by/zip/"
        citi_code = "34504"
        url = base_url + syear + "/" + citi_code + "_" + syear + "-" + smonth + ".zip"
        zip_name = self.zip_name(year, month)
        zipfile = urllib2.urlopen(url)

        with open(zip_name, 'w') as f:
            f.write(zipfile.read())
Example #37
    def download_data(self, year, month):
        syear = str(year)
        smonth = str(month)
        if month < 10:
            smonth = "0"+str(month)
        base_url = "http://pogoda.by/zip/"
        citi_code = "34504"
        url = base_url+syear+"/"+citi_code+"_"+syear+"-"+smonth+".zip"
        zip_name = self.zip_name(year, month)
        zipfile = urllib2.urlopen(url)

        with open(zip_name, 'w') as f: 
            f.write(zipfile.read())        
Example #38
def _extract_symlink(zipinfo: zipfile.ZipInfo,
                     pathto: str,
                     zipfile: zipfile.ZipFile,
                     nofixlinks: bool = False) -> str:
    """
    Extract: read the link path string, and make a new symlink.

    'zipinfo' is the link file's ZipInfo object stored in zipfile.
    'pathto'  is the extract's destination folder (relative or absolute)
    'zipfile' is the ZipFile object, which reads and parses the zip file.
    """
    assert zipinfo.external_attr >> 28 == SYMLINK_TYPE

    zippath = zipinfo.filename
    linkpath = zipfile.read(zippath)
    linkpath = linkpath.decode('utf8')

    # drop Win drive + unc, leading slashes, '.' and '..'
    zippath = os.path.splitdrive(zippath)[1]
    zippath = zippath.lstrip(os.sep)
    allparts = zippath.split(os.sep)
    okparts = [p for p in allparts if p not in ('.', '..')]
    zippath = os.sep.join(okparts)

    # where to store link now
    destpath = os.path.join(pathto, zippath)
    destpath = os.path.normpath(destpath)

    # make leading dirs if needed
    upperdirs = os.path.dirname(destpath)
    if upperdirs and not os.path.exists(upperdirs):
        os.makedirs(upperdirs)

    # adjust link separators for the local platform
    if not nofixlinks:
        linkpath = linkpath.replace('/', os.sep).replace('\\', os.sep)

    # test+remove link, not target
    if os.path.lexists(destpath):
        os.remove(destpath)

    # windows dir-link arg
    isdir = zipinfo.external_attr & SYMLINK_ISDIR
    if (isdir and sys.platform.startswith('win') and int(sys.version[0]) >= 3):
        dirarg = dict(target_is_directory=True)
    else:
        dirarg = {}

    # make the link in dest (mtime: caller)
    os.symlink(linkpath, destpath, **dirarg)
    return destpath
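A hedged driver sketch for _extract_symlink above: route symlink entries to the helper and everything else to ZipFile.extract. SYMLINK_TYPE mirrors the 0xA value implied by the assert (the S_IFLNK nibble stored in the high bits of external_attr); SYMLINK_ISDIR is assumed to be defined alongside the helper, and the paths are placeholders.

import zipfile

SYMLINK_TYPE = 0xA  # assumed value, matching the helper's assert

def extract_with_links(path, dest):
    # Extract an archive, recreating symlink entries instead of writing their
    # target path out as a regular file.
    with zipfile.ZipFile(path) as archive:
        for info in archive.infolist():
            if info.external_attr >> 28 == SYMLINK_TYPE:
                _extract_symlink(info, dest, archive)
            else:
                archive.extract(info, dest)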
Example #39
 def unzip(self, *path_parts, out=None):
     """Extracts the contents into memory or to a file
     :param *path_parts: any number of parts to be joined for the path
     :param out: The output path for extracting the contents. If set to None
         it will extract in memory
     """
     read_path = os.path.join(self.base_path, *path_parts)
     zip_file = self._get_zipfile(read_path)
     if out:
         zip_file.extractall(out)
     else:
         for name in zip_file.namelist():
             content = zip_file.read(name)
             yield content
Example #40
def get_original_crash_test_case_of_zipfile(crash_test_case,
                                            original_test_case):
    import zipfile
    zipfile = zipfile.ZipFile(io.BytesIO(original_test_case))
    max_similarity = 0
    for name in zipfile.namelist():
        possible_original_test_case = zipfile.read(name)
        similarity = SequenceMatcher(
            None, base64.b64encode(possible_original_test_case),
            base64.b64encode(crash_test_case)).ratio()
        if similarity > max_similarity:
            max_similarity = similarity
            original_test_case = possible_original_test_case
    return original_test_case
Example #41
def zip_extractall(zipfile, rootdir):
    """Python 2.4 compatibility instead of ZipFile.extractall."""
    for name in zipfile.namelist():
        if name.endswith('/'):
            if not os.path.exists(os.path.join(rootdir, name)):
                os.makedirs(os.path.join(rootdir, name))
        else:
            destfile = os.path.join(rootdir, name)
            destdir = os.path.dirname(destfile)
            if not os.path.isdir(destdir):
                os.makedirs(destdir)
            data = zipfile.read(name)
            f = open(destfile, 'w')
            f.write(data)
            f.close()
Example #42
def zip_extractall(zipfile, rootdir):
    """Python 2.4 compatibility instead of ZipFile.extractall."""
    for name in zipfile.namelist():
        if name.endswith('/'):
            if not os.path.exists(os.path.join(rootdir, name)):
                os.makedirs(os.path.join(rootdir, name))
        else:
            destfile = os.path.join(rootdir, name)
            destdir = os.path.dirname(destfile)
            if not os.path.isdir(destdir):
                os.makedirs(destdir)
            data = zipfile.read(name)
            f = open(destfile, 'w')
            f.write(data)
            f.close()
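A hedged usage sketch for zip_extractall above; note that opening the destination with 'wb' instead of 'w' would also keep binary members intact on Windows. The archive name and target directory are placeholders.

import zipfile

archive = zipfile.ZipFile('legacy.zip')
zip_extractall(archive, '/tmp/legacy_out')
archive.close()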
Example #43
def handle_docs(email, files):
    # Assume zipper always succeed
    docfiles = list_files_by_extension(files, "docx")
    print "List of document files", docfiles
    zipper(ZipFileName, docfiles)

    zipfile = open(ZipFileName, "rb")
    zip = MIMEBase("application", "zip", name=ZipFileName)
    zip.set_payload(zipfile.read())
    zipfile.close()

    encoders.encode_base64(zip)
    email.attach(zip)

    delete_files(docfiles)
Example #44
def _unzip(zipfile, path):
    """
        Python 2.5 doesn't have extractall()
    """
    isdir = os.path.isdir
    join = os.path.join
    norm = os.path.normpath
    split = os.path.split

    for each in zipfile.namelist():
        if not each.endswith('/'):
            root, name = split(each)
            directory = norm(join(path, root))
            if not isdir(directory):
                os.makedirs(directory)
            file(join(directory, name), 'wb').write(zipfile.read(each))
Example #45
    def _load_zip(self, zipfile):
        content = []
        for libitem in zipfile.namelist():
            data = zipfile.read(libitem).decode('utf-8')

            if libitem.startswith('__'):
                continue

            if libitem.endswith('csv'):
                content.extend(self._load_csv(libitem, data))
            elif libitem.endswith('json'):
                content.extend(self._load_json(libitem, data))
            else:
                continue

        return content
Example #46
    def _get_ipynb_with_files(self, abs_nb_path):
        '''
        Creates a zip file containing the ipynb and associated files.

        :param abs_nb_path: absolute path to the notebook file
        '''
        notebook_basename = os.path.basename(abs_nb_path)
        notebook_basename = os.path.splitext(notebook_basename)[0]

        # pick a tmp directory for the "bundle"
        bundle_id = generate_id()
        bundle_dir = os.path.join(self.tmp_dir,
            bundle_id,
            notebook_basename
        )
        zipfile_path = os.path.join(self.tmp_dir,
            bundle_id,
            notebook_basename
        )
        # Try up to three times to make the bundle directory
        for i in range(3):
            try:
                os.makedirs(bundle_dir)
            except OSError as exc:
                if exc.errno == errno.EEXIST:
                    pass
                else:
                    raise exc
            else:
                break
        else:
            raise RuntimeError('could not create bundle directory')

        referenced_files = converter.get_referenced_files(abs_nb_path, 4)
        # make the zip Archive, is there a more efficient way that copy+zip?
        converter.copylist(os.path.dirname(abs_nb_path), bundle_dir, referenced_files)
        shutil.copy2(abs_nb_path, os.path.join(bundle_dir, os.path.basename(abs_nb_path)))
        shutil.make_archive(zipfile_path, 'zip', bundle_dir)

        # send the archive
        self.set_header('Content-Disposition', 'attachment; filename={}.zip'.format(notebook_basename))
        self.set_header('Content-Type', 'application/zip')
        with open(zipfile_path + '.zip', 'rb') as zipfile:
            self.write(zipfile.read())
        self.flush()
        self.finish()
Example #47
    def read_sp_manifest_file(path):
        """Read a state point manifest file.

        Parameters
        ----------
        path : str
            Path to manifest file.

        Returns
        -------
        dict
            Parsed manifest contents.

        """
        # Must use forward slashes, not os.path.sep.
        fn_manifest = path + "/" + project.Job.FN_MANIFEST
        if fn_manifest in names:
            return json.loads(zipfile.read(fn_manifest).decode())
Example #48
def decompress_bz2(filepath):

    download_photo(filepath)

    try:
        # Open the BZ2 file
        zipfile = bz2.BZ2File(filepath)
        # Get the decompressed data
        data = zipfile.read()
        # Assuming the filepath ends with .bz2
        newfilepath = filepath[:-4]
        # Write an uncompressed file
        open(newfilepath, 'wb').write(data)

        return newfilepath

    except IOError as e:
        print "[!] Please verifiy you entered the correct cookie value.\n"
        print e
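A hedged streaming variant of the decompression above that avoids reading the whole archive into memory; 'photo.jpg.bz2' is a placeholder path.

import bz2
import shutil

with bz2.BZ2File('photo.jpg.bz2') as src, open('photo.jpg', 'wb') as dst:
    shutil.copyfileobj(src, dst)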
Example #49
def load(c, zipfile, fn):
    data=zipfile.read(fn).split('\r\n')
    i=0
    stats.setStatName(fn)
    for line in data:
        i=i+1
        datum=parseDatum(line)
        if isinstance(datum, TigerTypes.ParsedField):
            stats.start()
            c.execute(datum.toSql())
            stats.click()
            stats.stop()
            # os.write(1, ".")
            if i % 80 == 0:
                print stats.getStats()
        elif datum == None:
            pass
        else:
            print "WARNING:  " + type(datum).__name__ + " isn't a ParsedField"
    print ''
Example #50
	def zip_files_stock(self, cr, uid, context=None):
		## Check that the folder exists and create it if it does not ##
		import pdb;pdb.set_trace()
		self.cek_folder_stock(cr, uid, context)
		# Find the root files
		for root, dirs, files in os.walk('%s/stock' % self.homedir):
			for file in files:
				# Create the zip archive
				my_archive = make_archive(file,'zip',"%s/stock" % self.homedir)
				_logger.info('CRON --> Zipping file %s done in root=%s' % (file,self.homedir))

				zipfile = open('%s/stock.csv.zip' % self.homedir,'r')
				attachment_pool = self.pool.get('ir.attachment')

				# Store in ir.attachment
				attachment_id = attachment_pool.create(cr, uid, {
				  	'name': "stock.csv.zip",
            		'datas': base64.encodestring(zipfile.read()),
            		'datas_fname': "stock.csv.zip",
            		'res_model': 'stock.move',
            		})
				# import pdb;pdb.set_trace()
				thread_pool = self.pool.get('mail.thread')
				
				# Find the partner id for the e-mail [email protected]
				partner_obj = self.pool.get('res.partner')
				partner_id_server = partner_obj.search(cr,uid,[('name','=','*****@*****.**')])

				# Build a time string for the label
				t = datetime.datetime.now()
				date_str = t.strftime('%m/%d/%Y')
				subject = t.strftime('%m/%d/%Y %X')

				# Send the variables with message_post()
				# post_vars = {'subject': "Client move.csv.zip per %s" % subject,'body': "Ini adalah Pesan dari Clien per tanggal %s" % subject,'partner_ids': partner_id_server,'attachment_ids':[attachment_id],}
				post_vars = {'subject': "stock.csv.zip",'body': "This is a message from the client dated %s" % subject,'partner_ids': partner_id_server,'attachment_ids':[attachment_id],}

				thread_pool.message_post(cr, uid, False,type="comment",subtype="mt_comment",context=context,**post_vars)
				_logger.info('CRON --> Sending stock.zip to [email protected] .. Done!! ' )
				
				return attachment_id
Example #51
def extract_imsfile(filename, destination_path):

    #global manifest  #development
    global resdict, failed_files

    # dictionary to store <resource> information (location of files in zipfile)
    resdict = {}
    failed_files = []

    #
    # walk through xml tree "folder"
    #
    # RECURSIVE FUNCTION!
    #
    # folder = list of elementree items
    # path = pathlib Path object "the current path"
    #
    def do_folder(folder, path):

        #print "DEBUG: entering do_folder(). old path=", path
        title = removeDisallowedFilenameChars(unicode(folder[0].text))
        new_path = path / title # add subfolder to path
        if not new_path.exists():
            if (verbose):
                print 'creating directory: ', str(new_path)
            new_path.mkdir() # create directory
        else:
            if (verbose):
                print 'chdir into existing directory:', str(new_path)

        new_path.resolve() # change dir

        files = folder[1:]  # files is list of files and subfolders in this folder

        for f in files:
            # is this file a folder?
            # if it is the identifier contains '_folder_'
            id = f.get('identifier')

            if '_folder_' in id:                  # item is subfolder! branch into
                subfolder = f.getchildren()
                do_folder(subfolder,new_path)

            if '_folderfile_' in id:              # item is file. Extract
                # identifiers look like 'I_rYTieTdHa_folderfile_42508'
                # we only need the number
                idval = id.split('_folderfile_')[1]
                bestandsnaam = removeDisallowedFilenameChars(unicode(resdict[idval].split('/')[1]))
                if (verbose):
                    print 'extracting file: ',bestandsnaam
                extract_from_zip_and_write(resdict[idval], new_path, bestandsnaam)

            if '_weblink_' in id:              # item is weblink. Extract
                idval = id.split('_weblink_')[1]
                url = resdict[idval] # get url from resource dict

                title = f[0].text # get title from <items>

                bestandsnaam = removeDisallowedFilenameChars(unicode(title+'.url'))
                if (verbose):
                    print 'extracting weblink: ',bestandsnaam

                # .url file just a txt file with [Internet Shortcut]. Clickable in windows
                try:
                    doel = open(str(new_path / bestandsnaam), "wb")
                    doel.write('[InternetShortcut]\nURL=')
                    doel.write(url)
                    doel.write('\n')
                    doel.close()
                except IOError:
                    print "Cannot create:", str(new_path / bestandsnaam)
                    failed_files.append(str(new_path/ bestandsnaam))

            if '_note_' in id:              # item is note. Extract html contents
                idval = id.split('_note_')[1]

                title = f[0].text # get title from <items>
                bestandsnaam = removeDisallowedFilenameChars(unicode(title+'.html'))
                if (verbose):
                    print 'extracting note: ',bestandsnaam
                extract_from_zip_and_write(resdict[idval], new_path, bestandsnaam)

            if '_picture_' in id:              # item is image. Extract

                idval = id.split('_picture_')[1]
                bestandsnaam = resdict[idval][1].split('/')[1]
                folder_in_zip = resdict[idval][0].split('/')[0]

                if (verbose):
                    print 'extracting image: ',bestandsnaam

                # The correct imagefile is NOT in the <rescources> dict.
                #  Images are renamed and an .html container is used

                # get .html and recover imagefilename (sigh!)
                htmlfile = zipfile.open(resdict[idval][0])
                lines = htmlfile.readlines()

                for line in lines:
                    x = line.find('src=')
                    if (x != -1):
                        imagefilename = line[x:x+20].split('\'')[1]
                        print "reconstructed imagefilename (in zip): ", imagefilename

                bestandsnaam_in_zip = folder_in_zip + '/' + imagefilename

                extract_from_zip_and_write(bestandsnaam_in_zip, new_path, bestandsnaam)

        #
        # END OF local function: do_folder()
        #

    #
    # START
    #
    global zipfile # zipfile is used in do_folder()

    try:
        with zipfile.ZipFile(filename,'r') as zipfile:

            # Zoek het manifest en lees de XML tree
            try:
                manifest = zipfile.read('imsmanifest.xml')
            except KeyError:
                print 'imsmanifest.xml not found in zip. Bad export?'
                return False

            root = ElementTree.fromstring(manifest)

            # the following code was inspired by:
            #    http://trac.lliurex.net/pandora/browser/simple-scorm-player
            # the xml tags are prefixed with {http://www.w3... blah}
            # fetch that first:
            namespace = root.tag[1:].split("}")[0] #extract namespace from xml file
            #
            # Maak lijsten van XML items. Gebruikt voor development
            # Alleen resources (<resources>) is nodig in de rest van de code
            #
            org = root.findall(".//{%s}organisations" % namespace) # for development
            items = root.findall(".//{%s}item" % namespace) # for development
            resources = root.findall(".//{%s}resource" % namespace)

            #
            # Build a dict with all <resource> entries (files)
            #
            # resdict is global
            for r in resources:
                # identifiers look like 'R_rYTieTdHa_folderfile_42508'
                # we only need the last number
                if '_folderfile_' in r.get('identifier'):
                    resdict[r.get('identifier').split('_folderfile_')[1]] = r.get('href')
                if '_weblink_' in r.get('identifier'):
                    resdict[r.get('identifier').split('_weblink_')[1]] = r.get('href')
                if '_note_' in r.get('identifier'):
                    resdict[r.get('identifier').split('_note_')[1]] = r.get('href')
                if '_picture_' in r.get('identifier'):
                    # _picture_ has two items. [0] = html container [1] = actual imagefile
                    # as the actual imagefilename is *not* the archivefilename, we use the html to recover filename
                    resdict[r.get('identifier').split('_picture_')[1]] = [r[0].get('href') , r[1].get('href')]
            #
            # Walk the XML tree so that we arrive at the starting point of the <items>
            #
            # voodoo:
            organisations = root.getchildren()[0]
            main = organisations.getchildren()[0]
            rootfolder = main.getchildren()

            destpath = Path(destination_path) # high level Path object (windows/posix/osx)

            # rootfolder is a list[] of items
            # loop through it (recursively). Create (sub)folders and extract files
            do_folder(rootfolder, destpath)

            if len(failed_files)==0:
                print "Klaar: Alle bestanden uitgepakt!"
                return True
            else:
                print "\n\n ERRORS:"
                for file in failed_files:
                    print "mislukt: ", file
                return False

    except IOError:
        print('IOError: File not found?')
Example #52
else:
    allowed_extensions = ['.sdf', '.smi', '.inchi']

for url in urls.split('\n'):
    url = url.strip()
    request = urllib2.Request( url )
    request.add_header('Accept-encoding', 'gzip')
    request.add_header('Accept-encoding', 'gz')
    response = urllib2.urlopen( request )

    if response.info().get('Content-Encoding') in ['gz','gzip'] or os.path.splitext(url)[-1] in ['.gz','.gzip']:
        temp = tempfile.NamedTemporaryFile( delete=False )
        temp.write( response.read() )
        temp.close()
        zipfile = gzip.open(temp.name, 'rb')
        out.write( zipfile.read() )
        os.remove(temp.name)
    elif response.info().get('Content-Encoding') in ['zip'] or os.path.splitext(url)[-1] in ['.zip']:
        temp = tempfile.NamedTemporaryFile(delete=False)
        temp.close()
        with open(temp.name, 'wb') as fp:
            shutil.copyfileobj(response, fp)

        zf = zipfile.ZipFile(temp.name, allowZip64=True)
        tmpdir = tempfile.mkdtemp( )

        for filename in zf.namelist():
            zf.extractall( tmpdir )

        os.remove( temp.name )
        molfiles = []
Example #53
def readDataFromeArchieve(zipfile, filename):
    filenameDetails = filename.split("/")
    bucket = filenameDetails[-1].split('.zip')[0]
    urls = filename.split("/")
    urlId = urls[-1].split('_')[0]
    yield bucket, urlId, zipfile.read(filename)
Example #54
s = sys.argv[2]
url = 'http://www.congreso.es/votaciones/OpenData?sesion=%s&completa=1&legislatura=%s' % (s, l)
zipname = 'l%ss%s.zip' % (l, s)
os.system('wget -c "%s" -O %s' % (url, zipname))

legislatura = u''
if l == '10':
    legislatura = u'X Legislatura'
else:
    print 'Error: unsupported legislatura'
    sys.exit()

votacionesids = []
zipfile = zipfile.ZipFile(zipname)
for zipp in zipfile.namelist():
    xmlraw = unicode(zipfile.read(zipp), 'ISO-8859-1')
    #print xmlraw
   
    sesion = re.findall(ur"(?im)<sesion>(\d+)</sesion>", xmlraw)[0]
    if sesion != s:
        print 'Error: the session numbers do not match'
        sys.exit()
    
    numerovotacion = re.findall(ur"(?im)<numerovotacion>(\d+)</numerovotacion>", xmlraw)[0]
    votacionesids.append(int(numerovotacion))
    fecha = re.findall(ur"(?im)<fecha>([^<]+)</fecha>", xmlraw)[0]
    fecha = u'%s-%s-%s' % (fecha.split('/')[2], '%02d' % (int(fecha.split('/')[1])), '%02d' % (int(fecha.split('/')[0])))
    titulo = re.search(ur"(?im)<titulo>", xmlraw) and re.findall(ur"(?im)<titulo>([^<]+)</titulo>", xmlraw)[0] or u''
    textoexp = re.search(ur"(?im)<textoexpediente>", xmlraw) and re.findall(ur"(?im)<textoexpediente>([^<]+)</textoexpediente>", xmlraw)[0] or u''
    titulosub = re.search(ur"(?im)<titulosubgrupo>", xmlraw) and re.findall(ur"(?im)<titulosubgrupo>([^<]+)</titulosubgrupo>", xmlraw)[0] or u''
    textosub = re.search(ur"(?im)<textosubgrupo>", xmlraw) and re.findall(ur"(?im)<textosubgrupo>([^<]+)</textosubgrupo>", xmlraw)[0] or u''
Example #55
import zipfile
import re

seed = "90052"
zipfile = zipfile.ZipFile("D:\channel.zip", mode='r')
comments=""

while True:
    file_like = zipfile.read(seed + ".txt")
    comments+=zipfile.getinfo(seed + ".txt").comment.decode()
    seed = "".join(re.findall('([0-9]{1,10})', str(file_like)))
    if seed == '':
        print(file_like)
        break
print(comments)
#print("".join(comments))
#     else:
#         pass
#         #print(seed)
#file_like.read()
Example #56
def smartExtractFromZip(zipfile, fn, destdir):
    shortfn = fn[fn.rfind('/')+1:]
    data = zipfile.read( fn )
    file(destdir+"/"+shortfn,'w').write(data)
Example #57
f2.close()
# Set it to be viewed "inline"
img1.add_header('Content-Disposition', 'inline', filename='Odin Jobseeker.png')
img2.add_header('Content-Disposition', 'inline', filename='Thor Job Hunter.png')
msg.attach(img1)
msg.attach(img2)
#
def zipper(zipname, files):
    z = zipfile.ZipFile(zipname, 'w')
    for f in files:
        z.write(f)
    z.close()

zipper("cv.zip", cvs)
zipfile = open("cv.zip","rb")
zip = MIMEBase('application', 'zip', name="cv.zip")
zip.set_payload(zipfile.read())
zipfile.close()
#
encoders.encode_base64(zip)
msg.attach(zip)
# File deletion
def delete_files(files):
    for f in files:
        os.remove(f)

files = [cvs, images]
delete_files(files)
# Send the message
s = smtplib.SMTP("blast.sit.rp.sg")
s.sendmail(sender, recipient, msg.as_string())
Example #58
def md5_from_zipped_file(zipfile, filename):   
    hasher = hashlib.md5()
    hasher.update(zipfile.read(filename))
    return hasher.hexdigest()
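A hedged usage sketch for md5_from_zipped_file above, hashing every member of an archive; the archive name is a placeholder and hashlib is assumed to be imported in the defining module.

import zipfile

with zipfile.ZipFile('release.zip') as archive:
    for name in archive.namelist():
        print(name, md5_from_zipped_file(archive, name))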