Example #1
def zip_directory(directory, b64enc=True, src=True):
    """Compress a directory

    @param directory: The directory to compress
    @param b64enc: if True the function will encode the zip file with base64
    @param src: Integrate the source files

    @return: a string containing the zip file
    """

    RE_exclude = re.compile(
        r'(?:^\..+\.swp$)|(?:\.py[oc]$)|(?:\.bak$)|(?:\.~.~$)', re.I)

    def _zippy(archive, path, src=True):
        path = os.path.abspath(path)
        base = os.path.basename(path)
        for f in tools.osutil.listdir(path, True):
            bf = os.path.basename(f)
            if not RE_exclude.search(bf) and (src or bf in ('__openerp__.py',
                                                            '__terp__.py')
                                              or not bf.endswith('.py')):
                archive.write(os.path.join(path, f), os.path.join(base, f))

    archname = StringIO()
    archive = PyZipFile(archname, "w", ZIP_DEFLATED)

    # for Python 2.5, ZipFile.write() still expects 8-bit strings (2.6 converts to utf-8)
    directory = tools.ustr(directory).encode('utf-8')

    archive.writepy(directory)
    _zippy(archive, directory, src=src)
    archive.close()
    archive_data = archname.getvalue()
    archname.close()

    if b64enc:
        return base64.encodestring(archive_data)

    return archive_data
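
For reference, a minimal Python 3 sketch of the same idea (base64.encodestring was removed in Python 3.9; b64encode replaces it). The exclusion filtering from the example is omitted and the directory path is whatever package you want to pack:

# Minimal Python 3 sketch, assuming `directory` is an importable package or a
# plain directory of .py files; the example's exclusion filtering is omitted.
import base64
import io
from zipfile import PyZipFile, ZIP_DEFLATED

def zip_directory_sketch(directory: str, b64enc: bool = True) -> bytes:
    buf = io.BytesIO()
    with PyZipFile(buf, "w", ZIP_DEFLATED) as archive:
        archive.writepy(directory)  # compiles .py files and stores the .pyc files
    data = buf.getvalue()
    return base64.b64encode(data) if b64enc else data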
Example #2
	def load_special_tools(self,var,ban=[]):
		if os.path.isdir(waf_dir):
			lst=self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var)
			for x in lst:
				if not x.name in ban:
					load_tool(x.name.replace('.py',''))
		else:
			from zipfile import PyZipFile
			waflibs=PyZipFile(waf_dir)
			lst=waflibs.namelist()
			for x in lst:
				if not re.match('waflib/extras/%s'%var.replace('*','.*'),x):
					continue
				f=os.path.basename(x)
				doban=False
				for b in ban:
					r=b.replace('*','.*')
					if re.match(r,f):
						doban=True
				if not doban:
					f=f.replace('.py','')
					load_tool(f)
Example #3
def test_getinfo():
    with tempfile.TemporaryFile() as tf:
        with PyZipFile(tf, 'w') as zf:
            with zf.open('sample.txt', 'w') as out:
                out.write(b'P' * 0x10)
        tf.seek(0)

        zf = ZipFile(tf)
        stat = zf.getinfo(b'sample.txt')

        # We don't bother checking every field; we're just ensuring we can
        # get the results. We don't want to test miniz itself!
        assert stat['m_uncomp_size'] == 0x10
        assert stat['m_filename'] == b'sample.txt'

        # Ensure we get a KeyError if the file does not exist, which mimics
        # the behaviour of PyZipFile.
        with pytest.raises(KeyError) as exception:
            zf.getinfo(b'does_not_exist')

        # Ensure it's actually our error being raised.
        assert 'There is no item named' in str(exception.value)
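
The comments above refer to the stdlib behaviour being mimicked; here is a small standalone check of that behaviour (the test name is hypothetical):

# Standalone check of the stdlib behaviour referenced above: ZipFile.getinfo()
# returns a ZipInfo for existing members and raises KeyError otherwise.
import io

import pytest
from zipfile import ZipFile

def test_stdlib_getinfo_behaviour():
    buf = io.BytesIO()
    with ZipFile(buf, 'w') as zf:
        zf.writestr('sample.txt', b'P' * 0x10)
    buf.seek(0)
    with ZipFile(buf) as zf:
        assert zf.getinfo('sample.txt').file_size == 0x10
        with pytest.raises(KeyError):
            zf.getinfo('does_not_exist')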
Example #4
def decompile_zip(src_dir: str, filename: str, dst_dir: str) -> None:
    """
    Copies a zip file to a temporary folder, extracts it, and then decompiles it into the project's folder.
    Modified from andrew's code.
    https://sims4studio.com/thread/15145/started-python-scripting

    :param src_dir: Source directory for zip file
    :param filename: zip filename
    :param dst_dir: Destination for unzipped files
    :return: Nothing
    """

    # Create paths and directories
    file_stem = get_file_stem(filename)

    src_zip = os.path.join(src_dir, filename)
    dst_dir = os.path.join(dst_dir, file_stem)

    tmp_dir = tempfile.TemporaryDirectory()
    tmp_zip = os.path.join(tmp_dir.name, filename)

    # Copy zip to temp path
    shutil.copyfile(src_zip, tmp_zip)

    # Grab handle to zip file and extract all contents to the same folder
    zip = PyZipFile(tmp_zip)
    zip.extractall(tmp_dir.name)

    # Decompile the directory
    decompile_dir(tmp_dir.name, dst_dir, filename)

    # There's a temporary directory bug that causes auto-cleanup to sometimes fail
    # We're preventing crash messages from flooding the screen to keep things tidy
    try:
        tmp_dir.cleanup()
    except:
        pass
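
A hedged sketch of one way around the cleanup issue noted in the comment, assuming Python 3.10+ for ignore_cleanup_errors; the function name and paths are hypothetical:

# Hedged sketch (assumes Python 3.10+): the context-manager form cleans up on
# exit, and ignore_cleanup_errors=True swallows the occasional cleanup failure.
import shutil
import tempfile
from zipfile import PyZipFile

def extract_zip_to_temp(src_zip: str) -> None:
    with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmp:
        tmp_zip = shutil.copy(src_zip, tmp)       # copy the zip into the temp dir
        with PyZipFile(tmp_zip) as zf:
            zf.extractall(tmp)
        # ... decompile / inspect the extracted files here, before `tmp` goes away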
Example #5
def get_module_as_zip_from_module_directory(module_directory,
                                            b64enc=True,
                                            src=True):
    """Compress a module directory

    @param module_directory: The module directory
    @param b64enc: if True the function will encode the zip file with base64
    @param src: Integrate the source files

    @return: the zip file contents as a string (base64-encoded when b64enc is True)
    """

    RE_exclude = re.compile(
        r'(?:^\..+\.swp$)|(?:\.py[oc]$)|(?:\.bak$)|(?:\.~.~$)', re.I)

    def _zippy(archive, path, src=True):
        path = os.path.abspath(path)
        base = os.path.basename(path)
        for f in tools.osutil.listdir(path, True):
            bf = os.path.basename(f)
            if not RE_exclude.search(bf) and (src or bf in ('__openerp__.py',
                                                            '__terp__.py')
                                              or not bf.endswith('.py')):
                archive.write(os.path.join(path, f), os.path.join(base, f))

    archname = StringIO()
    archive = PyZipFile(archname, "w", ZIP_DEFLATED)
    archive.writepy(module_directory)
    _zippy(archive, module_directory, src=src)
    archive.close()
    val = archname.getvalue()
    archname.close()

    if b64enc:
        val = base64.encodestring(val)

    return val
Example #6
def is_changed(table, zip_file, r_name):
    # First just try checking the modification date of the file.
    hash_value = compute_hash(zip_file)
    last_hash_entry = retrieve_hash_by_name(table, r_name)

    if last_hash_entry is not None:
        print("last hash = {}".format(last_hash_entry['value']))
    else:
        notify_admins("A new election has been detected: {}".format(r_name))

    print("new hash  = {}".format(hash_value))

    zf = PyZipFile(zip_file)
    last_mod = datetime(*zf.getinfo("summary.csv").date_time)
    if last_hash_entry is None:  # No previous hash found
        return True, last_hash_entry, last_mod

    # Check database of hashes.
    if hash_value == last_hash_entry['value']:
        return False, last_hash_entry, last_mod

    try:
        last_mod = datetime(*zf.getinfo("summary.csv").date_time)
        prev_mod = last_hash_entry['last_modified']
        if prev_mod is not None and prev_mod != '':
            previous_modification_date = datetime.strptime(
                prev_mod, "%Y-%m-%d %H:%M")
            if last_mod <= previous_modification_date:
                return False, last_hash_entry, last_mod

    except:
        print(
            "Unable to compare the last hash entry's file modification date with the current file's last modification date."
        )
    return True, last_hash_entry, last_mod
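
The datetime(*zf.getinfo(...).date_time) trick above relies on ZipInfo.date_time being a (year, month, day, hour, minute, second) tuple; a tiny sketch of it in isolation (archive and member names are hypothetical):

# ZipInfo.date_time is a 6-tuple (year, month, day, hour, minute, second),
# so it unpacks straight into datetime(). Archive/member names are hypothetical.
from datetime import datetime
from zipfile import ZipFile

def member_mtime(zip_path: str, member: str = "summary.csv") -> datetime:
    with ZipFile(zip_path) as zf:
        return datetime(*zf.getinfo(member).date_time)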
Example #7
    def load_special_tools(self, var, ban=[]):
        r"""
        Loads third-party extensions modules for certain programming languages
        by trying to list certain files in the extras/ directory. This method
        is typically called once for a programming language group, see for
        example :py:mod:`waflib.Tools.compiler_c`

        :param var: glob expression, for example 'cxx\_\*.py'
        :type var: string
        :param ban: list of exact file names to exclude
        :type ban: list of string
        """
        if os.path.isdir(waf_dir):
            lst = self.root.find_node(waf_dir).find_node(
                "waflib/extras").ant_glob(var)
            for x in lst:
                if not x.name in ban:
                    load_tool(x.name.replace(".py", ""))
        else:
            from zipfile import PyZipFile

            waflibs = PyZipFile(waf_dir)
            lst = waflibs.namelist()
            for x in lst:
                if not re.match("waflib/extras/%s" % var.replace("*", ".*"),
                                x):
                    continue
                f = os.path.basename(x)
                doban = False
                for b in ban:
                    r = b.replace("*", ".*")
                    if re.match(r, f):
                        doban = True
                if not doban:
                    f = f.replace(".py", "")
                    load_tool(f)
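
A hedged sketch of the same scan expressed with fnmatch, which handles the glob-to-regex translation that the example does by hand (the archive path and pattern are hypothetical):

# Hedged sketch: list waflib/extras tools inside a packed archive whose member
# names match a glob such as 'cxx_*.py'. The archive path is hypothetical.
import fnmatch
import posixpath
from zipfile import PyZipFile

def matching_tool_names(zip_path: str, pattern: str = "cxx_*.py"):
    with PyZipFile(zip_path) as zf:
        for name in zf.namelist():
            if fnmatch.fnmatch(name, "waflib/extras/" + pattern):
                yield posixpath.basename(name)[:-len(".py")]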
Example #8
File: _zip.py  Project: zacker150/pytorch
    "curses",
    # Tcl/Tk GUI
    "tkinter",
    "tkinter",
    # Tests for the standard library
    "test",
    "tests",
    "idle_test",
    "__phello__.foo.py",
    # importlib frozen modules. These are already baked into CPython.
    "_bootstrap.py",
    "_bootstrap_external.py",
]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Zip py source")
    parser.add_argument("paths", nargs="*", help="Paths to zip.")
    parser.add_argument("--install_dir",
                        help="Root directory for all output files")
    parser.add_argument("--zip_name", help="Output zip name")
    args = parser.parse_args()

    zip_file_name = args.install_dir + '/' + args.zip_name
    zf = PyZipFile(zip_file_name, mode='w')

    for p in args.paths:
        path = Path(p)
        if path.name in DENY_LIST:
            continue
        zf.write(p)
Example #9
def main(schema, **kwparams):
    # Scrape location of zip file (and designation of the election):
    r = requests.get(
        "http://www.alleghenycounty.us/elections/election-results.aspx")
    tree = html.fromstring(r.content)
    #title_kodos = tree.xpath('//div[@class="custom-form-table"]/table/tbody/tr[1]/td[2]/a/@title')[0] # Xpath to find the title for the link
    # As the title is human-generated, it can differ from the actual text shown on the web page.
    # In one instance, the title was '2019 Primary', while the link text was '2019 General'.
    election_index = 1  # Manually increment this to re-pull older elections
    title_kodos = tree.xpath(
        '//div[@class="custom-form-table"]/table/tbody/tr[{}]/td[2]/a/text()'.
        format(election_index))[0]  # Xpath to find the text for the link
    ## to the MOST RECENT election (e.g., "2017 General Election").

    url = tree.xpath(
        '//div[@class="custom-form-table"]/table/tbody/tr[{}]/td[2]/a'.format(
            election_index))[0].attrib['href']
    # But this looks like this:
    #   'http://results.enr.clarityelections.com/PA/Allegheny/71801/Web02/#/'
    # so it still doesn't get us that other 6-digit number needed for the
    # full path, leaving us to scrape that too, and it turns out that
    # such scraping is necessary since the directory where the zipped CSV
    # files are found changes too.

    path = dname + "/tmp"
    # If this path doesn't exist, create it.
    if not os.path.exists(path):
        os.makedirs(path)

    # Worse than that, the page is server-side generated, so one must
    # use something like Selenium to find out what the download link is.
    from selenium import webdriver
    from selenium.common.exceptions import TimeoutException
    chrome_options = webdriver.ChromeOptions()
    prefs = {'download.default_directory': path}
    chrome_options.add_experimental_option('prefs', prefs)
    chromedriver_path = "/usr/local/bin/chromedriver"
    try:
        chrome_options.add_argument(
            "--headless")  # Enable headless mode to allow ETL job to
        chrome_options.add_argument(
            "--window-size=1920x1080")  # run when the screen is locked.
        driver = webdriver.Chrome(chromedriver_path,
                                  chrome_options=chrome_options)
    except:
        driver = webdriver.Chrome("/Users/drw/Apps/Internet/chromedriver",
                                  chrome_options=chrome_options)
        # This is just a different location to check for chromedriver. The path
        # could be moved to a local preferences file.

    driver.get(url)
    # At this point, it's not possible to get the link since
    # the page is generated and loaded too slowly.
    # "the webdriver will wait for a page to load by default. It does
    # not wait for loading inside frames or for ajax requests. It means
    # when you use .get('url'), your browser will wait until the page
    # is completely loaded and then go to the next command in the code.
    # But when you are posting an ajax request, webdriver does not wait
    # and it's your responsibility to wait an appropriate amount of time
    # for the page or a part of page to load; so there is a module named
    # expected_conditions."
    delay = 15  # seconds
    time.sleep(delay)

    download_class = "pl-2"
    download_entities = fetch_download_entities(driver, download_class)
    if len(download_entities) == 0:
        # Fall back to older download_class (2019 Primary election and earlier
        # [yes, the HTML can change from election to election]).
        download_class = "list-download-link"
        download_entities = fetch_download_entities(driver, download_class)

    if len(download_entities) == 0:
        send_to_slack(
            "countermeasures can no longer find the part of the DOM that contains the download links.",
            username='******',
            channel='@david',
            icon=':satellite_antenna:')
        driver.quit()
        raise RuntimeError(
            "Screen-scraping error. Nothing found in class {}.".format(
                download_class))

    summary_file_url = download_entities[0].get_attribute("href")

    # Download ZIP file
    #r = requests.get("http://results.enr.clarityelections.com/PA/Allegheny/63905/188108/reports/summary.zip") # 2016 General Election file URL
    #election_type = "Primary"
    #r = requests.get("http://results.enr.clarityelections.com/PA/Allegheny/68994/188052/reports/summary.zip") # 2017 Primary Election file URL

    election_type = "General"
    #path_for_current_results = "http://results.enr.clarityelections.com/PA/Allegheny/71801/189912/reports/"
    #summary_file_url = path_for_current_results + "summary.zip"
    r = requests.get(summary_file_url)  # 2017 General Election file URL
    # For now, this is hard-coded.
    #xml_file_url = path_for_current_results + "detailxml.zip"
    xml_index = 2  # Previously this was 3
    #xml_file_url = driver.find_elements_by_class_name(download_class)[xml_index].get_attribute("href")
    xml_file_url = download_entities[xml_index].get_attribute("href")
    found = True
    if re.search("xml", xml_file_url) is None:
        xml_index = 1
        found = False
        #list_download_links = driver.find_elements_by_class_name(download_class)
        while xml_index < len(download_entities) and not found:
            xml_file_url = download_entities[xml_index].get_attribute("href")
            found = re.search("xml", xml_file_url) is not None
            xml_index += 1

    driver.quit()

    print("xml_file_url = {}".format(xml_file_url))
    if not found:
        notify_admins(
            "Scraping Failure: Unable to find an XML file. Countermeasures terminated."
        )
        raise ValueError(
            "This ETL job is broken on account of scraping failure.")

    # Save result from requests to zip_file location.
    zip_file = dname + '/tmp/summary.zip'
    with open(zip_file, 'wb') as f:
        f.write(r.content)

    print("zip_file = {}".format(zip_file))
    today = datetime.now()

    # Make name of hash database dependent on the server
    # as a very clear way of differentiating test and production
    # datasets.
    server = kwparams.get('server', "test")
    db = dataset.connect('sqlite:///{}/hashes-{}.db'.format(dname, server))
    table = db['election']

    # with open(os.path.dirname(os.path.abspath(__file__))+'/ckan_settings.json') as f: # The path of this file needs to be specified.
    with open(ELECTION_RESULTS_SETTINGS_FILE) as f:
        settings = json.load(f)
    site = settings['loader'][server]['ckan_root_url']
    package_id = settings['loader'][server]['package_id']
    API_key = settings['loader'][server]['ckan_api_key']

    changed, last_hash_entry, last_modified = is_changed(
        table, zip_file, title_kodos)
    if not changed:
        print(
            "The Election Results summary file for {} seems to be unchanged.".
            format(title_kodos))
        return
    else:
        print(
            "The Election Results summary file for {} does not match a previous file."
            .format(title_kodos))
        election_type = None  # Change this to force a particular election_type to be used, but it's
        # basically irrelevant since r_name_kang is not being used.
        r_name_kang = build_resource_name(today, last_modified, election_type)
        #r_name_kodos = re.sub(" Results"," Election Results",title_kodos)
        # Sample names from titles of links:
        # Special Election for 35th Legislative District
        # 2017 General Results
        # Election Results: 2014 Primary
        # Election Results: 2014 General Election
        # 2012 Special 40th State Sen Results

        # Since there's so much variation in these names, maybe it's best just
        # to use them without modifying them and accept that the resource
        # names will vary a little. They can always be cleaned up after the election.
        r_name_kodos = title_kodos

        print("Inferred name = {}, while scraped name = {}".format(
            r_name_kang, r_name_kodos))

        r_chosen_name = r_name_kodos  # Using the scraped name seems better.

        if r_name_kang != r_name_kodos:
            resource_id = find_resource_id(site,
                                           package_id,
                                           r_chosen_name,
                                           API_key=API_key)
            if resource_id is None:
                send_to_slack(
                    "countermeasures has found two conflicting names for the resource: {} and {}. Neither can be found in the dataset. {} is being used as the default.\nThis is your reminder to move the new resources to the top of the list."
                    .format(r_name_kodos, r_name_kang, r_name_kodos),
                    username='******',
                    channel='@david',
                    icon=':satellite_antenna:')
                # The first time this notification fired, the Kodos name was "Special Election for 35th Legislative District" and the Kang name was "2018 General Election Results".
                # The second name was (incorrectly) used for storing the CSV file, while the first name was used for storing the zipped XML file.

    # Unzip the file
    filename = "summary.csv"
    zf = PyZipFile(zip_file).extract(filename, path=path)
    target = "{}/{}".format(path, filename)
    print("target = {}".format(target))
    specify_resource_by_name = True
    if specify_resource_by_name:
        kwargs = {'resource_name': r_chosen_name}
    #else:
    #kwargs = {'resource_id': ''}

    # Code below stolen from prime_ckan/*/open_a_channel() but really
    # from utility_belt/gadgets

    print(
        "Preparing to pipe data from {} to resource {} (package ID = {}) on {}"
        .format(target,
                list(kwargs.values())[0], package_id, site))
    time.sleep(1.0)

    pipeline = pl.Pipeline('election_results_pipeline',
                              'Pipeline for the County Election Results',
                              log_status=False,
                              settings_file=ELECTION_RESULTS_SETTINGS_FILE,
                              settings_from_file=True,
                              start_from_chunk=0
                              ) \
        .connect(pl.FileConnector, target, encoding='utf-8') \
        .extract(pl.CSVExtractor, firstline_headers=True) \
        .schema(schema) \
        .load(pl.CKANDatastoreLoader, server,
              fields=fields_to_publish,
              #package_id=package_id,
              #resource_id=resource_id,
              #resource_name=resource_name,
              key_fields=['line_number'],
              method='upsert',
              **kwargs).run()

    update_hash(db, table, zip_file, r_chosen_name, last_modified)

    # Also update the zipped XML file.

    r_xml = requests.get(xml_file_url)
    xml_file = dname + '/tmp/detailxml.zip'
    with open(xml_file, 'wb') as g:
        g.write(r_xml.content)

    xml_name = r_chosen_name + ' by Precinct (zipped XML file)'

    ckan = RemoteCKAN(site, apikey=API_key)
    resource_id = find_resource_id(site, package_id, xml_name, API_key=API_key)
    if resource_id is None:
        ckan.action.resource_create(
            package_id=package_id,
            url='dummy-value',  # ignored but required by CKAN<2.6
            name=xml_name,
            upload=open(xml_file, 'rb'))
    else:
        ckan.action.resource_update(
            package_id=package_id,
            url='dummy-value',  # ignored but required by CKAN<2.6
            id=resource_id,
            upload=open(xml_file, 'rb'))

    log = open(dname + '/uploaded.log', 'w+')
    if specify_resource_by_name:
        print("Piped data to {}".format(kwargs['resource_name']))
        log.write("Finished upserting {}\n".format(kwargs['resource_name']))
    else:
        print("Piped data to {}".format(kwargs['resource_id']))
        log.write("Finished upserting {}\n".format(kwargs['resource_id']))
    log.close()

    # Delete temp file after extraction.
    delete_temporary_file(zip_file)
    delete_temporary_file(path + '/' + filename)
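
The comment block before the fixed time.sleep(delay) above points at Selenium's expected_conditions; a hedged sketch of what an explicit wait for the download links could look like (the CSS class is the one the example scrapes, the timeout is an assumption):

# Hedged sketch: wait up to `timeout` seconds for the download links to appear
# instead of sleeping a fixed 15 seconds. Class name and timeout are assumptions.
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def wait_for_download_entities(driver, download_class="pl-2", timeout=15):
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_all_elements_located((By.CLASS_NAME, download_class)))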
Example #10
#!/usr/bin/env python
#
# Packs Python standard library into zip file python$(ver).zip
#

import os, os.path, shutil, sys
from zipfile import PyZipFile

name = "python%i%i.zip" % (sys.version_info[0], sys.version_info[1])
print "creating %s..." % name

# delete tests, we don't need them:
for root, dirs, files in os.walk("Lib", topdown=False):
    if "test" in dirs:
        shutil.rmtree(os.path.join(root, "test"))

# pack Lib to a zipfile:
zip = PyZipFile(name, mode="w")

for f in os.listdir("Lib"):
    fn = os.path.join("Lib", f)
    if os.path.isdir(fn) or fn.endswith(".py"):
        zip.writepy(fn)
    else:
        print "warning: ignoring file %s" % f

zip.close()
Example #11
def debug_install_egg(egg_path: str, mods_dir, dest_name: str,
                      mod_folder_name: str) -> None:
    """
    Copies the debug egg provided by Pycharm Pro which adds the capability to make debugging happen inside of
    PyCharm Pro. A bit of work goes into this so it'll be much slower.

    :param egg_path: Path to the debug egg
    :param mods_dir: Path to the mods folder
    :param dest_name: Name of the mod
    :param mod_folder_name: Name of mod Subfolder
    :return:
    """

    print("Re-packaging and installing the debugging capability mod...")
    # Get egg filename and path
    filename = Path(egg_path).name
    mods_sub_dir = os.path.join(mods_dir, mod_folder_name)
    mod_path = os.path.join(mods_sub_dir, dest_name + ".ts4script")

    ensure_path_created(mods_sub_dir)

    # Get python ctypes folder
    sys_ctypes_folder = os.path.join(get_sys_folder(), "Lib", "ctypes")

    # Create temp directory
    tmp_dir = tempfile.TemporaryDirectory()
    tmp_egg = tmp_dir.name + os.sep + filename

    # Remove old mod in mods folder there, if it exists
    remove_file(mod_path)

    # Copy egg to temp path
    shutil.copyfile(egg_path, tmp_egg)

    # Extract egg
    # This step is a bit redundant, but I need to copy everything except one folder into the zip file and I don't
    # know how to do that in Python, so I copy the zip, extract it, copy in the whole folder, delete the one
    # sub-folder, then re-zip everything. It's a pain, but it's what I know how to do for now, and Google's not much help.
    zip = PyZipFile(tmp_egg)
    zip.extractall(tmp_dir.name)
    zip.close()

    # Remove archive
    remove_file(tmp_egg)

    # Copy ctype folder to extracted archive
    shutil.copytree(sys_ctypes_folder, tmp_dir.name + os.sep + "ctypes")

    # Remove that one folder
    remove_dir(tmp_dir.name + os.sep + "ctypes" + os.sep + "__pycache__")

    # Grab a handle on the egg
    zf = PyZipFile(mod_path,
                   mode='w',
                   compression=ZIP_STORED,
                   allowZip64=True,
                   optimize=2)

    # Add all the files in the tmp directory to the zip file
    for folder, subs, files in os.walk(tmp_dir.name):
        for file in files:
            archive_path = get_rel_path(folder + os.sep + file, tmp_dir.name)
            zf.write(folder + os.sep + file, archive_path)

    zf.close()

    # There's a temporary directory bug that causes auto-cleanup to sometimes fail
    # We're preventing crash messages from flooding the screen to keep things tidy
    try:
        tmp_dir.cleanup()
    except:
        pass
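
The comment above describes extracting, pruning, and re-zipping because copying "everything but one folder" between archives wasn't obvious; a hedged sketch of doing that copy member by member, without extracting to disk (archive names and the skipped prefix are assumptions):

# Hedged sketch: copy every member of one archive into a new one, skipping a
# given prefix, without extracting to disk. Names and prefix are assumptions.
from zipfile import ZipFile, ZIP_STORED

def copy_zip_excluding(src_zip: str, dst_zip: str,
                       skip_prefix: str = "ctypes/__pycache__/") -> None:
    with ZipFile(src_zip) as src, ZipFile(dst_zip, "w", ZIP_STORED) as dst:
        for info in src.infolist():
            if info.filename.startswith(skip_prefix):
                continue
            dst.writestr(info, src.read(info.filename))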
Example #12
def compile_module(mod_creator_name=None,
                   root=None,
                   mod_scripts_folder=None,
                   mod_name=None,
                   ignore_folders=None,
                   include_folders=None):
    if not mod_creator_name:
        mod_creator_name = creator_name
    if not mod_name:
        mod_name = os.path.join(
            '..', '..',
            os.path.basename(
                os.path.normpath(os.path.dirname(
                    os.path.realpath('__file__')))))
        print('No mod name found, setting the path name to \'{}\'.'.format(
            mod_name))
    print('The current working directory {}.'.format(os.getcwd()))

    if mod_creator_name:
        print(
            'Mod creator name found, appending mod creator name to file name.')
        mod_name = '{}_{}'.format(mod_creator_name, mod_name)
    script_zip_name = '{}.ts4script'.format(mod_name)
    if not root:
        ts4script = script_zip_name
    else:
        ts4script = os.path.join(root, script_zip_name)

    try:
        if os.path.exists(ts4script):
            print('Script archive found, removing found archive.')
            os.remove(ts4script)
            print('Script archive removed.')
        zf = PyZipFile(ts4script, mode='w', allowZip64=True, optimize=2)
        child_directories = get_child_directories(mod_scripts_folder)
        previous_working_directory = os.getcwd()
        print('Changing the working directory to \'{}\''.format(
            mod_scripts_folder))
        os.chdir(mod_scripts_folder)
        print('Changed the current working directory \'{}\'.'.format(
            os.getcwd()))
        # print('Found child directories {}'.format(pformat(tuple(child_directories))))
        for folder in child_directories:
            # print('Attempting to compile {}'.format(folder))
            if ignore_folders is not None and os.path.basename(
                    folder) in ignore_folders:
                # print('Folder is set to be ignored. Continuing to the next folder.')
                continue
            if include_folders is not None and os.path.basename(
                    folder) not in include_folders:
                # print('Folder is not set to be included. Continuing to the next folder.')
                continue
            try:
                print('Compiling folder \'{}\''.format(folder))
                zf.writepy(folder)
                print('\'{}\' compiled successfully.'.format(folder))
            except Exception as ex:
                print('Failed to write {}. {}'.format(folder, ex))
                continue
        print('Done compiling files.')
        zf.close()
        print('Changing working directory to previous working directory.')
        os.chdir(previous_working_directory)
        print('Changed the current working directory to \'{}\''.format(
            os.getcwd()))
    except Exception as ex:
        print('Failed to create {}. {}'.format(ts4script, ex))
        return
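
Since this example and several others lean on PyZipFile.writepy(), a short reminder of what it actually stores (the folder names below are hypothetical):

# What writepy() stores: for a package directory (one with __init__.py) it adds
# compiled .pyc files for the package and its sub-packages; for a plain
# directory it compiles only the top-level *.py files, without recursing.
from zipfile import PyZipFile

with PyZipFile("example.ts4script", mode="w") as zf:
    zf.writepy("my_package")     # hypothetical package directory
    zf.writepy("loose_scripts")  # hypothetical plain directory of .py files
    print(zf.namelist())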
Example #13
    def test_build_with_single_file(self):
        builder = Builder('test.zip', function_filename='lambda_function.py', single_file=True)
        builder.build()
        with PyZipFile(builder._zippath, 'r', compression=ZIP_DEFLATED) as zipfile:
            ok_('lambda_function.py' in zipfile.namelist())
            ok_('.lamvery_secret.json' not in zipfile.namelist())
Example #14
def import_quiz(file):
    try:
        os.mkdir('tempdir')
    except:
        shutil.rmtree('tempdir')
        os.mkdir('tempdir')
    os.mkdir('tempdir/extracted')
    with open('tempdir/uploaded.zip', 'wb') as quiz_zip:
        for chunk in file.chunks():
            quiz_zip.write(chunk)
    arch = PyZipFile('tempdir/uploaded.zip')
    arch.extractall(path='tempdir/extracted')
    try:
        with open('tempdir/extracted/description.txt', 'r') as description:
            info = description.readlines()
            quiz = Quizzes.objects.get_or_create(
                url_name=info[0].strip(),
                name=info[1].strip(),
                description=info[2].strip(),
            )[0]
        with open('tempdir/extracted/test.txt', 'r') as test:
            # parse plaintext
            lines = test.readlines()
            tags = []
            tag = ''
            current_text = ''
            for line in lines:
                if tag and current_text:
                    tags.append((tag, current_text.rstrip('\n\r ')))
                if line.startswith("<%%"):
                    tag = line.partition('>')[0] + '>'
                    current_text = line.partition('>')[2]
                else:
                    current_text += line
            # if last line was without tag:
            if tag and current_text:
                tags.append((tag, current_text.rstrip('\n\r ')))
            # handling parsed data to custom classes
            questions = []
            for tag, value in tags:
                if tag == "<%%ID>":
                    questions.append(Question(value))
                elif tag == "<%%Q>":
                    questions[-1].text = value
                elif tag == "<%%QP>":
                    questions[-1].pic = value
                elif tag == "<%%AC>" or tag == "<%%A>":
                    questions[-1].answers.append(Answer())
                    questions[-1].answers[-1].text = value
                    questions[-1].answers[-1].correct = tag == "<%%AC>"
                elif tag == "<%%AP>":
                    questions[-1].answers[-1].pic = value
            # save to db
            for q in questions:
                question = Questions.objects.get_or_create(
                    quiz_id=quiz, question_tag=q.id_)[0]
                question.question_text = q.text
                if q.pic: save_pic(q.pic, question)
                question.save()
                for a in q.answers:
                    answer = Answers.objects.get_or_create(
                        question_id=question, answer_text=a.text)[0]
                    answer.answer_correct = a.correct
                    if a.pic: save_pic(a.pic, answer)
                    answer.save()
        quiz.calc_weight()
        quiz.save()
        return True
    except Exception as e:
        print(e, str(e))
Example #15
#!/usr/bin/env python3
import sys
from zipfile import PyZipFile

for zip_file in sys.argv[1:]:
    pzf = PyZipFile(zip_file)
    pzf.extractall()
Example #16
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from zipfile import PyZipFile

with PyZipFile("monasca-transform.zip", "w") as spark_submit_zipfile:
    spark_submit_zipfile.writepy("../monasca_transform")
Example #17
    def compile_mod(cls,
                    names_of_modules_include: Iterator[str],
                    folder_path_to_output_ts4script_to: str,
                    output_ts4script_name: str,
                    names_of_modules_to_exclude: str = None,
                    mod_creator_name: str = None,
                    folder_path_to_gather_script_modules_from: str = '..'):
        """compile_mod(\
            names_of_modules_include,\
            folder_path_to_output_ts4script_to,\
            output_ts4script_name,\
            names_of_modules_to_exclude=None,\
            mod_creator_name=None,\
            folder_path_to_gather_script_modules_from='..'\
        )

        Compile a mod using unpyc3.

        """
        os.makedirs(folder_path_to_output_ts4script_to, exist_ok=True)
        from compile_utils import _remove_files_conflicting_with_decompile, _replace_renamed_files
        _remove_files_conflicting_with_decompile(decompile_ea_scripts=False)
        names_of_modules_include = tuple(names_of_modules_include)
        if not mod_creator_name:
            mod_creator_name = creator_name
        if not output_ts4script_name:
            output_ts4script_name = os.path.join(
                '..', '..',
                os.path.basename(
                    os.path.normpath(
                        os.path.dirname(os.path.realpath('__file__')))))
            print('No mod name found, setting the path name to \'{}\'.'.format(
                output_ts4script_name))
        print(f'The current working directory {os.getcwd()}.')

        if mod_creator_name:
            print(
                'Mod creator name found, appending mod creator name to file name.'
            )
            output_ts4script_name = '{}_{}'.format(mod_creator_name,
                                                   output_ts4script_name)
        output_script_zip_name = '{}.ts4script'.format(output_ts4script_name)
        if not folder_path_to_output_ts4script_to:
            ts4script = output_script_zip_name
        else:
            ts4script = os.path.join(folder_path_to_output_ts4script_to,
                                     output_script_zip_name)

        # noinspection PyBroadException
        try:
            if os.path.exists(ts4script):
                print('Script archive found, removing found archive.')
                os.remove(ts4script)
                print('Script archive removed.')

            output_zip = PyZipFile(ts4script,
                                   mode='w',
                                   allowZip64=True,
                                   optimize=2)
            previous_working_directory = os.getcwd()

            if folder_path_to_gather_script_modules_from is not None:
                print(
                    f'Changing the working directory to \'{folder_path_to_gather_script_modules_from}\''
                )
                os.chdir(folder_path_to_gather_script_modules_from)
            else:
                folder_path_to_gather_script_modules_from = '..'
                os.chdir(folder_path_to_gather_script_modules_from)
            print(f'Changed the current working directory \'{os.getcwd()}\'.')
            # print('Found child directories {}'.format(pformat(tuple(child_directories))))
            for folder_path in cls._child_directories_gen(os.getcwd()):
                # print(f'Attempting to compile {folder_path}')
                if names_of_modules_to_exclude is not None and os.path.basename(
                        folder_path) in names_of_modules_to_exclude:
                    # print(f'Folder is set to be ignored {folder_path}. Continuing to the next folder.')
                    continue
                if names_of_modules_include is not None and os.path.basename(
                        folder_path) not in names_of_modules_include:
                    # print(f'Folder is not set to be included {folder_path}. Continuing to the next folder.')
                    continue
                # noinspection PyBroadException
                try:
                    print(f'Compiling folder \'{folder_path}\'')
                    output_zip.writepy(folder_path)
                    print(f'\'{folder_path}\' compiled successfully.')
                except Exception as ex:
                    print(f'Failed to write {folder_path}. {ex}')
                    traceback.print_exc()
                    continue

            print('Done compiling modules.')
            output_zip.close()
            print('Changing working directory to previous working directory.')
            os.chdir(previous_working_directory)
            print(
                f'Changed the current working directory to \'{os.getcwd()}\'')
        except Exception as ex:
            print(f'Failed to create {ts4script}. {ex}')
            return
        finally:
            _replace_renamed_files(decompile_ea_scripts=False)
Example #18
import sys
from zipfile import PyZipFile

pzf = PyZipFile('parser_psychology_06082018.zip')
pzf.extractall()
Example #19
    def setUp(self):
        tmp = tempfile.mkstemp(prefix=__name__)
        self.zipfile_path = tmp[1]
        self.zipfile = PyZipFile(self.zipfile_path, 'w')
        self.pj_root = os.path.abspath(
            os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
Example #20
from zipfile import PyZipFile, ZIP_STORED
from settings import *

root = os.path.dirname(os.path.realpath('__file__'))
mod_name = None

if __name__ == "__main__":
    mod_name = input(
        "Type the name of your mod and hit enter or just hit enter to skip naming: "
    )
    src = os.path.join(root, 'Scripts')
    if not mod_name:
        mod_name = os.path.basename(
            os.path.normpath(os.path.dirname(os.path.realpath('__file__'))))

    mod_name = creator_name + '_' + mod_name
    ts4script = os.path.join(root, mod_name + '.ts4script')

    ts4script_mods = os.path.join(mods_folder, mod_name + '.ts4script')

    zf = PyZipFile(ts4script,
                   mode='w',
                   compression=ZIP_STORED,
                   allowZip64=True,
                   optimize=2)
    for folder, subs, files in os.walk(src):
        zf.writepy(folder)
    zf.close()
    shutil.copyfile(ts4script, ts4script_mods)
Example #21
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE.  See the GNU Affero General Public License for more details.

Developers of this script will check out a complete copy of the GNU Affero
General Public License in the file COPYING.txt.  Users uncompressing this from
an archive may not have received this license file.  If not, see
<http://www.gnu.org/licenses/>.
"""

# This script creates the 'temoa.py' zip archive/executable using Python's
# PyZipFile interface.  It accepts no arguments.

import os

from cStringIO import StringIO
from zipfile import PyZipFile, ZIP_DEFLATED

temoa_pkg = StringIO()
temoa_pkg.write('#!/usr/bin/env coopr_python\n')
with PyZipFile(temoa_pkg, mode='w', compression=ZIP_DEFLATED) as zf:
    zf.debug = 3
    zf.writepy('./temoa_model/')

fname = 'temoa.py'
with open(fname, 'wb') as f:
    f.write(temoa_pkg.getvalue())

os.chmod(fname, 0755)
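
On Python 3 the same shebang-plus-zip technique is packaged in the stdlib as zipapp; a rough, hedged equivalent of the script above (it assumes temoa_model provides a __main__.py, otherwise pass main="pkg:func"):

# Hedged Python 3 equivalent using the stdlib zipapp module. Assumes the
# ./temoa_model/ package contains a __main__.py (otherwise pass main=...).
import zipapp

zipapp.create_archive(
    "./temoa_model/",
    target="temoa.pyz",
    interpreter="/usr/bin/env python3",
)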