def unpack(zipFull):
    """Extract the archive *zipFull* into TMP, delete the archive, and
    return the path of the corresponding ``.SAFE`` product directory.

    Parameters:
        zipFull: path to the downloaded ``.zip`` archive.

    Returns:
        The archive path with its extension replaced by ``.SAFE``.
        NOTE(review): assumes the archive's top-level entry is named
        ``<basename>.SAFE`` — confirm against the download source.
    """
    notify("Unpacking...", False)
    import zipfile
    # Use the documented ZipFile API rather than the undocumented CLI
    # entry point zipfile.main(["-e", ...]).
    with zipfile.ZipFile(zipFull) as archive:
        archive.extractall(TMP)
    os.remove(zipFull)
    result = os.path.splitext(zipFull)[0] + ".SAFE"
    notify(result)
    return result
def update_event(self, inp=-1):
    """Feed the node's first input to zipfile's CLI entry point and
    publish whatever it returns on output 0."""
    cli_args = self.input(0)
    outcome = zipfile.main(cli_args)
    self.set_output_val(0, outcome)
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Make a PasteTray zipfile."""

import os
import shutil
import tempfile
import zipfile

# Work from the project root (the parent of this script's directory).
here = os.path.dirname(os.path.abspath(__file__))
os.chdir(os.path.dirname(here))

# Drop byte-compiled caches so they never end up inside the archive.
for root, dirs, files in os.walk(os.path.curdir):
    for d in dirs:
        if d == '__pycache__':
            shutil.rmtree(os.path.join(root, d))

# Write the zip data directly after the shebang line instead of making
# a temporary zipfile and copying its content (resolves the old TODO;
# also avoids relying on re-opening a NamedTemporaryFile, which is not
# portable to Windows).
with open('pastetray.pyz', 'wb') as result:
    result.write(b'#!/usr/bin/env python3\n')
    with zipfile.ZipFile(result, 'w') as archive:
        for name in ('LICENSE', '__main__.py', 'README.md',
                     'WRITING_PASTEBINS.md'):
            archive.write(name)
        # Add the package directory recursively, like `zipfile.main -c`
        # used to do.
        for root, dirs, files in os.walk('pastetray'):
            for fname in files:
                archive.write(os.path.join(root, fname))
import requests, zipfile, subprocess, os

# Fetch, extract, and clean up the 2013 crime-incidents CSV archive.
update_data_file = 'http://data.octo.dc.gov/feeds/crime_incidents/archive/crime_incidents_2013_CSV.zip'
zip_filename = 'crime_incidents_2013_CSV.zip'

# Gather the file (reuse the URL constant instead of repeating it)
zip_request = requests.get(update_data_file, stream=True)

# Download the file and save locally
with open(zip_filename, 'wb') as zip_file:
    for chunk in zip_request.iter_content(chunk_size=1024):
        if chunk:
            zip_file.write(chunk)
            zip_file.flush()

# Unzip the locally saved file via the documented ZipFile API rather
# than the undocumented CLI entry point zipfile.main().
with zipfile.ZipFile(zip_filename) as archive:
    archive.extractall('data')

# Clean up downloaded file without spawning a shell
os.remove(zip_filename)
zip_filename = 'abra.zip' # Gather the file zip_request = requests.get( 'http://dcatlas.dcgis.dc.gov/catalog/download.asp?downloadID=969&downloadTYPE=ESRI', stream=True) # Download the file and save locally with open(zip_filename, 'wb') as zip_file: for chunk in zip_request.iter_content(chunk_size=1024): if chunk: zip_file.write(chunk) zip_file.flush() # Unzip the locally saved file zipfile.main(['-e', zip_filename, 'abra']) # Remove existing ABRA download (OGR fails otherwise) subprocess.call('rm data/abra.csv', shell=True) # Run OGR on the downloaded ESRI file subprocess.call('ogr2ogr -f "CSV" data/abra.csv abra', shell=True) # Cut out coordinates and convert to Lat/Long instead of Maryland State Plane NAD83 abra_read = csv.reader(open('data/abra.csv'), delimiter=',') abra_write = csv.writer(open('data/abra-nad83.csv', 'w'), delimiter=' ') for rec in abra_read: if rec[7] == 'X': continue else:
def make_zipfile(dir):
    """Archive directory *dir* and all its contents into ``<dir>.zip``.

    Parameters:
        dir: path of the directory to archive.  (Name kept — it shadows
        the builtin, but renaming would break keyword callers.)
    """
    print('writing zipfile')  # fixed typo: was 'xipfile'
    # Use the documented ZipFile API instead of the undocumented CLI
    # entry point zipfile.main(['-c', ...]), which also zipped the
    # directory recursively.
    with zipfile.ZipFile(dir + '.zip', 'w') as archive:
        for root, _dirs, files in os.walk(dir):
            archive.write(root)
            for name in files:
                archive.write(os.path.join(root, name))
# Fetch and extract the current DCRA building-permits CSV archive,
# then set up readers/writers for per-ANC and per-SMD permit counts.
zip_filename = 'dcra_building_permits_current_csv.zip'

# Gather the file
zip_request = requests.get(
    'http://data.octo.dc.gov/feeds/dcra_building_permits/dcra_building_permits_current_csv.zip',
    stream=True)

# Download the file and save locally
with open(zip_filename, 'wb') as zip_file:
    for chunk in zip_request.iter_content(chunk_size=1024):
        if chunk:
            zip_file.write(chunk)
            zip_file.flush()

# Unzip the locally saved file via the documented ZipFile API rather
# than the undocumented CLI entry point zipfile.main().
with zipfile.ZipFile(zip_filename) as archive:
    archive.extractall('data')

# Clean up downloaded file (argument-list form avoids an unnecessary
# shell; no shell string to mis-quote)
subprocess.call(['rm', zip_filename])

# Calculate permits in each ANC.  The writer/reader handles are used
# by the code that follows this chunk, so they are deliberately left
# open here rather than wrapped in `with`.
permits_read = csv.reader(open('data/dcra_building_permits_current_csv.csv'),
                          delimiter=',')
anc_permits_write = csv.writer(open('data/anc-building-permits.csv', 'w'),
                               delimiter=',')
smd_permits_write = csv.writer(open('data/smd-building-permits.csv', 'w'),
                               delimiter=',')

anc_permits = {}
smd_permits = {}
# Build a timestamped backup target path and run the external `zip`
# command over the configured sources.
if not os.path.exists(target_dir):
    os.mkdir(target_dir)

today = target_dir + os.sep + time.strftime('%Y%m%d')
now = time.strftime('%H%M%S')

comment = input('Enter a comment -->')
if len(comment) == 0:
    target = today + os.sep + now + '.zip'
else:
    # Embed the comment in the file name; spaces would break the
    # shell command below.
    target = today + os.sep + now + '_' + \
        comment.replace(" ", "_") + '.zip'

if not os.path.exists(today):
    os.mkdir(today)
    print('Successfully created directory', today)

zip_command = 'zip -r {0} {1}'.format(target, ' '.join(source))

print('Zip command is:')
print(zip_command)
print('Running:')
# Removed a stray no-argument zipfile.main() call here: with no args it
# parses sys.argv and just prints zipfile's CLI usage (or errors) in
# the middle of the backup flow.
if os.system(zip_command) == 0:
    print('Successful backup to', target)
else:
    print('Backup Failed')
import requests
import zipfile

# Fetch the 2013 crime-incidents CSV archive and extract it locally.
update_data_file = 'http://data.octo.dc.gov/feeds/crime_incidents/archive/crime_incidents_2013_CSV.zip'
zip_filename = 'crime_incidents_2013_CSV.zip'

# Gather the file (reuse the URL constant instead of repeating it)
zip_request = requests.get(update_data_file, stream=True)

# Download the file and save locally
with open(zip_filename, 'wb') as zip_file:
    for chunk in zip_request.iter_content(chunk_size=1024):
        if chunk:
            zip_file.write(chunk)
            zip_file.flush()

# Unzip the locally saved file via the documented ZipFile API rather
# than the undocumented CLI entry point zipfile.main().
with zipfile.ZipFile(zip_filename) as archive:
    archive.extractall('unzip')
update_data_file = 'http://dcatlas.dcgis.dc.gov/catalog/download.asp?downloadID=969&downloadTYPE=ESRI' zip_filename = 'abra.zip' # Gather the file zip_request = requests.get('http://dcatlas.dcgis.dc.gov/catalog/download.asp?downloadID=969&downloadTYPE=ESRI', stream=True) # Download the file and save locally with open(zip_filename, 'wb') as zip_file: for chunk in zip_request.iter_content(chunk_size=1024): if chunk: zip_file.write(chunk) zip_file.flush() # Unzip the locally saved file zipfile.main(['-e', zip_filename, 'abra']) # Remove existing ABRA download (OGR fails otherwise) subprocess.call('rm data/abra.csv', shell=True) # Run OGR on the downloaded ESRI file subprocess.call('ogr2ogr -f "CSV" data/abra.csv abra', shell=True) # Cut out coordinates and convert to Lat/Long instead of Maryland State Plane NAD83 abra_read = csv.reader(open('data/abra.csv'), delimiter=',') abra_write = csv.writer(open('data/abra-nad83.csv', 'w'), delimiter=' ') for rec in abra_read: if rec[7] == 'X': continue else: