def download_and_extract(path, dst):
    """Download an archive and extract it into directory *dst*.

    :param path: source location understood by the project ``download`` helper
    :param dst: destination directory, created if missing
    """
    import zipfile

    filepath = download(path)  # project helper; returns a local file path
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(dst, exist_ok=True)
    # Distinct name for the archive object so the ``zipfile`` module is not
    # shadowed (the original used ``as zipfile``).
    with zipfile.ZipFile(filepath, 'r') as archive:
        archive.extractall(dst)
def _download_and_extract_model_zip(self, url, file_name, force=False):
    """Download (or reuse) a model zip and extract it into self.disk_path.

    :param url: remote zip URL, or a local zip path when file_name is None
    :param file_name: local marker file; when it already exists the method
        returns early (removing it first when *force* is true)
    :param force: remove an existing *file_name* before returning
    """
    if file_name:
        if os.path.exists(file_name):
            if force:
                os.remove(file_name)
            # NOTE(review): returns whenever file_name exists — even after a
            # force-removal — so a forced re-download never happens; confirm
            # against the caller's intent.
            return
    # Scratch directory for the downloaded archive.
    temp_folder = tempfile.mkdtemp()
    try:
        if file_name != None:
            # online zip file
            # Download and extract zip archive.
            zip_file_name = os.path.join(temp_folder, "tmp.zip")
            self._download_with_progress_bar(url, zip_file_name)
            sys.stdout.write(
                "\rDownload complete, decompressing files ... "
            )
            sys.stdout.flush()
        else:
            # *url* is already a local zip path.
            zip_file_name = url
        zipfile = ZipFile(zip_file_name, "r")
        zipfile.extractall(self.disk_path)
        zipfile.close()
        sys.stdout.write("\nModel extracted successfully.")
        sys.stdout.flush()
    except Exception as e:
        # Best-effort cleanup before propagating the failure.
        print("Error encountered, cleaning up and exiting ...")
        rmtree(temp_folder, ignore_errors=True)
        raise e
    # delete temporary folder
    rmtree(temp_folder, ignore_errors=True)
def test_pass(zipfile, password): try: zipfile.extractall(password) return password except Exception as ex: print("An error occured "%(ex)) return
def extract_zifile(zipfile, zip_password):
    """Attempt extraction of *zipfile* using *zip_password*.

    Returns the password when extraction succeeds; any failure is treated
    as a wrong password and yields None.
    """
    try:
        zipfile.extractall(pwd=zip_password)
    except Exception:
        return None
    return zip_password
def extract_archive(source_path, destination_path=None, clean=False):
    """Extract a tar / tar.gz / gzip / zip archive.

    Similar implementation to the PyTorch dataset utilities.

    :param source_path: archive file source (str)
    :param destination_path: specific location to extract data (str);
        defaults to the directory containing *source_path*
    :param clean: remove the archive file after extracting (boolean)
    :raises ParameterError: when the archive format is unsupported
    """
    if destination_path is None:
        destination_path = os.path.dirname(source_path)
    if FileUtils.is_tar(source_path):
        with tarfile.open(source_path, 'r') as tar:
            tar.extractall(path=destination_path)
    elif FileUtils.is_targz(source_path):
        with tarfile.open(source_path, 'r:gz') as tar:
            tar.extractall(path=destination_path)
    elif FileUtils.is_gzip(source_path):
        # A bare .gz holds one file; decompress next to the source.
        destination_path = os.path.join(
            destination_path,
            os.path.splitext(os.path.basename(source_path))[0])
        with open(destination_path, "wb") as output_file, \
                GzipFile(source_path) as zip_file:
            output_file.write(zip_file.read())
    elif FileUtils.is_zip(source_path):
        # Distinct name so the stdlib ``zipfile`` module is not shadowed.
        with ZipFile(source_path, 'r') as archive:
            archive.extractall(destination_path)
    else:
        # The original called .format() on a string without a placeholder,
        # silently dropping the offending path from the message.
        raise ParameterError(
            "Unsupported Extract Format: {}".format(source_path))
    if clean:
        os.remove(source_path)
def test_pass(zipfile, password): try: zipfile.extractall(password) return password except Exception as ex: print("An error occured " % (ex)) return
def extract(zipfile):
    """Brute-force a 5-digit numeric zip password (range 00000-99998, as in
    the original ``range(00000, 99999)``).

    :param zipfile: an open ``zipfile.ZipFile`` instance
    :return: the working password as a zero-padded string, or None
    """
    for n in range(0, 99999):
        # Zero-pad: the original's str(passwd) dropped leading zeros, so
        # passwords like "00042" were never actually tried.
        candidate = "%05d" % n
        try:
            # pwd= requires bytes on Python 3; str raised TypeError.
            zipfile.extractall(pwd=candidate.encode("ascii"))
            return candidate
        except Exception:
            # Wrong password (or corrupt member); keep trying. Unlike the
            # bare except, this lets KeyboardInterrupt abort the search.
            continue
    return None
def crackfile(zipfile, password):
    """Try *password* against the archive.

    :param zipfile: an open ``zipfile.ZipFile`` instance
    :param password: candidate password (bytes)
    :return: the password on success, None on a wrong password.
        KeyboardInterrupt/SystemExit are re-raised so a brute-force driver
        can be aborted cleanly.
    """
    try:
        zipfile.extractall(pwd=password)
        # Python 3 print function (the original used a py2 print statement).
        print("password found !! ")
        return password
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        return None
def unzip(zfpath, path=None):
    """Extract the zip at *zfpath* into *path* (default DEF_UNZIP_DIR).

    :param zfpath: path to a zip archive
    :param path: destination directory; falls back to DEF_UNZIP_DIR
    :return: the destination directory on success, None otherwise
    """
    if not path:
        path = DEF_UNZIP_DIR
    # NOTE: assumes ``zp`` is the stdlib zipfile module (imported as an
    # alias at file level).
    if zp.is_zipfile(zfpath):
        try:
            # Originals bugs: zp.extractall is not a module function, and
            # the computed *path* was ignored in favor of DEF_UNZIP_DIR.
            with zp.ZipFile(zfpath) as archive:
                archive.extractall(path)
            return path
        except Exception:
            return None
    return None
def tryzippwd(zipfile, pwd, savep):
    """Attempt to extract *zipfile* into *savep* with password *pwd*.

    Prints the outcome and returns True on success, False on any failure.
    """
    try:
        zipfile.extractall(path=savep, pwd=pwd.encode('utf-8'))
    except:
        print('failed, password: %s' % (pwd))
        return False
    print('success, password: %s' % (pwd))
    return True
def extractfile(future_path):
    """Extract the .zip or .tgz archive at *future_path*.

    Members are extracted into the archive's containing directory
    (assumption — the original called ``extractall`` on the *modules*
    themselves, which never worked; TODO confirm intended destination).
    """
    if future_path.endswith('.zip'):
        print("Unzipping to : "+future_path+" patience will be needed.")
        with zipfile.ZipFile(future_path) as archive:
            archive.extractall(os.path.dirname(future_path) or '.')
    elif future_path.endswith('.tgz'):
        print("Extracting file to : "+future_path+" patience will be needed.")
        with tarfile.open(future_path) as archive:
            archive.extractall(os.path.dirname(future_path) or '.')
    else:
        # 'elif' ties all three branches together; in the original the
        # final 'else' hung off the second 'if', so .zip inputs also
        # printed the wrong-format message.
        print("Format seems wrong....")
    return
def __unpackZip(verzip, rodir, verbose):
    """Extract zip archive *verzip* into directory *rodir*.

    :param verzip: path to the zip archive
    :param rodir: destination directory, created if missing
    :param verbose: when true, print the destination of every member
    :return: 0 (legacy success code)
    """
    # Context manager guarantees the handle is closed (original leaked it);
    # prints converted from py2 statements to the print function.
    with ZipFile(verzip) as archive:
        if verbose:
            for member in archive.namelist():
                print(os.path.join(rodir, member))
        if not os.path.isdir(rodir):
            os.mkdir(rodir)
        archive.extractall(rodir)
        print("%d files checked out" % len(archive.namelist()))
    return 0
def UrlZip(url, dest='C:/Users/mattp/Desktop/WorkFiles/XMLFiles/2021Tiger/Zip'):
    """Download the zip archive at *url* and extract it into *dest*.

    :param url: direct link to a .zip file
    :param dest: extraction directory (generalized from the original
        hard-coded path; default preserves old behavior)
    """
    import requests, zipfile
    from io import BytesIO  # original used BytesIO without importing it

    # Split URL to get the file name (kept for parity with the original).
    filename = url.split('/')[-1]
    # Downloading the file by sending the request to the URL.
    req = requests.get(url)
    print('Downloading ' + url + ' Completed')
    # Context manager closes the archive; distinct name avoids shadowing
    # the ``zipfile`` module.
    with zipfile.ZipFile(BytesIO(req.content)) as archive:
        archive.extractall(dest)
def ZipBruteAttack2(length, charset):
    """Brute-force attack: try every charset**length candidate password.

    Candidates come from the project ``ListCreate`` generator (assumed to
    yield bytes — the success branch calls .decode()); progress via tqdm.
    """
    # Generate the candidate list once (the original rebuilt it three times).
    passlist = list(ListCreate(length, charset))
    Xipfile = input("Exact Name\Path of .zip file here: ")
    Xipfile = zipfile.ZipFile(Xipfile)
    snake = len(passlist)
    print("Total passwords to test:", snake)  # total number of passwords in list
    for word in tqdm(passlist, total=snake, unit="cobra"):
        try:
            # Fixes: extract via the opened archive (not the module) and use
            # the loop variable (the original referenced undefined 'cobra');
            # 'with list(...)' was also not a valid context manager.
            Xipfile.extractall(pwd=word.strip())
        except Exception:
            continue
        else:
            print("[+] YAY!!!!!!!!!!! \nPassword found:", word.decode().strip())
            break
    else:
        # for/else: only report failure when no candidate succeeded
        # (the original printed this unconditionally).
        print("[-] Password not found, try other wordlist.")
def get(cls, workdir=DEFAULT_DATA_DIR):
    """Load the Facebook metrics dataset, downloading/extracting if needed.

    :param workdir: directory holding (or receiving) the dataset files
    :return: ((X_train, y_train), (X_test, y_test))
    """
    dataset_path = os.path.join(workdir, cls.filename)
    if not isfile(dataset_path):
        cls.download(workdir)
    # Distinct name so the stdlib ``zipfile`` module is not shadowed.
    with ZipFile(dataset_path, "r") as archive:
        archive.extractall(workdir)
    df = pd.read_csv(os.path.join(workdir, "dataset_Facebook.csv"), sep=";")
    X = df[df.columns[:7]]
    # Month/hour are cyclical quantities; encode each as sin/cos pair.
    X = encode_feature_as_cyclical(X, "Post Month", 12)
    X = encode_feature_as_cyclical(X, "Post Hour", 24)
    y = df[df.columns[7:]]
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=TEST_SIZE, random_state=RANDOM_STATE
    )
    return (X_train, y_train), (X_test, y_test)
def unzipFile(fileName):
    """Find the archive matching *fileName* on the archive share, extract it
    into the output share, and log the match.

    :param fileName: archive name whose last 4 chars (".zip") are stripped
    """
    archiveDir = r"\\localhost\Prod_Archive"
    outputDir = r"\\localhost\outputdir"
    print("fileName: ", fileName[:-4])  # File name is .zip stripped.
    # The original iterated over undefined 'd'; archiveDir is the only
    # plausible source directory here.
    for f in os.listdir(archiveDir):
        fullpath = os.path.join(archiveDir, f)
        if f[:-4] == fileName[:-4]:
            # Append, so earlier log entries are not clobbered ('w+' in the
            # original truncated the log on every match).
            with open('files_processed.txt', 'a') as logfile:
                logfile.write("{0} - FOUND".format(f))
            print("extracting file: %s" % f)
            # Extract to outputDir (original wrote to undefined 'output').
            with ZipFile(fullpath) as archive:
                archive.extractall(outputDir)
            print("file moved to output")
def get(cls, workdir=DEFAULT_DATA_DIR):
    """Load the bike-sharing (hour.csv) dataset, downloading if needed.

    :param workdir: directory holding (or receiving) the dataset files
    :return: ((X_train, y_train), (X_test, y_test))
    """
    dataset_path = os.path.join(workdir, cls.filename)
    if not isfile(dataset_path):
        cls.download(workdir)
    # Distinct name so the stdlib ``zipfile`` module is not shadowed.
    with ZipFile(dataset_path, "r") as archive:
        archive.extractall(workdir)
    df = pd.read_csv(os.path.join(workdir, "hour.csv"), sep=",")
    # The first column is an id and the second the date;
    # the 3 last columns are labels.
    X = df[df.columns[2:-3]]
    y = df[df.columns[-1]]
    # Hour/month are cyclical; encode each as a sin/cos pair.
    X = encode_feature_as_cyclical(X, "hr", 24)
    X = encode_feature_as_cyclical(X, "mnth", 12)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=TEST_SIZE, random_state=RANDOM_STATE
    )
    return (X_train, y_train), (X_test, y_test)
def get(cls, workdir=DEFAULT_DATA_DIR):
    """Load the SGEMM GPU-kernel-performance dataset, downloading if needed.

    :param workdir: directory holding (or receiving) the dataset files
    :return: ((X_train, y_train), (X_test, y_test)); y is the mean of the
        four timing columns
    """
    dataset_path = os.path.join(workdir, cls.filename)
    if not isfile(dataset_path):
        cls.download(workdir)
    # Distinct name so the stdlib ``zipfile`` module is not shadowed.
    with ZipFile(dataset_path, "r") as archive:
        archive.extractall(workdir)
    df = pd.read_csv(os.path.join(workdir, "sgemm_product.csv"), sep=",")
    X = df[df.columns[:14]]
    y = df[df.columns[14:]].mean(axis=1)
    # All columns but the last 4 only contain powers of 2, so take log2.
    X_log_2 = {col: np.log2(X[col]) for col in X.columns[:-4]}
    X = X.assign(**X_log_2)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=TEST_SIZE, random_state=RANDOM_STATE
    )
    return (X_train, y_train), (X_test, y_test)
def get(cls, subject, workdir=DEFAULT_DATA_DIR):
    """Load the student-performance dataset for *subject*, downloading if needed.

    :param subject: dataset variant, selects student-{subject}.csv
    :param workdir: directory holding (or receiving) the dataset files
    :return: ((X_train, y_train), (X_test, y_test))
    """
    dataset_path = os.path.join(workdir, cls.filename)
    if not isfile(dataset_path):
        cls.download(workdir)
    # Distinct name so the stdlib ``zipfile`` module is not shadowed.
    with ZipFile(dataset_path, "r") as archive:
        archive.extractall(workdir)
    df = pd.read_csv(os.path.join(workdir, f"student-{subject}.csv"), sep=";")
    X = df[df.columns[:-1]]
    y = df[df.columns[-1]]
    # Encode yes/no categories as int.
    X = X.replace("yes", 1)
    X = X.replace("no", 0)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=TEST_SIZE, random_state=RANDOM_STATE
    )
    return (X_train, y_train), (X_test, y_test)
def ZipListAttack():  # Dictionary Attack
    """Dictionary attack: try each password from a user-supplied wordlist
    against a user-supplied zip archive, with tqdm progress output.
    """
    print("*" * 60)
    print("The password list path you want to use \nmust be available in the current directory")
    passlist = input("Exact Name\Path of file here: ")
    print("*" * 60)
    print("The password list path you want to use \nmust be available in the current directory")
    Xipfile = input("Exact Name\Path of .zip file here: ")
    Xipfile = zipfile.ZipFile(Xipfile)
    snake = len(list(open(passlist, "rb")))  # total number of passwords in list
    print("Total passwords to test:", snake)
    with open(passlist, "rb") as words:
        for word in tqdm(words, total=snake, unit="cobra"):
            try:
                # Fixes: use the opened archive (not the zipfile module) and
                # the loop variable (original referenced undefined 'cobra').
                Xipfile.extractall(pwd=word.strip())
            except Exception:
                continue
            else:
                print("[+] YAY!!!!!!!!!!! \nPassword found:", word.decode().strip())
                break
        else:
            # for/else: only report failure when nothing matched (the
            # original printed this unconditionally, even after success).
            print("[-] Password not found, try other wordlist.")
def extract_packs(zipfile, config):
    """Extract the selected Cemu graphic packs from *zipfile*.

    :param zipfile: open archive of graphic packs (project ``read_packs``
        knows its layout)
    :param config: dict with 'gameid_list', 'resolutions' and 'cemu_path'
    Side effects: may move an existing graphicPacks dir to graphicPacks_old
    as a one-time backup, recreates the destination, and drops a hidden
    '.upd_cemu' control file marking the dir as managed by this tool.
    """
    packs_by_gameid, pack_files = read_packs(zipfile)
    packs_to_unpack = set()
    _ids_to_unpack = config['gameid_list']
    if not _ids_to_unpack:
        _ids_to_unpack = packs_by_gameid.keys()  # unpack all
    for _id in _ids_to_unpack:
        if _id not in packs_by_gameid:
            continue
        # presumably maps resolution -> pack names, with None holding
        # resolution-independent packs — confirm against read_packs.
        _packs_res = packs_by_gameid[_id]
        if not config['resolutions']:
            logger.debug('Extracting all packs for game %s', _id)
            for _packs in _packs_res.values():
                packs_to_unpack.update(_packs)  # all resolutions
        else:
            logger.debug('Filtering resolutions for game %s', _id)
            # Resolution-independent packs are always included.
            packs_to_unpack.update(_packs_res.get(None, ()))
            for res in config['resolutions']:
                if res in _packs_res:
                    packs_to_unpack.update(_packs_res[res])
    # Flatten pack names into the archive member paths to extract.
    files_to_unpack = []
    for packname in packs_to_unpack:
        files_to_unpack.extend(pack_files[packname])
    destination_path = os.path.join(config['cemu_path'], 'graphicPacks')
    backup_path = os.path.join(config['cemu_path'], 'graphicPacks_old')
    control_file = os.path.join(destination_path, '.upd_cemu')
    # Back up a pre-existing, not-yet-managed graphicPacks dir exactly once
    # (only when neither the control file nor an old backup exists).
    if (os.path.exists(destination_path)
            and (not os.path.exists(control_file))
            and (not os.path.exists(backup_path))):
        logger.debug('Creating backup %r', backup_path)
        shutil.move(destination_path, backup_path)
    logger.debug('Creating destination path')
    create_path(destination_path, remove_first=True)
    # Touch the control file so future runs know this dir is managed.
    with open(control_file, 'w'):
        pass
    hide_file(control_file)
    if not files_to_unpack:
        logger.debug('Unpacking all files')
        files_to_unpack = None  # unpack all
    zipfile.extractall(destination_path, members=files_to_unpack)
def decrypt_sound_found_in_memory(self, in_file_path):
    """Decrypt an AES-CBC encrypted zip found at *in_file_path*, extract it,
    and return the Windows path of its single member.

    File layout (as read below): 8-byte little-endian plaintext size,
    16-byte IV, then ciphertext in self.size_chunk blocks.
    Returns None when the archive holds more than one member.
    """
    with open(in_file_path, 'rb') as in_file:
        # Plaintext length, used to strip CBC padding from the last block.
        file_size = struct.unpack('<Q', in_file.read(struct.calcsize('<Q')))[0]
        iv = in_file.read(16)
        aes = AES.new(self.key, AES.MODE_CBC, iv)
        # delete=False: the file is re-opened below as a zip archive.
        out_file = tempfile.NamedTemporaryFile(delete=False)
        while True:
            data = in_file.read(self.size_chunk)
            n = len(data)
            if n == 0:
                break
            decode = aes.decrypt(data)
            n = len(decode)
            if file_size > n:
                out_file.write(decode)
            else:
                out_file.write(
                    decode[:file_size])  # <- remove padding on last block
            file_size -= n
        # Windows-only: derive the temp directory from the file name.
        temp_path = out_file.name[:out_file.name.rfind('\\')]
        import zipfile
        # NOTE(review): shadows the just-imported module, and out_file was
        # never flushed/closed before re-reading — appears to rely on the
        # OS-level handle; confirm on the target platform.
        zipfile = zipfile.ZipFile(out_file)
        zipfile.extractall(temp_path)
        zipfile.close()
        # infolist() remains available after close().
        info_zipfile = zipfile.infolist()
        if len(info_zipfile) < 2:
            info_file_path = info_zipfile[0].filename.replace('/', '\\')
            print(info_file_path)
            sf_path = temp_path + '\\' + info_file_path
            print(sf_path)
            return sf_path
def zCrack(filename, string, min_range, max_range):
    """Brute-force a zip password over cartesian products of *string*.

    Tries every candidate of length min_range..max_range-1; on success the
    password is printed, logged to passwd.txt and the process exits.

    :param filename: path to the zip archive
    :param string: character set to combine
    :param min_range: minimum password length (inclusive)
    :param max_range: maximum password length (exclusive)
    """
    zip_file = zipfile.ZipFile(filename)  # Create handle of zip file
    for x in range(min_range, max_range):  # py3 range (original py2 xrange)
        for xs in product(string, repeat=x):  # Cartesian product
            passwd = ''.join(xs)  # Password to test
            stdout.write('\r[-] Attemp password: {passwd}'.format(
                passwd=passwd))  # Print inline
            try:
                # Fix: extract via the opened handle (the original called the
                # *module*), and pwd= wants bytes on Python 3.
                zip_file.extractall(pwd=passwd.encode())
            except Exception:
                continue
            print('\n[+] Password found : {passwd} !\n'.format(
                passwd=passwd))
            # 'with' closes the log even if write fails (original leaked on
            # its happy path only by luck).
            with open('passwd.txt', 'w') as handle:
                handle.write('\n[+] Password found : {passwd} !\n'.format(
                    passwd=passwd))
            exit(0)  # Exit all threads
# Self-update script: fetch the repository master zip and extract everything
# except the bundled tools/ directory into the current directory.
# Modernized from Python 2 (urllib2/cStringIO/print statements).
import urllib.request
from io import BytesIO
import zipfile

MASTER_ZIP = 'https://github.com/matpow2/anaconda/archive/master.zip'

print('Downloading update.')
data = urllib.request.urlopen(MASTER_ZIP).read()
print('Done, extracting.')
fp = BytesIO(data)
# Context manager + distinct name: the original shadowed the module and
# never closed the archive.
with zipfile.ZipFile(fp, 'r') as archive:
    names = []
    for name in archive.namelist():
        splitted = name.split('/')
        # Skip the repository's tools/ subtree.
        if len(splitted) >= 2 and (splitted[0] == 'anaconda-master'
                                   and splitted[1] == 'tools'):
            continue
        names.append(name)
    archive.extractall('.', names)
# Map the predictions to each user. top_n = defaultdict(list) for uid, iid, true_r, est, _ in predictions: top_n[uid].append((iid, est)) # Sort the predictions for each user and retrieve the k highest ones. for uid, user_ratings in top_n.items(): user_ratings.sort(key=lambda x: x[1], reverse=True) top_n[uid] = user_ratings[:n] return top_n # Unzip ml-100k.zip zipfile = zipfile.ZipFile('ml-100k.zip', 'r') zipfile.extractall() zipfile.close() # Read data into an array of strings with open('./ml-100k/u.data') as f: all_lines = f.readlines() # Prepare the data to be used in Surprise reader = Reader(line_format='user item rating timestamp', sep='\t') data = Dataset.load_from_file('./ml-100k/u.data', reader=reader) cont = 1 while (cont): print("Choose an algorithm:\n") print( "1. SVD 2. SVD++ 3. NMF 4. KNNWithMeans 5. Slope one 6. Co-Clustering\n"
import zipfile

# Extract photos.zip into photos/.
source = 'photos.zip'
destination = 'photos/'

# Distinct name for the archive object so the ``zipfile`` module is not
# shadowed inside the with-block (the original used ``as zipfile``).
with zipfile.ZipFile(source, 'r') as archive:
    archive.extractall(destination)
print "Batch size: {0}".format(layer.batchsize) print "Weights: {0}".format(None if layer.weights is None else layer.weights.shape) print "Biases: {0}".format(None if layer.biases is None else layer.biases.shape) print "z: {0}".format(None if layer.z is None else layer.z.shape) print "a={1}(z): {0}".format(None if layer.a is None else layer.a.shape, layer.activation) print "Gradient Weights: {0}".format(None if layer.g_weights is None else layer.g_weights.shape) print "Gradient Biases: {0}".format(None if layer.g_biases is None else layer.g_biases.shape) print "Activation: {0}".format(layer.activation) ############################################ # Main routine ############################################ print "Unziping training dataset ..." zip = zip.ZipFile('mnist_train_pkl.zip') zip.extractall() print "Building a standard neural network ..." snn = StandardNeuralNetwork(network_layers_description=[["FullyConnected", 784, None], ["FullyConnected", 30, "sigmoid"], ["FullyConnected", 10, "softmax"]], cost="crossentropy") print "Loading data set ..." rfile = open("mnist_train.pkl", "rb") mnist = pickle.load(rfile) rfile.close() inputs = (rescale_pixel((mnist["inputs"][0:50000]).astype('float32')), mnist["labels"][0:50000]) inputs_test = (rescale_pixel((mnist["inputs"][50000:60000]).astype('float32')), mnist["labels"][50000:60000]) print "Running training sessions..." snn.execute(input=inputs, epochs=10, eta=3.0, _lambda=0, input_test=inputs_test, batch=50, debug=False) print "==============================================="
url_sonar_scanner = "https://sonarsource.bintray.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-3.0.3.778-windows.zip" sonar_scanner_zip = "C:/Sonar/sonar-scanner.zip" utils.print_(">> Configuring First Run ...") directory = "C:/Sonar" os.makedirs(directory, exist_ok=True) if not os.path.exists("C:/Sonar/sonar-scanner"): if not os.path.exists(sonar_scanner_zip): utils.print_(">> Downloading Sonar Scanner ...") urllib.request.urlretrieve(url_sonar_scanner, sonar_scanner_zip) if os.path.exists(sonar_scanner_zip): with zipfile.ZipFile(sonar_scanner_zip, "r") as zipfile: zipfile.extractall("C:/Sonar") os.remove(sonar_scanner_zip) if os.path.exists("C:/Sonar/sonar-scanner-3.0.3.778-windows"): os.rename("C:/Sonar/sonar-scanner-3.0.3.778-windows", "C:/Sonar/sonar-scanner") directory = "C:/Sonar/issues-report" os.makedirs(directory, exist_ok=True) file = "C:/Sonar/template/template.sonarsource.properties" os.makedirs(os.path.dirname(file), exist_ok=True) with open(file, "w") as f: f.writelines("sonar.host.url={url}\n"\ "sonar.login={login}\n"\ "sonar.password={password}\n"\
def download_test_set(test_set, langpair=None):
    """Downloads the specified test to the system location specified by the SACREBLEU environment variable.

    :param test_set: the test set to download
    :param langpair: the language pair (needed for some datasets)
    :return: the set of processed files
    """
    outdir = os.path.join(SACREBLEU_DIR, test_set)
    os.makedirs(outdir, exist_ok=True)
    # Optional per-file md5 list; None entries skip verification.
    expected_checksums = DATASETS[test_set].get('md5', [None] * len(DATASETS[test_set]))
    for dataset, expected_md5 in zip(DATASETS[test_set]['data'], expected_checksums):
        tarball = os.path.join(outdir, os.path.basename(dataset))
        rawdir = os.path.join(outdir, 'raw')
        # Cross-process lock so concurrent invocations don't download twice.
        lockfile = '{}.lock'.format(tarball)
        with portalocker.Lock(lockfile, 'w', timeout=60):
            # Download only when missing or truncated (size 0).
            if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:
                logging.info("Downloading %s to %s", dataset, tarball)
                try:
                    with urllib.request.urlopen(dataset) as f, open(tarball, 'wb') as out:
                        out.write(f.read())
                except ssl.SSLError:
                    logging.warning('An SSL error was encountered in downloading the files. If you\'re on a Mac, '
                                    'you may need to run the "Install Certificates.command" file located in the '
                                    '"Python 3" folder, often found under /Applications')
                    sys.exit(1)
                # Check md5sum
                if expected_md5 is not None:
                    md5 = hashlib.md5()
                    with open(tarball, 'rb') as infile:
                        for line in infile:
                            md5.update(line)
                    if md5.hexdigest() != expected_md5:
                        logging.error('Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'.format(md5.hexdigest(), expected_md5))
                        logging.error('Please manually delete "{}" and rerun the command.'.format(tarball))
                        logging.error('If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.')
                        sys.exit(1)
                    else:
                        logging.info('Checksum passed: {}'.format(md5.hexdigest()))
                # Extract the tarball
                logging.info('Extracting %s', tarball)
                if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):
                    import tarfile
                    with tarfile.open(tarball) as tar:
                        tar.extractall(path=rawdir)
                elif tarball.endswith('.zip'):
                    import zipfile
                    # NOTE(review): the with-target shadows the just-imported
                    # ``zipfile`` module (harmless here, nothing else uses it).
                    with zipfile.ZipFile(tarball, 'r') as zipfile:
                        zipfile.extractall(path=rawdir)
    found = []
    # Process the files into plain text
    languages = DATASETS[test_set].keys() if langpair is None else [langpair]
    for pair in languages:
        # Only language-pair keys look like "src-tgt"; skip metadata keys.
        if '-' not in pair:
            continue
        src, tgt = pair.split('-')
        rawfile = DATASETS[test_set][pair][0]
        field = None  # used for TSV files
        # A "N:file.tsv" spec selects column N of a TSV file.
        if rawfile.endswith('.tsv'):
            field, rawfile = rawfile.split(':', maxsplit=1)
            field = int(field)
        rawpath = os.path.join(rawdir, rawfile)
        outpath = os.path.join(outdir, '{}.{}'.format(pair, src))
        process_to_text(rawpath, outpath, field=field)
        found.append(outpath)
        # Remaining entries are reference files; number them only when there
        # are several.
        refs = DATASETS[test_set][pair][1:]
        for i, ref in enumerate(refs):
            field = None
            if ref.endswith('.tsv'):
                field, ref = ref.split(':', maxsplit=1)
                field = int(field)
            rawpath = os.path.join(rawdir, ref)
            if len(refs) >= 2:
                outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))
            else:
                outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))
            process_to_text(rawpath, outpath,
                            field=field)
            found.append(outpath)
    return found
# Task: https://stepik.org/lesson/245299/step/2?unit=217525
# The chief accountant of the "Roga i Kopyta" company accidentally deleted
# the payroll sheet. Luckily, per-employee pay slips survived. Rebuild the
# payroll from those slips (the zip archive is downloaded below). The output
# must contain 1000 lines, each "full-name<space>salary", sorted by name.
import xlrd, wget, os, zipfile

zipname, zipfolder, payroll = 'rogaikopyta.zip', 'xlsxfiles', {}

if not os.access(zipname, os.F_OK):
    wget.download('https://stepik.org/media/attachments/lesson/245299/' + zipname)

if zipfolder not in os.listdir():
    # Context manager + distinct name: the original shadowed the ``zipfile``
    # module and closed the handle manually.
    with zipfile.ZipFile(zipname) as archive:
        archive.extractall(zipfolder)

# os.path.join is portable (the original concatenated "\\" — Windows-only).
path = os.path.join(os.getcwd(), zipfolder)
for file in os.listdir(zipfolder):
    wb = xlrd.open_workbook(os.path.join(path, file))
    sh = wb.sheet_by_name(wb.sheet_names()[0])
    # Row 1 layout: [_, full name, _, salary].
    payroll[sh.row_values(1)[1]] = int(sh.row_values(1)[3])

with open("out.txt", 'w', encoding='utf-8') as f:
    # Plain loop instead of a side-effect list comprehension.
    for name, salary in sorted(payroll.items()):
        f.write(name + " " + str(salary) + "\n")
# importing necessary modules
import requests, zipfile
from io import BytesIO

print('Downloading started')

# Defining the zip file URL
url = 'https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-zip-file.zip'
# Split URL to get the file name
filename = url.split('/')[-1]

# Downloading the file by sending the request to the URL
req = requests.get(url)
print('Downloading Completed')

# extracting the zip file contents; a context manager closes the archive and
# the distinct name avoids shadowing the ``zipfile`` module.
with zipfile.ZipFile(BytesIO(req.content)) as archive:
    archive.extractall('C:/Users/mattp/Desktop/WorkFiles/XMLFiles/2021Tiger/Zip')
''' zf = zipfile.ZipFile("C:/Users/Administrator/Desktop/liutan2.zip") print(zf) ''' ZipFile.getinfo(name) 获取zip文档内指定文件信息,返回一个zipfile.Zipinfo对象他包括文件的详细信息 ''' ll = zf.getinfo("11.txt") print(ll) ''' zipfile.namelist() 获取zip文档内所有文件的名称列表 ''' nl = zf.namelist() print(nl) ''' zipfile.extractall() 解压zip文档中的所有文件到当前目录,参数members的默认值为zip文档内的所有文件名称 ''' iu = zf.extractall("C:/Users/Administrator/Desktop/") print(iu) ############################random模块################################################################################################################### ''' random 随机数 所有的随机模块都是伪随机 random() 获取0-1之间的随机小数 格式:random.random() 返回值:随机0-1之间的小数 '''
def unpack_zip(filename):
    """Extract the zip archive at *filename* into the current directory.

    The original called ``zipfile.extractall(filename)`` on the *module*
    itself — no such function exists, so it always raised AttributeError.
    """
    with zipfile.ZipFile(filename) as archive:
        archive.extractall()
# Downloads and extracts all necessary models
# TF Checkpoint for facenet trained on MS-Celeb 1M
# MTCNN for face detection and alignment model
# Pre-calculated embeddings on LFW using facenet with dlib face recognizer and alignment
#
# Currently hosted on dropbox of [email protected]
#
import zipfile
import io
from urllib.request import urlopen

url = urlopen("https://www.dropbox.com/s/fjsen6tbdrgdj4s/models.zip?dl=1")
# Context manager closes the archive deterministically, and a distinct name
# avoids shadowing the ``zipfile`` module (the original did both by hand).
with zipfile.ZipFile(io.BytesIO(url.read())) as archive:
    archive.extractall('./')
@author: Rohan """ from six.moves import urllib import tensorflow as tf import zipfile import os from tensorflow.keras.layers import Activation, Dense, Conv2D, MaxPooling2D, BatchNormalization, Flatten import matplotlib.pyplot as plt from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator DOWNLOAD_ROOT = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' local_file = 'C:/Users/Rohan/Downloads/cats_and_dogs_filtered.zip' zipfile = zipfile.ZipFile(local_file, 'r') zipfile.extractall('/tmp') zipfile.close() base_dir = '/tmp/cats_and_dogs_filtered' train_dir = os.path.join(base_dir, 'train') val_dir = os.path.join(base_dir, 'validation') train_cat_dir = os.path.join(train_dir, 'cats') train_dog_dir = os.path.join(train_dir, 'dogs') '''n_cols=4 n_rows=4 img_index=0 train_cat_fname=os.listdir(train_cat_dir) train_dog_fname=os.listdir(train_dog_dir) img=plt.gcf()
def check_password(zipfile, password):
    """Try *password* against the archive, extracting into the module-level
    ``path_from_archive`` directory.

    :param zipfile: an open ``zipfile.ZipFile`` instance
    :param password: candidate password (bytes)
    :return: the password on success, None on any failure
    """
    try:
        # os.path.join with a single argument is a no-op; pass the
        # destination directly. Exception is narrowed from the original
        # bare except so Ctrl-C still aborts a cracking loop.
        zipfile.extractall(path_from_archive, pwd=password)
        return password
    except Exception:
        return None
def extract_all(zipfile, directory):
    """Unpack every member of the given zip archive into *directory*."""
    zipfile.extractall(path=directory)