def create_archive_stream(self, items, subdir=None):
    """Stream a ZIP archive of the given share items as an HTTP response.

    :param items: names of files/folders to include, relative to the share
        root (or to *subdir* when given)
    :param subdir: optional sub-directory of the share to resolve items in
    :returns: ``StreamingHttpResponse`` serving the zip as an attachment
    :raises Exception: if *subdir* or any listed item does not exist
    """
    import zipstream
    from django.http.response import StreamingHttpResponse
    from settings.settings import ZIPFILE_SIZE_LIMIT_BYTES
    from utils import zipdir, get_total_size
    from os.path import isfile, isdir

    if subdir is None:
        root = self.get_path()
    else:
        root = os.path.join(self.get_path(), subdir)
    if not os.path.exists(root):
        raise Exception('Invalid subdirectory provided')
    share_path = self.get_path()
    archive = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    # Size-limit enforcement is intentionally disabled; kept for reference:
    # total_size = get_total_size([os.path.join(root, item) for item in items])
    # if total_size > ZIPFILE_SIZE_LIMIT_BYTES:
    #     raise Exception("%d bytes is above bioshare's limit for creating zipfiles, please use rsync or wget instead" % (total_size))
    for entry in items:
        entry_path = os.path.join(root, entry)
        if not os.path.exists(entry_path):
            raise Exception("File or folder: '%s' does not exist" % (entry))
        if isfile(entry_path):
            # Single file: store it under its item name.
            archive.write(entry_path, arcname=entry)
        elif isdir(entry_path):
            # Directory: delegate recursive addition to the shared helper.
            zipdir(share_path, entry_path, archive)
    from datetime import datetime
    zip_name = 'archive_' + datetime.now().strftime('%Y_%m_%d__%H_%M_%S') + '.zip'
    response = StreamingHttpResponse(archive, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename={}'.format(zip_name)
    return response
def create_archive_stream(self, items, subdir=None):
    """Serve selected files/folders of this share as a streamed ZIP download.

    :param items: item names relative to the share root (or *subdir*)
    :param subdir: optional sub-directory to resolve *items* against
    :returns: django ``StreamingHttpResponse`` with the generated archive
    :raises Exception: on a missing *subdir* or missing item
    """
    import zipstream
    from django.http.response import StreamingHttpResponse
    from settings.settings import ZIPFILE_SIZE_LIMIT_BYTES
    from utils import zipdir, get_total_size
    from os.path import isfile, isdir
    from datetime import datetime

    base = self.get_path() if subdir is None else os.path.join(self.get_path(), subdir)
    if not os.path.exists(base):
        raise Exception('Invalid subdirectory provided')
    share_root = self.get_path()
    zstream = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    # The size-limit check below is deliberately disabled; kept for reference:
    # total_size = get_total_size([os.path.join(base, item) for item in items])
    # if total_size > ZIPFILE_SIZE_LIMIT_BYTES:
    #     raise Exception("%d bytes is above bioshare's limit for creating zipfiles, please use rsync or wget instead" % (total_size))
    for name in items:
        full = os.path.join(base, name)
        if not os.path.exists(full):
            raise Exception("File or folder: '%s' does not exist" % (name))
        if isfile(full):
            zstream.write(full, arcname=name)
        elif isdir(full):
            zipdir(share_root, full, zstream)
    stamp = datetime.now().strftime('%Y_%m_%d__%H_%M_%S')
    zip_name = 'archive_' + stamp + '.zip'
    response = StreamingHttpResponse(zstream, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename={}'.format(zip_name)
    return response
def do_backup(file):
    """Backup albums to Digital Ocean Spaces.

    I run this command locally on the machine where I use GIMP to create
    new files to put into the albums. It zips up the albums/ directory and
    uploads it to (currently) Digital Ocean Spaces.
    """
    # upload_location is the top level of the DO Space/Azure container
    # at the moment.
    upload_location = file
    # Remove any stale archive before rebuilding it.
    Path(file).unlink(missing_ok=True)
    zipdir("albums/", file)
    # NOTE: this will overwrite what is in Digital Ocean!
    do_upload_file(file, upload_location)
def zip_project(clfs, proj):
    """Zip a project's directory into ``<name>.zip`` next to it.

    :param clfs: filesystem helper whose ``get`` resolves path components
    :param proj: mapping with at least ``'path'`` and ``'name'`` keys
    :returns: the path of the created archive
    """
    from utils import zipdir
    import zipfile

    # Archive lives one level above the project directory.
    project_zip = clfs.get([proj['path'], '..', proj['name'] + '.zip'])
    with zipfile.ZipFile(project_zip, 'w', zipfile.ZIP_BZIP2) as zf:
        zipdir(proj['path'], zf)
    # Expected package contents:
    #   script.py
    #   model.pickl
    #   pipeline.py (for refitting)
    return project_zip
def get_zip(self):
    """Zip this project's directory into ``<name>.zip`` and return its path.

    Uses ``self.clfs`` to resolve the archive path one level above
    ``self.path``; compresses with BZIP2.
    """
    from utils import zipdir
    import zipfile

    path = self.path
    name = self.name
    # Place the archive next to (one level above) the project directory.
    project_zip = self.clfs.get([path, '..', name + '.zip'])
    with zipfile.ZipFile(project_zip, 'w', zipfile.ZIP_BZIP2) as archive:
        zipdir(path, archive)
    # Expected package contents:
    #   script.py, model.pickl, pipeline.py (for refitting)
    return project_zip
def writeToCSV(self, magazines):
    """Write each magazine to its own CSV file under ``csv_log/``.

    Before writing, the existing ``csv_log/`` directory is archived into a
    millisecond-timestamped zip so previous results are preserved.

    :param magazines: iterable of magazine objects (each providing
        ``get_name``/``get_volume``/``get_issue``), or ``None`` to do nothing
    """
    if magazines is not None:  # idiom fix: was `magazines != None`
        fileResults = CSVWriter()
        # Zip the old CSV logs first; the ms timestamp keeps names unique.
        stamp = time.time() * 1000
        output = "old_csv_log." + str(stamp) + ".zip"
        # Context manager guarantees the archive is closed even on error
        # (the original left it open if zipdir raised).
        with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED) as zipf:
            utils.zipdir('csv_log/', zipf)
        for mId, magazine in enumerate(magazines):
            filename = ("csv_log/" + magazine.get_name() + "_"
                        + magazine.get_volume() + "_"
                        + magazine.get_issue() + ".csv")
            fileResults.writeMagazineCSV(filename, magazine)
def save():
    """Render the submitted slides into index.html, bundle them with the
    static slide assets into a zip under ``tmp/``, and return the archive
    as a file download."""
    data = request.form.to_dict()
    template = env.get_template('index.html')
    html = template.render(slides=data['html'])
    unique_filename = generate_unique_filename()
    if not os.path.exists('tmp'):
        os.makedirs('tmp')
    tmp_path = os.path.join(ROOT, 'tmp')
    filepath = os.path.join(tmp_path, unique_filename)
    with zipfile.ZipFile(filepath, 'w') as archive:
        zipdir('slides', archive)
        # Inject the freshly rendered page alongside the copied assets.
        archive.writestr('slides/index.html', html)
    return send_from_directory(tmp_path, unique_filename, as_attachment=True)
def download_all(request):
    """Zip ``settings.MEDIA_ROOT`` via ``zipdir`` and serve the archive as a
    forced download, deleting the temporary file afterwards.

    :param request: the incoming HTTP request (unused beyond the view contract)
    :returns: ``HttpResponse`` carrying the zip bytes as an attachment
    """
    _file = zipdir(settings.MEDIA_ROOT, 'documents')
    # Bug fix: a zip must be read in binary mode ('rb'); text mode 'r'
    # fails to decode arbitrary bytes under Python 3. Reading inside a
    # context manager also fixes the handle leak, and lets us safely
    # remove the file after its bytes are captured in the response.
    with open(_file.filename, 'rb') as zip_file:
        response = HttpResponse(zip_file.read(),
                                content_type='application/force-download')
    response['Content-Disposition'] = 'attachment; filename="%s"' % _file.filename
    os.remove(_file.filename)
    return response
def writeTextOutput(results):
    """Dump the full text of every article into per-article files under
    ``log/``, archiving the previous logs first.

    :param results: iterable of magazine objects providing ``get_name`` and
        ``get_articles`` (articles provide ``get_title`` and
        ``get_article_full_text``)
    """
    # Zip old files first; ms timestamp keeps backup names unique.
    stamp = time.time() * 1000
    output = "backup/old_logs." + str(stamp) + ".zip"
    # Context manager closes the archive even if zipdir raises
    # (the original leaked the handle on error).
    with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED) as zipf:
        utils.zipdir('log/', zipf)
    for magazine in results:
        for article in magazine.get_articles():
            # Sanitize name+title into a filesystem-safe identifier;
            # the replace order (/, space, __) is preserved from the original.
            document_id = (str(magazine.get_name()).replace(":", "")
                           + str(article.get_title())).replace("/", "_").replace(" ", "_").replace("__", "_")
            document_name = "log/" + document_id + "_FULLTEXT_" + ".txt"
            xml = article.get_article_full_text()
            # `with` replaces the manual open/close pair; python will
            # convert \n to os.linesep in text mode as before.
            with open(document_name, 'w') as f:
                f.write(xml)
def generate_batch(batch): host = 'http://112.124.117.97' zip_dir = '/usr/share/nginx/html' import tempfile to_dir = tempfile.mkdtemp() print to_dir # generate all qrcode in that temp dir for i in xrange(1, int(batch.count)+1): record = Record(batch=batch, index=i, serial_num=utils.generate_serial_num(), left_time=batch.verify_time) record.save() url = urlparse.urljoin(host, record.serial_num) filepath = os.path.join(to_dir, record.serial_num + '.png') utils.generate_qrcode(url, filepath) zip_filepath = os.path.join(zip_dir, batch.bid + '.zip') utils.zipdir(to_dir, zip_filepath) utils.safe_rmtree(to_dir)
def logCandicates(logger, full_list_magazines, rank):
    """Run linguistic analysis, persist candidates to CSV, and log the top
    candidates per magazine.

    :param logger: logger used for debug/info output
    :param full_list_magazines: magazines to analyse, or ``None`` to skip
    :param rank: when non-empty, forces the score limit to 0
    :returns: ``(tfidf, limit)`` from ``do_linguistics``
    """
    # NOTE(review): if full_list_magazines is None, `limit` is never
    # assigned and the final return raises NameError — preserved as-is;
    # confirm callers never pass None before changing it.
    tfidf = None
    if full_list_magazines is not None:  # idiom fix: was `!= None`
        count = 0
        tfidf, candidates, limit = do_linguistics(full_list_magazines, logger)
        #print(tfidf)
        if len(rank) > 0:
            limit = 0
        # Zip the old candidate log first (ms timestamp keeps names unique).
        stamp = time.time() * 1000
        output = "backup/old_candidate_log." + str(stamp) + ".zip"
        # `with` closes the archive even if zipdir raises.
        with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED) as zipf:
            utils.zipdir('TEST_ALL_CANDIDATES.csv', zipf)
        # Print candidates to csv.
        fileResults = CSVWriter()
        fileResults.writeCandidatesCSV("TEST_ALL_CANDIDATES.csv", candidates, limit)
        # Log candidates.
        logger.debug("Candidates for each magazine: ")
        for candidate in candidates:
            count = 0
            logger.debug(candidate)
            # Renamed loop variable (was `tuple`, shadowing the builtin).
            for pair in candidates[candidate]:
                a, b = pair
                # Log at most 41 entries per magazine above the limit.
                if b > limit and count < 41:
                    logger.info(str(a) + ": " + str(b))
                    count = count + 1
    return tfidf, limit
def pack(self): """ Pack files into ZIP archive. """ dirname = self._get_export_root_directory() logging.debug("pack(): dirname: %s" % dirname) # --- Add files to export, if necessary self.add_files_to_export(dirname) zipname = self._get_archive_name() logging.debug("pack(): zipname: %s" % zipname) try: zipdir(dirname, zipname) except IOError: logging.error(u"pack(): Cannot create archive '%s' in directory '%s'" \ % (zipname, dirname)) raise except Exception, e: message = u"pack(): Exception during archive creation. " message += u'Exception ``%s`` raised: %s ' \ % (e.__class__.__name__, e.message) logging.error(message, exc_info=True) raise
if __name__ == '__main__': connect('paibei') host = 'http://112.124.117.97' zip_dir = '/tmp/qrcode' import tempfile to_dir = tempfile.mkdtemp() print to_dir batch_id = sys.argv[1] added_count = sys.argv[2] batch = get_batch(batch_id) currentIndex = batch.count # query current record index in batch for i in xrange(currentIndex + 1, currentIndex + added_count + 1): record = Record(batch=batch, index=i, serial_num=utils.generate_serial_num(), left_time=batch.verify_time) record.save() url = urlparse.urljoin(host, batch.bid) filepath = os.path.join(to_dir, record.serial_num + '.png') utils.generate_qrcode(url, filepath) zip_filepath = os.path.join(zip_dir, batch.bid + '.zip') utils.zipdir(to_dir, zip_filepath) utils.safe_rmtree(to_dir)
#use when using ranges #apply_weights(units, tfidf, rank, logger, limits, rank_range) print("convert to rdf") writeResultsToRDF(units, annotator, counter, target_format, source_file, source_format) writeXmlOutput(full_list_magazines) writeCSVOutputOfResults(full_list_magazines) annotator.writeToCSV(full_list_magazines) annotator.logConseptsByIndex(full_list_magazines) #writeResultsToRDF(u,annotator,counter, target_format, source_file, source_format) annotator.print_filtered_terms(full_list_magazines) annotator.print_included_terms(full_list_magazines) annotator.print_stats(full_list_magazines) now = datetime.now() - momentum end = datetime.now() - startTime print("Finished queries in " + str(now)) print("REACHED THE END in " + str(end)) logger.info("Application execution ended, and it lasted for " + str(end)) if __name__ == '__main__': stamp = int(time.time() * 1000) outputfile = "target_file/old_targets." + str(stamp) + ".zip" zipf = zipfile.ZipFile(outputfile, 'w', zipfile.ZIP_DEFLATED) utils.zipdir('target_file/', zipf) zipf.close() main(sys.argv[1:])
def inline_buttons_handler(bot, update):
    """Handle an inline-keyboard callback: parse the action and file id from
    the callback data, run the mapped processing action on the extracted
    file, and send back a zip of the results (or an error message).

    :param bot: telegram bot instance used for all outgoing messages
    :param update: telegram update carrying the callback query
    :returns: the literal string "OK"
    """
    from app import app, db
    query = update.callback_query
    chat_id = query.message.chat_id
    logger.debug("Got an inline button action: %s" % query.data)
    bot.send_chat_action(chat_id=chat_id, action=telegram.ChatAction.TYPING)
    # Parse params from the callback payload; report and re-raise on failure.
    try:
        params = json.loads(query.data)
        action = params.get("action")
        userfile_id = int(params.get("uf"))
    except Exception as e:
        logger.error(e)
        bot.send_message(
            chat_id=chat_id,
            text="\n".join(
                [
                    "Упс! Что-то пошло не так 😱",
                    "Передайте это администратору, чтобы он все исправил:",
                    "Query data: %s" % query.data,
                    "Exception: %s" % e,
                ]
            ),
        )
        raise
    # Look up the file metadata in the db.
    file_info = get_file_info(bot, userfile_id)
    if action in ACTIONS_MAPPING:
        outfile = os.path.join(
            app.config["PROCESSED_DIR"],
            "%s %s %s.zip"
            % (
                remove_extension(file_info["filename"]),
                file_info["userfile_id"],
                action,
            ),
        )
        bot.send_message(text="Сейчас посмотрю...⏳", chat_id=chat_id)
        try:
            extract_file(bot, chat_id, file_info)
            # statuses maps each processed file path -> success flag.
            statuses = ACTIONS_MAPPING[action](file_info["extract_path"])
            if any(statuses.values()):
                zipdir(file_info["extract_path"], outfile)
                bot.send_message(chat_id=chat_id, text="Готово!🚀")
                bot.send_document(
                    chat_id=chat_id,
                    document=open(outfile, "rb"),
                    filename=os.path.basename(outfile),
                    reply_to_message_id=file_info["message_id"],
                )
                if not all(statuses.values()):
                    # Report the failed files, chunked to stay under
                    # Telegram's 4096-character message limit.
                    message = "⚠️ Следующие файлы не удалось обработать: ⚠️\n"
                    for file, status in statuses.items():
                        if not status:
                            file_path = os.path.relpath(
                                file, file_info["extract_path"]
                            )
                            if len(message) + len(file_path) + 10 < 4096:
                                message += f"\n ❌ {file_path}"
                            else:
                                bot.send_message(chat_id=chat_id, text=message)
                                message = f" ❌ {file_path}"
                    bot.send_message(chat_id=chat_id, text=message)
            else:
                bot.send_message(
                    chat_id=chat_id,
                    text="Не удалось обработать данные. \nПроверьте, что файлы предоставлены в нужном формате.",
                )
        except Exception as e:
            logger.error(e)
            bot.send_message(
                chat_id=chat_id,
                text="\n".join(
                    [
                        "Упс! Что-то пошло не так 😱",
                        "Передайте это администратору, чтобы он все исправил:",
                        "Query data: %s" % query.data,
                        "Exception: %s" % e,
                    ]
                ),
            )
            raise
    else:
        bot.send_message(
            chat_id=chat_id,
            text="Данная команда в процессе реализации и пока не доступна 😞",
        )
    return "OK"
def splitter(originalFile, no_levels=3, zip=False, inichunk=False, demo=False):
    """Split a 2:1 (width:height) map image into tiled 1440x720 JPEG levels.

    For each level L in 1..no_levels the image is cut into a 2^L x 2^L grid
    and every tile is resized to 1440x720 and written into a directory named
    after the input file's stem. A level-0 full-image tile is also written.

    :param originalFile: path of the source image (width must be 2x height)
    :param no_levels: number of zoom levels to generate
    :param zip: when True, zip the output directory and delete it
    :param inichunk: when True, also emit an ini-file segment via utils.inifix
    :param demo: when True, print the demo banner
    """
    if demo:
        print('\nMAPPS Map Spliter **DEMO**, v0.1, 2014.')
    else:
        print('\nMAPPS Map Spliter, v0.1, 2014.')
    sys.stdout.write("\n Importing original image...")
    sys.stdout.flush()
    img = Image(filename=originalFile)
    sys.stdout.write(" complete.\n")
    sys.stdout.flush()
    imgwidth = img.width
    imgheight = img.height
    if imgwidth / imgheight != 2:
        print('\n Ooops!!! The Image Width to Height ratio should be 2!!!')
        return
    # Prepare (or confirm replacement of) the output directory.
    stem = originalFile.split('.')[0]
    if not os.path.exists(stem):
        os.makedirs(stem)
    else:
        print('\n Uh-oh! The directory {} already exists.'.format(stem))
        if utils.yesno(' Do you want to replace it?'):
            shutil.rmtree(stem)
            os.makedirs(stem)
        else:
            return
    for level in range(1, no_levels + 1):
        print('\n Processing Level {}'.format(level))
        split = 2 ** level          # tiles per axis at this level
        div = 1. / split            # fractional size of one tile
        for h in range(split):
            for w in range(split):
                w1 = int(imgwidth * div * w)
                w2 = int(imgwidth * div * (w + 1))
                h1 = int(imgheight * div * h)
                h2 = int(imgheight * div * (h + 1))
                imgtmp = img[w1:w2, h1:h2]
                imgtmp.transform(resize='1440x720!')
                imgtmp.format = 'jpeg'
                hlevel = '{0:03d}'.format(h + 1)
                wlevel = '{0:03d}'.format(w + 1)
                saveas = os.path.join(stem, '{}_{}_{}_{}.jpg'.format(
                    stem, level, hlevel, wlevel))
                print(' Writing: {}'.format(saveas))
                imgtmp.save(filename=saveas)
                # Sanity-check the forced tile size.
                if imgtmp.width != 1440:
                    print('ERROR: image width = {}\n'.format(imgtmp.width))
                if imgtmp.height != 720:
                    print('ERROR: image height = {}\n'.format(imgtmp.height))
    # Level 0: the whole input image as a single tile.
    img.transform(resize='1440x720')
    img.format = 'jpeg'
    img.save(filename=os.path.join(stem, '{}_0_001_001.jpg'.format(stem)))
    # Create ini file segment.
    if inichunk:
        utils.inifix(stem, no_levels)
    # Zip output and remove the working directory.
    if zip:
        print('\n Zipping output to {}.zip'.format(stem))
        with zipfile.ZipFile('{}.zip'.format(stem), 'w') as zipf:
            utils.zipdir('{}/'.format(stem), zipf)
        shutil.rmtree(stem)
    print('\nFinished!\n')
    return