def run(self):
    """Worker loop: pop tasks off ``self.task_q`` and dispatch them until EXIT.

    Each task is a list whose first element names the operation. Directory
    UPLOAD/DOWNLOAD tasks are expanded by re-queueing one task per child, so
    whole trees are transferred without recursion. ``task_done()`` is called
    once per consumed task.
    """
    # Bail out early when the user lacks system-management rights.
    if not self.user_authorize('system', 'manage'):
        return
    while True:
        task = self.task_q.get()
        logging.info(task)
        if task[0] == 'UPLOAD':
            p = PosixPath(task[1])
            if p.is_dir():
                target_dir = os.path.join(task[2], p.name)
                if self.do_mkdir(p.name, task[2]):
                    # Re-queue each child; sorted() gives a deterministic
                    # order (replaces a manual list-copy + .sort()).
                    for f in sorted(p.iterdir()):
                        self.task_q.put(['UPLOAD', str(f), target_dir])
            else:
                self.do_upload(str(p), task[2])
        elif task[0] == 'DOWNLOAD':
            R = self._stat(task[1])
            if R["fileType"] == 1:  # remote entry is a directory
                # An empty remote name denotes the remote root; store it
                # locally as "ROOT" (replaces the fragile `and/or` ternary hack).
                p = PosixPath(task[2]).joinpath("ROOT" if R["name"] == "" else R["name"])
                p.mkdir()
                target_dir = os.path.join(task[2], p.name)
                for r in self._stat2(task[1]):
                    self.task_q.put(['DOWNLOAD', r["path"], target_dir])
            else:
                self.do_download(task[1], task[2])
        elif task[0] == 'MKDIR':
            self.do_mkdir(os.path.basename(task[1]), os.path.dirname(task[1]))
        elif task[0] == 'STAT':
            self.do_stat(task[1])
        elif task[0] == 'LS':
            self.do_ls(task[1])
        elif task[0] == 'LS-R':
            self.do_ls_r(task[1])
        elif task[0] == 'RENAME':
            self.do_rename(task[1], task[2])
        elif task[0] == 'REMOVE':
            self.do_delete(task[1])
        elif task[0] == 'EXIT':
            self.close()
            logging.info("Exit.")
            # Mark the EXIT task done before leaving the loop; the trailing
            # task_done() below is intentionally skipped on this path.
            self.task_q.task_done()
            return
        else:
            msg = "Invalid task: %s" % str(task)
            logging.error(msg)
            TestClient.ERRORS.append(msg)
        self.task_q.task_done()
def infer_settings(opt_root, opt_pattern="**/optimizer.py"):
    """Discover optimizer sources under *opt_root* and build the settings map.

    Globs *opt_pattern* below the (absolute, existing) root directory and
    returns a dict mapping each sanitized parent-directory name to
    ``[relative_source_path, {}]``. AssertionError is raised on an invalid
    root or on any name collision after sanitization.
    """
    opt_root = PosixPath(opt_root)
    assert opt_root.is_dir(), "Opt root directory doesn't exist: %s" % opt_root
    assert opt_root.is_absolute(), "Only absolute path should have even gotten this far."

    # Sorted glob keeps the discovery order reproducible across runs.
    source_files = [path.relative_to(opt_root) for path in sorted(opt_root.glob(opt_pattern))]

    settings = {}
    for rel_path in source_files:
        settings[_cleanup(str(rel_path.parent))] = [str(rel_path), {}]

    assert all(joinable(kk) for kk in settings), "Something went wrong in name sanitization."
    assert len(settings) == len(source_files), "Name collision after sanitization of %s" % repr(source_files)
    assert len(set(CONFIG.keys()) & set(settings.keys())) == 0, "Name collision with builtin optimizers."
    return settings
def run_gc(db, base_dir: str, dry_run: bool):
    """Garbage-collect repos whose pool directory has disappeared.

    Scans every repo the database has seen; any repo whose directory is
    missing under ``<base_dir>/pool`` is purged from the database and from
    the dists tree, unless *dry_run* is set (then the database is untouched).

    :param db: database handle passed through to the purge helpers.
    :param base_dir: root of the mirror tree containing ``pool``.
    :param dry_run: when True, only log what would be removed.
    """
    repos = module_config.list_seen_repo(db)
    # pathlib instead of string concatenation for path building.
    pool_dir = PosixPath(base_dir) / 'pool'
    to_delete = []
    # Scan if any of the repos are missing
    for repo in repos:
        if not (pool_dir / repo).is_dir():
            to_delete.append(repo)
            logger_gc.info("Branch %s to be removed from the database", repo)
    if not dry_run:
        purge_from_db(db, to_delete)
        purge_from_dists(base_dir, to_delete)
    else:
        logger_gc.warning("DRY RUN - database is unmodified")
def dump_databases(self, temporary_dir):
    """Dump the selected PostgreSQL databases as .sql files.

    Files are written under
    ``<temporary_dir>/backups/databases/PostgreSQL/<db-name>.sql``.

    :param temporary_dir: TemporaryDirectory-like object (read via ``.name``).
    """
    # retrieve needed information to connect to the database and
    # dump their content
    postgresql_user = self.config['postgresql-user']
    postgresql_password = self.config['postgresql-password']
    postgresql_host = self.config['postgresql-host']
    postgresql_port = self.config['postgresql-port']
    selected_dbs = self.config['selected-dbs']

    # compute dirs; the destination tree must not already exist
    temporary_dir = PosixPath(temporary_dir.name)
    assert temporary_dir.exists() and temporary_dir.is_dir()
    databases_directory = temporary_dir / 'backups' / 'databases' / 'PostgreSQL'
    databases_directory.mkdir(parents=True, exist_ok=False)
    self.logger.info(databases_directory)
    self.logger.info(str(selected_dbs))

    # 'all' means: ask the server for the database list; otherwise use the
    # explicitly configured names as-is
    if selected_dbs == 'all':
        try:
            connection = psycopg2.connect(dbname='postgres',
                                          user=postgresql_user,
                                          password=postgresql_password,
                                          host=postgresql_host,
                                          port=str(postgresql_port))
        except psycopg2.Error as error:
            # BUG FIX: was a bare `except:` which also swallowed SystemExit
            # and KeyboardInterrupt; catch only psycopg2 errors and log why.
            print("Unable to connect to the PostgreSQL server for some reasons.")
            self.logger.error(str(error))
            exit(1)
        databases = get_databases(connection)
    else:
        databases = selected_dbs

    for database in databases:
        database_filename = databases_directory / (database + '.sql')
        command = compute_dump_command(database, postgresql_user, postgresql_password, postgresql_host, postgresql_port)
        dump_database(command, database_filename, postgresql_password)
def local_dir(self, value):
    """Setter for the local working directory.

    Accepts a Path instance or anything convertible via str(); a leading
    '~' is expanded. When the client is connected, the directory is
    validated, chdir()'d into and recorded as ``base_dir``; the private
    ``_local_dir`` attribute is always updated.

    :raises ValueError: if *value* is None.
    :raises SFTPLocalPathError: if connected and *value* is not an
        existing directory.
    """
    if value is None:
        raise ValueError("The local directory may not be set to None.")
    ldir = None
    if isinstance(value, Path):
        ldir = value
    else:
        ldir = PosixPath(str(value))
    # Expand a user-relative path ('~...') while keeping the same Path
    # subclass as the original object.
    if str(ldir).startswith('~'):
        ldir = ldir.__class__(os.path.expanduser(str(ldir)))
    if self.connected:
        if not ldir.is_dir():
            msg = "Directory %r does not exists or is not a directory." % (ldir)
            raise SFTPLocalPathError(msg)
        # Mirror the change locally: move into the directory and remember
        # it as the session's base directory.
        os.chdir(str(ldir))
        self.base_dir = str(ldir)
    self._local_dir = ldir
def _is_image(file_path: PosixPath) -> bool:
    """Return True when *file_path* points at a convertible image file.

    Directories are rejected outright; otherwise the lower-cased file
    suffix is checked against CONVERT_TARGET_FILE_TYPE.
    """
    if file_path.is_dir():
        return False
    return file_path.suffix.lower() in CONVERT_TARGET_FILE_TYPE
def set_output_path(self, path: PosixPath) -> None:
    """Record *path* as the output directory.

    :raises RuntimeError: when *path* is missing or is not a directory.
    """
    # De Morgan form of the original combined check: reject unless the
    # path both exists and is a directory.
    if not path.exists() or not path.is_dir():
        raise RuntimeError(
            f"Output path {path} does not exist or is not a directory.")
    self._output_path = path
def process_download_file_request(self, request):
    """Process a DOWNLOAD_FILE request and reply on ``self.socket``.

    The request must be a 4-tuple ``(_, name, directory, chunk_size)``.
    Possible responses, checked in this order:

    - BAD_REQUEST if the request tuple is malformed.
    - REFUSED with INCORRECT_CHUNK_SIZE if the chunk size is out of bounds.
    - REFUSED with INVALID_FILE_NAME if the file name isn't valid.
    - REFUSED with NOT_A_DIRECTORY if the file directory doesn't exist.
    - REFUSED with FILE_NOT_FOUND if the file doesn't exist.
    - REFUSED with NOT_A_FILE if the path exists but isn't a regular file.
    - ACCEPTED with TRANSFER_ACCEPTED otherwise (the transfer is initiated).
    - REFUSED with UNKNOWN_ERROR if initiating the transfer raises.
    """
    # extract information from request and trigger bad request error
    # if something goes wrong
    try:
        assert isinstance(request, tuple)
        assert len(request) == 4
        _, name, directory, chunk_size = request
        directory = PosixPath(directory)
    except Exception:
        response = make_bad_request_error()
        self.socket.send_pyobj(response)
        return

    # check if chunk size is correct, send refused response and
    # fail early if this is the case
    if chunk_size < MINIMUM_CHUNK_SIZE or chunk_size > MAXIMUM_CHUNK_SIZE:
        response = make_incorrect_chunk_size_response()
        self.socket.send_pyobj(response)
        return

    # return INVALID_FILE_NAME refused response if the download file
    # name isn't valid
    if not is_file_name_valid(name):
        response = make_invalid_file_name_response()
        self.socket.send_pyobj(response)
        return

    # normalize the directory (later it can be combined with the
    # root directory)
    directory = normalize_directory(directory)

    # combine the source directory with the root directory
    directory = self.root_directory / directory

    # send NOT_A_DIRECTORY if the resolved directory is missing or not
    # actually a directory
    if not directory.exists() or not directory.is_dir():
        response = make_not_a_directory_response()
        self.socket.send_pyobj(response)
        return

    # combine the source directory with the name to get the full
    # path of the download file
    file_path = directory / name

    # send FILE_NOT_FOUND
    if not file_path.exists():
        response = make_file_not_found_response()
        self.socket.send_pyobj(response)
        return

    # send NOT_A_FILE
    if not file_path.is_file():
        response = make_not_a_file_response()
        self.socket.send_pyobj(response)
        return

    # compute file size by seeking to the end of the file and reading
    # the offset back
    source_file = file_path.open('rb')
    source_file.seek(0, os.SEEK_END)
    file_size = source_file.tell()
    source_file.seek(0, os.SEEK_SET)
    source_file.close()

    # download file request is accepted, initiate the download
    # process
    try:
        self.initiate_download_file(file_path, file_size, chunk_size)
    except Exception as error:
        response = make_unknown_error_response(str(error))
    else:
        response = make_transfer_accepted_response(file_size)

    self.socket.send_pyobj(response)
def absoluteFileLocation(output_dir, base):
    """Return the absolute path of ``<base>.spec.ts`` inside *output_dir*.

    Creates the output directory (including missing parents) if needed.

    :param output_dir: directory that receives generated spec files.
    :param base: base file name without the ``.spec.ts`` suffix.
    """
    out_dir = PosixPath(output_dir)  # todo: configurable
    # parents/exist_ok replaces the racy is_dir()-then-mkdir() check and
    # also works when intermediate directories are missing.
    out_dir.mkdir(parents=True, exist_ok=True)
    return (out_dir / PosixPath(base + ".spec.ts")).absolute()
def download_directory(self, source, destination, name=None, chunk_size=512, process_chunk=None, timeout=None): """ Download a directory from the remote directory. This method downloads an entire directory from a given directory in the remote directory. The **source** parameter refers to the remote directory to be transfered from the remote directory and must to be a :term:`path-like object`. It must be an absolute path or it will raise the ValueError exception. If the source directory can't be found or is not a directory, the SourceNotFound exception is raised. The **destination** parameter refers to **an existing** local directory in which the directory must be transfered to. It must be a :term:`path-like object` and if it's a relative path, it's treated like relative to the current working directory. If the destination directory can't be found or is not a directory, the DestinationNotFound exception is raised. The name parameter can be used to rename the source directory while downloading it (the content is guaranteed to be the same). It must be a string of a :term:`valid file name` and must not conflict with an existing directory (or file) in the destination directory. By default, it reads the name from the source to leave it unchanged. If the name isn't valid, a :py:exc:`InvalidFileName` is raised and if the file is conflicting, a :py:exc:`FileExistsError` exception is raised. Additionally, you can adjust the chunk size value which defines how fragmented files have to be received from the server and/or pass a callback that process each fragment **before** it's written to the local file. Usually, the chunk value is between 512 and 8192. The callback is called with various parameters and in a specific order; the chunk data, the remaining bytes, the file size and the file name. The chunk data is a bytes string of the actual data just received from the server. 
The remaining bytes is an integer indicating the number of bytes left to be received (and this includes the current chunk of data). The file size is a fixed integer telling how large the file is, and the file name is the file name currently being processed. For instance, it can be used to display a progress indicator. Here is an example. :: def display_progress(chunk_data, remaining_bytes, file_size, file_name): chunk_size = 512 progress = (file_size - (remaining_bytes - len(chunk_data))) / file_size * 100 sys.stdout.write("\r{0:0.2f}% | {1}".format(progress, file_name)) sys.stdout.flush() if remaining_bytes <= chunk_size: sys.stdout.write('\n') return True If the operation takes longer than the given timeout, a :py:exc:`TimeoutError` exception is raised. :param source: Foobar. :param destination: Foobar. :param name: Foobar. :param chunk_size: Foobar. :param process_chunk: Foobar. :param timeout: Foobar. :raises ValueError: If the source directory isn't an absolute path. :raises SourceNotFound: If the source file doesn't exist or isn't a file. :raises DestinationNotFound: If the destination directory doesn't exist or isn't a directory. :raises FileExistsError: If the source file conflicts with an existing file or directory. :raises InvalidFileName: If the source file doesn't have a valid name. :raises TimeoutError: If it takes more than the timeout value to receive a response. 
""" # ensure we work with posix paths source = PurePosixPath(source) destination = PosixPath(destination) # normalize the destination to work with an absolute path if not destination.is_absolute(): destination = PosixPath(os.getcwd(), destination) # compute the name from the source if not specified (file name # unchanged) if not name: name = source.name # raise ValueError exception if source directory is not an # absolute path if not source.is_absolute(): raise ValueError("Source must be an absolute path") # raise SourceNotFound exception if the source directory doesn't # exist or is not a directory if str(source) != source.root: try: files = self.list_files(source.parent, timeout) except Exception: raise NotImplementedError # catch and treat relevant exceptions if source.name not in files or files[source.name][1] == True: raise SourceNotFound("Source directory could not be found") # check if the destination directory exists and raises # DestinationNotFound exception if it doesn't exist or is not # a directory (a root is always a valid destination) if not destination.exists() or not destination.is_dir(): raise DestinationNotFound( "Destination directory could not be found") # check if the file name doesn't conflict with an existing file # (or directory) in the destination directory if name in os.listdir(destination): raise FileExistsError # the following code is a workaround! it should let the server # refuse the chunk size instead, but if we do that, the # first directory is created first and left undeleted after the # first file is denied from being downloaded if chunk_size == 0 or chunk_size > 8192: raise ValueError("Chunk size value is invalid") # foobars self._download_directory(source, destination, name, chunk_size, process_chunk, timeout)
def _validate_pants_repo(self, pants_repo: pathlib.PosixPath) -> bool:
    """Validates a given or stored path is a valid pants repo.

    A valid repo is an existing directory containing a ``pants`` file.

    :param pants_repo: candidate repository path (may be falsy/None).
    :returns: always a real bool — the original `and` chain could leak
        a falsy non-bool (e.g. None) despite the ``-> bool`` annotation.
    """
    return bool(
        pants_repo
        and pants_repo.is_dir()
        and pants_repo.joinpath('pants').is_file())
class Expresso2office():
    """Convert vCard (.vcf) contact files into an Office-importable CSV."""

    def __init__(self, dir_csv="./output", dir_vcf="."):
        logging.debug(f"Passando aqui")
        # Destination (.csv) and source (.vcf) directories, with ~ expansion.
        self.dir_csv = PosixPath(dir_csv).expanduser()
        self.dir_vcf = PosixPath(dir_vcf).expanduser()
        # Date/time formats used when rendering CSV fields.
        self.format_date = "%m/%d/%Y"
        self.format_time = "%H:%M"
        self.format_date_time = f"{self.format_date} {self.format_time}"

    def checa_diretorios(self):
        """Check that the source dir exists; create the output dir if missing."""
        logging.debug(f"Verificando diretorio de origem existe: {self.dir_vcf.resolve()}")
        if self.dir_vcf.is_dir():
            logging.info(f"Tratando arquivos vcf do diretorio: {self.dir_vcf.resolve()}")
        else:
            logging.info(f"Diretorio com arquivos vcf nao existe: {self.dir_vcf.resolve()}")
            exit(1)
        if not self.dir_csv.is_dir():
            logging.info(f"Diretorio para os arquivo csv nao existe, o mesmo sera criado: {self.dir_csv.resolve()}")
            try:
                self.dir_csv.mkdir()
            except OSError:
                logging.info(f"A criacao do diretorio {self.dir_csv.resolve()} falhou")
                exit(1)
            else:
                # BUG FIX: logging.log() requires a level as its first
                # argument and raised TypeError here; use logging.info().
                logging.info(f"Criou com sucesso o diretorio: {self.dir_csv.resolve()}")

    def convert2csv(self, file_vcf):
        """Parse one .vcf file and append its contacts to the output CSV."""
        try:
            data = Path(file_vcf).read_text()
            for cal in vobject.readComponents(data):
                file_csv = PurePath(self.dir_csv, f"address-{self.dir_vcf.name}.csv")
                logging.info("-" * 40)
                logging.info(f"Convertendo o arquivo VCF: {file_vcf}")
                logging.info(f"Arquivo cvs gerado: {file_csv}")
                with Path(file_csv).open(mode='a') as csv_out:
                    csv_writer = csv.writer(csv_out, delimiter=',', quotechar='"',
                                            quoting=csv.QUOTE_MINIMAL)
                    # Write the header row only once, while the file is empty.
                    if os.path.isfile(file_csv) and os.path.getsize(file_csv) == 0:
                        csv_writer.writerow(["firstName","middleName","lastName","company","jobTitle","workPhone","workPhone2","companyPhone","homePhone","homePhone2","mobilePhone","email","email2"])
                    for line in cal.lines():
                        logging.debug(f"{file_vcf} name: {line.name}")
                        logging.debug(f"{file_vcf} behavior: {line.behavior}")
                        logging.debug(f"{file_vcf} encoded: {line.encoded}")
                        logging.debug(f"{file_vcf} group: {line.group}")
                        logging.debug(f"{file_vcf} params: {line.params}")
                        logging.debug(f"{file_vcf} serialize: {line.serialize()}")
                        logging.debug(f"{file_vcf} singletonparams: {line.singletonparams}")
                        logging.debug(f"{file_vcf} value: {line.value}")
                        logging.debug(f"{file_vcf} varlueRepr: {line.valueRepr()}")
                        # Each branch copies the property value, falling back
                        # to "" when the attribute is missing or malformed.
                        if line.name == "VERSION":
                            try:
                                version = line.value
                            except:
                                version = ""
                        if line.name == "PRODID":
                            try:
                                prodid = line.value
                            except:
                                prodid = ""
                        if line.name == "FN":
                            try:
                                full_name = line.value
                            except:
                                full_name = ""
                        if line.name == "N":
                            # The N property is "last;first;middle;..." —
                            # pick the pieces out of the serialized form.
                            try:
                                first_name = line.serialize().split(":")[1].split(";")[1].lstrip()
                            except:
                                first_name = ""
                            try:
                                middle_name = line.serialize().split(":")[1].split(";")[2].lstrip()
                            except:
                                middle_name = ""
                            try:
                                last_name = line.serialize().split(":")[1].split(";")[0].lstrip()
                            except:
                                last_name = ""
                        if line.name == "UID":
                            try:
                                uid = line.value
                            except:
                                uid = ""
                        if line.name == "ORG":
                            try:
                                company = line.value[0]
                            except:
                                company = ""
                        if line.name == "TITLE":
                            try:
                                job_title = line.value
                            except:
                                job_title = ""
                        if line.name == "TEL":
                            if line.params['TYPE'] == ['WORK']:
                                try:
                                    work_phone = line.value
                                except:
                                    work_phone = ""
                            if line.params['TYPE'] == ['HOME']:
                                try:
                                    home_phone = line.value
                                except:
                                    home_phone = ""
                            if line.params['TYPE'] == ['CELL', 'WORK']:
                                try:
                                    company_phone = line.value
                                except:
                                    company_phone = ""
                            if line.params['TYPE'] == ['CELL', 'HOME']:
                                try:
                                    mobile_phone = line.value
                                except:
                                    mobile_phone = ""
                            if line.params['TYPE'] == ['FAX', 'WORK']:
                                try:
                                    work_phone2 = line.value
                                except:
                                    work_phone2 = ""
                            if line.params['TYPE'] == ['FAX', 'HOME']:
                                try:
                                    home_phone2 = line.value
                                except:
                                    home_phone2 = ""
                        if line.name == "EMAIL":
                            if line.params['TYPE'] == ["WORK"]:
                                try:
                                    email = line.value
                                except:
                                    email = ""
                            if line.params['TYPE'] == ["HOME"]:
                                try:
                                    email2 = line.value
                                except:
                                    email2 = ""
                    # NOTE(review): fields never seen in this vCard stay
                    # unbound and would raise NameError here (caught by the
                    # outer handler) — TODO confirm inputs always carry
                    # N/ORG/TITLE/TEL/EMAIL lines.
                    csv_writer.writerow([first_name, middle_name, last_name, company, job_title, work_phone, work_phone2, company_phone, home_phone, home_phone2, mobile_phone, email, email2])
        except Exception as e:
            logging.fatal(f"Excessao nao mapeada: {e}")

    def realizar_parse(self):
        """Convert every .vcf file found in the source directory."""
        os.chdir(self.dir_vcf)
        for file_vcf in glob.glob("*.vcf"):
            self.convert2csv(file_vcf)
#!/usr/bin/env python3 from matplotlib import pyplot from matplotlib.colors import Normalize from sys import argv import numpy as np from pathlib import PosixPath from multiprocessing import Pool import csv if len(argv) != 2: print("Usage: {} <output dir>".format(argv[0])) exit() output_dir = PosixPath(argv[1]) assert (output_dir.is_dir()) def plot_frame(path): with open(path) as frame: array = np.asarray([[abs(float(cell)) for cell in row] for row in csv.reader(frame)], dtype=np.float32) pyplot.pcolormesh(array) ax = pyplot.gca() ax.set_ylim(ax.get_ylim()[::-1]) ax.xaxis.tick_top() path = path.with_suffix(".png") pyplot.savefig(path, format="png") with Pool() as pool:
# -*- coding: utf-8 -*- """This gist uses tools to add 100 German cloze image cards.""" from contextlib import closing from pathlib import PosixPath import os from typing import List import anki import tools.anki from tools.process import DoubleAdjectivePic, SingleAdjectivePic ANKI_DB = PosixPath('/home/grzesiek/Documents/Anki/grzesiek/collection.anki2') assert (ANKI_DB.is_file()) IMAGE_DIR = PosixPath('images/').absolute() assert (IMAGE_DIR.is_dir()) SHARED_TAGS = {'200-wichtigsten-deutschen-adjektive', 'Adjektiv'} DOUBLE_CLOZE_TEMPLATE = """ <div style="display:flex;justify-content:center;"> <div style="text-align:center;"> <img src="{left_pic}" style="max-height:200px"/> <div>{{{{c1::{left_word}}}}}</div> </div> <div style="text-align:center;"> <img src="{right_pic}" style="max-height:200px"/> <div>{right_cloze}</div> </div> </div> """
#dir_csv = "/home/lgro/git/ics2csv/temp/calendars/adm.office/result" #dir_ics = "/home/lgro/git/ics2csv/temp/calendars/adm.office/" format_date = "%m/%d/%Y" format_time = "%H:%M" format_date_time = f"{format_date} {format_time}" if len(sys.argv) == 3: dir_ics = sys.argv[1] dir_csv = sys.argv[2] else: print(f"Eh necessario dois parametros de entrada: dir_origem, dir_destino") dir_csv = PosixPath(dir_csv).expanduser() dir_ics = PosixPath(dir_ics).expanduser() if dir_ics.is_dir(): print(f"Tratando arquivos ics do diretorio: {dir_ics}") else: print("Diretorio com arquivos ics nao existe") exit(1) if not dir_csv.is_dir(): print( f"Diretorio para os arquivo csv nao existe, o mesmo sera criado: {dir_csv}" ) try: dir_csv.mkdir() except OSError: print(f"A criacao do diretorio {dir_csv} falhou") exit(1) else: