def make_download(self, evt):
    """Attempt to perform the requested download of one or more files.

    Reads the URL pattern, target directory, file count, zero-padding
    width and starting index from the GUI widgets, then downloads each
    numbered file and logs its path into the Text widget.

    Args:
        evt (event): mouse event - button 1.
    """
    # Number of files requested by the user; clear previous log output.
    self.file.quantity = int(self.spiner_files.get())
    self.text_files.delete('1.0', 'end')
    if self.file.quantity < 1:
        messagebox.showwarning(
            title='Problem with size',
            message='File\'s number invalid, it must be great than 0')
        return
    if self.file.quantity < int(self.spiner_from.get()):
        messagebox.showwarning(
            title='Problem with first file',
            message='File\'s initial must be smaller than quantity')
        return
    self.file.link = self.entry_link.get()
    self.file.way = self.entry_way.get()
    # Padding width and first index taken from the spinboxes.
    digits = int(self.spiner_digits.get())
    from_t = int(self.spiner_from.get())
    # When starting from index > 0, extend the range end so the requested
    # count of files is still downloaded (range end is exclusive).
    self.file.quantity += 1 if from_t > 0 else 0
    # Working File object reused for each download.
    arquivo = File()
    arquivo.quantity = self.file.quantity
    self.file.way = Tools.join_dirs(dir_1=self.file.way,
                                    dir_2=Tools.get_name(self.file.link))
    # Download each file and insert its destination path into the Text.
    for i in range(from_t, self.file.quantity):
        try:
            # Zero-pad the index to `digits` characters (equivalent to zfill).
            zeros = digits - str(i).__len__()
            arquivo.link = self.file.link.format(zeros * '0' + str(i))
            arquivo.way = self.file.way.format(zeros * '0' + str(i))
            # NOTE(review): Text indices start at line 1; when from_t is 0
            # the index f'0.0' is used — confirm intended insert position.
            self.text_files.insert(f'{i}.0', arquivo.way + '\n')
            downloaded = Sing.get_service().validate_file(file=arquivo)
            if not downloaded:
                messagebox.showerror(title='It found an error',
                                     message=SMsg.message().msg)
                return
        except Exception as ex:
            messagebox.showerror(title='Excepction Found',
                                 message=str(ex) + '\n' + arquivo.way)
            return
    else:
        # for/else: runs only when the loop completes without an early return.
        messagebox.showinfo(title='Success',
                            message='All Files Downloaded!')
def sample_load(self, data):
    """Update several sample properties at the same time, then save.

    Only the keys present in ``data`` are applied. Note that dynamic
    properties like project and template cannot be updated with this
    method; however, you can update project_id and template_id.

    Args:
        data (dict): mapping of property name to new value.

    Returns:
        Sample: self, to allow call chaining.

    Raises:
        RegovarException: when the input data cannot be applied.
    """
    # `Analysis` was imported but never used; only AnalysisSample is needed.
    from core.model.analysis import AnalysisSample
    from core.model.subject import Subject
    from core.model.file import File
    try:
        # update simple properties (membership test directly on the dict,
        # not on data.keys(), per Python idiom)
        if "name" in data:
            self.name = check_string(data['name'])
        if "comment" in data:
            self.comment = check_string(data['comment'])
        if "is_mosaic" in data:
            self.is_mosaic = check_bool(data['is_mosaic'])
        if "default_dbuid" in data:
            self.default_dbuid = check_string(data['default_dbuid'])
        if "filter_description" in data:
            self.filter_description = data['filter_description']
        if "subject_id" in data:
            self.subject_id = check_int(data['subject_id'])
        if "file_id" in data:
            self.file_id = check_int(data['file_id'])
        if "analyses_ids" in data:
            self.analyses_ids = data['analyses_ids']
        if "update_date" in data:
            self.update_date = check_date(data['update_date'])
        if "stats" in data:
            self.stats = data['stats']
        # save modifications
        self.save()
        # reload dependencies when depth loading is enabled
        if self.loading_depth > 0:
            self.subject = Subject.from_id(self.subject_id, self.loading_depth - 1)
            self.file = File.from_id(self.file_id, self.loading_depth - 1)
            self.analyses = AnalysisSample.get_analyses(self.id, self.loading_depth - 1)
    except Exception as err:
        raise RegovarException('Invalid input data to load.', "", err)
    return self
def pipeline_load(self, data):
    """Update several pipeline properties at once from a dict, then save.

    Only the keys present in ``data`` are applied. Dynamic properties
    (jobs, image_file) are reloaded when loading_depth > 0.

    Args:
        data (dict): mapping of property name to new value.

    Returns:
        Pipeline: self, to allow call chaining.

    Raises:
        RegovarException: if a required key is missing.
    """
    from core.model.file import File
    try:
        # Apply provided fields (membership test directly on the dict,
        # not on data.keys(), per Python idiom)
        if "name" in data:
            self.name = check_string(data['name'])
        if "type" in data:
            self.type = check_string(data["type"])
        if "status" in data:
            self.status = check_string(data["status"])
        if "description" in data:
            self.description = check_string(data["description"])
        if "developpers" in data:
            self.developpers = data["developpers"]
        if "installation_date" in data:
            self.installation_date = check_date(data["installation_date"])
        if "version" in data:
            self.version = check_string(data['version'])
        if "version_api" in data:
            self.version_api = check_string(data["version_api"])
        if "image_file_id" in data:
            self.image_file_id = check_int(data["image_file_id"])
        if "manifest" in data:
            self.manifest = data['manifest']
        if "documents" in data:
            self.documents = data['documents']
        if "path" in data:
            self.path = check_string(data['path'])
        # check to reload dynamics properties
        if self.loading_depth > 0:
            self.jobs = self.get_jobs(self.loading_depth - 1)
            self.image_file = File.from_id(self.image_file_id, self.loading_depth - 1)
        self.save()
    except KeyError as e:
        raise RegovarException('Invalid input pipeline: missing ' + e.args[0])
    return self
def subject_get_files(self, loading_depth=0):
    """Return the list of files linked to the subject.

    Args:
        loading_depth (int): depth used when loading each file's
            own dependencies.

    Returns:
        list: File models for every file attached to this subject.
    """
    from core.model.file import File
    # Fetch the subject<->file association rows, then collect their ids.
    links = Session().query(SubjectFile).filter_by(subject_id=self.id).all()
    file_ids = []
    for link in links:
        file_ids.append(link.file_id)
    return File.from_ids(file_ids, loading_depth)
def test_main_workflow_image_upload(self): """ CRUD Pipeline from Image UPLOAD """ # Upload init p, f = core.pipelines.install_init_image_upload( "test_image_success.tar.gz", 10, {"type": "FakeManager4Test"}) self.assertEqual(f.name, "test_image_success.tar.gz") self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, 0) self.assertEqual(f.status, "uploading") self.assertEqual(f.path.startswith(TEMP_DIR), True) self.assertEqual(p.name, f.name) self.assertEqual(p.status, "initializing") self.assertEqual(p.type, "FakeManager4Test") self.assertEqual(p.image_file_id, f.id) old_path = f.path # Upload chunk f = core.files.upload_chunk(f.id, 0, 5, b'chunk') self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, 5) self.assertEqual(f.status, "uploading") self.assertEqual(p.status, "initializing") self.assertEqual( core.container_managers["FakeManager4Test"].is_installed, False) # Upload finish -> installation shall start automatically as the type have been set f = core.files.upload_chunk(f.id, 5, 5, b'chunk') self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, f.size) self.assertEqual(f.status, "uploaded") self.assertEqual(f.path.startswith(FILES_DIR), True) self.assertEqual(os.path.isfile(old_path), False) self.assertEqual(os.path.isfile(f.path), True) self.assertEqual(os.path.getsize(f.path), f.size) time.sleep(0.1) # Wait that threads called for the install ends # Check that install_pipeline method have been successfully called p = Pipeline.from_id(p.id) self.assertEqual( core.container_managers["FakeManager4Test"].is_installed, True) self.assertEqual(p.status, "ready") # Delete pipeline pid = p.id iid = p.image_file_id path = f.path r = core.pipelines.delete(pid) self.assertEqual(isinstance(r, dict), True) self.assertEqual(Pipeline.from_id(pid), None) self.assertEqual(File.from_id(iid), None) self.assertEqual(os.path.isfile(path), False)
def test_main_workflow_image_upload(self): """ CRUD Pipeline from Image UPLOAD """ # Upload init p, f = core.pipelines.install_init_image_upload("test_image_success.tar.gz", 10, {"type" : "FakeManager4Test"}) self.assertEqual(f.name, "test_image_success.tar.gz") self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, 0) self.assertEqual(f.status, "uploading") self.assertEqual(f.path.startswith(TEMP_DIR), True) self.assertEqual(p.name, f.name) self.assertEqual(p.status, "initializing") self.assertEqual(p.type, "FakeManager4Test") self.assertEqual(p.image_file_id, f.id) old_path = f.path # Upload chunk f = core.files.upload_chunk(f.id, 0, 5, b'chunk') self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, 5) self.assertEqual(f.status, "uploading") self.assertEqual(p.status, "initializing") self.assertEqual(core.container_managers["FakeManager4Test"].is_installed, False) # Upload finish -> installation shall start automatically as the type have been set f = core.files.upload_chunk(f.id, 5, 5, b'chunk') self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, f.size) self.assertEqual(f.status, "uploaded") self.assertEqual(f.path.startswith(FILES_DIR), True) self.assertEqual(os.path.isfile(old_path), False) self.assertEqual(os.path.isfile(f.path), True) self.assertEqual(os.path.getsize(f.path), f.size) time.sleep(0.1) # Wait that threads called for the install ends # Check that install_pipeline method have been successfully called p = Pipeline.from_id(p.id) self.assertEqual(core.container_managers["FakeManager4Test"].is_installed, True) self.assertEqual(p.status, "ready") # Delete pipeline pid = p.id iid = p.image_file_id path = f.path r = core.pipelines.delete(pid) self.assertEqual(isinstance(r, dict), True) self.assertEqual(Pipeline.from_id(pid), None) self.assertEqual(File.from_id(iid), None) self.assertEqual(os.path.isfile(path), False)
def pipeline_load_depth(self, loading_depth):
    """Load the pipeline's dynamic dependencies down to the given depth.

    Loads the image file and the jobs of this pipeline, each with a
    depth budget of ``loading_depth - 1``. Does nothing when
    loading_depth is 0.

    Args:
        loading_depth (int): remaining dependency-loading depth.

    Raises:
        RegovarException: when dependencies cannot be loaded.
    """
    from core.model.job import Job
    from core.model.file import File
    if loading_depth > 0:
        try:
            self.image_file = None
            # Fix: use the loading_depth parameter consistently (the jobs
            # below already did); self.loading_depth may hold a stale value.
            self.image_file = File.from_id(self.image_file_id, loading_depth - 1)
            self.jobs = []
            if len(self.jobs_ids) > 0:
                self.jobs = session().query(Job).filter(
                    Job.id.in_(self.jobs_ids)).all()
                for j in self.jobs:
                    j.init(loading_depth - 1)
        except Exception as err:
            # Fix: this loads a Pipeline, not a File — message aligned with
            # the sibling pipeline_init error text.
            raise RegovarException(
                "Pipeline data corrupted (id={}).".format(self.id), "", err)
def pipeline_init(self, loading_depth=0, force_refresh=False):
    """ Init properties of a Pipeline :
        - id                : int       : The unique id of the pipeline in the database
        - name              : str       : The name of the analysis
        - description       : str       : An optional description
        - type              : enum      : Enum that help the container engine to know the type of the pipeline: 'lxd', 'docker', ...
        - status            : enum      : The status of the pipeline : 'initializing', 'installing', 'ready', 'error'
        - starred           : bool      : Flag to know if the pipeline is starred or not
        - developpers       : [str]     : List of developpers of the pipeline
        - installation_date : date      : When the pipe have been installed on the server
        - version           : str       : The version of the pipeline
        - version_api       : str       : The version of the api of regovar used by the pipeline
        - image_file_id     : int       : Id of the file that was used to import/install the pipeline
        - jobs_ids          : [int]     : List of the job created with this pipeline
        - path              : str       : Path to the pipeline on the server (internal usage only)
        - manifest          : json      : The manifest of the pipeline with all its informations
        - documents         : json      : The dic of the related documents <key>: <path_to_the_doc>.
                                          Keys are: manifest, form, icon, help, home, license, readme
        If loading_depth is > 0, Following properties fill be loaded : (Max depth level is 2)
        - image_file        : File      : The file of the pipeline image (if exists)
        - jobs              : [Jobs]    : The list of jobs done with this pipeline
    """
    from core.model.file import File
    # With depth loading, sqlalchemy may return several times the same
    # object; take care to not erase the good depth level.
    # Avoid infinite recursion loop: skip re-init when this object is
    # already loaded at an equal or greater depth (unless forced).
    if hasattr(self, "loading_depth") and not force_refresh and self.loading_depth >= loading_depth:
        return
    else:
        # Depth is capped at 2 levels of dependency loading.
        self.loading_depth = min(2, loading_depth)
    try:
        self.jobs_ids = []
        self.jobs = []
        self.image_file = None
        self.jobs_ids = self.get_jobs_ids()
        if self.loading_depth > 0:
            # Dependencies are loaded with one less depth level.
            self.jobs = self.get_jobs(self.loading_depth - 1)
            self.image_file = File.from_id(self.image_file_id, self.loading_depth - 1)
    except Exception as ex:
        raise RegovarException(
            "Pipeline data corrupted (id={}).".format(self.id), "", ex)
def test_CRUD_upload(self): """ Check that upload's features are working as expected """ # Upload init f = core.files.upload_init("test_upload.tar.gz", 10, {'tags': 'Coucou'}) self.assertEqual(f.name, "test_upload.tar.gz") self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, 0) self.assertEqual(f.status, "uploading") self.assertEqual(f.type, "gz") self.assertEqual(f.path.startswith(TEMP_DIR), True) old_path = f.path # Upload chunk f = core.files.upload_chunk(f.id, 0, 5, b'chunk') self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, 5) self.assertEqual(f.status, "uploading") self.assertEqual(os.path.isfile(f.path), True) self.assertEqual(os.path.getsize(f.path), f.upload_offset) # Upload finish f = core.files.upload_chunk(f.id, 5, 5, b'chunk') self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, f.size) self.assertEqual(f.status, "uploaded") self.assertEqual(f.path.startswith(FILES_DIR), True) self.assertEqual(os.path.isfile(old_path), False) self.assertEqual(os.path.isfile(f.path), True) self.assertEqual(os.path.getsize(f.path), f.size) # Check file content with open(f.path, "r") as r: c = r.readlines() self.assertEqual(c, ['chunkchunk']) # Delete file core.files.delete(f.id) f2 = File.from_id(f.id) self.assertEqual(f2, None) self.assertEqual(os.path.isfile(f.path), False)
def sample_init(self, loading_depth=0):
    """ Init properties of a sample :
        - id                 : int    : the unique id of the sample in the database
        - name               : str    : the name of the sampel (name in the vcf file by default)
        - comment            : str    : a comment on the sample
        - is_mosaic          : bool   : true if data (variant) for this sample are mosaic; false otherwise
        - subject_id         : int    : the id of the subject linked to this sample
        - file_id            : int    : the id of the file (vcf) from which the sample have been extracted
        - loading_progress   : float  : progress (from 0 to 1) of the import of the sample
        - update_date        : date   : The last time that the object have been updated
        - create_date        : date   : The datetime when the object have been created
        - reference_id       : int    : the reference id for this sample
        - status             : enum   : import status values can be : 'empty', 'loading', 'ready', 'error'
        - default_dbuid      : [str]  : list of annotation's databases used in the vcf from where come the sample
        - filter_description : json   : description of the filter used in the vcf. Structure : { "<FilterValue>": "<Description>"}
        - analyses_id        : [int]  : the list of id of analyses that are using this sample
        - stats              : json   : stats regarding import and quality
        If loading_depth is > 0, Following properties fill be loaded : (Max depth level is 2)
        - subject            : Subject    : Subject data of the linked subject
        - file               : File       : File data of the source file
        - analyses           : [Analysis] : Analysis data of linked analyses
    """
    from core.model.analysis import Analysis, AnalysisSample
    from core.model.subject import Subject
    from core.model.file import File
    # With depth loading, sqlalchemy may return several times the same
    # object; take care to not erase the good depth level.
    if hasattr(self, "loading_depth") and self.loading_depth >= loading_depth:
        return
    else:
        # Depth is capped at 2 levels of dependency loading.
        self.loading_depth = min(2, loading_depth)
    try:
        self.analyses_ids = AnalysisSample.get_analyses_ids(self.id)
        self.subject = None
        self.file = None
        self.analyses = []
        if self.loading_depth > 0:
            # Dependencies are loaded with one less depth level.
            self.subject = Subject.from_id(self.subject_id, self.loading_depth-1)
            self.file = File.from_id(self.file_id, self.loading_depth-1)
            self.analyses = AnalysisSample.get_analyses(self.id, self.loading_depth-1)
    except Exception as ex:
        raise RegovarException("Sample data corrupted (id={}).".format(self.id), "", ex)
def test_CRUD_upload(self): """ CRUD File with UPLOAD """ # Upload init f = core.files.upload_init("test_upload.tar.gz", 10, {'tags':'Coucou'}) self.assertEqual(f.name, "test_upload.tar.gz") self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, 0) self.assertEqual(f.status, "uploading") self.assertEqual(f.type, "gz") self.assertEqual(f.path.startswith(TEMP_DIR), True) old_path = f.path # Upload chunk f = core.files.upload_chunk(f.id, 0, 5, b'chunk') self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, 5) self.assertEqual(f.status, "uploading") self.assertEqual(os.path.isfile(f.path),True) self.assertEqual(os.path.getsize(f.path), f.upload_offset) # Upload finish f = core.files.upload_chunk(f.id, 5, 5, b'chunk') self.assertEqual(f.size, 10) self.assertEqual(f.upload_offset, f.size) self.assertEqual(f.status, "uploaded") self.assertEqual(f.path.startswith(FILES_DIR), True) self.assertEqual(os.path.isfile(old_path), False) self.assertEqual(os.path.isfile(f.path), True) self.assertEqual(os.path.getsize(f.path), f.size) # Check file content with open(f.path, "r") as r: c = r.readlines() self.assertEqual(c, ['chunkchunk']) # Delete file core.files.delete(f.id) f2 = File.from_id(f.id) self.assertEqual(f2, None) self.assertEqual(os.path.isfile(f.path), False)
def sample_init(self, loading_depth=0):
    """ Init properties of a sample :
        - id                 : int    : the unique id of the sample in the database
        - name               : str    : the name of the sampel (name in the vcf file by default)
        - comment            : str    : a comment on the sample
        - is_mosaic          : bool   : true if data (variant) for this sample are mosaic; false otherwise
        - subject_id         : int    : the id of the subject linked to this sample
        - file_id            : int    : the id of the file (vcf) from which the sample have been extracted
        - file               : File   : File data of the source file
        - loading_progress   : float  : progress (from 0 to 1) of the import of the sample
        - update_date        : date   : The last time that the object have been updated
        - create_date        : date   : The datetime when the object have been created
        - reference_id       : int    : the reference id for this sample
        - status             : enum   : import status values can be : 'empty', 'loading', 'ready', 'error'
        - default_dbuid      : [str]  : list of annotation's databases used in the vcf from where come the sample
        - filter_description : json   : description of the filter used in the vcf. Structure : { "<FilterValue>": "<Description>"}
        - analyses_id        : [int]  : the list of id of analyses that are using this sample
        - stats              : json   : stats regarding import and quality
        If loading_depth is > 0, Following properties fill be loaded : (Max depth level is 2)
        - subject            : Subject    : Subject data of the linked subject
        - analyses           : [Analysis] : Analysis data of linked analyses
    """
    from core.model.analysis import Analysis, AnalysisSample
    from core.model.subject import Subject
    from core.model.file import File
    # With depth loading, sqlalchemy may return several times the same
    # object; take care to not erase the good depth level.
    if hasattr(self, "loading_depth") and self.loading_depth >= loading_depth:
        return
    else:
        # Depth is capped at 2 levels of dependency loading.
        self.loading_depth = min(2, loading_depth)
    try:
        self.analyses_ids = AnalysisSample.get_analyses_ids(self.id)
        self.subject = None
        # The source file is always loaded (with depth 0), regardless of
        # the requested loading_depth.
        self.file = File.from_id(self.file_id, 0)
        self.analyses = []
        if self.loading_depth > 0:
            # Deeper dependencies are loaded with one less depth level.
            self.subject = Subject.from_id(self.subject_id, self.loading_depth-1)
            self.analyses = AnalysisSample.get_analyses(self.id, self.loading_depth-1)
    except Exception as ex:
        raise RegovarException("Sample data corrupted (id={}).".format(self.id), "", ex)
def pipeline_init(self, loading_depth=0, force_refresh=False):
    """ Init properties of a Pipeline :
        - id                : int   : The unique id of the pipeline in the database
        - name              : str   : The name of the analysis
        - description       : str   : An optional description
        - type              : enum  : Enum that help the container engine to know the type of the pipeline: 'lxd', 'docker', ...
        - status            : enum  : The status of the pipeline : 'initializing', 'installing', 'ready', 'error'
        - starred           : bool  : Flag to know if the pipeline is starred or not
        - developpers       : [str] : List of developpers of the pipeline
        - installation_date : date  : When the pipe have been installed on the server
        - version           : str   : The version of the pipeline
        - image_file_id     : int   : Id of the file that was used to import/install the pipeline
        - jobs_ids          : [int] : List of the job created with this pipeline
        - path              : str   : Path to the pipeline on the server (internal usage only)
        - manifest          : json  : The manifest of the pipeline with all its informations
        - documents         : json  : The dic of the related documents <key>: <path_to_the_doc>.
                                      Keys are: manifest, form, icon, help, home, license, readme
        If loading_depth is > 0, Following properties fill be loaded : (Max depth level is 2)
        - image_file        : File   : The file of the pipeline image (if exists)
        - jobs              : [Jobs] : The list of jobs done with this pipeline
    """
    from core.model.file import File
    # With depth loading, sqlalchemy may return several times the same
    # object; take care to not erase the good depth level.
    # Avoid infinite recursion loop: skip re-init when this object is
    # already loaded at an equal or greater depth (unless forced).
    if hasattr(self, "loading_depth") and not force_refresh and self.loading_depth >= loading_depth:
        return
    else:
        # Depth is capped at 2 levels of dependency loading.
        self.loading_depth = min(2, loading_depth)
    try:
        self.jobs_ids = []
        self.jobs = []
        self.image_file = None
        self.jobs_ids = self.get_jobs_ids()
        if self.loading_depth > 0:
            # Dependencies are loaded with one less depth level.
            self.jobs = self.get_jobs(self.loading_depth-1)
            self.image_file = File.from_id(self.image_file_id, self.loading_depth-1)
    except Exception as ex:
        raise RegovarException("Pipeline data corrupted (id={}).".format(self.id), "", ex)
def pipeline_load(self, data):
    """Update several pipeline properties at once from a dict, then save.

    Only the keys present in ``data`` are applied. Dynamic properties
    (jobs, image_file) are reloaded when loading_depth > 0.

    Args:
        data (dict): mapping of property name to new value.

    Returns:
        Pipeline: self, to allow call chaining.

    Raises:
        RegovarException: if a required key is missing.
    """
    from core.model.file import File
    try:
        # (field, converter) pairs applied in declaration order;
        # a None converter stores the raw value unchanged.
        field_setters = [
            ("name", check_string),
            ("type", check_string),
            ("status", check_string),
            ("description", check_string),
            ("developpers", None),
            ("installation_date", check_date),
            ("version", check_string),
            ("image_file_id", check_int),
            ("manifest", None),
            ("documents", None),
            ("path", check_string),
        ]
        for field, convert in field_setters:
            if field in data.keys():
                raw = data[field]
                setattr(self, field, convert(raw) if convert else raw)
        # check to reload dynamics properties
        if self.loading_depth > 0:
            self.jobs = self.get_jobs(self.loading_depth - 1)
            self.image_file = File.from_id(self.image_file_id, self.loading_depth - 1)
        self.save()
    except KeyError as e:
        raise RegovarException('Invalid input pipeline: missing ' + e.args[0])
    return self
class GetFile:
    """Class responsible for the project's graphical interface.

    Builds a Tk window that lets the user enter a URL pattern, pick a
    target directory and download a numbered sequence of files.

    Author: bcarsoft
    """

    # NOTE(review): these are class-level attributes shared by all
    # instances — in particular File() is created once at class
    # definition time; confirm a single shared File is intended.
    _font_titulo = ('Arial', 18, 'bold')
    _font_normal = ('Arial', 15)
    _font_butt = ('Arial', 12)
    _back_g = '#2f2f2f'
    _file = File()
    _dir_init = expanduser('~')

    def __init__(self):
        """Build the graphical interface and start the Tk main loop."""
        self.window = Tk()
        self.window.minsize(640, 512)
        self.window.title('Downloader')
        self.window['bg'] = self._back_g
        # Title section.
        self.title_frame = Frame(self.window)
        self.title_frame['bg'] = self._back_g
        self.title_frame.pack()
        self.label_title = Label(self.title_frame)
        self.label_title['font'] = self._font_titulo
        self.label_title['text'] = 'Get Files from Internet'
        self.label_title['fg'] = '#ffffff'
        self.label_title['bg'] = self._back_g
        self.label_title.pack(pady=10)
        # URL entry section; clicking the entry clears it (see clear_url).
        self.link_frame = Frame(self.window)
        self.link_frame['bg'] = self._back_g
        self.link_frame.pack()
        self.label_link = Label(self.link_frame)
        self.label_link['font'] = self._font_normal
        self.label_link['bg'] = self._back_g
        self.label_link['fg'] = '#ffffff'
        self.label_link['text'] = 'Link URL*'
        self.label_link.pack()
        self.entry_link = Entry(self.link_frame)
        self.entry_link['width'] = 50
        self.entry_link['font'] = self._font_normal
        self.entry_link.bind('<Button-1>', self.clear_url)
        self.entry_link.pack()
        # Destination directory section with a picker button.
        self.way_frame = Frame(self.window)
        self.way_frame['bg'] = self._back_g
        self.way_frame.pack()
        self.label_way = Label(self.way_frame)
        self.label_way['text'] = 'Select Directory*'
        self.label_way['fg'] = '#ffffff'
        self.label_way['bg'] = self._back_g
        self.label_way['font'] = self._font_normal
        self.label_way.pack()
        self.entry_way = Entry(self.way_frame)
        self.entry_way['font'] = self._font_normal
        self.entry_way['width'] = 43
        self.entry_way.pack(side=LEFT)
        self.butt_way = Button(self.way_frame)
        self.butt_way['font'] = self._font_butt
        self.butt_way['text'] = 'Select'
        self.butt_way['width'] = 6
        self.butt_way['fg'] = '#ffffff'
        self.butt_way['bg'] = '#d40003'
        self.butt_way.bind('<Button-1>', self.get_folder)
        self.butt_way.pack(side=LEFT)
        # Spinboxes: file count, zero-padding digits, starting index.
        self.spn_frame = Frame(self.window)
        self.spn_frame['bg'] = self._back_g
        self.spn_frame.pack()
        self.label_spiner = Label(self.spn_frame)
        self.label_spiner['text'] = 'How Many Files* '
        self.label_spiner['font'] = self._font_normal
        self.label_spiner['fg'] = '#ffffff'
        self.label_spiner['bg'] = self._back_g
        self.label_spiner.pack(side=LEFT)
        self.spiner_files = Spinbox(self.spn_frame)
        self.spiner_files['width'] = 5
        self.spiner_files['values'] = tuple(i for i in range(123))
        self.spiner_files['font'] = self._font_normal
        self.spiner_files.pack(side=LEFT)
        self.label_digits = Label(self.spn_frame)
        self.label_digits['font'] = self._font_normal
        self.label_digits['text'] = ' Digits '
        self.label_digits['bg'] = self._back_g
        self.label_digits['fg'] = '#ffffff'
        self.label_digits.pack(side=LEFT)
        self.spiner_digits = Spinbox(self.spn_frame)
        self.spiner_digits['font'] = self._font_normal
        self.spiner_digits['width'] = 5
        self.spiner_digits['values'] = tuple(i for i in range(1, 8))
        self.spiner_digits.pack(side=LEFT)
        self.label_init = Label(self.spn_frame)
        self.label_init['font'] = self._font_normal
        self.label_init['bg'] = self._back_g
        self.label_init['fg'] = '#ffffff'
        self.label_init['text'] = ' From '
        self.label_init.pack(side=LEFT)
        self.spiner_from = Spinbox(self.spn_frame)
        self.spiner_from['font'] = self._font_normal
        self.spiner_from['values'] = tuple(i for i in range(123))
        self.spiner_from['width'] = 5
        self.spiner_from.pack(side=LEFT)
        # Log area listing the downloaded files.
        self.files_frame = Frame(self.window)
        self.files_frame['bg'] = self._back_g
        self.files_frame.pack()
        self.label_files = Label(self.files_frame)
        self.label_files['font'] = self._font_normal
        self.label_files['text'] = 'Files Downloaded'
        self.label_files['fg'] = '#ffffff'
        self.label_files['bg'] = self._back_g
        self.label_files.pack()
        self.text_files = Text(self.files_frame)
        self.text_files['width'] = 61
        self.text_files['font'] = self._font_butt
        self.text_files['fg'] = '#2c5e9a'
        self.text_files['height'] = 10
        self.text_files['bd'] = 2
        self.text_files.pack()
        # Download trigger button.
        self.down_frame = Frame(self.window)
        self.down_frame['bg'] = self._back_g
        self.down_frame.pack()
        self.butt_down = Button(self.down_frame)
        self.butt_down['font'] = self._font_butt
        self.butt_down['text'] = 'Make Download'
        self.butt_down['width'] = 15
        self.butt_down['fg'] = '#ffffff'
        self.butt_down['bg'] = '#2c5e9a'
        self.butt_down.bind('<Button-1>', self.make_download)
        self.butt_down.pack(pady=30)
        # Blocks until the window is closed.
        self.window.mainloop()

    def get_folder(self, evt):
        """Ask the user for a folder and put its path in the entry.

        Args:
            evt (event): mouse event - button 1.
        """
        road = self.entry_way.get()
        # Start browsing from the current entry value when it is a valid
        # directory, otherwise from the user's home.
        directory = askdirectory(
            initialdir=self._dir_init if not isdir(road) else road)
        self.entry_way.delete(0, 'end')
        self.entry_way.insert(0, directory)

    def clear_url(self, evt):
        """Clear the URL entry.

        Args:
            evt (event): mouse event - button 1.
        """
        self.entry_link.delete(0, 'end')

    def make_download(self, evt):
        """Attempt to perform the requested download of one or more files.

        Args:
            evt (event): mouse event - button 1.
        """
        # Number of files requested; clear previous log output.
        self.file.quantity = int(self.spiner_files.get())
        self.text_files.delete('1.0', 'end')
        if self.file.quantity < 1:
            messagebox.showwarning(
                title='Problem with size',
                message='File\'s number invalid, it must be great than 0')
            return
        if self.file.quantity < int(self.spiner_from.get()):
            messagebox.showwarning(
                title='Problem with first file',
                message='File\'s initial must be smaller than quantity')
            return
        self.file.link = self.entry_link.get()
        self.file.way = self.entry_way.get()
        # Padding width and first index taken from the spinboxes.
        digits = int(self.spiner_digits.get())
        from_t = int(self.spiner_from.get())
        # When starting from index > 0, extend the (exclusive) range end
        # so the requested count of files is still downloaded.
        self.file.quantity += 1 if from_t > 0 else 0
        # Working File object reused for each download.
        arquivo = File()
        arquivo.quantity = self.file.quantity
        self.file.way = Tools.join_dirs(dir_1=self.file.way,
                                        dir_2=Tools.get_name(self.file.link))
        # Download each file and insert its destination path in the Text.
        for i in range(from_t, self.file.quantity):
            try:
                # Zero-pad the index to `digits` characters.
                zeros = digits - str(i).__len__()
                arquivo.link = self.file.link.format(zeros * '0' + str(i))
                arquivo.way = self.file.way.format(zeros * '0' + str(i))
                self.text_files.insert(f'{i}.0', arquivo.way + '\n')
                downloaded = Sing.get_service().validate_file(file=arquivo)
                if not downloaded:
                    messagebox.showerror(title='It found an error',
                                         message=SMsg.message().msg)
                    return
            except Exception as ex:
                messagebox.showerror(title='Excepction Found',
                                     message=str(ex) + '\n' + arquivo.way)
                return
        else:
            # for/else: runs only when the loop completes without returning.
            messagebox.showinfo(title='Success',
                                message='All Files Downloaded!')

    @property
    def file(self):
        # Shared File model holding link/way/quantity for the session.
        return self._file