def main():
    """Run a container while tracing its capability usage, then report.

    Starts a capabilities tracer feeding an event queue, launches the
    container, and starts an analyzer consuming those events. On SIGINT/
    SIGTERM or normal container exit, everything is stopped, the report is
    printed, and the process exits.
    """
    parser = Parser()
    args = parser.get_arguments()

    container_manager = ContainerManager()
    events_queue = Queue()

    capabilities_tracer = CapabilitiesTracer(events_queue)
    capabilities_tracer.start()

    container_pid = container_manager.start(args)
    container_config = container_manager.get_config()

    capabilities_analyzer = CapabilitiesAnalyzer(events_queue, container_pid, container_config)
    capabilities_analyzer.start()

    def clean_up():
        # Stop components, print the final report, then terminate the process.
        container_manager.stop()
        capabilities_tracer.stop()
        capabilities_analyzer.stop()
        capabilities_analyzer.print_report()
        sys.exit()

    def signal_handler(_signal_number, _frame):
        clean_up()

    # BUG FIX: SIGINT was registered twice (the second call was a no-op).
    # Register SIGTERM as well so `kill` / `docker stop` also triggers cleanup.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Stream container logs until the container exits.
    for log in container_manager.logs():
        print(log.decode())

    print('Container exited')
    clean_up()
def analize_last_run(current_manager):
    """Diff the current run against the most recent finished run and upload reports.

    Downloads the latest finished file for the brand, builds a manager for
    it, generates "gap" and "new" reports relative to it, uploads both to
    the unifier IO folder, removes the local copies, and posts a Slack note.

    NOTE(review): the name keeps the original 'analize' spelling because
    external callers use it.

    Args:
        current_manager: manager for the current run; its
            `drive_container.parent.parent` is the brand folder to search.

    Returns:
        None. Returns early when no finished files exist.
    """
    finished_files = find_finished_files(current_manager.drive_container.parent.parent)
    if not finished_files:
        # Nothing to compare against -- first run for this brand.
        return None

    latest_run = finished_files[-1]
    latest_run_path = DL.download_file(latest_run)
    latest_manager = ContainerManager([latest_run_path], current_manager.drive_container, True)

    gap_path = current_manager.generate_gap_report(latest_manager)
    new_path = current_manager.generate_new_report(latest_manager)

    DL.upload_file(gap_path, unifier_io.folder_data['id'])
    DL.upload_file(new_path, unifier_io.folder_data['id'])

    # Local copies are no longer needed once uploaded.
    os.remove(gap_path)
    os.remove(new_path)

    slack(f"Uploaded {current_manager.drive_container.path} New and Gap")
def main():
    """Wire up the service handlers and launch one consumer thread per handler."""
    container_manager = ContainerManager(config)
    communication_handler = CommunicationHandler()

    store_cfg = config['rdf_store']
    rdf_store = RDFStore(store_cfg['endpoint'], store_cfg['username'], store_cfg['password'])
    dataset_storage_handler = DatasetStorageHandler(rdf_store)

    # One daemonless worker thread per (consumer entry point, handler) pair,
    # started in the same order as before.
    workers = (
        (consumers.run_consumer, container_manager),
        (consumers.run_communication_consumer, communication_handler),
        (consumers.dataset_storage_job_consumer, dataset_storage_handler),
    )
    for target, handler in workers:
        threading.Thread(target=target, args=(handler,)).start()
import socket
import pickle

from container_manager import ContainerManager
from utility import *

manager = ContainerManager()

# Control socket: accepts pickled (command, ...) messages on port 9090.
sock = socket.socket()
sock.bind(('', 9090))
sock.listen(1)

while True:
    conn, addr = sock.accept()
    print('connected: {0}{1}'.format(addr, '!!'))
    # SECURITY: pickle.loads on bytes received from the network allows
    # arbitrary code execution -- replace with a safe format (e.g. JSON).
    data = pickle.loads(conn.recv(1024))
    print('data: {0}'.format(repr(data)))
    if data[0] == 'check':
        conn.close()
    elif data[0] == 'create':
        sys_mem = manager.get_system_memory()
        doc_mem = manager.get_memory_usage_for_all()
        # BUG FIX: comparison was inverted (`<`), so the "over 80%" warning
        # fired when usage was BELOW the threshold. Warn when usage exceeds
        # 80% of system memory. (Also fixed message typos: "then" -> "than",
        # stray backslash before '%'.)
        if doc_mem > sys_mem * 0.8:
            conn.send(pickle.dumps(('bibas', 'You\'ve used more than 80% of memory !!!\n\tDo you really want to continue(yes)?')))
            resp = pickle.loads(conn.recv(1024))
            if resp == 'no':
                # NOTE(review): ('failed') is the bare string 'failed', not a
                # 1-tuple like the other sends -- kept as-is because the
                # client-side protocol is not visible here; confirm intent.
                conn.send(pickle.dumps(('failed')))
                conn.close()
                continue
            # NOTE(review): visible source ends here; the success path of the
            # 'create' command appears truncated in this view.
# Process every container with files pending: download the current and prior
# ("finished") files for the brand, merge known entities from prior runs,
# then export/upload known and unknown reports.
# NOTE(review): this span is truncated in the visible source -- the body of
# the final `else:` branch is missing, so behavior past that point is unknown.
for container in files_present_queue:
    brand = container.parent.parent
    container.modify_time()
    # Update container time for recursive search
    unifier_io, _ = find_io_folders(brand)
    # Download brand files
    DL.clear_storage()
    print(f'Downloading {len(container.files)} {container} files from {brand}')
    current_downloads = download_files(container.files)
    # Download old files to get missing info
    old_transformer_files = find_finished_files(brand)
    print(f'Downloading {len(old_transformer_files)} old files from {brand}')
    previous_downloads = download_files(old_transformer_files)
    # Parse files
    container_manager = ContainerManager(current_downloads, container)
    if previous_downloads:
        # Seed the current manager with entities already known from prior runs.
        previous_manager = ContainerManager(previous_downloads, unifier_io, True)
        container_manager.load_knowns(previous_manager)
    print('Exporting files')
    known_path = container_manager.generate_knowns()
    if container_manager.unknowns():
        # Upload Unknowns file
        unknown_path = container_manager.generate_unknowns()
        upload = DL.upload_file(unknown_path, unifier_io.folder_data['id'])
        os.remove(unknown_path)
        # Conditional expression used as a statement: Slack notify only when enabled.
        slack(f'Unknowns found: uploaded {unifier_io.path}/{upload["name"]}') if SLACK else False
    else:
        # Upload knowns, gap, new, for_transformer
# Copyright 2016 Steve Hazel
#
# This file is part of Benome.
#
# Benome is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation.
#
# Benome is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Benome. If not, see http://www.gnu.org/licenses/.

from container_manager import ContainerManager

# One-off maintenance script: force-remove a hand-pasted list of containers.
c = ContainerManager('http://127.0.0.1:5200', '127.0.0.1', user_manager=False)

# Paste whitespace-separated container IDs between the quotes; currently
# empty, so the loop below is a no-op.
f = '''
'''

for container_id in f.split():
    # FIX: `print container_id` is Python-2-only syntax; the single-argument
    # parenthesized form parses identically under Python 2 and 3.
    print(container_id)
    c.docker(['rm', '-f', container_id])
def main():
    """Create the two service handlers and start a consumer thread for each."""
    manager = ContainerManager(config)
    comm_handler = CommunicationHandler()

    # Launch the consumers in the original order, one thread per handler.
    launch_plan = (
        (consumers.run_consumer, manager),
        (consumers.run_communication_consumer, comm_handler),
    )
    for target, handler in launch_plan:
        threading.Thread(target=target, args=(handler,)).start()