class ZTFCutoutImages(AbsBufferComplement):
    """
    Add cutout images from ZTF archive database
    """

    #: Which detection to retrieve cutouts for
    eligible: Literal["first", "last", "brightest", "all"] = "last"

    def __init__(self, context: AmpelContext, **kwargs) -> None:
        super().__init__(**kwargs)
        self.session = BaseUrlSession(
            base_url=context.config.get(
                "resource.ampel-ztf/archive", str, raise_exc=True
            )
        )

    @backoff.on_exception(
        backoff.expo,
        requests.ConnectionError,
        max_tries=5,
        factor=10,
    )
    @backoff.on_exception(
        backoff.expo,
        requests.HTTPError,
        giveup=lambda e: e.response.status_code not in {503, 504, 429, 408},
        max_time=60,
    )
    def get_cutout(self, candid: int) -> None | dict[str, bytes]:
        response = self.session.get(f"cutouts/{candid}")
        if response.status_code == 404:
            return None
        response.raise_for_status()
        return {k: b64decode(v) for k, v in response.json().items()}

    def complement(self, records: Iterable[AmpelBuffer], t3s: T3Store) -> None:
        for record in records:
            if (photopoints := record.get("t0")) is None:
                raise ValueError(f"{type(self).__name__} requires t0 records")
            # Keep only detections (positive ids), ordered by observation time
            pps = sorted(
                [pp for pp in photopoints if pp["id"] > 0],
                key=lambda pp: pp["body"]["jd"],
            )
            if not pps:
                # No detections for this record; move on to the next one
                # (a bare `return` here would skip all remaining records)
                continue
            if self.eligible == "last":
                candids = [pps[-1]["id"]]
            elif self.eligible == "first":
                candids = [pps[0]["id"]]
            elif self.eligible == "brightest":
                candids = [min(pps, key=lambda pp: pp["body"]["magpsf"])["id"]]
            elif self.eligible == "all":
                candids = [pp["id"] for pp in pps]
            cutouts = {candid: self.get_cutout(candid) for candid in candids}
            if "extra" not in record or record["extra"] is None:
                record["extra"] = {self.__class__.__name__: cutouts}
            else:
                record["extra"][self.__class__.__name__] = cutouts
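# The stacked `backoff` decorators above encode a common retry policy: retry
# connection errors a bounded number of times, and retry HTTP errors only when
# the status code looks transient. A minimal standalone sketch of the same
# pattern (the session and URL below are illustrative, not from the original):
import backoff
import requests
from requests_toolbelt.sessions import BaseUrlSession

session = BaseUrlSession(base_url="https://archive.example.com/")

@backoff.on_exception(backoff.expo, requests.ConnectionError, max_tries=5)
@backoff.on_exception(
    backoff.expo,
    requests.HTTPError,
    # Give up immediately unless the failure is plausibly transient
    giveup=lambda e: e.response.status_code not in {503, 504, 429, 408},
    max_time=60,
)
def fetch_json(path: str) -> dict:
    response = session.get(path)
    response.raise_for_status()
    return response.json()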
def await_tasks_finished(api_client: BaseUrlSession, tasks: List[Dict]):
    tasks_set: Set[int] = {t["id"] for t in tasks}
    sleep_time = 0.1
    while tasks_set:
        sleep_time *= 2
        time.sleep(sleep_time)
        for task_id in list(tasks_set):
            r = api_client.get(f"uploads/{task_id}/")
            raise_for_status(r)
            if r.json()["status"] == "FAILED":
                errors = r.json()["error_messages"]
                raise Exception(
                    f"Upload with Task ID {task_id} failed with errors: {errors}"
                )
            if r.json()["status"] == "FINISHED":
                tasks_set.remove(task_id)
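# Note that the polling interval above doubles on every pass with no upper
# bound, so a slow task can push the wait into minutes. A capped variant is a
# small change; the 30-second ceiling is an arbitrary choice, not from the
# original:
import time

def backoff_sleep(sleep_time: float, cap: float = 30.0) -> float:
    """Double the polling interval, but never exceed `cap` seconds."""
    sleep_time = min(sleep_time * 2, cap)
    time.sleep(sleep_time)
    return sleep_time

# Inside the loop above this would read: sleep_time = backoff_sleep(sleep_time)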
class Client:
    """cvr.dev API client.

    Contains a dedicated client for the /cvr/ part of the API.
    See https://docs.cvr.dev/#ra-cvr-data for more info.
    """

    def __init__(self, api_key):
        self._session = BaseUrlSession(_BASE_URL)
        self._session.headers.update({'Authorization': api_key})
        self.cvr = CVRClient(self._session)

    def test_api_key(self):
        resp = self._session.get('test/apikey')
        handle_response_status_code(resp)

    def close(self):
        self._session.close()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
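# Because `Client` implements the context-manager protocol, the underlying
# session is closed automatically on exit. A usage sketch (the key is a
# placeholder):
with Client(api_key='my-secret-key') as client:
    client.test_api_key()  # raises via handle_response_status_code if rejected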
def session():
    s = BaseUrlSession('http://girder:8080/api/v1/')
    r = s.get('user/authentication', auth=('admin', 'password'))
    r.raise_for_status()
    s.headers.update({'Girder-Token': r.json()['authToken']['token']})
    yield s
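# The `yield` suggests this is a pytest fixture (a `@pytest.fixture` decorator
# would sit above it in the original module). A test consuming it might look
# like this; the endpoint is illustrative:
def test_current_user(session):
    # Relative paths resolve against the base URL and carry the token header
    r = session.get('user/me')
    assert r.status_code == 200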
class OpenVidu(object):
    """
    This object represents an OpenVidu server instance.
    """

    def __init__(self, url: str, secret: str, initial_fetch: bool = True,
                 timeout: Union[int, tuple, None] = None):
        """
        :param url: The url to reach your OpenVidu Server instance. Typically something like https://localhost:4443/
        :param secret: Secret for your OpenVidu Server
        :param initial_fetch: Enable the initial fetching on object creation. Defaults to `True`. If set to `False` a `fetch()` must be called before doing anything with the object. In most scenarios you won't need to change this.
        :param timeout: Set timeout to all Requests to the OpenVidu server. Default: None = No timeout. See https://2.python-requests.org/en/latest/user/advanced/#timeouts for possible values.
        """
        self._session = BaseUrlSession(base_url=url)
        self._session.auth = HTTPBasicAuth('OPENVIDUAPP', secret)
        self._session.headers.update(
            {'User-Agent': user_agent('PyOpenVidu', __version__)})
        self._session.request = partial(self._session.request, timeout=timeout)

        self._openvidu_sessions = {}  # id: object
        # Used only to calculate the return value of the fetch() call
        self._last_fetch_result = {}

        if initial_fetch:
            self.fetch()  # initial fetch

    def fetch(self) -> bool:
        """
        Updates every property of every active Session with the current status they have in OpenVidu Server. After calling this method you can access the updated list of active sessions through the `sessions` property.

        :return: True if the Session status has changed with respect to the server, False if not. This applies to any property or sub-property of the object.
        """
        r = self._session.get("sessions")
        r.raise_for_status()

        new_data = r.json()['content']
        data_changed = new_data != self._last_fetch_result
        self._last_fetch_result = new_data

        if data_changed:
            # Update: recreate the valid sessions
            self._openvidu_sessions = {}
            for session_data in new_data:
                session_id = session_data['id']
                self._openvidu_sessions[session_id] = OpenViduSession(
                    self._session, session_data)

        return data_changed

    def get_session(self, session_id: str) -> OpenViduSession:
        """
        Get a currently active session from the server.

        :param session_id: The ID of the session to acquire.
        :return: An OpenViduSession object.
        """
        if session_id not in self._openvidu_sessions:
            raise OpenViduSessionDoesNotExistsError()

        session = self._openvidu_sessions[session_id]
        if not session.is_valid:
            raise OpenViduSessionDoesNotExistsError()

        return session

    def create_session(self, custom_session_id: str = None,
                       media_mode: str = None) -> OpenViduSession:
        """
        Creates a new OpenVidu session.

        As of OpenVidu 2.16.0 the server returns the created session object, so no extra fetch() call is needed.

        https://docs.openvidu.io/en/2.16.0/reference-docs/REST-API/#post-openviduapisessions

        :param custom_session_id: You can fix the sessionId that will be assigned to the session with this parameter.
        :param media_mode: ROUTED (default) or RELAYED
        :return: The created OpenViduSession instance.
""" # Prepare parameters if media_mode not in ['ROUTED', 'RELAYED', None]: raise ValueError( f"media_mode must be any of ROUTED or RELAYED, not {media_mode}" ) parameters = { "mediaMode": media_mode, "customSessionId": custom_session_id } parameters = {k: v for k, v in parameters.items() if v is not None} # send request r = self._session.post('sessions', json=parameters) if r.status_code == 409: raise OpenViduSessionExistsError() elif r.status_code == 400: raise ValueError() r.raise_for_status() # As of OpenVidu 2.16.0 the server returns the created session object new_session = OpenViduSession(self._session, r.json()) self._openvidu_sessions[new_session.id] = new_session return new_session @property def sessions(self) -> List[OpenViduSession]: """ Get a list of currently active sessions to the server. :return: A list of OpenViduSession objects. """ return [ sess for sess in self._openvidu_sessions.values() if sess.is_valid ] @property def session_count(self) -> int: """ Get the number of active sessions on the server. :return: The number of active sessions. """ return len(self.sessions) def get_config(self) -> dict: """ Get OpenVidu active configuration. Unlike session related calls. This call does not require prior calling of the fetch() method. Using this function will always result an API call to the backend. https://docs.openvidu.io/en/2.16.0/reference-docs/REST-API/#get-openviduapiconfig :return: The exact response from the server as a dict. """ # Note: Since 2.16.0 This endpoint is moved from toplevel under /api # https://docs.openvidu.io/en/2.16.0/reference-docs/REST-API/#get-openviduapiconfig r = self._session.get('config') r.raise_for_status() return r.json()
class AppConnect:
    """App connection object.

    A wrapper around requests_toolbelt's BaseUrlSession that holds Atlassian
    keys across command runs.

    Parameters
    ----------
    server:
        base url of app server.
    username:
        username for connection.
    password:
        password for connection.
    cookie_store:
        path to file for cookie_store.
    session_headers:
        default headers added to every call.
    """
    _server: str
    username: str
    _password: bytes
    session: BaseUrlSession = None
    auth: HTTPBasicAuth = None
    _response: requests.Response = None
    cookie_store: str = None

    def __init__(self, server: str, username: str = None,
                 password: str = None, cookie_store: str = None,
                 session_headers: dict = None) -> None:
        self.server = server
        self.session = BaseUrlSession(base_url=server)

        if username:
            self.username = username
        if password:
            self.password = password
        if cookie_store:
            self.cookie_store = cookie_store
        if username and password:
            self.auth = HTTPBasicAuth(self.username, self.password)
        if session_headers:
            self.session.headers.update(session_headers)

        self.reload_cookies()

    @property
    def server(self):
        """server baseUrl for connection."""
        return self._server

    @server.setter
    def server(self, server: str):
        self._server = server
        if self.session:
            self.session.base_url = server

    @property
    def password(self):
        """password for connection."""
        return base64.decodebytes(self._password).decode()

    @password.setter
    def password(self, password: str):
        self._password = base64.encodebytes(password.encode())

    def get(self, api, headers: dict = None, params: dict = None,
            data: dict = None, auth: bool = False, allow_redirects=True):
        """send http get request.

        Parameters
        ----------
        api: str
            url path appended to baseUrl.
        headers:
            dict of headers.
        params:
            dict of url query parameters.
        data:
            dict of data to send.
        auth: bool(False)
            send BasicAuth.
        allow_redirects

        Returns
        -------
        ->json
        """
        # url = urljoin(self.server, api)
        url = api
        try:
            self._response = self.session.get(
                url, headers=headers, params=params, data=data,
                auth=self.auth if auth else None,
                allow_redirects=allow_redirects)
            self._response.raise_for_status()
        except requests.exceptions.ConnectionError as err:
            raise SystemExit(err)
        except requests.exceptions.Timeout as err:
            raise SystemExit(err)
        except requests.exceptions.TooManyRedirects as err:
            raise SystemExit(err)
        except requests.exceptions.HTTPError as err:
            raise SystemExit(err)

        return self.json_response(self._response)

    def delete(self, api, headers: dict = None, params=None,
               auth: bool = False):
        """send http delete request.

        Parameters
        ----------
        api: str
            url path appended to baseUrl.
        headers:
            dict of headers.
        params:
            dict of url query parameters.
        auth: bool(False)
            send BasicAuth.

        Returns
        -------
        ->json
        """
        url = api
        try:
            self._response = self.session.delete(
                url, headers=headers, params=params,
                auth=self.auth if auth else None)
            self._response.raise_for_status()
        except requests.exceptions.ConnectionError as err:
            raise SystemExit(err)
        except requests.exceptions.Timeout as err:
            raise SystemExit(err)
        except requests.exceptions.TooManyRedirects as err:
            raise SystemExit(err)
        except requests.exceptions.HTTPError as err:
            raise SystemExit(err)

        return self.json_response(self._response)

    def post(self, api: str, headers: dict = None, params: dict = None,
             data: dict = None, auth: bool = False,
             allow_redirects: bool = True):
        """send http post request.

        Parameters
        ----------
        api: str
            url path appended to baseUrl.
        headers:
            dict of headers.
        params:
            dict of url query parameters.
        data:
            dict of data to send.
        auth: bool(False)
            send BasicAuth.
        allow_redirects

        Returns
        -------
        ->json
        """
        # url = urljoin(self.server, api)
        url = api
        try:
            self._response = self.session.post(
                url, headers=headers, params=params, data=data,
                auth=self.auth if auth else None,
                allow_redirects=allow_redirects)
            # self._response.raise_for_status()
        except requests.exceptions.ConnectionError as err:
            raise SystemExit(err)
        except requests.exceptions.Timeout as err:
            raise SystemExit(err)
        except requests.exceptions.TooManyRedirects as err:
            raise SystemExit(err)
        # except requests.exceptions.HTTPError as err:
        #     raise SystemExit(err)

        return self.json_response(self._response)

    def put(self, api: str, headers: dict = None, params: dict = None,
            data: dict = None, auth: bool = False):
        """send http put request.

        Parameters
        ----------
        api: str
            url path appended to baseUrl.
        headers:
            dict of headers.
        params:
            dict of url query parameters.
        data:
            dict of data to send.
        auth: bool(False)
            send BasicAuth.

        Returns
        -------
        ->json
        """
        url = api
        try:
            self._response = self.session.put(
                url, headers=headers, params=params, data=data,
                auth=self.auth if auth else None)
            self._response.raise_for_status()
        except requests.exceptions.ConnectionError as err:
            raise SystemExit(err)
        except requests.exceptions.Timeout as err:
            raise SystemExit(err)
        except requests.exceptions.TooManyRedirects as err:
            raise SystemExit(err)
        except requests.exceptions.HTTPError as err:
            raise SystemExit(err)

        return self.json_response(self._response)

    def json_response(self, res: requests.Response):
        """Always return a json response.

        Parameters
        ----------
        res:
            requests response.

        Returns
        -------
        ->json
        """
        _json = None

        if res.ok:
            if res.cookies:
                self.session.cookies.update(res.cookies)
                self.cache_cookies()
            try:
                _json = res.json()
            except JSONDecodeError:
                # No JSON body; fall through to the summary payload below
                pass

        if not _json:
            if res.ok:
                _json = json.dumps({
                    'success': res.ok,
                    'status code': res.status_code,
                    'elapsed seconds': res.elapsed.seconds
                })
            else:
                _json = json.dumps({
                    'ok': res.ok,
                    'status_code': res.status_code,
                    'reason': res.text,
                    'request-url': res.request.url,
                    'request-method': res.request.method,
                    'text': res.text,
                    'redirect': res.is_redirect,
                    'elapsed': res.elapsed.seconds
                })

        return _json

    def update_cookies(self, cookies: dict = None):
        """add cookie(s) to cookie jar.

        Parameters
        ----------
        cookies
        """
        self.session.cookies.update(cookies)
        self.cache_cookies()

    def cache_cookies(self):
        """cache cookies to file."""
        # Guard against a missing cookie_store path
        if self.session.cookies and self.cookie_store:
            with open(self.cookie_store, 'wb') as f:
                pickle.dump(self.session.cookies, f)

    def reload_cookies(self):
        """reload cookies from file."""
        # Guard against a missing cookie_store path
        if self.cookie_store and os.path.isfile(self.cookie_store):
            with open(self.cookie_store, 'rb') as f:
                self.session.cookies.update(pickle.load(f))
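# A usage sketch for AppConnect against a Jira-style REST API (URL,
# credentials, paths, and the cookie file are placeholders):
conn = AppConnect(
    'https://jira.example.com',
    username='admin',
    password='hunter2',
    cookie_store='/tmp/jira-cookies.pkl',
    session_headers={'Accept': 'application/json'},
)

# Relative path resolves against the base URL; auth=True sends BasicAuth
me = conn.get('rest/api/2/myself', auth=True)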
class RCTFAdminV1:
    session: requests.Session

    def __init__(self, endpoint: str, login_token: Optional[str]):
        self.session = BaseUrlSession(urljoin(endpoint, "api/v1/admin/"))
        if login_token is not None:
            login_resp = requests.post(
                urljoin(endpoint, "api/v1/auth/login"),
                json={"teamToken": login_token},
            ).json()
            if login_resp["kind"] == "goodLogin":
                auth_token = login_resp["data"]["authToken"]
                self.session.headers["Authorization"] = f"Bearer {auth_token}"
            else:
                raise ValueError(
                    f"Invalid login_token provided (reason: {login_resp['kind']})"
                )

    @staticmethod
    def assertResponseKind(response: Any, kind: str) -> None:
        if response["kind"] != kind:
            raise RuntimeError(f"Server error: {response['kind']}")

    def list_challenges(self) -> List[Dict[str, Any]]:
        r = self.session.get("challs").json()
        self.assertResponseKind(r, "goodChallenges")
        return r["data"]

    def put_challenge(self, chall_id: str, data: Dict[str, Any]) -> None:
        r = self.session.put("challs/" + quote(chall_id), json={"data": data}).json()
        self.assertResponseKind(r, "goodChallengeUpdate")

    def delete_challenge(self, chall_id: str) -> None:
        r = self.session.delete("challs/" + quote(chall_id)).json()
        self.assertResponseKind(r, "goodChallengeDelete")

    def create_upload(self, uploads: Dict[str, bytes]) -> Dict[str, str]:
        """
        :param uploads: uploads {name: data}
        :return: urls {name: url}
        """
        if len(uploads) == 0:
            return {}

        payload = [
            {"name": name, "data": "data:;base64," + b64encode(data).decode()}
            for name, data in uploads.items()
        ]
        r = self.session.post("upload", json={"files": payload}).json()
        self.assertResponseKind(r, "goodFilesUpload")
        return {f["name"]: f["url"] for f in r["data"]}

    def get_url_for_files(self, files: Dict[str, str]) -> Dict[str, Optional[str]]:
        """
        :param files: files to get {name: sha256}
        :return: urls {name: url}
        """
        payload = [{"name": name, "sha256": sha256} for name, sha256 in files.items()]
        r = self.session.post("upload/query", json={"uploads": payload}).json()
        self.assertResponseKind(r, "goodUploadsQuery")
        return {f["name"]: f["url"] for f in r["data"]}
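# A usage sketch (endpoint, token, and file contents are placeholders):
admin = RCTFAdminV1("https://ctf.example.com/", login_token="team-token")

for chall in admin.list_challenges():
    print(chall["id"])

# Returns {name: download URL} for the uploaded files
urls = admin.create_upload({"handout.zip": b"PK..."})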
def main():
    if len(sys.argv) < 5:
        print(
            "usage: multinet.py <instance-url> <workspace> <api-token> <volume>",
            file=sys.stderr,
        )
        return 1

    # Extract args
    _, base_url, workspace, api_token, volume = sys.argv

    # Inject auth token into every request
    api_client = BaseUrlSession(base_url=base_url)
    api_client.headers.update({"Authorization": f"Bearer {api_token}"})

    print("Uploading files...")

    # Upload all files to S3
    s3ff_client = S3FileFieldClient("/api/s3-upload/", api_client)

    # Upload nodes.csv
    with open("artifacts/nodes.csv", "rb") as file_stream:
        nodes_field_value = s3ff_client.upload_file(
            file_stream, "nodes.csv", "api.Upload.blob"
        )["field_value"]

    # Upload links.csv
    with open("artifacts/links.csv", "rb") as file_stream:
        links_field_value = s3ff_client.upload_file(
            file_stream, "links.csv", "api.Upload.blob"
        )["field_value"]

    # Update base url, since only workspace endpoints are needed now
    api_client.base_url = f"{base_url}/api/workspaces/{workspace}/"

    # Get names of all networks and tables
    networks = [x["name"] for x in api_client.get("networks/").json().get("results")]
    tables = [x["name"] for x in api_client.get("tables/").json().get("results")]

    # Filter names to ones we want to remove (like the volume)
    networks = list(filter(lambda x: volume in x, networks))
    tables = list(filter(lambda x: volume in x, tables))

    # Delete network and tables if they exist
    for network in networks:
        api_client.delete(f"networks/{network}/")
    for table in tables:
        api_client.delete(f"tables/{table}/")

    # Generate new network and table names
    NODE_TABLE_NAME = f"{volume}_nodes"
    EDGE_TABLE_NAME = f"{volume}_links"
    NETWORK_NAME = f"{volume}_{datetime.now(pytz.timezone('America/Denver')).strftime('%Y-%m-%d_%H-%M')}"

    # Create nodes table
    r = api_client.post(
        "uploads/csv/",
        json={
            "field_value": nodes_field_value,
            "edge": False,
            "table_name": NODE_TABLE_NAME,
            "columns": {
                "TypeID": "category",
                "Verified": "boolean",
                "Confidence": "number",
                "ParentID": "category",
                "Created": "date",
                "LastModified": "date",
                "TypeLabel": "category",
                "Volume (nm^3)": "number",
                "MaxDimension": "number",
                "MinZ": "number",
                "MaxZ": "number",
            },
        },
    )
    raise_for_status(r)
    nodes_upload = r.json()

    # Create links table
    r = api_client.post(
        "uploads/csv/",
        json={
            "field_value": links_field_value,
            "edge": True,
            "table_name": EDGE_TABLE_NAME,
            "columns": {
                "TotalChildren": "number",
                "LastModified": "date",
                "Bidirectional": "boolean",
                "Type": "category",
                "TotalSourceArea(nm^2)": "number",
                "TotalTargetArea(nm^2)": "number",
            },
        },
    )
    raise_for_status(r)
    links_upload = r.json()

    print("Processing files...")

    # Wait for nodes and links tables to be created
    await_tasks_finished(api_client, [nodes_upload, links_upload])

    # Create network
    raise_for_status(
        api_client.post(
            "networks/",
            json={"name": NETWORK_NAME, "edge_table": EDGE_TABLE_NAME},
        )
    )

    print("Network created.")
    print("Synchronization finished.")
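# The script calls a module-level `raise_for_status(r)` helper (distinct from
# `Response.raise_for_status()`) whose definition is not shown. A plausible
# sketch that surfaces the response body before raising:
import sys
import requests

def raise_for_status(r: requests.Response) -> None:
    """Like Response.raise_for_status(), but dump the body first for debugging."""
    if not r.ok:
        print(r.text, file=sys.stderr)
    r.raise_for_status()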
class NsdiClient(object):
    def __init__(self, config: typing.Dict[str, typing.Any]) -> None:
        super().__init__()
        proxy = config.get("PROXY_HOST") or None

        # Header settings
        self.session = BaseUrlSession("http://openapi.nsdi.go.kr/")
        self.session.headers.update({"User-Agent": USER_AGENT})
        if proxy:
            apply_proxy(self.session, proxy)

        self.retryer = Retryer(
            strategy_factory=(
                ExponentialModulusBackoffStrategy.create_factory(2, 10)
            ),
            should_retry=lambda e: isinstance(
                e, (requests.exceptions.ConnectionError,)
            ),
            default_max_trials=3,
        )

    def _handle_json_response(
        self, r: requests.Response
    ) -> typing.Dict[str, typing.Any]:
        r.raise_for_status()
        try:
            data = r.json()
            return data
        except (json.JSONDecodeError, ValueError):
            raise NsdiClientResponseError(r.status_code, r.text)

    def _handle_text_response(self, r: requests.Response) -> str:
        r.raise_for_status()
        try:
            r.json()
        except (json.JSONDecodeError, ValueError):
            return r.text
        else:
            raise NsdiClientResponseError(r.status_code, r.text)

    def init_page(
        self, prov_org: str, gubun: str, svc_se: str, svc_id: str
    ) -> requests.Response:
        # Prime the session with cookies and session info
        params1 = (
            ("provOrg", prov_org),
            ("gubun", gubun),
        )
        params2 = {
            "svcSe": svc_se,
            "svcId": svc_id,
        }

        response1 = self.session.get(
            "/nsdi/eios/OpenapiList.do", params=params1
        )
        self._handle_text_response(response1)

        response = self.session.get(
            "/nsdi/eios/ServiceDetail.do", params=params2
        )
        self._handle_text_response(response)
        return response

    def fetch_land_using_info_table(
        self,
        svc_se: str,
        svc_id: str,
        start_date: str,
        end_date: str,
        extrc_se_search: str,
        prov_org: str,
        page_index: int,
    ) -> NsdiLandUsingInfoResponse:
        data = {
            "svcSe": svc_se,
            "svcId": svc_id,
            "pageIndex": "1",
            "provOrg": prov_org,
            "startDate": start_date,
            "endDate": end_date,
            "doArea": "",
            "svcNmSearch": "",
            "pageIndexSecond": "1",
        }
        if prov_org == "NIDO":
            data.update({"extrcSeSearch": extrc_se_search})
        if page_index:
            data.update({"pageIndexSecond": str(page_index)})

        response = self._handle_text_response(
            self.retryer.run(
                functools.partial(
                    self.session.post,
                    "/nsdi/eios/ServiceDetail.do",
                    data=data,
                )
            )
        )
        return NsdiLandUsingInfoResponse.from_html(response)

    def fetch_download_response(
        self, table_data: NsdiLandUsingInfo.NsdiTableData, prov_org: str
    ) -> requests.Response:
        data = {
            "opertSnDialog": table_data.opert_sn_dialog,
            "fileNmDialog": table_data.file_nm_dialog,
            "extrcScopeDialog": table_data.extrc_scope_dialog,
            "extrcSeDialog": table_data.extrc_se_dialog,
            "extrcDtDialog": table_data.extrc_dt_dialog,
            "svcIdDialog": table_data.svcld_dialog,
            "checkedValue": "",
            "downloadFileTy": "",
            "provOrg": prov_org,
        }
        response = self.retryer.run(
            functools.partial(
                self.session.post, "/nsdi/eios/fileDownload.do", data=data
            )
        )
        # Handled this way because the download must consume response.iter_content
        response.raise_for_status()
        return response

    def fetch_region_list(
        self, response: requests.Response
    ) -> typing.List[NsdiRegion]:
        self.session.headers.update({"Referer": response.url})
        response = self._handle_json_response(
            self.retryer.run(
                functools.partial(
                    self.session.get,
                    "/nsdi/eios/service/rest/AdmService/admCodeList.json",
                )
            )
        )
        region_list = response["admVOList"]["admVOList"]
        return [NsdiRegion.from_json(x) for x in region_list]

    def fetch_region_detail_list(
        self, adm_code: str
    ) -> typing.List[NsdiRegion]:
        params = {"admCode": adm_code}
        response = self._handle_json_response(
            self.retryer.run(
                functools.partial(
                    self.session.get,
                    "/nsdi/eios/service/rest/AdmService/admSiList.json",
                    params=params,
                )
            )
        )
response["admVOList"]["admVOList"] return [NsdiRegion.from_json(x) for x in region_detail_list]
class Workbench:
    """
    Create a full report of your computer with serials, testing,
    benchmarking, erasing and installing an OS.
    """

    def __init__(self, smart: Smart = False, erase: EraseType = False,
                 erase_steps: int = 1, erase_leading_zeros: bool = False,
                 stress: int = 0, install: str = False, server: str = None,
                 Tester: Type[Tester] = Tester,
                 Computer: Type[Computer] = Computer,
                 Eraser: Type[Eraser] = Eraser,
                 Benchmarker: Type[Benchmarker] = Benchmarker,
                 USBSneaky: Type[USBSneaky] = USBSneaky,
                 Installer: Type[Installer] = Installer):
        """
        Configures this Workbench.

        :param smart: Should we perform a SMART test on the hard-drives? If so, pass :attr:`.Workbench.Smart.short` for a short test and :attr:`.Workbench.Smart.long` for a long test. Falsy values disable the functionality.
        :param erase: Should we erase the hard-drives? Pass in :attr:`.Workbench.Erase.normal` to perform a normal erasure (quite secure) or :attr:`.Workbench.Erase.sectors` to perform a slower but fully secured erasure. Falsy values disable the functionality. See `a detailed explanation of the erasure process in the FAQ <https://ereuse-org.gitbooks.io/faq/content/which-is-the-data-wiping-process-performed.html>`_.
        :param erase_steps: In case `erase` is truthy, how many overwriting steps should we perform? Policies and regulations may set a specific value.
        :param erase_leading_zeros: In case `erase` is truthy, should we finish erasing with an extra step that writes zeroes? This can be enforced by policy and regulation.
        :param stress: How many minutes should we stress the machine? 0 minutes disables this test. A stress test puts the machine at 100% (CPU, RAM and HDD) to ensure components can handle heavy work.
        :param install: Image name to install. A falsy value will disable installation. The image is an FSA file that will be installed on the first hard-drive. Do not add the extension ('.fsa').
        :param server: A URL pointing to a WorkbenchServer. Setting a truthy value will turn on server functionality like the USBSneaky module, sending snapshots to the server and getting configuration from it.
        :param Tester: Testing class to use to perform tests.
        :param Computer: Computer class to use to retrieve computer information.
        """
        if os.geteuid() != 0:
            raise EnvironmentError('Execute Workbench as root.')

        init(autoreset=True)
        self.smart = smart
        self.erase = erase
        self.erase_steps = erase_steps
        self.erase_leading_zeros = erase_leading_zeros
        self.stress = stress
        self.server = server
        self.uuid = uuid.uuid4()
        self.install = install
        self.install_path = Path('/media/workbench-images')

        if self.server:
            # Override the parameters with the configuration from the server
            self.session = BaseUrlSession(base_url=self.server)
            self.session.verify = False
            self.session.headers.update({'Content-Type': 'application/json'})
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            self.config_from_server()
            if self.install:
                # We get the OS to install from the server through a mounted samba
                self.mount_images(self.server)
            # By setting daemon=True USBSneaky will die when we die
            self.usb_sneaky = Process(target=USBSneaky,
                                      args=(self.uuid, server),
                                      daemon=True)

        self.phases = 1 + bool(self.smart) + bool(self.stress) + \
            bool(self.erase) + bool(self.install)
        """
        The number of phases we will be performing.

        A phase is a piece of execution. Gathering hardware info is the
        first phase, and executing a smart test is the second one.
""" self.installer = Installer() self.tester = Tester() self.eraser = Eraser(self.erase, self.erase_steps, self.erase_leading_zeros) self.benchmarker = Benchmarker() self.Computer = Computer def config_from_server(self): """Configures the Workbench from a config endpoint in the server.""" r = self.session.get('/config') r.raise_for_status() for key, value in r.json().items(): if key == 'smart' and value: self.smart = Smart(value) elif key == 'erase' and value: self.erase = EraseType(value) else: setattr(self, key, value) def mount_images(self, server: str): """Mounts the folder where the OS images are.""" self.install_path.mkdir(parents=True, exist_ok=True) ip, _ = urlparse(server).netloc.split(':') try: run(('mount', '-t', 'cifs', '-o', 'guest,uid=root,forceuid,gid=root,forcegid', '//{}/workbench-images'.format(ip), str(self.install_path)), universal_newlines=True, check=True) except CalledProcessError as e: raise CannotMount('Did you umount?') from e def run(self) -> str: """ Executes Workbench on this computer and returns a valid JSON for DeviceHub. """ try: return self._run() except Exception: print( '{}Workbench panic - unexpected exception found. Please take ' 'a photo of the screen and send it to eReuse Workbench Developers.' .format(Fore.RED)) raise finally: if self.server and self.install: # Un-mount images try: run(('umount', str(self.install_path)), universal_newlines=True, check=True) except CalledProcessError as e: raise CannotMount() from e def _run(self) -> str: print('{}Starting eReuse.org Workbench'.format(Fore.CYAN)) if self.server: self.usb_sneaky.start() print('{} Getting computer information...'.format( self._print_phase(1))) init_time = now() computer_getter = self.Computer(self.benchmarker) computer, components = computer_getter.run() snapshot = { 'device': computer, 'components': components, '_uuid': self.uuid, '_totalPhases': self.phases, '_phases': 0, # Counter of phases we have executed 'snapshotSoftware': 'Workbench', 'inventory': { 'elapsed': now() - init_time }, # The version of Workbench # from https://stackoverflow.com/a/2073599 # This throws an exception if you git clone this package # and did not install it with pip # Perform ``pip install -e .`` or similar to fix 'version': pkg_resources.require('ereuse-workbench')[0].version, 'automatic': True, 'date': now( ), # todo we should ensure debian updates the machine time from Internet '@type': 'devices:Snapshot' } self.after_phase(snapshot, init_time) hdds = tuple(c for c in components if c['@type'] == 'HardDrive') if self.benchmarker: snapshot['benchmarks'] = [self.benchmarker.benchmark_memory()] if self.smart: print('{} Run SMART test and benchmark hard-drives...'.format( self._print_phase(2))) for hdd in hdds: hdd['test'] = self.tester.smart( hdd[PrivateFields.logical_name], self.smart) if hdd['test']['error']: print('{}Failed SMART for HDD {}'.format( Fore.RED, hdd.get('serialNumber'))) self.after_phase(snapshot, init_time) if self.stress: print('{} Run stress tests for {} mins...'.format( self._print_phase(3), self.stress)) snapshot['tests'] = [self.tester.stress(self.stress)] self.after_phase(snapshot, init_time) if self.erase: text = '{} Erase Hard-Drives with {} method, {} steps and {} overriding with zeros...' 
            print(text.format(self._print_phase(4), self.erase.name,
                              self.erase_steps,
                              '' if self.erase_leading_zeros else 'not'))
            for hdd in hdds:
                hdd['erasure'] = self.eraser.erase(hdd[PrivateFields.logical_name])
                if not hdd['erasure']['success']:
                    print('{}Failed erasing HDD {}'.format(
                        Fore.RED, hdd.get('serialNumber')))
            self.after_phase(snapshot, init_time)

        if self.install:
            print('{} Install {}...'.format(self._print_phase(5), self.install))
            snapshot['osInstallation'] = self.installer.install(
                self.install_path / self.install)
            if not snapshot['osInstallation']['success']:
                print('{}Failed installing OS'.format(Fore.RED))
            self.after_phase(snapshot, init_time)

        print('{}eReuse.org Workbench has finished properly.'.format(Fore.GREEN))

        # Comply with DeviceHub's Snapshot
        snapshot.pop('_phases', None)
        snapshot.pop('_totalPhases', None)
        return json.dumps(snapshot, skipkeys=True, cls=DeviceHubJSONEncoder,
                          indent=2)

    def after_phase(self, snapshot: dict, init_time: datetime):
        snapshot['_phases'] += 1
        snapshot['elapsed'] = now() - init_time

        if self.server:
            # Send to server
            url = '/snapshots/{}'.format(snapshot['_uuid'])
            data = json.dumps(snapshot, cls=DeviceHubJSONEncoder, skipkeys=True)
            self.session.patch(url, data=data).raise_for_status()

    @staticmethod
    def _print_phase(phase: int):
        return '[ {}Phase {}{} ]'.format(Fore.CYAN, phase, Fore.RESET)
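# A usage sketch (must run as root; the enum members below are inferred from
# the constructor docstring and may differ in the real package):
workbench = Workbench(
    smart=Smart.short,       # quick SMART test on every drive
    erase=EraseType.normal,  # "normal" erasure with a single overwriting step
    erase_steps=1,
    stress=2,                # two-minute stress test
    server=None,             # stand-alone run; no WorkbenchServer
)
print(workbench.run())       # DeviceHub-ready JSON snapshot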