def _upload_to_new_robot(self, params: UploadNewRobotParamsDict) -> ActionResultDict:
    """Create a new robot in the given cloud workspace and upload a local directory to it.

    :param params: expects "directory", "workspaceId" and "robotName" keys.
    :return: an action-result dict ({"success", "message", "result"}).
    """
    from robocorp_ls_core.progress_report import progress_context

    directory = params["directory"]
    validation_error = self._validate_directory(directory)
    if validation_error:
        return {"success": False, "message": validation_error, "result": None}

    workspace_id = params["workspaceId"]
    robot_name = params["robotName"]

    # A new robot changes the workspace listing, so the cached listing is stale.
    self._dir_cache.discard(self.CLOUD_LIST_WORKSPACE_CACHE_KEY)

    with progress_context(self._endpoint, "Uploading to new robot", self._dir_cache):
        creation = self._rcc.cloud_create_robot(workspace_id, robot_name)
        if not creation.success:
            return creation.as_dict()

        new_robot_id = creation.result
        if not new_robot_id:
            # Creation reported success but yielded no id; treat as an error.
            return dict(
                success=False,
                message="Expected to have package id from creating new activity.",
                result=None,
            )

        upload_result = self._rcc.cloud_set_robot_contents(
            directory, workspace_id, new_robot_id
        )
        # Remember this (workspace, robot, directory) as most-recently used.
        self._add_package_info_to_access_lru(workspace_id, new_robot_id, directory)
        return upload_result.as_dict()
def get_interpreter_info(
    cls,
    robot_yaml_file_info: _CachedFileInfo,
    conda_config_file_info: Optional[_CachedFileInfo],
    env_json_path_file_info: Optional[_CachedFileInfo],
    pm: PluginManager,
) -> IInterpreterInfo:
    """Return the interpreter info for a robot.yaml, reusing the class-level cache when valid.

    :param robot_yaml_file_info: the robot.yaml whose environment is wanted (cache key
        is its file path).
    :param conda_config_file_info: related conda config, if any (part of cache validity).
    :param env_json_path_file_info: related env.json, if any (part of cache validity).
    :param pm: plugin manager used to obtain the endpoint for progress reporting.
    """
    cache_key = robot_yaml_file_info.file_path
    cached = cls._cached_interpreter_info.get(cache_key)
    cache_usable = cached is not None and cached.is_cache_valid(
        robot_yaml_file_info, conda_config_file_info, env_json_path_file_info
    )
    if cache_usable:
        _CacheInfo._cache_hit_interpreter += 1
        return cached.info

    from robocorp_ls_core.progress_report import progress_context
    from robotframework_ls.ep_providers import EPEndPointProvider

    endpoint = pm[EPEndPointProvider].endpoint
    with progress_context(endpoint, "Obtain env for robot.yaml", dir_cache=None):
        # Not cached (or cache invalid): build it fresh. This may take a while...
        fresh = _CachedInterpreterInfo(
            robot_yaml_file_info,
            conda_config_file_info,
            env_json_path_file_info,
            pm,
        )
        cls._cached_interpreter_info[cache_key] = fresh
        return fresh.info
def _is_login_needed_internal(self) -> ActionResultDict:
    """Report whether a cloud login is needed (i.e. current credentials are not valid)."""
    from robocorp_ls_core.progress_report import progress_context

    with progress_context(
        self._endpoint, "Validating cloud credentials", self._dir_cache
    ):
        credentials_ok = self._rcc.credentials_valid()
    login_needed = not credentials_ok
    return {"success": login_needed, "message": None, "result": login_needed}
def _configuration_diagnostics_internal(
    self, params: ConfigurationDiagnosticsDict
) -> ActionResultDict:
    """Run rcc configuration diagnostics for the robot.yaml given in params.

    :param params: expects a "robotYaml" key with the robot.yaml location.
    """
    from robocorp_ls_core.progress_report import progress_context

    robot_yaml = params["robotYaml"]
    with progress_context(
        self._endpoint, "Collecting configuration diagnostics", self._dir_cache
    ):
        diagnostics_result = self._rcc.configuration_diagnostics(robot_yaml, json=False)
    return diagnostics_result.as_dict()
def _cloud_logout(self) -> ActionResultDict:
    """Remove the current cloud credentials, dropping caches that depend on them."""
    from robocorp_ls_core.progress_report import progress_context

    self._feedback_metric("vscode.cloud.logout")

    # The workspace listing belongs to the account being logged out, so discard it.
    self._dir_cache.discard(self.CLOUD_LIST_WORKSPACE_CACHE_KEY)

    with progress_context(
        self._endpoint, "Removing cloud credentials", self._dir_cache
    ):
        return self._rcc.remove_current_credentials().as_dict()
def _cloud_login(self, params: CloudLoginParamsDict) -> ActionResultDict:
    """Add the given cloud credentials via rcc and report whether they are valid.

    :param params: expects a "credentials" key.
    """
    from robocorp_ls_core.progress_report import progress_context

    # A different account may be logging in, so the cached workspace listing is stale.
    self._dir_cache.discard(self.CLOUD_LIST_WORKSPACE_CACHE_KEY)

    credentials = params["credentials"]
    with progress_context(
        self._endpoint, "Adding cloud credentials", self._dir_cache
    ):
        add_result = self._rcc.add_credentials(credentials)
        if not add_result.success:
            return add_result.as_dict()

        valid = self._rcc.credentials_valid()
    return {"success": valid, "message": None, "result": valid}
def _upload_to_existing_activity(
    self, params: UploadRobotParamsDict
) -> ActionResultDict:
    """Upload a local robot directory to an already-existing cloud robot.

    :param params: expects "directory", "workspaceId" and "robotId" keys.
    """
    from robocorp_ls_core.progress_report import progress_context

    directory = params["directory"]
    validation_error = self._validate_directory(directory)
    if validation_error:
        return {"success": False, "message": validation_error, "result": None}

    workspace_id = params["workspaceId"]
    robot_id = params["robotId"]
    with progress_context(
        self._endpoint, "Uploading to existing robot", self._dir_cache
    ):
        upload_result = self._rcc.cloud_set_robot_contents(
            directory, workspace_id, robot_id
        )
        # Remember this (workspace, robot, directory) as most-recently used.
        self._add_package_info_to_access_lru(workspace_id, robot_id, directory)
    return upload_result.as_dict()
def _cloud_list_workspaces(
    self, params: CloudListWorkspaceDict
) -> ListWorkspacesActionResultDict:
    """List cloud workspaces (with their robots), using a per-account on-disk cache.

    When params["refresh"] is falsy, a cached listing stored under the current
    account is returned (after recomputing sort keys from the access LRU);
    otherwise rcc is queried and the fresh listing is cached.
    """
    from robocorp_ls_core.progress_report import progress_context

    # Sort key used for packages not present in the access LRU (pushes them last).
    DEFAULT_SORT_KEY = 10
    package_info: PackageInfoDict
    ws_dict: WorkspaceInfoDict

    # Maps (workspace_id, robot_id) -> LRU index (lower == more recently used).
    ws_id_and_pack_id_to_lru_index = self._get_sort_key_info()

    curr_account_info = self._rcc.last_verified_account_info
    if curr_account_info is None:
        curr_account_info = self._rcc.get_valid_account_info()
        if curr_account_info is None:
            return {
                "success": False,
                "message": "Unable to get workspace info (no user is logged in).",
                "result": None,
            }

    # Cached listings are only valid for the account that produced them.
    account_cache_key = (curr_account_info.account, curr_account_info.identifier)

    if not params.get("refresh", True):
        try:
            cached: ListWorkspaceCachedInfoDict = self._dir_cache.load(
                self.CLOUD_LIST_WORKSPACE_CACHE_KEY, dict
            )
        except KeyError:
            # No cache entry yet: fall through to the full listing below.
            pass
        else:
            # We need to update the sort key when it's gotten from the cache.
            try:
                # The stored key round-trips as a list; compare as tuple.
                if account_cache_key == tuple(cached.get("account_cache_key", ())):
                    for ws_dict in cached["ws_info"]:
                        for package_info in ws_dict["packages"]:
                            key = (package_info["workspaceId"], package_info["id"])
                            # Zero-padded LRU index + lowercase name gives a
                            # lexicographic sort: recently-used first, then by name.
                            sort_key = "%05d%s" % (
                                ws_id_and_pack_id_to_lru_index.get(
                                    key, DEFAULT_SORT_KEY
                                ),
                                package_info["name"].lower(),
                            )
                            package_info["sortKey"] = sort_key
                    return {
                        "success": True,
                        "message": None,
                        "result": cached["ws_info"],
                    }
            except Exception:
                # Malformed cache entry: log and fall back to a fresh listing.
                log.exception(
                    "Error computing new sort keys for cached entry. Refreshing and proceeding."
                )

    last_error_result = None

    with progress_context(
        self._endpoint, "Listing cloud workspaces", self._dir_cache
    ):
        ws: IRccWorkspace
        ret: List[WorkspaceInfoDict] = []
        result = self._rcc.cloud_list_workspaces()
        if not result.success:
            return result.as_dict()

        workspaces = result.result
        for ws in workspaces:
            packages: List[PackageInfoDict] = []
            activity_package: IRccRobotMetadata
            activities_result = self._rcc.cloud_list_workspace_robots(
                ws.workspace_id
            )
            if not activities_result.success:
                # If we can't list the robots of a specific workspace, just skip it
                # (the log should still show it but we can proceed to list the
                # contents of other workspaces).
                last_error_result = activities_result
                continue
            workspace_activities = activities_result.result
            for activity_package in workspace_activities:
                key = (ws.workspace_id, activity_package.robot_id)
                sort_key = "%05d%s" % (
                    ws_id_and_pack_id_to_lru_index.get(key, DEFAULT_SORT_KEY),
                    activity_package.robot_name.lower(),
                )
                package_info = {
                    "name": activity_package.robot_name,
                    "id": activity_package.robot_id,
                    "sortKey": sort_key,
                    "workspaceId": ws.workspace_id,
                    "workspaceName": ws.workspace_name,
                }
                packages.append(package_info)

            ws_dict = {
                "workspaceName": ws.workspace_name,
                "workspaceId": ws.workspace_id,
                "packages": packages,
            }
            ret.append(ws_dict)

    # Every workspace failed to list: surface the last error instead of an empty list.
    if not ret and last_error_result is not None:
        return last_error_result.as_dict()

    if ret:
        # Only store if we got something.
        store: ListWorkspaceCachedInfoDict = {
            "ws_info": ret,
            "account_cache_key": account_cache_key,
        }
        self._dir_cache.store(self.CLOUD_LIST_WORKSPACE_CACHE_KEY, store)
    return {"success": True, "message": None, "result": ret}
def _cloud_list_workspaces(
    self, params: CloudListWorkspaceDict
) -> ListWorkspacesActionResultDict:
    """List cloud workspaces (with their robots), optionally serving from the on-disk cache.

    When params["refresh"] is falsy, the cached listing is returned (after
    recomputing sort keys from the access LRU); otherwise rcc is queried and
    the fresh listing is cached.

    NOTE(review): this variant caches the listing without tying it to the
    logged-in account, and aborts the whole listing on the first workspace
    whose robots can't be listed — confirm that's intended.
    """
    from robocorp_ls_core.progress_report import progress_context

    # Sort key used for packages not present in the access LRU (pushes them last).
    DEFAULT_SORT_KEY = 10
    package_info: PackageInfoDict
    ws_dict: WorkspaceInfoDict

    # Maps (workspace_id, robot_id) -> LRU index (lower == more recently used).
    ws_id_and_pack_id_to_lru_index = self._get_sort_key_info()

    if not params.get("refresh", True):
        try:
            cached: List[WorkspaceInfoDict] = self._dir_cache.load(
                self.CLOUD_LIST_WORKSPACE_CACHE_KEY, list
            )
        except KeyError:
            # No cache entry yet: fall through to the full listing below.
            pass
        else:
            # We need to update the sort key when it's gotten from the cache.
            try:
                for ws_dict in cached:
                    for package_info in ws_dict["packages"]:
                        key = (package_info["workspaceId"], package_info["id"])
                        # Zero-padded LRU index + lowercase name gives a
                        # lexicographic sort: recently-used first, then by name.
                        sort_key = "%05d%s" % (
                            ws_id_and_pack_id_to_lru_index.get(
                                key, DEFAULT_SORT_KEY
                            ),
                            package_info["name"].lower(),
                        )
                        package_info["sortKey"] = sort_key
                return {"success": True, "message": None, "result": cached}
            except Exception:
                # Malformed cache entry: log and fall back to a fresh listing.
                log.exception(
                    "Error computing new sort keys for cached entry. Refreshing and proceeding."
                )

    with progress_context(
        self._endpoint, "Listing cloud workspaces", self._dir_cache
    ):
        ws: IRccWorkspace
        ret: List[WorkspaceInfoDict] = []
        result = self._rcc.cloud_list_workspaces()
        if not result.success:
            return result.as_dict()
        workspaces = result.result
        for ws in workspaces:
            packages: List[PackageInfoDict] = []
            activity_package: IRccRobotMetadata
            activities_result = self._rcc.cloud_list_workspace_robots(
                ws.workspace_id
            )
            if not activities_result.success:
                # A single workspace failure aborts the whole listing.
                return activities_result.as_dict()
            workspace_activities = activities_result.result
            for activity_package in workspace_activities:
                key = (ws.workspace_id, activity_package.robot_id)
                sort_key = "%05d%s" % (
                    ws_id_and_pack_id_to_lru_index.get(
                        key, DEFAULT_SORT_KEY
                    ),
                    activity_package.robot_name.lower(),
                )
                package_info = {
                    "name": activity_package.robot_name,
                    "id": activity_package.robot_id,
                    "sortKey": sort_key,
                    "workspaceId": ws.workspace_id,
                    "workspaceName": ws.workspace_name,
                }
                packages.append(package_info)
            ws_dict = {
                "workspaceName": ws.workspace_name,
                "workspaceId": ws.workspace_id,
                "packages": packages,
            }
            ret.append(ws_dict)

    if ret:
        # Only store if we got something.
        self._dir_cache.store(self.CLOUD_LIST_WORKSPACE_CACHE_KEY, ret)
    return {"success": True, "message": None, "result": ret}