def _get_url(self, url, as_json=False, verify=None):
    """Fetch `url` and return its body as text (or parsed JSON)."""
    result = {}
    ssl_verify = self._get_verify_ssl(verify)

    if not self._is_internet_available():
        return result

    try:
        # Retry transient connection failures.
        # See: https://github.com/ContinuumIO/navigator/issues/1485
        session = requests.Session()
        adapter = HTTPAdapter(
            max_retries=Retry(connect=3, backoff_factor=0.5))
        for scheme in ('http://', 'https://'):
            session.mount(scheme, adapter)

        response = session.get(
            url,
            proxies=self.proxy_servers,
            verify=ssl_verify,
            timeout=self.DEFAULT_TIMEOUT,
        )
        result = to_text_string(response.content, encoding='utf-8')
        if as_json:
            result = json.loads(result)
    except Exception as error:
        logger.error(str(error))

    return result
def _get_api_info(self, url, verify=None):
    """Query the api endpoint and return its urls, with defaults."""
    ssl_verify = self._get_verify_ssl(verify)
    info = {
        "api_url": url,
        "api_docs_url": "https://api.anaconda.org/docs",
        "conda_url": "https://conda.anaconda.org/",
        "main_url": "https://anaconda.org/",
        "pypi_url": "https://pypi.anaconda.org/",
        "swagger_url": "https://api.anaconda.org/swagger.json",
    }

    if not self._is_internet_available():
        return info

    try:
        response = requests.get(
            url,
            proxies=self.proxy_servers,
            verify=ssl_verify,
            timeout=self.DEFAULT_TIMEOUT,
        )
        payload = json.loads(
            to_text_string(response.content, encoding='utf-8'))
        info['conda_url'] = payload.get('conda_url', info['conda_url'])
    except Exception as error:
        logger.error(str(error))

    return info
def _get_api_info(self, url, proxy_servers=None, verify=True):
    """
    Return metadata about the api endpoint at `url`.

    Defaults are returned for any key the remote response does not
    provide (or when the request fails); string values from the remote
    response are stored with any trailing slash removed.

    Parameters
    ----------
    url : str
        API endpoint to query.
    proxy_servers : dict, optional
        Mapping of scheme to proxy url, passed through to requests.
    verify : bool or str, optional
        SSL verification flag (or CA bundle path) passed to requests.
    """
    proxy_servers = proxy_servers or {}
    data = {
        "api_url": url,
        "api_docs_url": "https://api.anaconda.org/docs",
        "brand": DEFAULT_BRAND,
        "conda_url": "https://conda.anaconda.org",
        "main_url": "https://anaconda.org",
        "pypi_url": "https://pypi.anaconda.org",
        "swagger_url": "https://api.anaconda.org/swagger.json",
    }
    try:
        r = requests.get(
            url,
            proxies=proxy_servers,
            verify=verify,
            timeout=self.DEFAULT_TIMEOUT,
        )
        content = to_text_string(r.content, encoding='utf-8')
        new_data = json.loads(content)

        # Enforce no trailing slash.  Bug fix: the old check indexed
        # value[-1], which raised IndexError on an empty string and
        # aborted processing of all remaining keys via the except.
        for key, value in new_data.items():
            if is_text_string(value):
                data[key] = value[:-1] if value.endswith('/') else value
    except Exception as error:
        logger.error(str(error))
    return data
def set_domain(self, domain='https://api.anaconda.org'):
    """
    Reset the current api domain.

    Writes `domain` into the anaconda client configuration (showing an
    error dialog if that fails) and recreates the client session so it
    picks up the new domain.
    """
    logger.debug('Setting domain {}'.format(domain))
    config = binstar_client.utils.get_config()
    config['url'] = domain
    try:
        binstar_client.utils.set_config(config)
    except binstar_client.errors.BinstarError:
        # Bug fix: log message typo 'configuation' -> 'configuration'.
        logger.error('Could not write anaconda client configuration')
        traceback = format_exc()
        msg_box = MessageBoxError(
            title='Anaconda Client configuration error',
            text='Anaconda Client domain could not be updated.<br><br>'
                 'This may result in Navigator not working properly.<br>',
            error='<pre>' + traceback + '</pre>',
            report=False,
            learn_more=None,
        )
        msg_box.exec_()
    # Recreate the client session so it uses the new domain.
    self._anaconda_client_api = binstar_client.utils.get_server_api(
        token=None,
        log_level=logging.NOTSET,
    )
def _is_valid_channel(self, channel,
                      conda_url='https://conda.anaconda.org'):
    """
    Callback for is_valid_channel.

    `channel` may be a full http(s) url or a bare channel name, which
    is resolved against `conda_url`.  Returns True when a HEAD request
    for the current platform's repodata.json answers with HTTP 200.
    """
    if channel.startswith(('https://', 'http://')):
        url = channel
    else:
        url = "{0}/{1}".format(conda_url, channel)

    # Normalize: drop a single trailing slash.  endswith() avoids an
    # IndexError should url ever be empty.
    if url.endswith('/'):
        url = url[:-1]

    plat = self._conda_api.get_platform()
    repodata_url = "{0}/{1}/{2}".format(url, plat, 'repodata.json')

    try:
        r = requests.head(
            repodata_url,
            proxies=self.proxy_servers,
            verify=self._client_api.get_ssl(),
            timeout=self.DEFAULT_TIMEOUT,
        )
        value = r.status_code == 200
    except Exception as error:
        logger.error(str(error))
        value = False
    return value
def convert_image(self, worker, output, error):
    """
    Load an image using PIL, and converts it to a QPixmap.

    This was needed as some image libraries are not found in some OS.
    """
    # Needs to come after qtpy imports
    image_path = output
    if image_path in self.pixmaps:
        return
    if not image_path or not os.path.isfile(image_path):
        return

    try:
        if sys.platform == 'darwin':
            # Route through PIL on macOS.
            from PIL.ImageQt import ImageQt
            from PIL import Image

            pil_image = Image.open(image_path)
            qt_image = QImage(ImageQt(pil_image))
            pixmap = QPixmap.fromImage(qt_image)
        else:
            extension = image_path.split('.')[-1].upper()
            if extension in ('PNG', 'JPEG', 'JPG'):
                pixmap = QPixmap(image_path, format=extension)
            else:
                pixmap = QPixmap(image_path)
        self.pixmaps[image_path] = pixmap
    except (IOError, OSError) as load_error:
        logger.error(str(load_error))
def _get_url(self, url, as_json=False, verify=None):
    """Fetch `url` and return its body as text (or parsed JSON)."""
    result = {}
    ssl_verify = self._client_api.get_ssl() if verify is None else verify
    try:
        response = requests.get(
            url,
            proxies=self.proxy_servers,
            verify=ssl_verify,
            timeout=self.DEFAULT_TIMEOUT,
        )
        result = to_text_string(response.content, encoding='utf-8')
        if as_json:
            result = json.loads(result)
    except Exception as error:
        logger.error(str(error))
    return result
def _is_valid_api_url(self, url, verify=None):
    """
    Callback for is_valid_api_url.

    Returns True when `url` answers with a JSON body containing
    ``ok: 1``; any network or parse failure yields False.
    """
    # Bug fix: verify_value was computed twice — once via
    # _get_verify_ssl and again with an inline if/else that clobbered
    # it.  Keep the single helper call, consistent with the sibling
    # callbacks (_get_url, _is_valid_url).
    # NOTE(review): assumes _get_verify_ssl(verify) implements the
    # same None-fallback as the removed inline branch — confirm.
    verify_value = self._get_verify_ssl(verify)

    # Check response is a JSON with ok: 1
    data = {}
    if self._is_internet_available():
        try:
            r = requests.get(
                url,
                proxies=self.proxy_servers,
                verify=verify_value,
                timeout=self.DEFAULT_TIMEOUT,
            )
            content = to_text_string(r.content, encoding='utf-8')
            data = json.loads(content)
        except Exception as error:
            logger.error(str(error))
    return data.get('ok', 0) == 1
def _environment_removed(self, worker, output, error):
    """Refresh the tab after an environment-removal worker finishes."""
    self.update_visibility(True)
    if error:
        logger.error(str(error))
    self.setup_tab()
    # Select the first (root) environment after a removal.
    self.list_environments.setCurrentRow(0)
def get_ip(self):
    """Return the current ip based on ipify.org."""
    ip = None
    try:
        proxies = self.api.conda_load_proxy_config()
        response = requests.get(
            'https://api.ipify.org/?format=json', proxies=proxies)
        ip = response.json()['ip']
    except Exception as error:
        logger.error(str(error))
    return ip
def get_ip(self):
    """Return the current external ip via ipify.org, or None on failure."""
    try:
        response = requests.get('https://api.ipify.org/?format=json')
        ip = response.json()['ip']
    except Exception as error:
        # Network failures are logged, not raised.
        logger.error(str(error))
        ip = None
    return ip
def create_application_projects(self, worker, output, error):
    """Finish setup once the worker has fetched package/app data."""
    if error:
        logger.error(str(error))
    # NOTE(review): `output` is unpacked even when `error` is set — if
    # a failed worker yields a non-tuple output this raises; confirm
    # the worker always emits a (packages, apps) pair.
    packages, apps = output
    self.api.create_application_projects(
        apps,
        add_project=self.first_run,
    )
    self.post_setup(apps)
    self.check_for_updates(packages)
def handle_action_finished(self, worker, output, error):
    """Refresh the widget after an application action worker finishes."""
    result = output if isinstance(output, dict) else {}
    succeeded = result.get('success', True)
    if error or not succeeded:
        # Error might be harmless if no decoding was possible...
        # Success deserves some sort of messagebox
        logger.error(error)
    self.widget.sig_application_updated.emit(self.name, self.version)
    self.update_status()
    self.set_loading(False)
def _is_valid_url(self, url):
    """Callback for is_valid_url."""
    valid = False
    try:
        response = requests.head(
            url,
            proxies=self.proxy_servers,
            timeout=self.DEFAULT_TIMEOUT,
        )
        valid = response.status_code == 200
    except Exception as error:
        logger.error(str(error))
    return valid
def update_config(self, prefix):
    """Point the app's settings.json at the python in `prefix`.

    Returns True on success, False when the settings file could not
    be read or written.
    """
    logger.debug('Update app config to use prefix {}'.format(prefix))
    try:
        # User-level settings file (VS Code-style layout under
        # CONF_PATH/Code/User — TODO confirm against the app).
        _config = os.path.join(
            CONF_PATH,
            'Code',
            'User',
            'settings.json',
        )
        _config_dir = os.path.dirname(_config)
        try:
            # Best-effort: create the settings folder if missing.
            if not os.path.isdir(_config_dir):
                os.makedirs(_config_dir)
        except Exception as e:
            logger.error(e)
        config_update = {'python.pythonPath': prefix}
        if os.path.isfile(_config):
            try:
                # Back up the current settings before touching them.
                with io.open(_config, 'r', encoding='utf-8') as f:
                    data = f.read()
                self.create_config_backup(data)
                config_data = json.loads(data)
                for key, val in config_update.items():
                    config_data[key] = val
            except Exception:
                # If there is any error, don't overwrite app config
                return False
        else:
            config_data = config_update.copy()
        # NOTE(review): binary mode on py2, text on py3 — presumably
        # because py2 json.dump emits bytes; confirm before changing.
        mode = 'w' if PY3 else 'wb'
        with io.open(_config, mode) as f:
            json.dump(
                config_data,
                f,
                sort_keys=True,
                indent=4,
            )
    except Exception as e:
        logger.error(e)
        return False
    return True
def _environment_created(self, worker, output, error):
    """Select and load the newly created environment in the list."""
    if error:
        logger.error(str(error))
    self.update_visibility(False)
    # Find the row of the environment the worker just created.
    # NOTE(review): if no environment matches worker.name, `row` and
    # `environment` keep the last loop values (and are unbound when
    # the list is empty) — confirm callers guarantee a match.
    for row, environment in enumerate(self.get_environments()):
        if worker.name == environment:
            break
    self.last_env_prefix = self.api.conda_get_prefix_envname(environment)
    self.setup_tab(load_environment=False)
    self.list_environments.setCurrentRow(row)
    self.load_environment()
    self.refresh()
    self.update_visibility(True)
    update_pointer()
def get_ip(self):
    """
    Return the current ip based on ipify.org.

    This method is used for testing not for collecting actual ip
    addresses.
    """
    ip = None
    try:
        response = requests.get(
            'https://api.ipify.org/?format=json',
            proxies=self.api.conda_load_proxy_config(),
        )
        ip = response.json()['ip']
    except Exception as error:
        logger.error(str(error))
    return ip
def set_api_url(url):
    """Set the anaconda client url configuration."""
    config_data = binstar_client.utils.get_config()
    config_data['url'] = url
    try:
        binstar_client.utils.set_config(config_data)
    except Exception as e:
        # Tell the user the config write failed; Navigator keeps going.
        logger.error('Could not write anaconda client configuration')
        error_box = MessageBoxError(
            title='Anaconda Client configuration error',
            text='Anaconda Client configuration could not be updated.<br>'
                 'This may result in Navigator not working properly.<br>',
            error=e,
            report=False,
            learn_more=None,
        )
        error_box.exec_()
def set_ssl(self, value):
    """Set the anaconda client url configuration."""
    config_data = binstar_client.utils.get_config()
    config_data['verify_ssl'] = value
    try:
        # Keep anaconda-client and conda ssl settings in sync.
        binstar_client.utils.set_config(config_data)
        self._conda_api.config_set('ssl_verify', value).communicate()
    except Exception as e:
        logger.error('Could not write anaconda client configuration')
        error_box = MessageBoxError(
            title='Anaconda Client configuration error',
            text='Anaconda Client configuration could not be updated.<br>'
                 'This may result in Navigator not working properly.<br>',
            error=e,
            report=False,
            learn_more=None,
        )
        error_box.exec_()
def _is_valid_url(self, url, verify=None):
    """
    Callback for is_valid_url.

    Returns True when a HEAD request for `url` answers with HTTP 200,
    False otherwise — including when no internet connection is
    available.
    """
    verify_value = self._get_verify_ssl(verify)
    # Bug fix: `value` was only assigned inside the internet-available
    # branch, so the return raised UnboundLocalError when offline.
    value = False
    if self._is_internet_available():
        try:
            r = requests.head(
                url,
                proxies=self.proxy_servers,
                verify=verify_value,
                timeout=self.DEFAULT_TIMEOUT,
            )
            value = r.status_code == 200
        except Exception as error:
            logger.error(str(error))
            value = False
    return value
def _pip_data_ready(worker, output, error):
    """Chain callback: merge pip packages with conda linked data."""
    logger.debug('output: {}, error: {}'.format(output, error))
    base_worker = worker
    clean_packages = base_worker.packages  # Blacklisted removed!
    if error:
        logger.error(error)
    else:
        logger.debug('')
    pip_packages = output or []
    # Get linked data
    # NOTE(review): `prefix`, `self` and `_model_data_ready` come from
    # the enclosing scope — this is a nested callback.
    linked = self._conda_api.linked(prefix=prefix)
    worker = self._client_api.prepare_model_data(
        clean_packages, linked, pip_packages)
    worker.base_worker = base_worker
    worker.sig_finished.connect(_model_data_ready)
def _metadata_updated(self, worker, path, error):
    """Load downloaded metadata from `path`, then refresh repodata."""
    self.set_splash('Updating repodata...')
    if error:
        logger.error(str(error))
    if path and os.path.isfile(path):
        with open(path, 'r') as f:
            data = f.read()
        try:
            self._metadata = json.loads(data)
        except Exception:
            # Fall back to empty metadata on parse errors.
            self._metadata = {}
    channels = CONF.get('main', 'conda_channels', default=tuple())
    if not channels:
        # First run: seed the config with the condarc channels.
        channels = self.api.conda_get_condarc_channels()
        CONF.set('main', 'conda_channels', channels)
        CONF.set('main', 'conda_active_channels', channels)
    self.api.update_repodata(channels=channels)
    self.api.sig_repodata_updated.connect(self._repodata_updated)
def convert_image(self, worker, output, error):
    """
    Load an image using PIL, and converts it to a QPixmap.

    This was needed as some image libraries are not found in some OS.
    """
    path = output
    # Cache: never convert the same path twice.
    if path in self.pixmaps:
        return
    try:
        if sys.platform == 'darwin' and PYQT4:
            # Go through PIL on macOS + PyQt4.
            from PIL.ImageQt import ImageQt
            from PIL import Image
            if path:
                image = Image.open(path)
                image = ImageQt(image)
                qt_image = QImage(image)
                pixmap = QPixmap.fromImage(qt_image)
            else:
                pixmap = QPixmap()
        else:
            if path and os.path.isfile(path):
                extension = path.split('.')[-1].upper()
                if extension in ['PNG', 'JPEG', 'JPG']:
                    # This might be producing an error message on windows
                    # for some of the images
                    pixmap = QPixmap(path, format=extension)
                else:
                    pixmap = QPixmap(path)
            else:
                pixmap = QPixmap()
        self.pixmaps[path] = pixmap
    except (IOError, OSError) as error:
        # NOTE(review): `error` shadows the callback parameter here.
        logger.error(str(error))
def _create_project_folders(self, apps, force=False):
    """
    Create local dev tools project folders for dev tools applications.

    Returns the list of project paths that were created.
    """
    paths = []

    # Temporal hardcoded images
    image_paths = {
        'glueviz': images.GLUEVIZ_ICON_1024_PATH,
        'spyder-app': images.SPYDER_ICON_1024_PATH,
        'spyder': images.SPYDER_ICON_1024_PATH,
        'ipython-qtconsole': images.IPYTHON_QTCONSOLE_ICON_1024_PATH,
        'qtconsole': images.IPYTHON_QTCONSOLE_ICON_1024_PATH,
        'ipython-notebook': images.IPYTHON_NOTEBOOK_ICON_1024_PATH,
        'notebook': images.NOTEBOOK_ICON_1024_PATH,
        'orange-app': images.ORANGE_ICON_1024_PATH,
        'rodeo': images.RODEO_ICON_1024_PATH,
        'veusz': images.VEUSZ_ICON_1024_PATH,
    }

    # Legacy package names that have a modern equivalent above.
    invalid_apps = ['spyder-app', 'ipython-qtconsole', 'ipython-notebook']

    for app in apps:
        if app in invalid_apps:
            continue

        data = apps[app]
        project_path = os.sep.join([DEVTOOLS_PATH, app])

        # When forcing, clear any pre-existing project folder first.
        if os.path.isdir(project_path) and force:
            if IS_WINDOWS:
                # rmtree can fail on windows (locked files); move the
                # folder to a temp dir instead of deleting it.
                temp_path = tempfile.mkdtemp()
                shutil.move(project_path, temp_path)
            else:
                try:
                    shutil.rmtree(project_path)
                except Exception as error:
                    logger.error(error)

        project = Project(name=app)
        project.default_channels = self.conda_get_condarc_channels()
        project.is_app = True
        project.is_conda_app = True
        project.dev_tool = app in VALID_DEV_TOOLS
        image_path = image_paths.get(app)
        project.save(project_path)

        # Open, file, adjust size if it is too big and save it as icon.png
        if image_path:
            project.icon = 'icon.png'
            self.save_icon(image_path, project_path=project_path,
                           project=project)

        project.version = '0.1.0'
        versions = data.get('versions')
        version = versions[-1]  # Versions are sorted from small to big
        app_entry = data.get('app_entry').get(version, '')

        # Handle deprecated entrypoints for notebook and qtconsole.
        # Bug fix: was a bare `except:` — narrowed to Exception so
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        try:
            if 'ipython notebook' in app_entry:
                app_entry = app_entry.replace('ipython notebook',
                                              'jupyter-notebook')
            elif 'ipython qtconsole' in app_entry:
                app_entry = app_entry.replace('ipython qtconsole',
                                              'jupyter-qtconsole')
        except Exception:
            pass

        project.commands = [app_entry]
        project.save(project_path)

        # Pin the app version in a minimal spec file.
        spec_file = os.sep.join([project_path, 'env.yaml'])
        with open(spec_file, 'w') as f:
            f.write("{0}={1}".format(app, version))

        paths.append(project_path)
    return paths
def load_content(self, paths=None):
    """
    Load downloaded and bundled content.

    Downloaded json files (videos, events, webinars) are normalized
    into a common item schema, merged with the bundled content, sorted
    so items of the same tag are contiguous and sticky items come
    first, cached to disk and then displayed.
    """
    content = []

    # Load downloaded content
    for filepath in self._downloaded_filepaths:
        fname = filepath.split(os.sep)[-1]
        items = []
        if os.path.isfile(filepath):
            with open(filepath, 'r') as f:
                data = f.read()
            try:
                items = json.loads(data)
            except Exception as error:
                logger.error(str((filepath, error)))
                items = []

        if 'video' in fname:
            for item in items:
                try:
                    item['tags'] = ['video']
                    item['uri'] = item.get('video', '')
                    if item['uri']:
                        item['banner'] = item.get('thumbnail')
                        image_path = item['banner'].split('/')[-1]
                        item['image_file'] = image_path
                    else:
                        url = ''
                        item['image_file'] = ''
                        item['banner'] = url
                    item['date'] = item.get('date_start', '')
                except Exception:
                    logger.debug("Video parse failed: {0}".format(item))
            items = items[:self.VIDEOS_LIMIT]
        elif 'event' in fname:
            for item in items:
                try:
                    item['tags'] = ['event']
                    item['uri'] = item.get('url', '')
                    if item['banner']:
                        image_path = item['banner'].split('/')[-1]
                        item['image_file'] = image_path
                    else:
                        item['banner'] = ''
                except Exception:
                    logger.debug('Event parse failed: {0}'.format(item))
            items = items[:self.EVENTS_LIMIT]
        elif 'webinar' in fname:
            for item in items:
                try:
                    item['tags'] = ['webinar']
                    uri = item.get('url', '')
                    utm_campaign = item.get('utm_campaign', '')
                    item['uri'] = self.add_campaign(uri, utm_campaign)
                    image = item.get('image', '')
                    if image and isinstance(image, dict):
                        item['banner'] = image.get('src', '')
                        if item['banner']:
                            image_path = item['banner'].split('/')[-1]
                            item['image_file'] = image_path
                        else:
                            item['image_file'] = ''
                    else:
                        item['banner'] = ''
                        item['image_file_path'] = ''
                except Exception:
                    logger.debug('Webinar parse failed: {0}'.format(item))
            items = items[:self.WEBINARS_LIMIT]

        if items:
            content.extend(items)

    # Load bundled content
    with open(self.bundle_path, 'r') as f:
        data = f.read()
    items = []
    try:
        items = json.loads(data)
    except Exception as error:
        # Bug fix: the log referenced the stale loop variable
        # `filepath` (unbound when there are no downloaded files);
        # log the bundle path actually being parsed.
        logger.error(str((self.bundle_path, error)))
    content.extend(items)

    # Add the image path to get the full path
    for i, item in enumerate(content):
        uri = item['uri']
        item['uri'] = uri.replace(' ', '%20')
        filename = item.get('image_file', '')
        item['image_file_path'] = os.path.sep.join(
            [self.image_path, filename])

    # Make sure items of the same type/tag are contiguous in the list
    content = sorted(content, key=lambda i: i.get('tags'))

    # But also make sure sticky content appears first
    sticky_content = []
    for i, item in enumerate(content[:]):
        sticky = item.get('sticky')
        if isinstance(sticky, str):
            is_sticky = sticky == 'true'
        else:
            # Bug fix: non-string truthy values (e.g. JSON `true`)
            # previously left `is_sticky` unbound on the first
            # iteration (or reused the previous item's value).
            is_sticky = bool(sticky)
        if is_sticky:
            sticky_content.append(item)
            content.remove(item)
    content = sticky_content + content

    self.content_info = content

    # Save loaded data in a single file
    with open(self.saved_content_path, 'w') as f:
        json.dump(content, f)

    self.make_tag_filters()
    self.timer_load.start(random.randint(25, 35))
def _download(
    self,
    url,
    path=None,
    force=False,
    verify=True,
    chunked=True,
):
    """Callback for download.

    Download `url` to `path` (basename of the url by default).  The
    download is skipped when a local file of matching size already
    exists and `force` is False.  Emits `_sig_download_progress`
    while streaming and `_sig_download_finished` when done.  Returns
    the local path in all cases.
    """
    if path is None:
        path = url.split('/')[-1]

    # Make dir if non existent
    folder = os.path.dirname(os.path.abspath(path))

    if not os.path.isdir(folder):
        os.makedirs(folder)

    # Get headers
    try:
        r = requests.head(
            url,
            proxies=self.proxy_servers,
            verify=verify,
            timeout=self.DEFAULT_TIMEOUT,
        )
        status_code = r.status_code
    except Exception as error:
        status_code = -1
        logger.error(str(error))

    # NOTE(review): the format string has one placeholder, so the
    # `url` argument is silently dropped from this debug line.
    logger.debug('Status code {0} - url'.format(status_code, url))

    if status_code != 200:
        logger.error('Invalid url {0}'.format(url))
        return path

    total_size = int(r.headers.get('Content-Length', 0))

    # Check if file exists
    if os.path.isfile(path) and not force:
        file_size = os.path.getsize(path)
    else:
        file_size = -1

    # Check if existing file matches size of requested file
    if file_size == total_size:
        self._sig_download_finished.emit(url, path)
        return path
    else:
        try:
            r = requests.get(
                url,
                stream=chunked,
                proxies=self.proxy_servers,
                verify=verify,
                timeout=self.DEFAULT_TIMEOUT,
            )
            status_code = r.status_code
        except Exception as error:
            status_code = -1
            logger.error(str(error))

    # File not found or file size did not match. Download file.
    progress_size = 0
    bytes_stream = QBuffer()  # BytesIO was segfaulting for big files
    bytes_stream.open(QBuffer.ReadWrite)

    # For some chunked content the app segfaults (with big files)
    # so now chunked is a kwarg for this method
    if chunked:
        for chunk in r.iter_content(chunk_size=self._chunk_size):
            if chunk:
                bytes_stream.write(chunk)
                progress_size += len(chunk)
                self._sig_download_progress.emit(
                    url,
                    path,
                    progress_size,
                    total_size,
                )
    else:
        bytes_stream.write(r.content)

    bytes_stream.seek(0)
    data = bytes_stream.data()

    with open(path, 'wb') as f:
        f.write(data)

    bytes_stream.close()

    self._sig_download_finished.emit(url, path)
    return path
def _download_finished(worker, output, error):
    """Download callback: launch the platform-specific installer."""
    logger.debug('Finished App download')

    if error:
        dummy_worker.sig_finished.emit(dummy_worker, output, error)
        return

    if MAC:
        try:
            os.makedirs(self._INST_DIR)
        except OSError as e:
            # Ignore "already exists"; report anything else.
            if e.errno != errno.EEXIST:
                logger.error(e)

        logger.debug('Decompressing app application')
        # Unzip using Mac defalut command/application
        command = [
            '/usr/bin/unzip',
            '-qo',
            self._INSTFILE,
            '-d',
            self._INST_DIR,
        ]
        worker = wm.create_process_worker(command)
        worker.sig_partial.connect(dummy_worker.sig_partial)
        worker.sig_partial.emit(
            dummy_worker,
            {'message': 'Uncompressing file...'},
            None,
        )
        worker.sig_finished.connect(_install_extensions)
        worker.start()
    elif WIN:
        # Run windows installer silently
        # When quotes are used with START the first param is the title
        # that is why we add an empty string and then the actual
        # executable after the /WAIT. The quotes are for users with
        # spaces
        command = [
            'START',
            '/WAIT',
            '""',
            '"{}"'.format(self._INSTFILE),
            '/VERYSILENT',
            '/MERGETASKS=!runcode',
            '/SUPPRESSMSGBOXES',
            '/NORESTART',
            '/LOG="{}"'.format(self.log_path(delete=True)),
            '/DIR="{0}\\"'.format(self._INST_DIR),
        ]
        # Create temp batch file and run that
        cmd = u' '.join(command)  # The u'... is important on py27!
        logger.debug(cmd)
        bat_path = os.path.join(
            CONF_PATH,
            'temp',
            'app-install.bat',
        )
        base_temp_path = os.path.dirname(bat_path)
        if not os.path.isdir(base_temp_path):
            os.makedirs(base_temp_path)
        with io.open(bat_path, 'w') as f:
            f.write(cmd)
        worker = wm.create_process_worker([bat_path])
        worker.sig_partial.connect(dummy_worker.sig_partial)
        worker.sig_finished.connect(_install_extensions)
        worker.start()
    elif LINUX:
        # See: https://code.visualstudio.com/docs/setup/linux
        # NOTE(review): `is_deb`, `is_rpm` and `password` come from
        # the enclosing scope — this is a nested callback.
        if LINUX_DEB and is_deb:
            cmd = ['sudo', '-kS', 'dpkg', '-i', self._INSTFILE]
            worker = wm.create_process_worker(cmd)
            worker.sig_partial.connect(dummy_worker.sig_partial)
            worker.sig_finished.connect(_install_deb_dependencies)
            worker.start()
            # Feed the sudo password on stdin.
            stdin = to_binary_string(password + '\n')
            worker.write(stdin)
        elif LINUX_RPM and is_rpm:
            # Add key
            cmd = [
                'sudo', '-kS', 'rpm', '--import', self.rpm_asc_file_url
            ]
            worker = wm.create_process_worker(cmd)
            worker.sig_partial.connect(dummy_worker.sig_partial)
            worker.sig_finished.connect(_install_rpm_repodata)
            worker.start()
            # Feed the sudo password on stdin.
            stdin = to_binary_string(password + '\n')
            worker.write(stdin)
    else:
        dummy_worker.sig_finished.emit(dummy_worker, None, None)
def load_content(self, paths=None):
    """
    Load downloaded and bundled content.

    Downloaded json files (videos, events, webinars) are normalized
    into a common item schema, merged with the bundled links content,
    cached to a single json file and then displayed.
    """
    content = []

    # Load downloaded content
    for filepath in self._downloaded_filepaths:
        fname = filepath.split(os.sep)[-1]
        items = []
        if os.path.isfile(filepath):
            with open(filepath, 'r') as f:
                data = f.read()
            try:
                items = json.loads(data)
            except Exception as error:
                logger.error(str((filepath, error)))
                items = []

        if 'video' in fname:
            for item in items:
                item['tags'] = ['video']
                item['uri'] = item['video']
                # Youtube thumbnail derived from the video id.
                uri = item['uri'].split('watch?v=')[-1]
                url = 'http://img.youtube.com/vi/{0}/0.jpg'.format(uri)
                item['banner'] = url
                item['date'] = item.get('date_start', '')
                item['image_file_path'] = uri + '.jpg'
            items = items[:self.VIDEOS_LIMIT]
        elif 'event' in fname:
            for item in items:
                item['tags'] = (['event'] +
                                item.get('categories', '').split(', '))
                item['uri'] = item['url']
                item['image_file_path'] = item['banner'].split('/')[-1]
            items = items[:self.EVENTS_LIMIT]
        elif 'webinar' in fname:
            for item in items:
                item['tags'] = ['webinar']
                item['uri'] = item['url']
                item['banner'] = item['image']['src']
                item['image_file_path'] = item['banner'].split('/')[-1]
            items = items[:self.WEBINARS_LIMIT]

        if items:
            content.extend(items)

    # Load bundled content
    with open(LINKS_INFO_PATH, 'r') as f:
        data = f.read()
    items = []
    try:
        items = json.loads(data)
    except Exception as error:
        # Bug fix: `str(filepath, error)` was an invalid two-argument
        # str() call (it raised TypeError inside the handler) and it
        # referenced the stale loop variable; log the bundled path.
        logger.error(str((LINKS_INFO_PATH, error)))
    content.extend(items)

    # Add the image path to get the full path
    for item in content:
        filename = item['image_file_path']
        item['image_file_path'] = os.path.sep.join(
            [IMAGE_DATA_PATH, filename])

    self.content_info = content

    # Save loaded data in a single file
    with open(CONTENT_JSON_PATH, 'w') as f:
        json.dump(content, f)

    self.make_tag_filters()
    self.timer_load.start(random.randint(25, 35))