def getargspec(func, name, verbose=False):
    """Return the argument spec of *func*, preferring its bound
    ``_parse_args`` helper when one exists (argparse-style command objects).

    :param func: function or bound method to introspect
    :param name: display name used only in the verbose debug output
    :param verbose: when True, log the spec/signature details
    :return: the argspec namedtuple (``FullArgSpec`` on Python 3; callers
        should access fields by attribute, e.g. ``.args``/``.defaults``)
    """
    # Prefer the command object's _parse_args when func is a bound method
    # carrying one; otherwise introspect func itself.
    try:
        target = func.__self__._parse_args
    except AttributeError:
        target = func
    # inspect.getargspec() was removed in Python 3.11; fall back to
    # getfullargspec(), whose result exposes the same .args/.defaults
    # attributes plus keyword-only metadata.
    getspec = getattr(inspect, "getargspec", inspect.getfullargspec)
    argspec = getspec(target)
    signature = inspect.signature(target)
    if verbose:
        msg = """
        Function: {name} ({arity})
        Arg spec: {argspec}
        Signature: {signature}
        Args: {args}
        """
        logger.debug(
            msg.format(
                name=name,
                # arity excludes the leading self/cls parameter
                arity=len(argspec.args) - 1,
                argspec=argspec,
                signature=signature,
                args=inspect.signature(func),
            ))
    return argspec
def delete(self, process):
    """Remove *process* from the tracked-process map, logging the outcome."""
    name = process.name
    try:
        self.processes.pop(name)
    except KeyError:
        # Nothing registered under that name — log and carry on.
        logger.warning(u"Trying to remove not existing process..")
        return
    logger.debug(u"Removed process %s" % name)
async def me(access_token: str = Cookie(None)):
    """Resolve the current user via the OIDC userinfo endpoint.

    Requires the access-token cookie; maps upstream 401/403 straight
    through and everything else to a 502.
    """
    if not access_token:
        raise HTTPException(401, "Authentication required")

    res = await client.post(
        url=openid_conf.userinfo_endpoint,
        headers={"Authorization": f"Bearer {access_token}"},
    )
    if res.is_error:
        known_errors = {
            401: "Authentication required",
            403: "Forbidden",
        }
        detail = known_errors.get(res.status_code)
        if detail is not None:
            # Expected auth failure — propagate the upstream status.
            logger.debug("Got error from user info endpoint",
                         code=res.status_code,
                         err=res.text)
            raise HTTPException(res.status_code, detail)
        # Anything else is an upstream/API problem, not the caller's fault.
        logger.error("Failed to call user info endpoint",
                     code=res.status_code,
                     err=res.text)
        raise HTTPException(502, "Error with authentication APIs")

    info = UserInfoResponse(**res.json())
    return MeResponse(id=info.sub, name=info.name)
def delete(self, process):
    """Drop the registry entry for *process*; warn when it is unknown."""
    key = process.name
    try:
        del self.processes[key]
    except KeyError:
        logger.warning("Trying to remove not existing process..")
    else:
        # Only reached when the entry actually existed.
        logger.debug("Removed process %s" % key)
def restoreSettings(self):
    """Restore window geometry/state, tray-icon visibility, host-grouping
    mode and the saved host groups from the persisted configuration."""
    try:
        self.restoreGeometry(
            self.config.getValue("geometry").toByteArray())
        self.restoreState(
            self.config.getValue("windowState").toByteArray())
    except Exception:
        # First run / cleared config: the stored value is missing so the
        # QVariant conversion fails — keep the default layout.
        logger.debug("No settings to restore")
    # restore tray icon state
    trayIconVisibility = self.config.getValue('trayIconVisibility',
                                              "true").toBool()
    self.tray.setVisible(trayIconVisibility)
    self.showHostsInGroups = self.config.getValue('showHostsInGroups',
                                                  'false').toBool()
    if self.tray.isVisible():
        # Tray is available, so honour the saved main-window visibility.
        mainWindowVisibility = self.config.getValue(
            'mainWindowVisibility', "true").toBool()
        self.setVisible(mainWindowVisibility)
    else:
        # if tray icon is not visible, always show the main window
        self.show()
    # Saved groups come back as a QVariant-wrapped dict; normalise keys to
    # unicode (Python 2 codebase).
    self.groups = {
        unicode(k): v
        for k, v in self.config.getValue('groups', {}).toPyObject().items()
    }
def getComposedCommand(self):
    """Build the command line from the configured settings and extra args.

    :return: the executable and the composed argument list
    """
    argsList = []
    for key, value in self.settings.items():
        argsList.append("/%s:%s" % (key, value))
    argsList += self.args
    # Keep password-carrying "/p:" options out of the debug log.
    loggable = [str(arg) for arg in argsList
                if not str(arg).startswith("/p:")]
    logger.debug("Running command:\n%s %s" % (self.executable,
                                              " ".join(loggable)))
    return self.executable, argsList
def getComposedCommand(self):
    """Assemble the executable and its option list for spawning.

    :return: the executable and the list of composed options
    """
    argsList = []
    for key, value in self.settings.items():
        argsList.append("/%s:%s" % (key, value))
    argsList.extend(self.args)
    # Hide "/p:" (password) options from the log output.
    shown = " ".join(unicode(arg) for arg in argsList
                     if not unicode(arg).startswith("/p:"))
    logger.debug(u"Running command:\n%s %s" % (self.executable, shown))
    return self.executable, argsList
def getComposedCommand(self):
    """Compose the command line as "-key value" pairs, extra args, then host.

    :return: the executable and the argument list
    """
    argsList = []
    for key, value in self.settings.items():
        argsList += ["-%s" % key, value]
    if self.args:
        argsList += self.args
    argsList.append(self.host)
    logger.debug("Running command:\n%s %s" % (self.executable,
                                              " ".join(argsList)))
    return self.executable, argsList
async def route_identities(data_product: str, request: Request) -> Response:
    """Proxy a Data Product request to the Product Gateway, preserving the
    query string and relaying the upstream status code."""
    target = f"{conf.PRODUCT_GATEWAY_URL}/{data_product}"
    query = request.url.query
    if query:
        target = f"{target}?{query}"
    payload = await request.json()
    logger.debug("Fetching Data Product", url=target)
    try:
        resp = await client.post(target, json=payload)
    except HTTPError:
        # Network/transport failure talking to the gateway → bad gateway.
        logger.exception(
            "Failed to fetch Data Product from the Product Gateway")
        raise HTTPException(status_code=502)
    return JSONResponse(resp.json(), status_code=resp.status_code)
def before_request():
    """Log each incoming request and run the token check for endpoints
    registered as login-protected in redis."""
    logger.debug("request ip: %s, path: %s, args: %s, body: %s",
                 request.remote_addr, request.path, request.args,
                 request.data.decode('utf-8'))
    if request.method == 'OPTIONS':
        # CORS preflight — answer immediately, no auth required.
        return SuccessResponse()()
    login_urls = {url.decode('utf-8')
                  for url in redis.smembers(login_url_key)}
    try:
        # request.endpoint is None for unknown routes → AttributeError.
        if request.endpoint.split('.')[-1] in login_urls:
            return check_request_token()
    except AttributeError as err:
        logger.error(err)
        return ErrorResponse(ENDPOINT_NOT_EXIST).make()
def savePassword(self):
    """Validate the change-password form and re-save the private key under
    the new master password.

    :raises ValueError: when all fields are empty or the new passwords differ
    """
    current = self.ui.currentPassword.text()
    new = self.ui.newPassword.text()
    repeated = self.ui.repeatPassword.text()
    nothing_entered = (self.isFieldEmpty(current)
                       and self.isFieldEmpty(new)
                       and self.isFieldEmpty(repeated))
    if nothing_entered:
        raise ValueError("No master password changes detected")
    if new != repeated:
        raise ValueError("Passwords mismatch")
    config = Config()
    # Unlock with the current password, then persist under the new one.
    key = config.getPrivateKey(str(current))
    key.save(config.privateKeyPath, str(new))
    logger.debug("Key exported")
def savePassword(self):
    """Check the password-change dialog fields and re-encrypt the private
    key with the new master password.

    :raises ValueError: on empty form or mismatching new passwords
    """
    current = self.ui.currentPassword.text()
    new = self.ui.newPassword.text()
    repeated = self.ui.repeatPassword.text()
    if (self.isFieldEmpty(current) and self.isFieldEmpty(new)
            and self.isFieldEmpty(repeated)):
        raise ValueError(u"No master password changes detected")
    if new != repeated:
        raise ValueError(u"Passwords mismatch")
    config = Config()
    # Decrypt with the old password, save again under the new one.
    privateKey = config.getPrivateKey(unicode(current))
    privateKey.save(config.privateKeyPath, unicode(new))
    logger.debug(u"Key exported")
def post(self):
    """Estimate a fitted/extrapolated series for the posted data.

    Expects a JSON body with "data", "years" and "function" keys. Returns
    the extended years, estimated values, covariance matrix and standard
    error. Aborts with 400 on malformed/invalid input and 500 on
    unexpected estimation failures.
    """
    try:
        obj = json.loads(request.data)
        data = obj["data"]
        years = obj["years"]
        function = obj["function"]
    except ValueError:
        abort(400, errors=["Request is not valid JSON."])
    except KeyError as err:
        abort(
            400,
            errors=[
                "Expected to find property '{}' on the request data.".
                format(str(err))
            ],
        )
    if len(data) == 0 or len(years) == 0:
        abort(400, errors=["Empty data or years."])
    # Resolve the estimator once and reject unknown names up front —
    # previously an unknown name became "", .get("") returned None, and
    # estimate(None, ...) crashed into the generic 500 handler.
    func = scipy_functions("pdf").get(function)
    if func is None:
        abort(400, errors=["Unknown function '{}'.".format(function)])
    logger.debug("Estimating with function: {}".format(function))
    try:
        result = estimate(func, data, years, 100, log=True, norm=True)
    except (RuntimeError, ValueError) as err:
        # Fit failures and bad series are client errors.
        abort(400, errors=[str(err)])
    except Exception as err:
        abort(500, errors=[str(err)])
    e_years, e_data, e_cov, e_stderr = result
    return {
        "years": as_json(e_years),
        "data": as_json(e_data.astype(np.float64)),
        "covariance": as_json(e_cov),
        "stderr": e_stderr,
    }, 200
def restoreSettings(self):
    """Reload persisted UI state: window geometry/state, tray visibility,
    host-grouping flag and the saved host groups."""
    try:
        self.restoreGeometry(self.config.getValue("geometry").toByteArray())
        self.restoreState(self.config.getValue("windowState").toByteArray())
    except Exception:
        # No stored geometry/state yet (fresh profile) — defaults apply.
        logger.debug("No settings to restore")
    # restore tray icon state
    trayIconVisibility = self.config.getValue('trayIconVisibility',
                                              "true").toBool()
    self.tray.setVisible(trayIconVisibility)
    self.showHostsInGroups = self.config.getValue('showHostsInGroups',
                                                  'false').toBool()
    if self.tray.isVisible():
        mainWindowVisibility = self.config.getValue('mainWindowVisibility',
                                                    "true").toBool()
        self.setVisible(mainWindowVisibility)
    else:
        # if tray icon is not visible, always show the main window
        self.show()
    # Convert the QVariant-wrapped dict back to a plain dict with unicode
    # keys (Python 2 codebase).
    self.groups = {unicode(k): v
                   for k, v in self.config.getValue('groups',
                                                    {}).toPyObject().items()}
def slotRead(self):
    """Forward pending stdout from the signalling process to the text view."""
    process = self.sender()
    output = process.readAllStandardOutput()
    logger.debug(output)
    self.appendText(output)
def estimate(func, data, years, until=0, log=False, norm=False):
    """Fit *func* to (years, data) with least squares and extrapolate.

    :param func: model function ``f(x, *params)`` passed to curve_fit
    :param data: observed values
    :param years: x-axis values matching *data*
    :param until: number of extra x steps to extrapolate past max(years)
    :param log: fit against log-transformed data
    :param norm: additionally normalise the (log) data by its maximum
    :return: tuple ``(e_years, estd, pcov, error)`` — extended years,
        estimated values, covariance matrix, and a dict of std-error stats
    :raises ValueError: when data or years is empty after sanitising
    :raises RuntimeError: when the fit does not converge
    :raises RuntimeWarning: when the mean std-error is not finite
    """
    data, years = sanitize(data, years)
    if len(data) == 0 or len(years) == 0:
        raise ValueError("Empty data or years")
    (start, end) = (np.min(years), np.max(years) + until)
    # Shift x so the series starts at 0; e_x spans the extrapolated range.
    x = years - start
    e_x = np.arange(end - start + 1)
    e_years = e_x + start
    orig_data = data
    scale = 1
    if log:
        data = np.log(data)
        logger.debug("Describe logarithic data: {stats}".format(
            stats=(stats.describe(data, nan_policy='omit')),
        ))
        if norm:
            # Normalise the log data to [0, 1] by its maximum.
            scale = np.amax(data)
            data = data / scale
            logger.debug(
                "Describe normalised data: {stats} (min: {min}, max: {max})".
                format(
                    min=np.amin(data),
                    max=np.amax(data),
                    stats=(stats.describe(data / np.amax(data),
                                          nan_policy='omit')),
                ))
    else:
        logger.debug("Describe original data: {stats}".format(
            stats=(stats.describe(data, nan_policy='omit')),
        ))
    popt, pcov, infodict, errmsg, ier = curve_fit(func,
                                                  x,
                                                  data,
                                                  maxfev=10000,
                                                  full_output=True,
                                                  absolute_sigma=False)
    # Per-parameter standard errors from the covariance diagonal.
    std_err = np.sqrt(np.diag(pcov))
    error = dict(
        std=list(std_err),
        min=np.amin(std_err),
        max=np.amax(std_err),
        mean=np.mean(std_err),
        median=np.median(std_err),
    )
    estd = func(e_x, *popt)
    # Undo the transforms in reverse order of application.
    if norm:
        estd = estd * scale
    if log:
        estd = np.exp(estd)
    msg = """
=============================================================================
function: {function}
nfev: {nfev}
error: {error}
error mean: {mean_error}
error median: {median_error}
error max: {max_error}
error min: {min_error}
errmsg: {errmsg}
ier: {ier}
popt: {popt}
pcov: {pcov}
data: {data}
estd: {estd}
=============================================================================
"""
    params = dict(
        function=func.__name__,
        error=error["std"],
        mean_error=error["mean"],
        median_error=error["median"],
        max_error=error["max"],
        min_error=error["min"],
        nfev=infodict["nfev"],
        errmsg=errmsg,
        ier=ier,
        popt=popt,
        pcov=pcov,
        data=orig_data[:50],
        estd=np.round(estd[:50], 0),
    )
    logger.debug(msg.format(**params))
    # MINPACK reports ier in 1..4 when a solution was found.
    if ier not in [1, 2, 3, 4]:
        raise RuntimeError(errmsg)
    if not np.isfinite(error["mean"]):
        msg = "Function is not suitable for this series"
        raise RuntimeWarning(msg)
    # if error['mean'] > 100:
    #     msg = 'Mean error is over tolerance: {mean_error}'
    #     raise RuntimeWarning(msg.format(
    #         mean_error=np.round(error['mean'], 2),
    #     ))
    return (e_years, estd, pcov, error)
def getUrl(url):
    """Resolve *url* to a directly playable media URL using youtube-dl.

    Returns the input unchanged for direct media links, a format-specific
    URL for YouTube, the best URL otherwise, or None on extraction failure.
    """
    logger.debug("Parsing source url for " + url)
    is_direct_file = url[-4:] in (".avi", ".mkv", ".mp4", ".mp3")
    if is_direct_file or (".googlevideo.com/" in url):
        logger.debug("Direct video URL, no need to use youtube-dl.")
        return url
    ydl = youtube_dl.YoutubeDL({
        "logger": logger,
        "noplaylist": True,
        # Ignore errors in case of error in long playlists
        "ignoreerrors": True,
    })
    with ydl:
        # Metadata only — never download the media itself.
        result = ydl.extract_info(url, download=False)
    if result is None:
        logger.error(
            "Result is none, returning none. Cancelling following function.")
        return None
    # Playlists come back under "entries"; take the first item.
    video = result["entries"][0] if "entries" in result else result
    if "youtu" in url:
        logger.debug(
            """CASTING: Youtube link detected. Extracting url in maximal quality."""
        )
        # Probe preferred format ids from best to worst quality.
        for fid in ("22", "18", "36", "17"):
            for fmt in video["formats"]:
                if fmt["format_id"] == fid:
                    logger.debug("CASTING: Playing highest video quality " +
                                 fmt["format_note"] + "(" + fid + ").")
                    return fmt["url"]
    elif "vimeo" in url:
        logger.debug("Vimeo link detected, extracting url in maximal quality.")
        return video["url"]
    else:
        logger.debug(
            """Video not from Youtube or Vimeo. Extracting url in maximal quality."""
        )
        return video["url"]
def data(filename):
    """Serve a CSV file from the application's static data directory."""
    data_dir = os.path.join(current_app.root_path, 'static/data')
    logger.debug("Data asked for %s from directory %s" % (filename, data_dir))
    extension = os.path.splitext(filename)[1][1:].strip().lower()
    if extension != 'csv':
        # Only CSV is served from here.
        return False  # TODO return 404 error
    return send_from_directory(data_dir, filename, mimetype='text/csv')
def apply_local_settings(local_settings_name: str = "settings_local",
                         skip_for_test_env: bool = True):
    """
    Import provided module name, fetch all the settings defined in global
    namespace as KubernetesParam instances and override them.

    If environment variable ENV is set either to "development" or "unittest"
    then default values is allowed and errors won't be raised.

    :param local_settings_name: Name of local settings module
    :param skip_for_test_env: If environment is unittest, then don't import
        local settings and just replace KubernetesParam instances with their
        default values
    :return:
    """
    # NOTE: must be called directly from the settings module — the caller's
    # globals are exactly one frame up. Wrapping or refactoring this into
    # helpers would change the frame depth and break it.
    caller_globals = inspect.currentframe().f_back.f_globals
    environment = os.environ.get("ENV")
    if skip_for_test_env and environment == "unittest":
        # Unit tests never import local settings: replace every param with
        # its dev default; a missing default is a hard error.
        for name, value in caller_globals.items():
            if isinstance(value, KubernetesParam):
                if not value.default_provided:
                    raise ValueError(
                        f"No default dev value for {name} was provided")
                else:
                    caller_globals[name] = value.dev_default
        return
    defaults_allowed = environment == "development"
    required_fields = [
        name for name, value in caller_globals.items()
        if isinstance(value, KubernetesParam)
    ]
    local_settings_module_exists = importlib.util.find_spec(
        local_settings_name)
    if local_settings_module_exists:
        settings_module = importlib.import_module(local_settings_name)
    else:
        # No local settings module available — leave params untouched.
        return
    # Collect params missing from the local module that also cannot fall
    # back to a dev default in this environment.
    missing_fields = []
    for field in required_fields:
        field_in_settings = getattr(settings_module, field, None) is not None
        dev_default_provided = caller_globals[field].default_provided
        if not field_in_settings and (not defaults_allowed
                                      or not dev_default_provided):
            missing_fields.append(field)
    if missing_fields:
        fields_fmt = ", ".join(missing_fields)
        raise ValueError(f'Following settings must be defined: "{fields_fmt}"')
    # Override each param with its validated local value, or its dev default
    # when development mode allows it.
    for field in required_fields:
        local_settings_value = getattr(settings_module, field, None)
        if defaults_allowed and local_settings_value is None:
            new_value = caller_globals[field].dev_default
            logger.debug(f"Set {field} to default: {new_value}")
        else:
            try:
                caller_globals[field].validate(local_settings_value)
            except ValueError as exc:
                raise ValueError(f"Validation failed for {field}: {str(exc)}")
            new_value = local_settings_value
        caller_globals[field] = new_value
def doWork(self):
    """Feed HLS .ts segments from the output directory into an XBMC4XBOX
    player via its HTTP API until the producing process exits."""
    # Wait until the encoder has written the second segment so playback
    # does not start before any data is available.
    d = os.listdir(self._outDir)
    while "stream001.ts" not in d:
        time.sleep(2)
        d = os.listdir(self._outDir)
    logger.info("STARTING XBMC4XBOX Player")
    # Reset playlist 1, queue the first segment, select the playlist and
    # start playback. Each response body is logged for debugging.
    logger.debug(
        requests.get(self._endpoint + "ClearPlayList(1)",
                     auth=self._auth).text)
    logger.debug(
        requests.get(
            self._endpoint + "AddToPlayList(" + urllib.parse.quote(
                self._baseUrl + "stream000.ts?token=" + self._token,
                safe="") + ";1)",
            auth=self._auth,
        ).text)
    logger.debug(
        requests.get(self._endpoint + "SetCurrentPlaylist(1)",
                     auth=self._auth).text)
    logger.debug(
        requests.get(self._endpoint + "PlayNext()", auth=self._auth).text)
    logger.debug("STARTING XBMC4XBOX Main Loop")
    prev = 0
    dowork = True
    while dowork:
        d = sorted(os.listdir(self._outDir))
        num = -1
        # Scan from the newest entry backwards for the latest "streamNNN.ts"
        # and slice out its NNN sequence number (chars between "stream" and
        # ".ts").
        for i in range(len(d) - 1, 0, -1):
            if d[i][len(d[i]) - 2:] == "ts":
                num = d[i]
                num = num[6:len(num) - 3]
                break
        if int(num) > prev:
            # A new segment appeared — append it to the player's playlist.
            logger.debug("Add stream: " + num)
            logger.debug(
                requests.get(
                    self._endpoint + "AddToPlayList(" + urllib.parse.quote(
                        self._baseUrl + "stream" + num + ".ts?token=" +
                        self._token,
                        safe="",
                    ) + ";1)",
                    auth=self._auth,
                ).text)
            prev = int(num)
        time.sleep(1)
        # Keep looping while the producing process is still alive.
        dowork = self.activePid(self._startData["pid"])
def __init__(self, app: bottle.Bottle = None):
    """Remember the bottle application when one is supplied."""
    logger.debug(f"Controller: Base init. {self.__class__.__name__}")
    if app is None:
        return
    self._app = app
def get_train_df(overwrite_existing_files=False, obj_dicts_filepath=pri_constants.MSGS_READ_PATH, pkl_obj_msg_filepath=pri_constants.PKL_MSGS_READ_PATH, feat_names_save_path=pri_constants.FEAT_NAMES_PATH, unlabeled_train_df_fpath=pri_constants.UNLABELED_TRAIN_DF_PATH, labels_filepath=pri_constants.LABELS_FILEPATH, labeled_df_filepath=pri_constants.LABELED_TRAIN_DF_PATH, id_col_name=pri_constants.ID_COL_NAME, label_name=pri_constants.LABEL_COL_NAME, default_feature_val=pri_constants.DEFAULT_FEATURE_VAL, unique_feats_threshold=pri_constants.UNIQUE_VALS, is_training_data=False): """ Get the train_df, either from CSV file, or direct from raw obj_dicts. :param overwrite_existing_files: (bool) if True, overwrite the labeled_df_filepath with a new CSV trained on obj_msgs, otherwise default to the stored CSV at labeled_df_filepath (WARNING: possibly overwrite existing files) :param obj_dicts_filepath: (str) filepath for obj_dicts :param feat_names_save_path: (str) filepath for feature names :param unlabeled_train_df_fpath: (str) filepath for unlabeled_train_df :param labels_filepath: (str) filepath to the label names :param labeled_df_filepath: (str) the path to the labeled training dataframe CSV :param id_col_name: (str) column name of IDs (e.g. "company_id", "id", "obj_id", etc) :param label_name: (str) the name of the labels column (e.g. "max_progress") :param default_feature_val: (int, float) default placeholder value when a feature value is not found/specified :param is_training_data: (bool) toggles tasks for only train/predict; e.g. 
building vectorized tf-idf model :return: (DataFrame) a training dataframe with labels for supervised learning """ # Load decompressed raw dicts and featurizes the dictionaries logger.debug("Loading Person-dicts") if not os.path.isfile(pkl_obj_msg_filepath): # or overwrite_existing_files # Read person dicts and featurize from raw string; then pickle to file with open(obj_dicts_filepath, "r") as rfile: pids_to_feats = featurize([eval(msg) for msg in rfile], is_training_data=is_training_data) with open(pkl_obj_msg_filepath, "wb") as pkl_msgs: pickle.dump(pids_to_feats, pkl_msgs) else: with open(pkl_obj_msg_filepath, "rb") as pkl_msgs: pids_to_feats = pickle.load(pkl_msgs) # Build (or load if already exists) unlabeled DataFrame logger.debug("building unlabeled df") if not os.path.isfile(unlabeled_train_df_fpath): # or overwrite_existing_files train_df = build_unlabeled_features_df(pids_to_feats, feat_names_save_path=feat_names_save_path, unlabeled_train_df_fpath=unlabeled_train_df_fpath, default_feature_val=default_feature_val, unique_feats_threshold=unique_feats_threshold, save=overwrite_existing_files) else: train_df = pd.read_pickle(unlabeled_train_df_fpath) # Label (or load if already exists) the features DataFrame logger.debug("Now adding labels to df; Rows: {}, Columns: {}" .format(len(train_df.index), len(train_df.columns))) if not os.path.isfile(labeled_df_filepath): # or overwrite_existing_files train_df = add_labels_to_dataframe(train_df, labels_filepath, labeled_df_filepath, id_col_name=id_col_name, label_name=label_name, save=overwrite_existing_files) else: train_df = pd.read_pickle(labeled_df_filepath) return train_df
def slotRead(self):
    """Append newly available stdout (without trailing newlines) to the view."""
    emitter = self.sender()
    raw = emitter.readAllStandardOutput()
    logger.debug(raw)
    # QByteArray → bytes via .data(), then strip trailing newlines.
    self.appendText(raw.data().rstrip('\n'))