def apy(self, samples: ApySamples):
    if curve and curve.get_pool(self.token.address):
        return apy.curve.simple(self, samples)
    elif Version(self.api_version) >= Version("0.3.2"):
        return apy.v2.average(self, samples)
    else:
        return apy.v2.simple(self, samples)
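# A quick standalone check of the version gate above, assuming Version here is
# semantic_version.Version (consistent with the Version.coerce calls elsewhere
# in this file): vaults at api_version 0.3.2+ get the averaged APY, older ones
# the simple calculation.
from semantic_version import Version

assert Version("0.3.2") >= Version("0.3.2")
assert Version("0.4.3") >= Version("0.3.2")
assert not (Version("0.3.1") >= Version("0.3.2"))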
def __init__(self, driver):
    super().__init__(driver)
    self.ocp_version = get_ocp_version()
    self.ocp_version_full = version.get_semantic_ocp_version_from_config()
    self.page_nav = locators[self.ocp_version]["page"]
    self.ocs_version_semantic = version.get_semantic_ocs_version_from_config()
    self.ocp_version_semantic = version.get_semantic_ocp_version_from_config()
    self.operator_name = (
        ODF_OPERATOR
        if self.ocs_version_semantic >= version.VERSION_4_9
        else OCS_OPERATOR
    )
    if Version.coerce(self.ocp_version) >= Version.coerce("4.8"):
        self.generic_locators = locators[self.ocp_version]["generic"]
    if config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM:
        self.storage_class = "thin_sc"
    elif config.ENV_DATA["platform"].lower() == constants.AWS_PLATFORM:
        aws_sc = config.DEPLOYMENT.get("customized_deployment_storage_class")
        if aws_sc == "gp3-csi":
            self.storage_class = "gp3-csi_sc"
        elif aws_sc == "gp2-csi":
            self.storage_class = "gp2-csi_sc"
        else:
            self.storage_class = "gp2_sc"
    elif config.ENV_DATA["platform"].lower() == constants.AZURE_PLATFORM:
        if self.ocp_version_semantic >= version.VERSION_4_11:
            self.storage_class = "managed-csi_sc"
        else:
            self.storage_class = "managed-premium_sc"
    elif config.ENV_DATA["platform"].lower() == constants.GCP_PLATFORM:
        self.storage_class = "standard_sc"
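# Why Version.coerce rather than Version: OCP versions like "4.8" are not full
# semver strings, and coercion pads them so comparisons stay numeric. A minimal
# sketch using semantic_version:
from semantic_version import Version

assert Version.coerce("4.8") == Version("4.8.0")
assert Version.coerce("4.10") > Version.coerce("4.8")  # numeric, not lexicographic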
def navigate_overview_page(self):
    """
    Navigate to Overview Page
    """
    logger.info("Navigate to Overview Page")
    if Version.coerce(self.ocp_version) >= Version.coerce("4.8"):
        self.choose_expanded_mode(mode=False, locator=self.page_nav["Home"])
        self.choose_expanded_mode(mode=True, locator=self.page_nav["Storage"])
    else:
        self.choose_expanded_mode(mode=True, locator=self.page_nav["Home"])
    self.do_click(locator=self.page_nav["overview_page"])
def create_ocs_jenkins_template(self):
    """
    Create OCS Jenkins Template
    """
    log.info("Create Jenkins Template, jenkins-persistent-ocs")
    ocp_obj = OCP(namespace="openshift", kind="template")
    tmp_dict = ocp_obj.get(resource_name="jenkins-persistent", out_yaml_format=True)
    tmp_dict["labels"]["app"] = "jenkins-persistent-ocs"
    tmp_dict["labels"]["template"] = "jenkins-persistent-ocs-template"
    tmp_dict["metadata"]["name"] = "jenkins-persistent-ocs"
    # Find Kind: 'PersistentVolumeClaim' position in the objects list, differs in OCP 4.5 and OCP 4.6.
    sc_name = (
        constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
        if storagecluster_independent_check()
        else constants.DEFAULT_STORAGECLASS_RBD
    )
    for i in range(len(tmp_dict["objects"])):
        if tmp_dict["objects"][i]["kind"] == constants.PVC:
            tmp_dict["objects"][i]["metadata"]["annotations"] = {
                "volume.beta.kubernetes.io/storage-class": sc_name
            }
    tmp_dict["parameters"][4]["value"] = "10Gi"
    tmp_dict["parameters"].append(
        {
            "description": "Override jenkins options to speed up slave spawning",
            "displayName": "Override jenkins options to speed up slave spawning",
            "name": "JAVA_OPTS",
            "value": (
                "-Dhudson.slaves.NodeProvisioner.initialDelay=0 "
                "-Dhudson.slaves.NodeProvisioner.MARGIN=50 "
                "-Dhudson.slaves.NodeProvisioner.MARGIN0=0.85"
            ),
        }
    )
    if Version.coerce(self.ocp_version) >= Version.coerce("4.8"):
        # Add the "Pipeline Utility Steps" plugin via the Jenkins template;
        # the OCP team changed the default plugin list on OCP 4.9.
        tmp_dict["objects"][3]["spec"]["template"]["spec"]["containers"][0]["env"].append(
            {
                "name": "INSTALL_PLUGINS",
                "value": (
                    "scm-api:2.6.5,pipeline-utility-steps:2.12.0,"
                    "workflow-step-api:622.vb_8e7c15b_c95a_,"
                    "workflow-cps:2648.va9433432b33c,workflow-api:2.47"
                ),
            }
        )
    ocs_jenkins_template_obj = OCS(**tmp_dict)
    ocs_jenkins_template_obj.create()
def __init__(self, driver):
    super().__init__(driver)
    self.ocp_version = get_ocp_version()
    self.page_nav = locators[self.ocp_version]["page"]
    ocs_version = version.get_semantic_ocs_version_from_config()
    self.operator_name = (
        ODF_OPERATOR if ocs_version >= version.VERSION_4_9 else OCS_OPERATOR
    )
    if Version.coerce(self.ocp_version) >= Version.coerce("4.8"):
        self.generic_locators = locators[self.ocp_version]["generic"]
    if config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM:
        self.storage_class = "thin_sc"
    elif config.ENV_DATA["platform"].lower() == constants.AWS_PLATFORM:
        self.storage_class = "gp2_sc"
    elif config.ENV_DATA["platform"].lower() == constants.AZURE_PLATFORM:
        self.storage_class = "managed-premium_sc"
def get_necessary_migrations():
    necessary_migrations = []
    for migration_name, definition in sorted(ALL_ASYNC_MIGRATIONS.items()):
        if is_async_migration_complete(migration_name):
            continue

        sm = AsyncMigration.objects.get_or_create(name=migration_name)[0]
        sm.description = definition.description
        sm.posthog_max_version = definition.posthog_max_version
        sm.posthog_min_version = definition.posthog_min_version
        sm.save()

        is_migration_required = ALL_ASYNC_MIGRATIONS[migration_name].is_required()

        if is_migration_required:
            if POSTHOG_VERSION > Version(sm.posthog_max_version):
                necessary_migrations.append(sm)
        else:
            dependency_ok, _ = is_migration_dependency_fulfilled(migration_name)
            if dependency_ok:
                complete_migration(sm)

    return necessary_migrations
def update_validated_data_from_url(validated_data: Dict[str, Any], url: str) -> Dict:
    """If remote plugin, download the archive and get up-to-date validated_data from there."""
    if url.startswith("file:"):
        plugin_path = url[5:]
        json_path = os.path.join(plugin_path, "plugin.json")
        json = load_json_file(json_path)
        if not json:
            raise ValidationError("Could not load plugin.json from: {}".format(json_path))
        validated_data["plugin_type"] = "local"
        validated_data["url"] = url
        validated_data["tag"] = None
        validated_data["archive"] = None
        validated_data["name"] = json.get("name", json_path.split("/")[-2])
        validated_data["description"] = json.get("description", "")
        validated_data["config_schema"] = json.get("config", [])
        validated_data["source"] = None
        posthog_version = json.get("posthogVersion", None)
    else:
        parsed_url = parse_url(url, get_latest_if_none=True)
        if parsed_url:
            validated_data["url"] = parsed_url["root_url"]
            validated_data["tag"] = parsed_url.get("tag", None)
            validated_data["archive"] = download_plugin_archive(validated_data["url"], validated_data["tag"])
            plugin_json = get_json_from_archive(validated_data["archive"], "plugin.json")
            if not plugin_json:
                raise ValidationError("Could not find plugin.json in the plugin")
            validated_data["name"] = plugin_json["name"]
            validated_data["description"] = plugin_json.get("description", "")
            validated_data["config_schema"] = plugin_json.get("config", [])
            validated_data["source"] = None
            posthog_version = plugin_json.get("posthogVersion", None)
        else:
            raise ValidationError("Must be a GitHub/GitLab repository or an npm package URL!")

    # Keep plugin type as "repository" or reset to "custom" if it was something else.
    if (
        validated_data.get("plugin_type", None) != Plugin.PluginType.CUSTOM
        and validated_data.get("plugin_type", None) != Plugin.PluginType.REPOSITORY
    ):
        validated_data["plugin_type"] = Plugin.PluginType.CUSTOM

    if posthog_version and not settings.MULTI_TENANCY:
        try:
            spec = SimpleSpec(posthog_version.replace(" ", ""))
        except ValueError:
            raise ValidationError(f'Invalid PostHog semantic version requirement "{posthog_version}"!')
        if not (Version(VERSION) in spec):
            raise ValidationError(
                f'Currently running PostHog version {VERSION} does not match this plugin\'s semantic version requirement "{posthog_version}".'
            )

    return validated_data
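# A small sketch of the SimpleSpec check above: the plugin's posthogVersion
# requirement (spaces stripped, which is what the .replace(" ", "") handles) is
# matched against the running version with the `in` operator. The spec string
# below is a made-up example.
from semantic_version import SimpleSpec, Version

spec = SimpleSpec(">=1.25.0,<1.30.0")
assert Version("1.26.0") in spec
assert Version("1.30.0") not in spec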
def version_string_to_semver(version: str) -> Version:
    minor = 0
    patch = 0
    # remove e.g. `-alpha`, Postgres metadata (`11.13 (Ubuntu 11.13-2.heroku1+1)`), etc.
    version_parts = version.split("(")[0].split("-")[0].split(".")
    major = int(version_parts[0])
    if len(version_parts) > 1:
        minor = int(version_parts[1])
    if len(version_parts) > 2:
        patch = int(version_parts[2])
    return Version(major=major, minor=minor, patch=patch)
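# Example inputs for version_string_to_semver, including the Postgres metadata
# case mentioned in the comment above:
v = version_string_to_semver("11.13 (Ubuntu 11.13-2.heroku1+1)")
assert (v.major, v.minor, v.patch) == (11, 13, 0)
v = version_string_to_semver("15.0.0-alpha")  # prerelease suffix is dropped
assert (v.major, v.minor, v.patch) == (15, 0, 0)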
def setup_async_migrations(ignore_posthog_version: bool = False):
    """
    Execute the necessary setup for async migrations to work:
    1. Import all the migration definitions
    2. Create a database record for each
    3. Check if all migrations necessary for this PostHog version have completed (else don't start)
    4. Populate a dependencies map and in-memory record of migration definitions
    """

    applied_migrations = set(instance.name for instance in get_all_completed_async_migrations())
    unapplied_migrations = set(ALL_ASYNC_MIGRATIONS.keys()) - applied_migrations

    first_migration = None
    for migration_name, migration in ALL_ASYNC_MIGRATIONS.items():
        sm = AsyncMigration.objects.get_or_create(name=migration_name)[0]
        sm.description = migration.description
        sm.posthog_max_version = migration.posthog_max_version
        sm.posthog_min_version = migration.posthog_min_version
        sm.save()

        dependency = migration.depends_on

        if not dependency:
            if first_migration:
                raise ImproperlyConfigured(
                    "Two or more async migrations have no dependency. Make sure only the first migration has no dependency."
                )
            first_migration = migration_name

        ASYNC_MIGRATION_TO_DEPENDENCY[migration_name] = dependency

        if (
            (not ignore_posthog_version)
            and (migration_name in unapplied_migrations)
            and (POSTHOG_VERSION > Version(migration.posthog_max_version))
        ):
            raise ImproperlyConfigured(f"Migration {migration_name} is required for PostHog versions above {VERSION}.")

    for key, val in ASYNC_MIGRATION_TO_DEPENDENCY.items():
        DEPENDENCY_TO_ASYNC_MIGRATION[val] = key

    if getattr(config, "AUTO_START_ASYNC_MIGRATIONS") and first_migration:
        kickstart_migration_if_possible(first_migration, applied_migrations)
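# How the two dependency maps built above relate, with hypothetical migration
# names: DEPENDENCY_TO_ASYNC_MIGRATION is simply the inverse of
# ASYNC_MIGRATION_TO_DEPENDENCY, which lets the runner walk the chain forward
# from the first migration (the one whose dependency is None).
migration_to_dependency = {"0001_events": None, "0002_persons": "0001_events"}
dependency_to_migration = {dep: name for name, dep in migration_to_dependency.items()}
assert dependency_to_migration[None] == "0001_events"
assert dependency_to_migration["0001_events"] == "0002_persons"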
def __init__(self, driver):
    super().__init__(driver)
    ocp_version = get_ocp_version()
    self.page_nav = locators[ocp_version]["page"]
    if Version.coerce(ocp_version) >= Version.coerce("4.8"):
        self.generic_locators = locators[ocp_version]["generic"]
def simple(vault, samples: ApySamples) -> Apy:
    harvests = sorted([harvest for strategy in vault.strategies for harvest in strategy.harvests])

    # we don't want to display APYs when vaults are ramping up
    if len(harvests) < 2:
        raise ApyError("v2:harvests", "harvests are < 2")

    # set our time values for simple calcs, closest to a harvest around that time period
    now = closest(harvests, samples.now)
    week_ago = closest(harvests, samples.week_ago)
    month_ago = closest(harvests, samples.month_ago)

    # set our parameters
    contract = vault.vault
    price_per_share = contract.pricePerShare

    # calculate our current price
    now_price = price_per_share(block_identifier=now)

    # get our inception data
    inception_price = 10 ** contract.decimals()
    inception_block = harvests[:2][-1]

    if now_price == inception_price:
        raise ApyError("v2:inception", "no change from inception price")

    # check our historical data
    if samples.week_ago > inception_block:
        week_ago_price = price_per_share(block_identifier=week_ago)
    else:
        week_ago_price = inception_price

    if samples.month_ago > inception_block:
        month_ago_price = price_per_share(block_identifier=month_ago)
    else:
        month_ago_price = inception_price

    now_point = SharePricePoint(samples.now, now_price)
    week_ago_point = SharePricePoint(samples.week_ago, week_ago_price)
    month_ago_point = SharePricePoint(samples.month_ago, month_ago_price)
    inception_point = SharePricePoint(inception_block, inception_price)

    week_ago_apy = calculate_roi(now_point, week_ago_point)
    month_ago_apy = calculate_roi(now_point, month_ago_point)
    inception_apy = calculate_roi(now_point, inception_point)

    # use the first non-zero apy, ordered by precedence
    apys = [month_ago_apy, week_ago_apy, inception_apy]
    net_apy = next((value for value in apys if value != 0), 0)

    # for performance fee, half comes from strategy (strategist share) and half from the vault (treasury share)
    strategy_fees = []
    for strategy in vault.strategies:  # look at all of our strategies
        debt_ratio = contract.strategies(strategy.strategy)["debtRatio"] / 10000
        performance_fee = contract.strategies(strategy.strategy)["performanceFee"]
        proportional_fee = debt_ratio * performance_fee
        strategy_fees.append(proportional_fee)

    strategy_performance = sum(strategy_fees)
    vault_performance = contract.performanceFee() if hasattr(contract, "performanceFee") else 0
    management = contract.managementFee() if hasattr(contract, "managementFee") else 0
    performance = vault_performance + strategy_performance

    performance /= 1e4
    management /= 1e4

    # assume we are compounding every week
    compounding = 52

    # calculate our APR after fees
    apr_after_fees = compounding * ((net_apy + 1) ** (1 / compounding)) - compounding

    # calculate our pre-fee APR
    gross_apr = apr_after_fees / (1 - performance) + management

    # 0.3.5+ should never be < 0% because of management
    if net_apy < 0 and Version(vault.api_version) >= Version("0.3.5"):
        net_apy = 0

    points = ApyPoints(week_ago_apy, month_ago_apy, inception_apy)
    fees = ApyFees(performance=performance, management=management)
    return Apy("v2:simple", gross_apr, net_apy, fees, points=points)
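# A worked example of the fee math above with made-up numbers: a 5% net APY,
# a 20% total performance fee (2000 bps) and a 2% management fee (200 bps).
compounding = 52
net_apy = 0.05
performance = 0.20
management = 0.02
apr_after_fees = compounding * ((net_apy + 1) ** (1 / compounding)) - compounding
gross_apr = apr_after_fees / (1 - performance) + management
# apr_after_fees ~= 0.0488 (the weekly-compounded APR that yields a 5% APY)
# gross_apr ~= 0.0810 (the pre-fee APR the strategies must earn)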
def average(vault, samples: ApySamples) -> Apy:
    harvests = sorted([harvest for strategy in vault.strategies for harvest in strategy.harvests])

    # we don't want to display APYs when vaults are ramping up
    if len(harvests) < 2:
        raise ApyError("v2:harvests", "harvests are < 2")

    # set our parameters
    contract = vault.vault
    price_per_share = contract.pricePerShare

    # calculate our current price
    now_price = price_per_share(block_identifier=samples.now)

    # get our inception data
    inception_price = 10 ** contract.decimals()
    inception_block = harvests[:2][-1]

    if now_price == inception_price:
        raise ApyError("v2:inception", "no change from inception price")

    # check our historical data
    if samples.week_ago > inception_block:
        week_ago_price = price_per_share(block_identifier=samples.week_ago)
    else:
        week_ago_price = inception_price

    if samples.month_ago > inception_block:
        month_ago_price = price_per_share(block_identifier=samples.month_ago)
    else:
        month_ago_price = inception_price

    now_point = SharePricePoint(samples.now, now_price)
    week_ago_point = SharePricePoint(samples.week_ago, week_ago_price)
    month_ago_point = SharePricePoint(samples.month_ago, month_ago_price)
    inception_point = SharePricePoint(inception_block, inception_price)

    week_ago_apy = calculate_roi(now_point, week_ago_point)
    month_ago_apy = calculate_roi(now_point, month_ago_point)
    inception_apy = calculate_roi(now_point, inception_point)

    # we should look at a vault's harvests, age, etc. to determine whether to show the new APY or not
    # use the first non-zero apy, ordered by precedence
    apys = [month_ago_apy, week_ago_apy]
    two_months_ago = datetime.now() - timedelta(days=60)
    if contract.activation() > two_months_ago.timestamp():
        # if the vault was activated less than two months ago then it's ok to use
        # the inception apy, otherwise using it isn't representative of the current apy
        apys.append(inception_apy)

    net_apy = next((value for value in apys if value != 0), 0)

    # for performance fee, half comes from strategy (strategist share) and half from the vault (treasury share)
    strategy_fees = []
    for strategy in vault.strategies:  # look at all of our strategies
        debt_ratio = contract.strategies(strategy.strategy)["debtRatio"] / 10000
        performance_fee = contract.strategies(strategy.strategy)["performanceFee"]
        proportional_fee = debt_ratio * performance_fee
        strategy_fees.append(proportional_fee)

    strategy_performance = sum(strategy_fees)
    vault_performance = contract.performanceFee() if hasattr(contract, "performanceFee") else 0
    management = contract.managementFee() if hasattr(contract, "managementFee") else 0
    performance = vault_performance + strategy_performance

    performance /= 1e4
    management /= 1e4

    # assume we are compounding every week
    compounding = 52

    # calculate our APR after fees
    # if net_apy is negative, no fees are charged
    apr_after_fees = (
        compounding * ((net_apy + 1) ** (1 / compounding)) - compounding if net_apy > 0 else net_apy
    )

    # calculate our pre-fee APR
    gross_apr = apr_after_fees / (1 - performance) + management

    # 0.3.5+ should never be < 0% because of management
    if net_apy < 0 and Version(vault.api_version) >= Version("0.3.5"):
        net_apy = 0

    points = ApyPoints(week_ago_apy, month_ago_apy, inception_apy)
    fees = ApyFees(performance=performance, management=management)
    return Apy("v2:averaged", gross_apr, net_apy, fees, points=points)
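# The activation check above in isolation: contract.activation() returns a unix
# timestamp, so the comparison asks "was the vault deployed within the last 60
# days?". The timestamp below is a hypothetical vault activated 30 days ago.
from datetime import datetime, timedelta

two_months_ago = datetime.now() - timedelta(days=60)
activation = (datetime.now() - timedelta(days=30)).timestamp()
assert activation > two_months_ago.timestamp()  # young vault: inception APY is still representative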
class TestServiceVersionRequirement(TestCase):
    def test_accepted_services(self):
        v1 = ServiceVersionRequirement(service="postgresql", supported_version="==14.0.0")
        v2 = ServiceVersionRequirement(service="clickhouse", supported_version="==21.6.0")
        v3 = ServiceVersionRequirement(service="redis", supported_version="==6.2.6")

        self.assertEqual(v1.service, "postgresql")
        self.assertEqual(v2.service, "clickhouse")
        self.assertEqual(v3.service, "redis")

        self.assertEqual(type(v1.supported_version), SimpleSpec)
        self.assertEqual(type(v2.supported_version), SimpleSpec)
        self.assertEqual(type(v3.supported_version), SimpleSpec)

        self.assertEqual(str(v1.supported_version), "==14.0.0")
        self.assertEqual(str(v2.supported_version), "==21.6.0")
        self.assertEqual(str(v3.supported_version), "==6.2.6")

        try:
            ServiceVersionRequirement(service="kea", supported_version="==2.5.0")
        except Exception as e:
            self.assertEqual(
                str(e),
                "service kea cannot be used to specify a version requirement. service should be one of clickhouse, postgresql, redis",
            )

    def test_service_versions(self):
        version1 = version_requirement.version_string_to_semver("14")
        self.assertEqual(version1.major, 14)
        self.assertEqual(version1.minor, 0)
        self.assertEqual(version1.patch, 0)

        version2 = version_requirement.version_string_to_semver("14.1")
        self.assertEqual(version2.major, 14)
        self.assertEqual(version2.minor, 1)
        self.assertEqual(version2.patch, 0)

        version3 = version_requirement.version_string_to_semver("14.1.2")
        self.assertEqual(version3.major, 14)
        self.assertEqual(version3.minor, 1)
        self.assertEqual(version3.patch, 2)

        version4 = version_requirement.version_string_to_semver("14.1.2.5")
        self.assertEqual(version4.major, 14)
        self.assertEqual(version4.minor, 1)
        self.assertEqual(version4.patch, 2)

        version5 = version_requirement.version_string_to_semver("15.0.0-alpha")
        self.assertEqual(version5.major, 15)
        self.assertEqual(version5.minor, 0)
        self.assertEqual(version5.patch, 0)

        version6 = version_requirement.version_string_to_semver("16.0.0.2-alpha")
        self.assertEqual(version6.major, 16)
        self.assertEqual(version6.minor, 0)
        self.assertEqual(version6.patch, 0)

        version7 = version_requirement.version_string_to_semver("11.13 (Ubuntu 11.13-2.heroku1+1)")
        self.assertEqual(version7.major, 11)
        self.assertEqual(version7.minor, 13)
        self.assertEqual(version7.patch, 0)

    @patch(
        "posthog.version_requirement.ServiceVersionRequirement.get_service_version",
        lambda x: Version("12.1.2"),
    )
    def test_ranges(self):
        v1 = ServiceVersionRequirement(service="postgresql", supported_version="==14.0.0")
        in_range, service_version = v1.is_service_in_accepted_version()
        self.assertEqual(in_range, False)
        self.assertEqual(str(service_version), "12.1.2")

        v2 = ServiceVersionRequirement(service="postgresql", supported_version="==12.1.2")
        in_range, _ = v2.is_service_in_accepted_version()
        self.assertEqual(in_range, True)

        v3 = ServiceVersionRequirement(service="postgresql", supported_version=">=12.0.0,<12.1.2")
        in_range, _ = v3.is_service_in_accepted_version()
        self.assertEqual(in_range, False)

        v4 = ServiceVersionRequirement(service="postgresql", supported_version=">=12.0.0,<=12.1.2")
        in_range, _ = v4.is_service_in_accepted_version()
        self.assertEqual(in_range, True)

        v5 = ServiceVersionRequirement(service="postgresql", supported_version=">=11.0.0,<=13.0.0")
        in_range, _ = v5.is_service_in_accepted_version()
        self.assertEqual(in_range, True)
from typing import Dict, Optional

from semantic_version.base import Version

from posthog.async_migrations.definition import AsyncMigrationDefinition
from posthog.models.async_migration import AsyncMigration, get_all_completed_async_migrations
from posthog.settings import TEST
from posthog.utils import import_submodules  # assumed location of the submodule import helper
from posthog.version import VERSION

ALL_ASYNC_MIGRATIONS: Dict[str, AsyncMigrationDefinition] = {}

ASYNC_MIGRATION_TO_DEPENDENCY: Dict[str, Optional[str]] = {}

# inverted mapping of ASYNC_MIGRATION_TO_DEPENDENCY
DEPENDENCY_TO_ASYNC_MIGRATION: Dict[Optional[str], str] = {}

POSTHOG_VERSION = Version(VERSION)

ASYNC_MIGRATIONS_MODULE_PATH = "posthog.async_migrations.migrations"
ASYNC_MIGRATIONS_EXAMPLE_MODULE_PATH = "posthog.async_migrations.examples"

all_migrations = import_submodules(ASYNC_MIGRATIONS_MODULE_PATH)

for name, module in all_migrations.items():
    ALL_ASYNC_MIGRATIONS[name] = module.Migration()