def check_database(app_configs, **kwargs):
    """Django system check reporting database health.

    Emits an informational check when not running on PostgreSQL, a
    performance check when a probe query takes over 100 ms, and an error
    check when the database cannot be reached at all.
    """
    errors = []
    if not using_postgresql():
        errors.append(
            weblate_check(
                "weblate.E006",
                "Weblate performs best with PostgreSQL, consider migrating to it.",
                Info,
            )
        )
    try:
        delta = measure_database_latency()
        if delta > 100:
            errors.append(
                weblate_check(
                    "weblate.C038",
                    # Fixed typo in user-facing message ("miliseconds")
                    f"The database seems slow, the query took {delta} milliseconds",
                )
            )
    except DatabaseError as error:
        errors.append(
            weblate_check(
                "weblate.C037",
                f"Failed to connect to the database: {error}",
            )
        )
    return errors
def ready(self):
    """Register Weblate's system checks and database-specific lookups."""
    super().ready()
    # (check, is deployment-only) pairs, registered in order.
    registrations = (
        (check_data_writable, False),
        (check_mail_connection, True),
        (check_celery, True),
        (check_cache, True),
        (check_settings, True),
        (check_templates, True),
        (check_database, True),
        (check_site, False),
        (check_perms, True),
        (check_errors, True),
        (check_version, True),
        (check_encoding, False),
        (check_diskspace, True),
    )
    for check, deploy_only in registrations:
        if deploy_only:
            register(check, deploy=True)
        else:
            register(check)
    init_error_collection()
    # Pick the lookup implementations matching the database backend and
    # attach them to both text field types.
    if using_postgresql():
        search_lookup = PostgreSQLSearchLookup
        substring_lookup = PostgreSQLSubstringLookup
    else:
        search_lookup = MySQLSearchLookup
        substring_lookup = MySQLSubstringLookup
    for field_class in (CharField, TextField):
        field_class.register_lookup(search_lookup)
        field_class.register_lookup(substring_lookup)
def get_db_version():
    """Return a (label, url, version) tuple describing the database server.

    Returns None when the version lookup fails; the failure is recorded
    via report_error.
    """
    if using_postgresql():
        try:
            with connection.cursor() as cursor:
                cursor.execute("SHOW server_version")
                version = cursor.fetchone()
        except RuntimeError:
            report_error(cause="PostgreSQL version check")
            return None
        return (
            "PostgreSQL server",
            "https://www.postgresql.org/",
            version[0].split(" ")[0],
        )
    try:
        with connection.cursor() as cursor:
            version = cursor.connection.get_server_info()
    except RuntimeError:
        report_error(cause="MySQL version check")
        return None
    return (
        # Fixed typo in user-visible label ("sever" -> "server")
        f"{connection.display_name} server",
        "https://mariadb.org/"
        if connection.mysql_is_mariadb
        else "https://www.mysql.com/",
        version.split("-", 1)[0],
    )
def test_case_sensitive_fuzzy_get(self):
    """Test handling of manually created zh_TW, zh-TW and zh-tw languages."""
    # Case-sensitive code matching is only implemented for PostgreSQL.
    if not using_postgresql():
        raise SkipTest("Not supported on MySQL")
    language = Language.objects.create(code="zh_TW", name="Chinese (Taiwan)")
    language.plural_set.create(
        number=0,
        formula="0",
        source=Plural.SOURCE_DEFAULT,
    )
    self.run_create("zh_TW", "zh_TW", "ltr", "0", "Chinese (Taiwan) (zh_TW)", False)
    # Same code differing only in separator/casing must stay distinct.
    language = Language.objects.create(code="zh-TW", name="Chinese Taiwan")
    language.plural_set.create(
        number=0,
        formula="0",
        source=Plural.SOURCE_DEFAULT,
    )
    self.run_create("zh-TW", "zh-TW", "ltr", "0", "Chinese Taiwan (zh-TW)", False)
    language = Language.objects.create(code="zh-tw", name="Traditional Chinese")
    language.plural_set.create(
        number=0,
        formula="0",
        source=Plural.SOURCE_DEFAULT,
    )
    self.run_create("zh-tw", "zh-tw", "ltr", "0", "Traditional Chinese (zh-tw)", False)
def _databases_support_transactions(cls):
    # Workaround for MySQL: FULL TEXT indexes do not work well inside a
    # transaction, so transactions are avoided in tests there; otherwise
    # queries end up with no matches.
    # See https://dev.mysql.com/doc/refman/5.6/en/innodb-fulltext-index.html
    if using_postgresql():
        return super()._databases_support_transactions()
    return False
def get_glossary_terms(unit):
    """Return list of term pairs for an unit."""
    # Serve the per-unit cache when already computed.
    if unit.glossary_terms is not None:
        return unit.glossary_terms
    translation = unit.translation
    language = translation.language
    component = translation.component
    project = component.project
    source_language = component.source_language
    units = (
        Unit.objects.prefetch().select_related("source_unit").order_by(Lower("source"))
    )
    # Glossary matching does not apply to the source language itself.
    if language == source_language:
        return units.none()
    # Build complete source for matching. The leading and trailing empty
    # parts make the joined string start and end with PLURAL_SEPARATOR,
    # so the boundary lookups below (end - len(term), end + 1) never
    # index outside the string.
    parts = [""]
    for text in unit.get_source_plurals() + [unit.context]:
        text = text.lower().strip()
        if text:
            parts.append(text)
    parts.append("")
    source = PLURAL_SEPARATOR.join(parts)
    # Languages using n-gram matching skip the word-boundary check.
    uses_ngram = source_language.uses_ngram()
    matches = set()
    automaton = project.glossary_automaton
    if automaton.kind == ahocorasick.AHOCORASICK:
        # Extract terms present in the source; `end` is the index of the
        # last matched character, so the characters just before and after
        # the match are checked to be non-word to enforce word boundaries.
        for end, term in automaton.iter(source):
            if uses_ngram or (
                NON_WORD_RE.match(source[end - len(term)])
                and NON_WORD_RE.match(source[end + 1])
            ):
                matches.add(term)
    if using_postgresql():
        match = r"^({})$".format("|".join(re_escape(term) for term in matches))
        # Use regex as that is utilizing pg_trgm index
        query = Q(source__iregex=match) | Q(variant__unit__source__iregex=match)
    else:
        # With MySQL we utilize it does case insensitive lookup
        query = Q(source__in=matches) | Q(variant__unit__source__in=matches)
    units = units.filter(
        query,
        translation__component__in=project.glossaries,
        translation__component__source_language=source_language,
        translation__language=language,
    ).distinct()
    # Store in a unit cache
    unit.glossary_terms = units
    return units
def test_view_redirect(self): """Test case insentivite lookups and aliases in middleware.""" # Non existing fails with 404 kwargs = {"project": "invalid"} response = self.client.get(reverse("project", kwargs=kwargs)) self.assertEqual(response.status_code, 404) # Different casing should redirect, MySQL always does case insensitive lookups kwargs["project"] = self.project.slug.upper() if using_postgresql(): response = self.client.get(reverse("project", kwargs=kwargs)) self.assertRedirects(response, reverse("project", kwargs=self.kw_project), status_code=301) # Non existing fails with 404 kwargs["component"] = "invalid" response = self.client.get(reverse("component", kwargs=kwargs)) self.assertEqual(response.status_code, 404) # Different casing should redirect, MySQL always does case insensitive lookups kwargs["component"] = self.component.slug.upper() if using_postgresql(): response = self.client.get(reverse("component", kwargs=kwargs)) self.assertRedirects( response, reverse("component", kwargs=self.kw_component), status_code=301, ) # Non existing fails with 404 kwargs["lang"] = "cs-DE" response = self.client.get(reverse("translation", kwargs=kwargs)) self.assertEqual(response.status_code, 404) # Aliased language should redirect kwargs["lang"] = "czech" response = self.client.get(reverse("translation", kwargs=kwargs)) self.assertRedirects( response, reverse("translation", kwargs=self.kw_translation), status_code=301, )
def check_database(app_configs, **kwargs):
    """Emit an informational check unless running on PostgreSQL."""
    hints = []
    if not using_postgresql():
        hints.append(
            weblate_check(
                "weblate.E006",
                "Weblate performs best with PostgreSQL, consider migrating to it.",
                Info,
            )
        )
    return hints
def get_glossary_terms(unit):
    """Return list of term pairs for an unit."""
    # Serve the cached result when already computed for this unit.
    if unit.glossary_terms is not None:
        return unit.glossary_terms
    translation = unit.translation
    language = translation.language
    component = translation.component
    source_language = component.source_language
    glossaries = component.project.glossaries
    units = (
        Unit.objects.prefetch().select_related("source_unit").order_by(Lower("source"))
    )
    # No glossary matching within the source language itself.
    if language == source_language:
        return units.none()
    # Collect all terms across the project glossaries.
    terms = set()
    for glossary in glossaries:
        terms.update(glossary.glossary_sources)
    # Build complete source for matching
    parts = []
    for text in unit.get_source_plurals() + [unit.context]:
        normalized = text.lower().strip()
        if normalized:
            parts.append(normalized)
    source = PLURAL_SEPARATOR.join(parts)
    # Extract terms present in the source
    # This might use a suffix tree for improved performance
    matches = []
    for term in terms:
        if re.search(r"\b{}\b".format(re.escape(term)), source):
            matches.append(term)
    if using_postgresql():
        # Use regex as that is utilizing pg_trgm index
        match = r"^({})$".format("|".join(re_escape(term) for term in matches))
        query = Q(source__iregex=match) | Q(variant__unit__source__iregex=match)
    else:
        # With MySQL we utilize it does case insensitive lookup
        query = Q(source__in=matches) | Q(variant__unit__source__in=matches)
    units = units.filter(
        query,
        translation__component__in=glossaries,
        translation__component__source_language=source_language,
        translation__language=language,
    ).distinct()
    # Store in a unit cache
    unit.glossary_terms = units
    return units
def has_field(self, text, context: Dict):  # noqa: C901
    """Build a Q filter implementing the has: search operator."""
    # Simple lookups with no context dependency, keyed by operand.
    simple = {
        "plural": lambda: Q(source__contains=PLURAL_SEPARATOR),
        "suggestion": lambda: Q(suggestion__isnull=False),
        "explanation": lambda: ~Q(source_unit__explanation=""),
        "note": lambda: ~Q(note=""),
        "comment": lambda: Q(comment__resolved=False),
        "resolved-comment": lambda: Q(comment__resolved=True),
        "resolved_comment": lambda: Q(comment__resolved=True),
        "check": lambda: Q(check__dismissed=False),
        "failing-check": lambda: Q(check__dismissed=False),
        "failing_check": lambda: Q(check__dismissed=False),
        "dismissed-check": lambda: Q(check__dismissed=True),
        "dismissed_check": lambda: Q(check__dismissed=True),
        "ignored-check": lambda: Q(check__dismissed=True),
        "ignored_check": lambda: Q(check__dismissed=True),
        "translation": lambda: Q(state__gte=STATE_TRANSLATED),
        "variant": lambda: Q(variant__isnull=False),
        "shaping": lambda: Q(variant__isnull=False),
        "label": lambda: Q(source_unit__labels__isnull=False)
        | Q(labels__isnull=False),
        "context": lambda: ~Q(context=""),
        "screenshot": lambda: Q(screenshots__isnull=False)
        | Q(source_unit__screenshots__isnull=False),
        "flags": lambda: ~Q(source_unit__extra_flags=""),
    }
    if text in simple:
        return simple[text]()
    if text == "glossary":
        # Glossary matching needs the project from the search context.
        project = context.get("project")
        if not project:
            return Q(source__isnull=True)
        terms = set(
            chain.from_iterable(
                glossary.glossary_sources for glossary in project.glossaries
            )
        )
        if not terms:
            return Q(source__isnull=True)
        # PostgreSQL has word-boundary markers; MySQL needs an explicit
        # whitespace/anchor alternation.
        if using_postgresql():
            template = r"[[:<:]]({})[[:>:]]"
        else:
            template = r"(^|[ \t\n\r\f\v])({})($|[ \t\n\r\f\v])"
        return Q(
            source__iregex=template.format(
                "|".join(re_escape(term) for term in terms)
            )
        )
    raise ValueError(f"Unsupported has lookup: {text}")
def test_glossary_match(self):
    """Verify has:glossary builds the backend-specific regex filter."""
    glossary = self.project.glossaries[0].translation_set.get(language_code="cs")
    glossary.add_unit(None, "", "hello", "ahoj")
    # The expected pattern differs by database backend.
    expected = (
        "[[:<:]](hello)[[:>:]]"
        if using_postgresql()
        else r"(^|[ \t\n\r\f\v])(hello)($|[ \t\n\r\f\v])"
    )
    self.assert_query(
        "has:glossary",
        Q(source__iregex=expected),
        True,
        project=self.project,
    )
def database_backup():
    """Dump the default database into the backups data directory.

    Controlled by settings.DATABASE_BACKUP: "none" disables backups,
    "compressed" produces a gzipped dump, any other value a plain SQL
    file. Raises subprocess.CalledProcessError when the dump tool fails.
    """
    if settings.DATABASE_BACKUP == "none":
        return
    # Serialize with other backup operations.
    with backup_lock():
        database = settings.DATABASES["default"]
        env = get_clean_env()
        compress = settings.DATABASE_BACKUP == "compressed"
        out_compressed = data_dir("backups", "database.sql.gz")
        out_plain = data_dir("backups", "database.sql")
        if using_postgresql():
            cmd = ["pg_dump", "--dbname", database["NAME"]]
            if database["HOST"]:
                cmd.extend(["--host", database["HOST"]])
            if database["PORT"]:
                cmd.extend(["--port", database["PORT"]])
            if database["USER"]:
                cmd.extend(["--username", database["USER"]])
            if settings.DATABASE_BACKUP == "compressed":
                # pg_dump can compress by itself, so skip the gzip step below.
                cmd.extend(["--file", out_compressed])
                cmd.extend(["--compress", "6"])
                compress = False
            else:
                cmd.extend(["--file", out_plain])
            env["PGPASSWORD"] = database["PASSWORD"]
        else:
            # mysqldump has no built-in compression; always dump plain and
            # gzip afterwards when requested.
            cmd = [
                "mysqldump",
                "--result-file",
                out_plain,
                "--single-transaction",
                "--skip-lock-tables",
            ]
            if database["HOST"]:
                cmd.extend(["--host", database["HOST"]])
            if database["PORT"]:
                cmd.extend(["--port", database["PORT"]])
            if database["USER"]:
                cmd.extend(["--user", database["USER"]])
            cmd.extend(["--databases", database["NAME"]])
            env["MYSQL_PWD"] = database["PASSWORD"]
        try:
            subprocess.run(
                cmd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.DEVNULL,
                check=True,
                universal_newlines=True,
            )
        except subprocess.CalledProcessError as error:
            # Attach the tool output to the error report before re-raising.
            report_error(extra_data={"stdout": error.stdout, "stderr": error.stderr})
            raise
        if compress:
            # Gzip the plain dump and drop the uncompressed file.
            with open(out_plain, "rb") as f_in:
                with gzip.open(out_compressed, "wb") as f_out:
                    shutil.copyfileobj(f_in, f_out)
            os.unlink(out_plain)
def select_for_update(self):
    # PostgreSQL supports the weaker NO KEY row lock; other backends
    # fall back to a regular FOR UPDATE lock.
    use_no_key = using_postgresql()
    return super().select_for_update(no_key=use_no_key)