Move repo from GitHub to Gitea

Signed-off-by: Boris Yumankulov <boria138@altlinux.org>
2025-06-01 15:21:32 +05:00
parent aae1ce9c10
commit abec9bbef8
110 changed files with 545106 additions and 2 deletions

16
dev-scripts/.spellignore Normal file

@@ -0,0 +1,16 @@
PortProton
\n
flatpak
Auto Install
Project-Id-Version:
Report-Msgid-Bugs-To:
POT-Creation-Date:
PO-Revision-Date:
Last-Translator:
Language:
Language-Team:
Plural-Forms:
MIME-Version:
Content-Type:
Content-Transfer-Encoding:
Generated-By:

133
dev-scripts/bump_ver.py Executable file

@@ -0,0 +1,133 @@
#!/usr/bin/env python3
import argparse
import re
from pathlib import Path
# Base directory of the project
BASE_DIR = Path(__file__).parent.parent
# Specific project files
APPIMAGE_RECIPE = BASE_DIR / "build-aux" / "AppImageBuilder.yml"
ARCH_PKGBUILD = BASE_DIR / "build-aux" / "PKGBUILD"
FEDORA_SPEC = BASE_DIR / "build-aux" / "fedora.spec"
PYPROJECT = BASE_DIR / "pyproject.toml"
APP_PY = BASE_DIR / "portprotonqt" / "app.py"
GITHUB_WORKFLOW = BASE_DIR / ".github" / "workflows" / "build.yml"
GITEA_WORKFLOW = BASE_DIR / ".gitea" / "workflows" / "build.yml"
def bump_appimage(path: Path, old: str, new: str) -> bool:
"""
Update only the 'version' field under app_info in AppImageBuilder.yml
"""
if not path.exists():
return False
text = path.read_text(encoding='utf-8')
pattern = re.compile(r"(?m)^(\s*version:\s*)" + re.escape(old) + r"$")
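# Illustrative: matches a whole line such as "  version: 0.1.0", keeping the
# leading whitespace and the "version: " prefix in group 1.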
new_text, count = pattern.subn(lambda m: m.group(1) + new, text)
if count:
path.write_text(new_text, encoding='utf-8')
return bool(count)
def bump_arch(path: Path, old: str, new: str) -> bool:
"""
Update pkgver in PKGBUILD
"""
if not path.exists():
return False
text = path.read_text(encoding='utf-8')
pattern = re.compile(r"(?m)^(pkgver=)" + re.escape(old) + r"$")
new_text, count = pattern.subn(lambda m: m.group(1) + new, text)
if count:
path.write_text(new_text, encoding='utf-8')
return bool(count)
def bump_fedora(path: Path, old: str, new: str) -> bool:
"""
Update only the '%global pypi_version' line in fedora.spec
"""
if not path.exists():
return False
text = path.read_text(encoding='utf-8')
pattern = re.compile(r"(?m)^(%global\s+pypi_version\s+)" + re.escape(old) + r"$")
new_text, count = pattern.subn(lambda m: m.group(1) + new, text)
if count:
path.write_text(new_text, encoding='utf-8')
return bool(count)
def bump_pyproject(path: Path, old: str, new: str) -> bool:
"""
Update version in pyproject.toml under [project]
"""
if not path.exists():
return False
text = path.read_text(encoding='utf-8')
pattern = re.compile(r"(?m)^(version\s*=\s*)\"" + re.escape(old) + r"\"$")
new_text, count = pattern.subn(lambda m: m.group(1) + f'"{new}"', text)
if count:
path.write_text(new_text, encoding='utf-8')
return bool(count)
def bump_app_py(path: Path, old: str, new: str) -> bool:
"""
Update __app_version__ in app.py
"""
if not path.exists():
return False
text = path.read_text(encoding='utf-8')
pattern = re.compile(r"(?m)^(\s*__app_version__\s*=\s*)\"" + re.escape(old) + r"\"$")
new_text, count = pattern.subn(lambda m: m.group(1) + f'"{new}"', text)
if count:
path.write_text(new_text, encoding='utf-8')
return bool(count)
def bump_workflow(path: Path, old: str, new: str) -> bool:
"""
Update VERSION in GitHub or Gitea Actions workflow
"""
if not path.exists():
return False
text = path.read_text(encoding='utf-8')
pattern = re.compile(r"(?m)^(\s*VERSION:\s*)" + re.escape(old) + r"$")
new_text, count = pattern.subn(lambda m: m.group(1) + new, text)
if count:
path.write_text(new_text, encoding='utf-8')
return bool(count)
def main():
parser = argparse.ArgumentParser(description='Bump project version in specific files')
parser.add_argument('old', help='Old version string')
parser.add_argument('new', help='New version string')
args = parser.parse_args()
old, new = args.old, args.new
tasks = [
(APPIMAGE_RECIPE, bump_appimage),
(ARCH_PKGBUILD, bump_arch),
(FEDORA_SPEC, bump_fedora),
(PYPROJECT, bump_pyproject),
(APP_PY, bump_app_py),
(GITHUB_WORKFLOW, bump_workflow),
(GITEA_WORKFLOW, bump_workflow)
]
updated = []
for path, func in tasks:
if func(path, old, new):
updated.append(path.relative_to(BASE_DIR))
if updated:
print(f"Updated version from {old} to {new} in {len(updated)} files:")
for p in sorted(updated):
print(f" - {p}")
else:
print(f"No occurrences of version {old} found in specified files.")
if __name__ == '__main__':
main()
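
The script is invoked with the old and new version strings, e.g. ./dev-scripts/bump_ver.py 0.1.0 0.1.1. As a minimal standalone sketch of the substitution every bump_* helper performs (the version strings here are made up):

import re

text = "pkgver=0.1.0\n"
pattern = re.compile(r"(?m)^(pkgver=)" + re.escape("0.1.0") + r"$")
new_text, count = pattern.subn(lambda m: m.group(1) + "0.1.1", text)
print(count)     # 1 -- exactly one whole line matched
print(new_text)  # pkgver=0.1.1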


@@ -0,0 +1,28 @@
#!/usr/bin/env python3
import sys
from pathlib import Path
import re
# Forbidden properties
FORBIDDEN_PROPERTIES = {
"box-shadow",
"backdrop-filter",
"cursor",
"text-shadow",
}
def check_qss_files():
has_errors = False
for qss_file in Path("portprotonqt/themes").glob("**/*.py"):
with open(qss_file, "r") as f:
content = f.read()
for prop in FORBIDDEN_PROPERTIES:
if re.search(rf"{prop}\s*:", content, re.IGNORECASE):
print(f"ERROR: Unknown qss property found '{prop}' on file {qss_file}")
has_errors = True
return has_errors
if __name__ == "__main__":
if check_qss_files():
sys.exit(1)
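
A minimal illustration of the detection logic (the style string below is hypothetical; theme styles in this project are embedded in Python files, hence the *.py glob above):

import re

style = 'BUTTON_STYLE = "QPushButton { cursor: pointer; }"'
print(bool(re.search(r"cursor\s*:", style, re.IGNORECASE)))  # True -- would be reported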

199
dev-scripts/get_id.py Executable file

@@ -0,0 +1,199 @@
#!/usr/bin/env python3
import os
import json
import asyncio
import aiohttp
import tarfile
# Read the Steam Web API key from the environment.
key = os.environ.get('STEAM_KEY')
base_url = "https://api.steampowered.com/IStoreService/GetAppList/v1/?"
category = "games"
def normalize_name(s):
"""
Приведение строки к нормальному виду:
- перевод в нижний регистр,
- удаление символов ™ и ®,
- замена разделителей (-, :, ,) на пробел,
- удаление лишних пробелов,
- удаление суффиксов 'bin' или 'app' в конце строки,
- удаление ключевых слов типа 'ultimate', 'edition' и т.п.
"""
s = s.lower()
for ch in ["", "®"]:
s = s.replace(ch, "")
for ch in ["-", ":", ","]:
s = s.replace(ch, " ")
s = " ".join(s.split())
for suffix in ["bin", "app"]:
if s.endswith(suffix):
s = s[:-len(suffix)].strip()
# Drop stop words that should not affect matching
keywords_to_remove = {"ultimate", "edition", "definitive", "complete", "remastered"}
words = s.split()
filtered_words = [word for word in words if word not in keywords_to_remove]
return " ".join(filtered_words)
def process_steam_apps(steam_apps):
"""
Для каждого приложения из Steam добавляет ключ "normalized_name",
содержащий нормализованное значение имени (поле "name"),
и удаляет ненужные поля: "name", "last_modified", "price_change_number".
"""
for app in steam_apps:
original = app.get("name", "")
if not app.get("normalized_name"):
app["normalized_name"] = normalize_name(original)
# Drop fields we do not need
app.pop("name", None)
app.pop("last_modified", None)
app.pop("price_change_number", None)
return steam_apps
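# Illustrative transformation: {"appid": 570, "name": "Dota 2", "last_modified": ...}
# becomes {"appid": 570, "normalized_name": "dota 2"}.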
async def get_app_list(session, last_appid, endpoint):
"""
Получает часть списка приложений из API.
Если last_appid передан, добавляет его к URL для постраничной загрузки.
"""
url = endpoint
if last_appid:
url = f"{url}&last_appid={last_appid}"
async with session.get(url) as response:
response.raise_for_status()
return await response.json()
async def fetch_games_json(session):
"""
Загружает JSON с данными из AreWeAntiCheatYet и извлекает поля normalized_name и status.
"""
url = "https://raw.githubusercontent.com/AreWeAntiCheatYet/AreWeAntiCheatYet/HEAD/games.json"
try:
async with session.get(url) as response:
response.raise_for_status()
text = await response.text()
data = json.loads(text)
# Keep only the normalized_name and status fields
return [{"normalized_name": normalize_name(game["name"]), "status": game["status"]} for game in data]
except Exception as error:
print(f"Ошибка загрузки games.json: {error}")
return []
async def request_data():
"""
Получает данные списка приложений для категории "games" до тех пор,
пока не закончатся результаты, обрабатывает данные для добавления
нормализованных имён и записывает итоговый результат в JSON-файл.
Отдельно загружает games.json и сохраняет его в отдельный JSON-файл.
"""
# Параметры запроса для игр.
game_param = "&include_games=true"
dlc_param = "&include_dlc=false"
software_param = "&include_software=false"
videos_param = "&include_videos=false"
hardware_param = "&include_hardware=false"
endpoint = (
f"{base_url}key={key}"
f"{game_param}{dlc_param}{software_param}{videos_param}{hardware_param}"
f"&max_results=50000"
)
output_json = []
total_parsed = 0
try:
async with aiohttp.ClientSession() as session:
# Fetch the Steam data
have_more_results = True
last_appid_val = None
while have_more_results:
app_list = await get_app_list(session, last_appid_val, endpoint)
apps = app_list['response']['apps']
# Post-process the apps to add normalized names
apps = process_steam_apps(apps)
output_json.extend(apps)
total_parsed += len(apps)
have_more_results = app_list['response'].get('have_more_results', False)
last_appid_val = app_list['response'].get('last_appid')
print(f"Обработано {len(apps)} игр, всего: {total_parsed}.")
# Загружаем и сохраняем games.json отдельно
anticheat_games = await fetch_games_json(session)
except Exception as error:
print(f"Ошибка получения данных для {category}: {error}")
return False
repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
data_dir = os.path.join(repo_root, "data")
os.makedirs(data_dir, exist_ok=True)
# Paths to the Steam JSON files
output_json_full = os.path.join(data_dir, f"{category}_appid.json")
output_json_min = os.path.join(data_dir, f"{category}_appid_min.json")
# Write the full Steam data with indentation
with open(output_json_full, "w", encoding="utf-8") as f:
json.dump(output_json, f, ensure_ascii=False, indent=2)
# Write the minified Steam data
with open(output_json_min, "w", encoding="utf-8") as f:
json.dump(output_json, f, ensure_ascii=False, separators=(',',':'))
# Paths to the AreWeAntiCheatYet JSON files
anticheat_json_full = os.path.join(data_dir, "anticheat_games.json")
anticheat_json_min = os.path.join(data_dir, "anticheat_games_min.json")
# Write the full AreWeAntiCheatYet data with indentation
with open(anticheat_json_full, "w", encoding="utf-8") as f:
json.dump(anticheat_games, f, ensure_ascii=False, indent=2)
# Write the minified AreWeAntiCheatYet data
with open(anticheat_json_min, "w", encoding="utf-8") as f:
json.dump(anticheat_games, f, ensure_ascii=False, separators=(',',':'))
# Pack only the minified JSON files into tar.xz archives with maximum compression
# Steam archive
steam_archive_path = os.path.join(data_dir, f"{category}_appid.tar.xz")
try:
with tarfile.open(steam_archive_path, "w:xz", preset=9) as tar:
tar.add(output_json_min, arcname=os.path.basename(output_json_min))
print(f"Упаковано минифицированное JSON Steam в архив: {steam_archive_path}")
# Удаляем исходный минифицированный файл после упаковки
os.remove(output_json_min)
except Exception as e:
print(f"Ошибка при упаковке архива Steam: {e}")
return False
# AreWeAntiCheatYet archive
anticheat_archive_path = os.path.join(data_dir, "anticheat_games.tar.xz")
try:
with tarfile.open(anticheat_archive_path, "w:xz", preset=9) as tar:
tar.add(anticheat_json_min, arcname=os.path.basename(anticheat_json_min))
print(f"Упаковано минифицированное JSON AreWeAntiCheatYet в архив: {anticheat_archive_path}")
# Удаляем исходный минифицированный файл после упаковки
os.remove(anticheat_json_min)
except Exception as e:
print(f"Ошибка при упаковке архива AreWeAntiCheatYet: {e}")
return False
return True
async def run():
success = await request_data()
if not success:
exit(1)
if __name__ == "__main__":
asyncio.run(run())
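
A sketch of how the script is meant to be driven (the key value is a placeholder; outputs land under data/ at the repo root):

import os
import subprocess

# Hypothetical invocation: the script only needs STEAM_KEY in its environment.
env = dict(os.environ, STEAM_KEY="<your-steam-web-api-key>")
subprocess.run(["./dev-scripts/get_id.py"], env=env, check=True)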

246
dev-scripts/l10n.py Executable file

@@ -0,0 +1,246 @@
#!/usr/bin/env python3
import argparse
import sys
import io
import contextlib
import re
from pathlib import Path
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from babel.messages.frontend import CommandLineInterface
from pyaspeller import YandexSpeller
# ---------- Paths ----------
GUIDE_DIR = Path(__file__).parent.parent / "documentation" / "localization_guide"
README_EN = GUIDE_DIR / "README.md"
README_RU = GUIDE_DIR / "README.ru.md"
LOCALES_PATH = Path(__file__).parent.parent / "portprotonqt" / "locales"
THEMES_PATH = Path(__file__).parent.parent / "portprotonqt" / "themes"
README_FILES = [README_EN, README_RU]
POT_FILE = LOCALES_PATH / "messages.pot"
# ---------- Project version ----------
def _get_version() -> str:
return "0.1.1"
# ---------- README update ----------
def _update_coverage(lines: list[str]) -> None:
# Parse the statistics from the pybabel --statistics output
locales_stats = [line for line in lines if line.endswith(".po")]
# Extract (count, pct, locale) tuples and sort them
rows = sorted(
m.groups()
for stat in locales_stats
if (m := re.search(
r"""(\d+\ of\ \d+).* # message counts
\((\d+\%)\).* # message percentage
locales\/(.*)\/LC_MESSAGES # locale name""",
stat, re.VERBOSE
))
)
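# Illustrative line this regex is meant to parse (format assumed from
# pybabel --statistics output):
# "90 of 120 messages (75%) translated in .../locales/ru/LC_MESSAGES/messages.po"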
for md_file in README_FILES:
if not md_file.exists():
continue
text = md_file.read_text(encoding="utf-8")
is_ru = (md_file == README_RU)
# Pick the section header
status_header = (
"Current translation status:" if not is_ru
else "Текущий статус перевода:"
)
# Build the table header and row formatter
if is_ru:
table_header = (
"<!-- Сгенерировано автоматически! -->\n\n"
"| Локаль | Прогресс | Переведено |\n"
"| :----- | -------: | ---------: |\n"
)
fmt = lambda count, pct, loc: f"| [{loc}](./{loc}/LC_MESSAGES/messages.po) | {pct} | {count.replace(' of ', ' из ')} |"
else:
table_header = (
"<!-- Auto-generated coverage table -->\n\n"
"| Locale | Progress | Translated |\n"
"| :----- | -------: | ---------: |\n"
)
fmt = lambda count, pct, loc: f"| [{loc}](./{loc}/LC_MESSAGES/messages.po) | {pct} | {count} |"
# Assemble the rows and append '---' at the end
coverage_table = (
table_header
+ "\n".join(fmt(c, p, l) for c, p, l in rows)
+ "\n\n---"
)
# Remove the previously auto-generated table
old_block = (
r"<!--\s*(?:Сгенерировано автоматически!|Auto-generated coverage table)\s*-->"
r".*?(?=\n(?:##|\Z))"
)
cleaned = re.sub(old_block, "", text, flags=re.DOTALL)
# Insert the new table right after the header line
insert_pattern = rf"(^.*{re.escape(status_header)}.*$)"
new_text = re.sub(
insert_pattern,
lambda m: m.group(1) + "\n\n" + coverage_table,
cleaned,
count=1,
flags=re.MULTILINE
)
# Write the file back only if something changed
if new_text != text:
md_file.write_text(new_text, encoding="utf-8")
# ---------- PyBabel commands ----------
def compile_locales() -> None:
CommandLineInterface().run([
"pybabel", "compile", "--use-fuzzy", "--directory",
f"{LOCALES_PATH.resolve()}", "--statistics"
])
def extract_strings() -> None:
input_dir = (Path(__file__).parent.parent / "portprotonqt").resolve()
CommandLineInterface().run([
"pybabel", "extract", "--project=PortProtonQT",
f"--version={_get_version()}",
"--strip-comment-tag",
"--no-location",
f"--input-dir={input_dir}",
"--copyright-holder=boria138",
f"--ignore-dirs={THEMES_PATH}",
f"--output-file={POT_FILE.resolve()}"
])
def update_locales() -> None:
CommandLineInterface().run([
"pybabel", "update",
f"--input-file={POT_FILE.resolve()}",
f"--output-dir={LOCALES_PATH.resolve()}",
"--ignore-obsolete",
"--update-header-comment",
])
def create_new(locales: list[str]) -> None:
if not POT_FILE.exists():
extract_strings()
for locale in locales:
CommandLineInterface().run([
"pybabel", "init",
f"--input-file={POT_FILE.resolve()}",
f"--output-dir={LOCALES_PATH.resolve()}",
f"--locale={locale}"
])
# ---------- Ignored prefixes for the spellchecker ----------
IGNORED_PREFIXES = ()
def load_ignored_prefixes(ignore_file=".spellignore"):
path = Path(__file__).parent / ignore_file
try:
# Skip blank lines: an empty entry would match (and so skip) every string
return tuple(line for line in path.read_text(encoding='utf-8').splitlines() if line.strip())
except FileNotFoundError:
return ()
IGNORED_PREFIXES = load_ignored_prefixes() + ("PortProton", "flatpak")
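# Note: the filter in extract_po_strings() is substring-based ("pref not in t"),
# so any string containing an ignored entry anywhere is skipped, not only
# strings that start with it.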
# ---------- Parallel spellcheck ----------
speller = YandexSpeller()
MSGID_RE = re.compile(r'^msgid\s+"(.*)"')
MSGSTR_RE = re.compile(r'^msgstr\s+"(.*)"')
def extract_po_strings(filepath: Path) -> list[str]:
# Collect all strings, then filter by ignore list
texts, current_key, buffer = [], None, ""
def flush():
nonlocal buffer
if buffer.strip():
texts.append(buffer)
buffer = ""
for line in filepath.read_text(encoding='utf-8').splitlines():
stripped = line.strip()
if stripped.startswith("msgid ") and filepath.suffix == '.pot':
flush(); current_key = 'msgid'; buffer = MSGID_RE.match(stripped).group(1) or ''
elif stripped.startswith("msgstr "):
flush(); current_key = 'msgstr'; buffer = MSGSTR_RE.match(stripped).group(1) or ''
elif stripped.startswith('"') and stripped.endswith('"') and current_key:
buffer += stripped[1:-1]
else:
flush(); current_key = None
flush()
# Final filter: remove ignored and multi-line
return [
t for t in texts
if t.strip() and all(pref not in t for pref in IGNORED_PREFIXES) and "\n" not in t
]
def _check_text(text: str) -> tuple[str, list[dict]]:
result = speller.spell(text)
errors = [r for r in result if r.get('word') and r.get('s')]
return text, errors
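# Illustrative speller result entry (shape inferred from the usage below):
# {"word": "teh", "s": ["the"], ...}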
def check_file(filepath: Path, issues_summary: dict) -> bool:
print(f"Checking file: {filepath}")
texts = extract_po_strings(filepath)
has_errors = False
printed_err = False
with ThreadPoolExecutor(max_workers=8) as pool:
for text, errors in pool.map(_check_text, texts):
print(f' In string: "{text}"')
if errors:
if not printed_err:
print(f"❌ Errors in file: {filepath}")
printed_err = True
has_errors = True
for err in errors:
print(f" - typo: {err['word']}, suggestions: {', '.join(err['s'])}")
issues_summary[filepath].extend([(text, err) for err in errors])
return has_errors
# ---------- Main entry point ----------
def main(args) -> int:
if args.update_all:
extract_strings(); update_locales()
if args.create_new:
create_new(args.create_new)
if args.spellcheck:
files = list(LOCALES_PATH.glob("**/*.po")) + [POT_FILE]
seen = set(); has_err = False
issues_summary = defaultdict(list)
for f in files:
if not f.exists() or f in seen: continue
seen.add(f)
if check_file(f, issues_summary):
has_err = True
else:
print(f"{f} — no errors found.")
if has_err:
print("\n📋 Summary of Spelling Errors:")
for file, errs in issues_summary.items():
print(f"\n{file}")
print("-----")
for idx, (text, err) in enumerate(errs, 1):
print(f"{idx}. In '{text}': typo '{err['word']}', suggestions: {', '.join(err['s'])}")
print("-----")
return 1 if has_err else 0
extract_strings(); compile_locales()
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog="l10n", description="Localization utility for PortProtonQT.")
parser.add_argument("--create-new", nargs='+', type=str, default=False, help="Create .po for new locales")
parser.add_argument("--update-all", action='store_true', help="Extract/update locales and update README coverage")
parser.add_argument("--spellcheck", action='store_true', help="Run spellcheck on POT and PO files")
args = parser.parse_args()
if args.spellcheck:
sys.exit(main(args))
f = io.StringIO()
with contextlib.redirect_stdout(f), contextlib.redirect_stderr(f):
main(args)
output = f.getvalue().splitlines()
_update_coverage(output)
sys.exit(0)
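
Typical invocations, for reference (locale codes below are examples):

./dev-scripts/l10n.py                      # extract, compile, and refresh README coverage
./dev-scripts/l10n.py --update-all         # regenerate messages.pot and update all .po files
./dev-scripts/l10n.py --create-new de fr   # bootstrap catalogs for new locales
./dev-scripts/l10n.py --spellcheck         # spellcheck the POT and PO files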