Add translations for 6 languages (de, es, fr, ja, ko, pt-BR)
Translate 200 strings to German, Spanish, French, Japanese, Korean, and Brazilian Portuguese, bringing localization coverage from 35% to 95%. Also adds translation helper script for future localization work. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
File diff suppressed because it is too large
Load Diff
29
scripts/analyze_translations.py
Normal file
29
scripts/analyze_translations.py
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
#!/usr/bin/env python3
"""Report keys in Localizable.xcstrings that lack a finished German translation.

Usage:
    python3 scripts/analyze_translations.py [path/to/Localizable.xcstrings]
"""
import json
import sys

# Original hard-coded location; kept as the default so existing invocation
# (no arguments) behaves the same.
DEFAULT_PATH = '/Users/treyt/Desktop/code/Feels/Feels/Localizable.xcstrings'


def find_missing(strings: dict, lang: str = 'de') -> list:
    """Return keys whose *lang* localization is absent or not in state 'translated'.

    A key is "missing" when:
      - it has no localization entry for *lang*, or
      - the entry has no 'stringUnit' (e.g. variations-only), or
      - the stringUnit's 'state' is anything other than 'translated'.
    Whitespace-only keys are ignored.
    """
    missing = []
    for key, val in strings.items():
        if not key.strip():
            continue
        localizations = val.get('localizations', {})
        if lang not in localizations:
            missing.append(key)
            continue
        unit = localizations[lang].get('stringUnit')
        # Bug fix: the original only checked the state when 'stringUnit' was
        # present, so an entry with a 'de' localization but no 'stringUnit'
        # was silently treated as translated. It is missing too.
        if unit is None or unit.get('state', '') != 'translated':
            missing.append(key)
    return missing


def main() -> None:
    """Load the catalog (path from argv[1] or the default) and print missing keys."""
    path = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_PATH
    with open(path, 'r') as f:
        d = json.load(f)

    missing = find_missing(d.get('strings', {}))

    # Print all missing strings
    print("Missing translations:")
    for s in missing:
        print(repr(s))
    print(f'\nTotal missing: {len(missing)}')


if __name__ == '__main__':
    main()
|
||||||
212
scripts/translate_xcstrings.py
Executable file
212
scripts/translate_xcstrings.py
Executable file
@@ -0,0 +1,212 @@
|
|||||||
|
#!/usr/bin/env python3
"""
Translate missing strings in Localizable.xcstrings to all target languages.
Uses Google Translate (free, no API key needed).

Usage:
    pip install deep-translator
    python3 scripts/translate_xcstrings.py
"""

import json
import re
import sys
import time
from pathlib import Path

# deep-translator is the only third-party dependency; fail fast with an
# install hint instead of a raw ImportError traceback.
try:
    from deep_translator import GoogleTranslator
except ImportError:
    print("Please install deep-translator: pip install deep-translator")
    sys.exit(1)

# Catalog lives at <repo root>/Feels/Localizable.xcstrings, resolved relative
# to this script so it works from any working directory.
XCSTRINGS_PATH = Path(__file__).parent.parent / "Feels" / "Localizable.xcstrings"

# Maps xcstrings locale codes to the language names deep-translator's
# GoogleTranslator accepts as `target`.
# NOTE(review): "pt-BR" maps to generic "portuguese" — presumably close
# enough, but confirm Brazilian (vs. European) variants are acceptable.
TARGET_LANGUAGES = {
    "de": "german",
    "es": "spanish",
    "fr": "french",
    "ja": "japanese",
    "ko": "korean",
    "pt-BR": "portuguese"
}

# Human-readable names, used only for the end-of-run summary.
LANG_DISPLAY = {
    "de": "German",
    "es": "Spanish",
    "fr": "French",
    "ja": "Japanese",
    "ko": "Korean",
    "pt-BR": "Brazilian Portuguese"
}

# Strings that should not be translated
# (punctuation/separator glyphs and bare numerals that are language-neutral).
SKIP_PATTERNS = [
    "", " ", " ", " ", "-", "—", "·", "•", ">", "§",
    "12", "17", "20",
]
|
||||||
|
|
||||||
|
|
||||||
|
def get_english_value(string_data: dict, key: str) -> str | None:
|
||||||
|
"""Extract the English value for a string."""
|
||||||
|
localizations = string_data.get("localizations", {})
|
||||||
|
if "en" in localizations:
|
||||||
|
en_loc = localizations["en"]
|
||||||
|
if "stringUnit" in en_loc:
|
||||||
|
return en_loc["stringUnit"].get("value")
|
||||||
|
return key
|
||||||
|
|
||||||
|
|
||||||
|
def needs_translation(string_data: dict, lang: str) -> bool:
    """Return True when *lang* has no stringUnit in state 'translated'."""
    locs = string_data.get("localizations", {})
    try:
        unit = locs[lang]["stringUnit"]
    except KeyError:
        # Either the language or its stringUnit is absent entirely.
        return True
    return unit.get("state", "") != "translated"
|
||||||
|
|
||||||
|
|
||||||
|
def should_skip(key: str, english_value: str) -> bool:
    """Decide whether an entry is non-translatable.

    Skips blank keys/values, anything in SKIP_PATTERNS, a bare format
    specifier (e.g. "%@" or "%lld" on its own), and strings containing URLs.
    """
    if not key.strip() or not english_value.strip():
        return True
    if key in SKIP_PATTERNS or english_value in SKIP_PATTERNS:
        return True

    core = english_value.strip()
    # A lone, short, space-free "%..." token is pure formatting — nothing to
    # translate.
    if core.startswith("%") and len(core) <= 10 and " " not in core:
        return True

    return "http://" in english_value or "https://" in english_value
|
||||||
|
|
||||||
|
|
||||||
|
def protect_format_specifiers(text: str) -> tuple[str, dict]:
    """Swap printf-style specifiers for opaque __FMTn__ tokens.

    Machine translation would otherwise mangle tokens like %@ or %1$lld.
    Returns the guarded text plus a mapping from token back to the original
    specifier, for use with restore_format_specifiers().
    """
    saved: dict = {}

    def stash(match):
        # Token index tracks insertion order; keys stay unique even when the
        # same specifier occurs twice.
        token = f"__FMT{len(saved)}__"
        saved[token] = match.group(0)
        return token

    # Match format specifiers: %@, %lld, %1$@, %2$lld, %.0f%%, etc.
    spec_re = r'%(\d+\$)?[-+0 #]*(\d+)?(\.\d+)?(lld|ld|d|@|f|s|%%)'
    guarded = re.sub(spec_re, stash, text)
    return guarded, saved
|
||||||
|
|
||||||
|
|
||||||
|
def restore_format_specifiers(text: str, placeholders: dict) -> str:
    """Put the original format specifiers back in place of __FMTn__ tokens."""
    restored = text
    for token, specifier in placeholders.items():
        restored = restored.replace(token, specifier)
    return restored
|
||||||
|
|
||||||
|
|
||||||
|
def translate_string(text: str, target_lang: str) -> str | None:
    """Machine-translate *text* to *target_lang*, preserving format specifiers.

    Returns the translated string, the input unchanged when it is blank, or
    None when the translator fails or produces an empty result (the error is
    printed, not raised, so the caller can mark the language as FAILED).
    """
    if not text.strip():
        return text

    # Shield %-specifiers from the translator, then undo afterwards.
    guarded, saved = protect_format_specifiers(text)

    try:
        raw = GoogleTranslator(source='en', target=target_lang).translate(guarded)
        if raw:
            return restore_format_specifiers(raw, saved)
        return None
    except Exception as e:
        # Best effort: any network/API failure is reported and swallowed.
        print(f" Translation error: {e}")
        return None
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Translate every untranslated string in the catalog, saving as it goes.

    Mutates the parsed xcstrings JSON in place and rewrites XCSTRINGS_PATH —
    both every 20 strings (so a crash/rate-limit loses little work) and once
    at the end — then prints a per-language coverage summary.
    """
    print(f"Loading {XCSTRINGS_PATH}...")
    with open(XCSTRINGS_PATH, "r") as f:
        data = json.load(f)

    strings = data.get("strings", {})
    print(f"Found {len(strings)} total strings")

    # Find strings needing translation: each work item is
    # (key, English source text, list of locale codes still missing).
    needs_work = []
    for key, string_data in strings.items():
        english_value = get_english_value(string_data, key)
        if not english_value or should_skip(key, english_value):
            continue
        missing_langs = [
            lang for lang in TARGET_LANGUAGES.keys()
            if needs_translation(string_data, lang)
        ]
        if missing_langs:
            needs_work.append((key, english_value, missing_langs))

    print(f"Found {len(needs_work)} strings needing translation")
    total_translations = sum(len(ml) for _, _, ml in needs_work)
    print(f"Total translations needed: {total_translations}")

    if not needs_work:
        print("All strings are already translated!")
        return

    total_translated = 0

    for i, (key, english, missing_langs) in enumerate(needs_work):
        # Progress header; long source strings are truncated for display only.
        print(f"\n[{i+1}/{len(needs_work)}] \"{english[:50]}{'...' if len(english) > 50 else ''}\"")

        if "localizations" not in strings[key]:
            strings[key]["localizations"] = {}

        for lang in missing_langs:
            google_lang = TARGET_LANGUAGES[lang]
            translated = translate_string(english, google_lang)

            if translated:
                # Write the result back in xcstrings shape, marked 'translated'
                # so needs_translation() won't pick it up on a rerun.
                strings[key]["localizations"][lang] = {
                    "stringUnit": {
                        "state": "translated",
                        "value": translated
                    }
                }
                total_translated += 1
                print(f" {lang}: {translated[:60]}{'...' if len(translated) > 60 else ''}")
            else:
                print(f" {lang}: FAILED")

            # Rate limiting to avoid getting blocked
            time.sleep(0.3)

        # Save progress every 20 strings
        if (i + 1) % 20 == 0:
            print(f"\n Saving progress ({total_translated} translations)...")
            with open(XCSTRINGS_PATH, "w") as f:
                # ensure_ascii=False keeps non-Latin translations human-readable
                # in the saved file.
                json.dump(data, f, indent=2, ensure_ascii=False)

    # Final save
    print(f"\nSaving {total_translated} translations...")
    with open(XCSTRINGS_PATH, "w") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)

    # Per-language coverage summary, counted over the (now-updated) catalog.
    print("\n=== Summary ===")
    for lang, name in LANG_DISPLAY.items():
        count = sum(
            1 for s in strings.values()
            if s.get("localizations", {}).get(lang, {}).get("stringUnit", {}).get("state") == "translated"
        )
        # Denominator excludes skipped (non-translatable) strings.
        total = len([k for k, v in strings.items() if not should_skip(k, get_english_value(v, k) or "")])
        pct = (count / total * 100) if total > 0 else 0
        print(f" {name}: {count}/{total} ({pct:.0f}%)")

    print("\nDone!")


if __name__ == "__main__":
    main()
|
||||||
Reference in New Issue
Block a user