[PATCH 7/7] Get rid of the non-deterministic expand_langs and its usage

Vratislav Podzimek vpodzime@redhat.com
Fri Aug 30 09:15:52 UTC 2013


Using expand_langs was non-deterministic: if e.g. "pt" happened to come before
"pt_BR" when searching for a match among the results of expand_langs("pt_BR"),
"pt" was picked up. However, in such a case we want "pt_BR" to be picked,
because it is the better match.

This patch adds a function for deterministically finding the best match and a
function that determines whether a given langcode (e.g. "en") matches some
locale (e.g. "en_US"). The former is useful e.g. for choosing the best rnotes
to show based on the installation language, the latter for adding langsupport
groups to the installation transaction.
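
A minimal usage sketch of the two new functions (the import path is assumed to
mirror the test module; the calls and expected results below are taken from the
tests added in this patch):

    from pyanaconda import localization

    # does a (possibly partial) langcode describe the given locale?
    localization.langcode_matches_locale("sr", "sr_RS.UTF-8")    # True
    localization.langcode_matches_locale("de_CH", "fr_CH")       # False

    # deterministically pick the closest langcode from a list of candidates
    localization.find_best_locale_match("pt_BR", ["pt", "pt_BR", "pt_PT"])        # "pt_BR"
    localization.find_best_locale_match("cs_CZ", ["cs", "cs_CZ", "en", "en_US"])  # "cs_CZ"
    localization.find_best_locale_match("pt_BR", ["en_BR", "en"])                 # None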

Signed-off-by: Vratislav Podzimek <vpodzime@redhat.com>
---
 pyanaconda/localization.py                  | 125 +++++++++++++++++++---------
 pyanaconda/packaging/yumpayload.py          |   9 +-
 pyanaconda/ui/gui/hubs/progress.py          |  53 +++++++-----
 tests/pyanaconda_tests/localization_test.py |  61 ++++++++++++--
 4 files changed, 175 insertions(+), 73 deletions(-)

diff --git a/pyanaconda/localization.py b/pyanaconda/localization.py
index b0e8a1b..cf47a43 100644
--- a/pyanaconda/localization.py
+++ b/pyanaconda/localization.py
@@ -88,64 +88,111 @@ def parse_langcode(langcode):
     else:
         return None
 
-def expand_langs(astring):
+def is_supported_locale(locale):
     """
-    Converts a single language into a "language search path". For example,
-    for "fr_FR.UTF-8 at euro" would return set containing:
-    "fr", "fr_FR", "fr_FR.UTF-8 at euro", "fr.UTF-8 at euro", "fr_FR at euro",
-    "fr_FR.UTF-8", "fr at euro", "fr.UTF-8"
+    Function that tells whether the given locale is supported by Anaconda or
+    not. Locales supported by langtable are considered supported by
+    Anaconda.
 
-    :rtype: list of strings
+    :param locale: locale to test
+    :type locale: str
+    :return: whether the given locale is supported or not
+    :rtype: bool
+    :raise InvalidLocaleSpec: if an invalid locale is given (see LANGCODE_RE)
 
     """
 
-    langs = set([astring])
-
-    lang_dict = parse_langcode(astring)
-
-    if not lang_dict:
-        return list(langs)
+    en_name = get_english_name(locale)
+    return bool(en_name)
 
-    base, loc, enc, script = [lang_dict[key] for key in ("language",
-                                      "territory", "encoding", "script")]
+def langcode_matches_locale(langcode, locale):
+    """
+    Function that tells if the given langcode matches the given locale, i.e. if
+    all parts appearing in the langcode (language, territory, script and
+    encoding) are the same as the corresponding parts of the locale.
 
-    if not base:
-        return list(langs)
+    :param langcode: a langcode (e.g. en, en_US, en_US@latin, etc.)
+    :type langcode: str
+    :param locale: a valid locale (e.g. en_US.UTF-8 or sr_RS.UTF-8@latin, etc.)
+    :type locale: str
+    :return: whether the given langcode matches the given locale or not
+    :rtype: bool
 
-    if not enc:
-        enc = "UTF-8"
+    """
 
-    langs.add(base)
-    langs.add("%s.%s" % (base, enc))
+    langcode_parts = parse_langcode(langcode)
+    locale_parts = parse_langcode(locale)
 
-    if loc:
-        langs.add("%s_%s" % (base, loc))
-        langs.add("%s_%s.%s" %(base, loc, enc))
-    if script:
-        langs.add("%s@%s" % (base, script))
-        langs.add("%s.%s@%s" % (base, enc, script))
+    if not langcode_parts or not locale_parts:
+        # to match, both need to be valid langcodes (need to have at least
+        # language specified)
+        return False
 
-    if loc and script:
-        langs.add("%s_%s@%s" % (base, loc, script))
+    # Check parts one after another. If some part appears in the langcode and
+    # doesn't match the one from the locale (or is missing in the locale),
+    # return False, otherwise they match
+    for part in ("language", "territory", "script", "encoding"):
+        if langcode_parts[part] and langcode_parts[part] != locale_parts.get(part):
+            return False
 
-    return list(langs)
+    return True
 
-def is_supported_locale(locale):
+def find_best_locale_match(locale, langcodes):
     """
-    Function that tells if the given locale is supported by the Anaconda or
-    not. We consider locales supported by the langtable as supported by the
-    Anaconda.
+    Find the best match for the locale in a list of langcodes. This is useful
+    when e.g. pt_BR is the locale and an item (e.g. an rnote) has to be chosen
+    from a list containing both pt and pt_BR, or even pt_PT as well.
 
-    :param locale: locale to test
+    :param locale: a valid locale (e.g. en_US.UTF-8 or sr_RS.UTF-8@latin, etc.)
     :type locale: str
-    :return: whether the given locale is supported or not
-    :rtype: bool
-    :raise InvalidLocaleSpec: if an invalid locale is given (see LANGCODE_RE)
+    :param langcodes: a list or generator of langcodes (e.g. en, en_US, en_US@latin, etc.)
+    :type langcodes: list(str) or generator(str)
+    :return: the best matching langcode from the list or None if none matches
+    :rtype: str or None
 
     """
 
-    en_name = get_english_name(locale)
-    return bool(en_name)
+    score_map = { "language" : 1000,
+                  "territory":  100,
+                  "script"   :   10,
+                  "encoding" :    1 }
+
+    def get_match_score(locale, langcode):
+        score = 0
+
+        locale_parts = parse_langcode(locale)
+        langcode_parts = parse_langcode(langcode)
+        if not locale_parts or not langcode_parts:
+            return score
+
+        for part, part_score in score_map.iteritems():
+            if locale_parts[part] and langcode_parts[part]:
+                if locale_parts[part] == langcode_parts[part]:
+                    # match
+                    score += part_score
+                else:
+                    # not match
+                    score -= part_score
+            elif langcode_parts[part] and not locale_parts[part]:
+                # langcode has something the locale doesn't have
+                score -= part_score
+
+        return score
+
+    scores = []
+
+    # get score for each langcode
+    for langcode in langcodes:
+        scores.append((langcode, get_match_score(locale, langcode)))
+
+    # find the best one
+    sorted_langcodes = sorted(scores, key=lambda item_score: item_score[1], reverse=True)
+
+    # matches only in script and/or encoding are not useful
+    if sorted_langcodes and sorted_langcodes[0][1] > score_map["territory"]:
+        return sorted_langcodes[0][0]
+    else:
+        return None
 
 def setup_locale(locale, lang=None):
     """
diff --git a/pyanaconda/packaging/yumpayload.py b/pyanaconda/packaging/yumpayload.py
index 0ad383b..72e016a 100644
--- a/pyanaconda/packaging/yumpayload.py
+++ b/pyanaconda/packaging/yumpayload.py
@@ -79,7 +79,7 @@ from pyanaconda.packaging import DependencyError, MetadataError, NoNetworkError,
                                  PayloadSetupError
 from pyanaconda.progress import progressQ
 
-from pyanaconda.localization import expand_langs
+from pyanaconda.localization import langcode_matches_locale
 
 from pykickstart.constants import GROUP_ALL, GROUP_DEFAULT, KS_MISSING_IGNORE
 
@@ -979,10 +979,9 @@ reposdir=%s
         with _yum_lock:
             groups = yum_groups.get_groups()
             for lang_code in lang_codes:
-                for lang_code_guess in expand_langs(lang_code):
-                    for group in groups:
-                        if group.langonly == lang_code_guess:
-                            lang_groups.add(group.groupid)
+                for group in groups:
+                    if group.langonly and langcode_matches_locale(group.langonly, lang_code):
+                        lang_groups.add(group.groupid)
 
         return list(lang_groups)
 
diff --git a/pyanaconda/ui/gui/hubs/progress.py b/pyanaconda/ui/gui/hubs/progress.py
index 1394e2f..45b4734 100644
--- a/pyanaconda/ui/gui/hubs/progress.py
+++ b/pyanaconda/ui/gui/hubs/progress.py
@@ -26,9 +26,10 @@ from gi.repository import GLib, Gtk
 import itertools
 import os
 import sys
+import glob
 
 from pyanaconda.i18n import _
-from pyanaconda.localization import expand_langs
+from pyanaconda.localization import langcode_matches_locale, find_best_locale_match
 from pyanaconda.product import productName
 from pyanaconda.flags import flags
 from pyanaconda.constants import THREAD_INSTALL, THREAD_CONFIGURATION, DEFAULT_LANG
@@ -142,28 +143,38 @@ class ProgressHub(Hub):
             self._progressNotebook.set_current_page(0)
 
     def _get_rnotes(self):
-        import glob
-
         # We first look for rnotes in paths containing the language, then in
         # directories without the language component.  You know, just in case.
-        try:
-            langs = expand_langs(os.environ["LANG"]) + [""]
-        except KeyError:
-            # if for some reason LANG isn't set to anything, set it to
-            # the default value
-            os.environ["LANG"] = DEFAULT_LANG
-            langs = expand_langs(os.environ["LANG"]) + [""]
-
-        paths = ["/tmp/updates/pixmaps/rnotes/%s/",
-                 "/tmp/product/pixmaps/rnotes/%s/",
-                 "/usr/share/anaconda/pixmaps/rnotes/%s/"]
-
-        for (l, d) in itertools.product(langs, paths):
-            pixmaps = glob.glob((d % l) + "*.png") + glob.glob((d % l) + "*.jpg")
-            if len(pixmaps) > 0:
-                return pixmaps
-
-        return []
+
+        paths = ["/tmp/updates/pixmaps/rnotes/",
+                 "/tmp/product/pixmaps/rnotes/",
+                 "/usr/share/anaconda/pixmaps/rnotes/"]
+
+        all_lang_pixmaps = []
+        for path in paths:
+            all_lang_pixmaps += glob.glob(path + "*/*.png") + glob.glob(path + "*/*.jpg")
+
+        pixmap_langs = [pixmap.split(os.path.sep)[-2] for pixmap in all_lang_pixmaps]
+        best_lang = find_best_locale_match(os.environ.get("LANG", DEFAULT_LANG), pixmap_langs)
+
+        if not best_lang:
+            # nothing found, try the default language
+            best_lang = find_best_locale_match(DEFAULT_LANG, pixmap_langs)
+
+        if not best_lang:
+            # nothing found even for the default language, try non-localized rnotes
+            non_localized = []
+            for path in paths:
+                non_localized += glob.glob(path + "*.png") + glob.glob(path + "*.jpg")
+
+            return non_localized
+
+        best_lang_pixmaps = []
+        for path in paths:
+            best_lang_pixmaps += (glob.glob(path + best_lang + "/*.png") +
+                                  glob.glob(path + best_lang + "/*.jpg"))
+
+        return best_lang_pixmaps
 
     def _cycle_rnotes(self):
         # Change the ransom notes image every minute by grabbing the next
diff --git a/tests/pyanaconda_tests/localization_test.py b/tests/pyanaconda_tests/localization_test.py
index 2cdcce8..7f94a71 100644
--- a/tests/pyanaconda_tests/localization_test.py
+++ b/tests/pyanaconda_tests/localization_test.py
@@ -99,11 +99,56 @@ class UpcaseFirstLetterTests(unittest.TestCase):
         self.assertEqual(localization._upcase_first_letter("czech Republic"),
                          "Czech Republic")
 
-class ExpandLangsTest(unittest.TestCase):
-    def expand_langs_test(self):
-        """expand_langs should return every valid combination."""
-
-        expected_result = ["fr", "fr_FR", "fr_FR.UTF-8@euro", "fr.UTF-8@euro",
-                           "fr_FR@euro", "fr_FR.UTF-8", "fr@euro", "fr.UTF-8"]
-        self.assertListEqual(localization.expand_langs("fr_FR.UTF-8@euro"),
-                             expected_result)
+class LangcodeLocaleMatchingTests(unittest.TestCase):
+    def langcode_matches_locale_test(self):
+        """Langcode-locale matching should work as expected."""
+
+        # should match
+        self.assertTrue(localization.langcode_matches_locale("sr", "sr"))
+        self.assertTrue(localization.langcode_matches_locale("sr", "sr_RS"))
+        self.assertTrue(localization.langcode_matches_locale("sr", "sr_RS.UTF-8"))
+        self.assertTrue(localization.langcode_matches_locale("sr", "sr_RS.UTF-8@latin"))
+        self.assertTrue(localization.langcode_matches_locale("sr_RS", "sr_RS"))
+        self.assertTrue(localization.langcode_matches_locale("sr_RS", "sr_RS.UTF-8"))
+        self.assertTrue(localization.langcode_matches_locale("sr_RS", "sr_RS.UTF-8@latin"))
+        self.assertTrue(localization.langcode_matches_locale("sr_RS.UTF-8", "sr_RS.UTF-8"))
+        self.assertTrue(localization.langcode_matches_locale("sr_RS.UTF-8", "sr_RS.UTF-8@latin"))
+        self.assertTrue(localization.langcode_matches_locale("sr_RS.UTF-8@latin", "sr_RS.UTF-8@latin"))
+
+        # missing language, shouldn't match
+        self.assertFalse(localization.langcode_matches_locale("", "sr"))
+        self.assertFalse(localization.langcode_matches_locale("sr", ""))
+
+        # missing items in the locale, shouldn't match
+        self.assertFalse(localization.langcode_matches_locale("sr_RS", "sr"))
+        self.assertFalse(localization.langcode_matches_locale("sr_RS.UTF-8", "sr_RS"))
+        self.assertFalse(localization.langcode_matches_locale("sr.UTF-8", "sr_RS"))
+        self.assertFalse(localization.langcode_matches_locale("sr_RS.UTF-8", "sr.UTF-8"))
+        self.assertFalse(localization.langcode_matches_locale("sr_RS.UTF-8@latin", "sr_RS"))
+        self.assertFalse(localization.langcode_matches_locale("sr_RS@latin", "sr_RS"))
+        self.assertFalse(localization.langcode_matches_locale("sr.UTF-8@latin", "sr_RS.UTF-8"))
+        self.assertFalse(localization.langcode_matches_locale("sr@latin", "sr_RS"))
+
+        # different parts, shouldn't match
+        self.assertFalse(localization.langcode_matches_locale("sr", "en"))
+        self.assertFalse(localization.langcode_matches_locale("de_CH", "fr_CH"))
+        self.assertFalse(localization.langcode_matches_locale("sr_RS", "sr_ME"))
+        self.assertFalse(localization.langcode_matches_locale("sr_RS@latin", "sr_RS@cyrillic"))
+        self.assertFalse(localization.langcode_matches_locale("sr_RS@latin", "sr_ME@latin"))
+
+    def find_best_locale_match_test(self):
+        """Finding best locale matches should work as expected."""
+
+        # can find best matches
+        self.assertEqual(localization.find_best_locale_match("cs_CZ", ["cs", "cs_CZ", "en", "en_US"]), "cs_CZ")
+        self.assertEqual(localization.find_best_locale_match("cs", ["cs_CZ", "cs", "en", "en_US"]), "cs")
+        self.assertEqual(localization.find_best_locale_match("pt_BR", ["pt", "pt_BR"]), "pt_BR")
+        self.assertEqual(localization.find_best_locale_match("pt_BR", ["pt", "pt_BR", "pt_PT"]), "pt_BR")
+        self.assertEqual(localization.find_best_locale_match("cs_CZ.UTF-8", ["cs", "cs_CZ", "cs_CZ.UTF-8"]),
+                         "cs_CZ.UTF-8")
+        self.assertEqual(localization.find_best_locale_match("cs_CZ.UTF-8@latin",
+                                                             ["cs", "cs_CZ@latin", "cs_CZ.UTF-8"]), "cs_CZ@latin")
+
+        # no matches
+        self.assertIsNone(localization.find_best_locale_match("pt_BR", ["en_BR", "en"]))
+        self.assertIsNone(localization.find_best_locale_match("cs_CZ.UTF-8", ["en", "en.UTF-8"]))
-- 
1.7.11.7


