1 # ***** BEGIN GPL LICENSE BLOCK *****
3 # This program is free software; you can redistribute it and/or
4 # modify it under the terms of the GNU General Public License
5 # as published by the Free Software Foundation; either version 2
6 # of the License, or (at your option) any later version.
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU General Public License for more details.
13 # You should have received a copy of the GNU General Public License
14 # along with this program; if not, write to the Free Software Foundation,
15 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 # ***** END GPL LICENSE BLOCK *****
21 # Some misc utilities...
24 import concurrent.futures
34 from bl_i18n_utils import (
42 ##### Misc Utils #####
43 from bpy.app.translations import locale_explode
# A po source comment reference looks like "some/file.ext:123".
_valid_po_path_re = re.compile(r"^\S+:[0-9]+$")


def is_valid_po_path(path):
    """Return True when *path* looks like a po source reference ('filename:linenumber')."""
    return _valid_po_path_re.match(path) is not None
def get_best_similar(data):
    # Worker: find the best fuzzy match for one key inside similar_pool
    # (designed to be mapped over a ProcessPoolExecutor, see I18nMessages.update()).
    # NOTE(review): sampled fragment — the lines defining len_key, max_len, sratio
    # and the return statement are missing from this view.
    key, use_similar, similar_pool = data
    # try to find some close key in existing messages...
    # Optimized code inspired by difflib.get_close_matches (as we only need the best match).
    # We also consider to never make a match when len differs more than -len_key / 2, +len_key * 2 (which is valid
    # as long as use_similar is not below ~0.7).
    # Gives an overall ~20% of improvement!
    # tmp = difflib.get_close_matches(key[1], similar_pool, n=1, cutoff=use_similar)
    s = difflib.SequenceMatcher()
    # Length window: candidates shorter than half or (presumably) longer than twice
    # the key length are skipped outright — cheaper than computing a ratio.
    min_len = len_key // 2
    for x in similar_pool:
        if min_len < len(x) < max_len:
            # quick_ratio()/real_quick_ratio() are cheap upper bounds; only compute
            # the real ratio when those cannot rule the candidate out.
            if s.real_quick_ratio() >= use_similar and s.quick_ratio() >= use_similar:
                if sratio >= use_similar:
def locale_match(loc1, loc2):
    """
    Return a similarity indicator between two locale uids:
    -n if loc1 is a subtype of loc2 (e.g. 'fr_FR' is a subtype of 'fr').
    +n if loc2 is a subtype of loc1.
    n becomes smaller when both locales are more similar (e.g. (sr, sr_SR) are more similar than (sr, sr_SR@latin)).
    0 if they are exactly the same.
    ... (Ellipsis) if they cannot match!
    Note: We consider that 'sr_SR@latin' is a subtype of 'sr@latin', 'sr_SR' and 'sr', but 'sr_SR' and 'sr@latin' won't
          match (will return ...)!
    Note: About similarity, diff in variants are more important than diff in countries, currently here are the cases:
        (sr@latin, sr_SR@latin) -> 1
        (sr_SR, sr_SR@latin) -> 2
        (sr, sr_SR@latin) -> 3
    """
    # NOTE(review): sampled fragment — the comparison logic following the explode
    # calls is missing from this view.
    l1, c1, v1, *_1 = locale_explode(loc1)
    l2, c2, v2, *_2 = locale_explode(loc2)
def find_best_isocode_matches(uid, iso_codes):
    """
    Return an ordered tuple of elements in iso_codes that can match the given uid, from most similar to lesser ones.
    """
    # Pair each candidate with its locale_match() score; keep only real, non-negative
    # matches (Ellipsis means "cannot match"), sorted by ascending score (best first).
    tmp = ((e, locale_match(e, uid)) for e in iso_codes)
    return tuple(e[0] for e in sorted((e for e in tmp if e[1] is not ... and e[1] >= 0), key=lambda e: e[1]))
def get_po_files_from_dir(root_dir, langs=set()):
    """
    Yield tuples (uid, po_path) of translations for each po file found in the given dir, which should be either
    a dir containing po files using language uid's as names (e.g. fr.po, es_ES.po, etc.), or
    a dir containing dirs which names are language uids, and containing po files of the same names.

    If langs is non-empty, only uids present in it are yielded.
    Note: langs has a mutable default, but it is only ever read here, never mutated.
    """
    found_uids = set()
    for p in os.listdir(root_dir):
        uid = None
        po_file = os.path.join(root_dir, p)
        # Case 1: a po file directly inside root_dir, named <uid>.po.
        if p.endswith(".po") and os.path.isfile(po_file):
            uid = p[:-3]
            if langs and uid not in langs:
                continue
        # Case 2: a sub-directory named <uid> containing <uid>.po.
        # Fix: test the joined path, not the bare entry name — `os.path.isdir(p)`
        # only worked when the current working directory happened to be root_dir.
        elif os.path.isdir(po_file):
            uid = p
            if langs and uid not in langs:
                continue
            po_file = os.path.join(root_dir, p, p + ".po")
            if not os.path.isfile(po_file):
                continue
        else:
            continue
        if uid in found_uids:
            # Fix: original called the undefined name `printf` (NameError at runtime).
            print("WARNING! {} id has been found more than once! only first one has been loaded!".format(uid))
            continue
        found_uids.add(uid)
        yield uid, po_file
def enable_addons(addons=None, support=None, disable=False, check_only=False):
    """
    Enable (or disable) addons based either on a set of names, or a set of 'support' types.
    Returns the list of all affected addons (as fake modules)!
    If "check_only" is set, no addon will be enabled nor disabled.
    """
    # NOTE(review): sampled fragment — the list/loop scaffolding around the lines
    # below (and the final return) is missing from this view.
    userpref = bpy.context.user_preferences
    used_ext = {ext.module for ext in userpref.addons}
    # Select modules either by explicit name, or (when no names given) by support category.
        mod for mod in addon_utils.modules()
        if ((addons and mod.__name__ in addons) or
            (not addons and addon_utils.module_bl_info(mod)["support"] in support))
    module_name = mod.__name__
    # Only toggle addons whose current state differs from the requested one.
    if module_name not in used_ext:
        print(" Disabling module ", module_name)
        bpy.ops.wm.addon_disable(module=module_name)
    if module_name in used_ext:
        print(" Enabling module ", module_name)
        bpy.ops.wm.addon_enable(module=module_name)
    # XXX There are currently some problems with bpy/rna...
    #     *Very* tricky to solve!
    #     So this is a hack to make all newly added operator visible by
    #     bpy.types.OperatorProperties.__subclasses__()
    for cat in dir(bpy.ops):
        cat = getattr(bpy.ops, cat)
        getattr(cat, op).get_rna_type()
##### Main Classes #####

# NOTE(review): the `class I18nMessage:` header line is missing from this sampled
# view; the lines below are its class docstring and (truncated) __slots__ tuple.
Internal representation of a message.
__slots__ = ("msgctxt_lines", "msgid_lines", "msgstr_lines", "comment_lines", "is_fuzzy", "is_commented",
def __init__(self, msgctxt_lines=None, msgid_lines=None, msgstr_lines=None, comment_lines=None,
             is_commented=False, is_fuzzy=False, settings=settings):
    """Store the raw po lines and flags making up a single message."""
    self.settings = settings
    # Substitute a fresh list for any falsy (None/empty) argument, so instances
    # never end up sharing one mutable default.
    for attr, lines in (("msgctxt_lines", msgctxt_lines), ("msgid_lines", msgid_lines),
                        ("msgstr_lines", msgstr_lines), ("comment_lines", comment_lines)):
        setattr(self, attr, lines or [])
    self.is_fuzzy = is_fuzzy
    self.is_commented = is_commented
def _get_msgctxt(self):
    """Full msgctxt string, rebuilt from its stored lines."""
    lines = self.msgctxt_lines
    return "".join(lines)

def _set_msgctxt(self, value):
    """Replace the stored msgctxt lines by a single-element list."""
    self.msgctxt_lines = [value]

msgctxt = property(_get_msgctxt, _set_msgctxt)

def _get_msgid(self):
    """Full msgid string, rebuilt from its stored lines."""
    lines = self.msgid_lines
    return "".join(lines)

def _set_msgid(self, value):
    """Replace the stored msgid lines by a single-element list."""
    self.msgid_lines = [value]

msgid = property(_get_msgid, _set_msgid)

def _get_msgstr(self):
    """Full msgstr string, rebuilt from its stored lines."""
    lines = self.msgstr_lines
    return "".join(lines)

def _set_msgstr(self, value):
    """Replace the stored msgstr lines by a single-element list."""
    self.msgstr_lines = [value]

msgstr = property(_get_msgstr, _set_msgstr)
def _get_sources(self):
    """All source references stored in this message's comment lines (standard ones first, then custom ones)."""
    pfx = self.settings.PO_COMMENT_PREFIX_SOURCE
    pfx_custom = self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM
    std = [l[len(pfx):] for l in self.comment_lines if l.startswith(pfx)]
    custom = [l[len(pfx_custom):] for l in self.comment_lines if l.startswith(pfx_custom)]
    return std + custom
def _set_sources(self, sources):
    # Replace all source-comment lines by the given sources (valid po paths get the
    # standard prefix, anything else the custom one).
    # NOTE(review): sampled fragment — the loop headers removing old source comments
    # and iterating over `sources` are missing from this view.
    cmmlines = self.comment_lines.copy()
        l.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE) or
        l.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM)
        self.comment_lines.remove(l)
    lines_src_custom = []
    if is_valid_po_path(src):
        lines_src.append(self.settings.PO_COMMENT_PREFIX_SOURCE + src)
        lines_src_custom.append(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + src)
    # Custom sources first, then standard ones.
    self.comment_lines += lines_src_custom + lines_src
sources = property(_get_sources, _set_sources)
def _get_is_tooltip(self):
    """Heuristic flag: messages longer than 30 chars are treated as tooltips."""
    # XXX For now, we assume that all messages > 30 chars are tooltips!
    msgid_length = len(self.msgid)
    return msgid_length > 30

is_tooltip = property(_get_is_tooltip)
# NOTE(review): the `def copy(self):` line is missing from this sampled view;
# the lines below are its body: a shallow-ish copy that duplicates all line
# lists but shares the settings object.
# Deepcopy everything but the settings!
return self.__class__(msgctxt_lines=self.msgctxt_lines[:], msgid_lines=self.msgid_lines[:],
                      msgstr_lines=self.msgstr_lines[:], comment_lines=self.comment_lines[:],
                      is_commented=self.is_commented, is_fuzzy=self.is_fuzzy, settings=self.settings)
def normalize(self, max_len=80):
    """
    Normalize this message, call this before exporting it...
    Currently normalize msgctxt, msgid and msgstr lines to given max_len (if below 1, make them single line).
    """
    # NOTE(review): sampled fragment — several lines of _wrap() and the condition
    # guarding the single-line case are missing from this view.
    max_len -= 2 # The two quotes!

    def _splitlines(text):
        # Like str.splitlines(), but keeps the "\n" at the end of every line but the last.
        lns = text.splitlines()
        return [l + "\n" for l in lns[:-1]] + lns[-1:]

    # We do not need the full power of textwrap... We just split first at escaped new lines, then into each line
    # if needed... No word splitting, nor fancy spaces handling!
    def _wrap(text, max_len, init_len):
        # init_len accounts for the "msgid "/"msgctxt "/etc. prefix on the first line.
        if len(text) + init_len < max_len:
        lines = _splitlines(text)
        cur_len += len(w) + 1
        if cur_len > (max_len - 1) and tmp:
            ret.append(" ".join(tmp) + " ")
        ret.append(" ".join(tmp))

    # Single-line case: just split at escaped newlines.
    self.msgctxt_lines = _splitlines(self.msgctxt)
    self.msgid_lines = _splitlines(self.msgid)
    self.msgstr_lines = _splitlines(self.msgstr)

    # Wrapped case: first line is shorter by the length of its po keyword prefix
    # (plus the comment prefix when the whole message is commented out).
    init_len = len(self.settings.PO_MSGCTXT) + 1
    if self.is_commented:
        init_len += len(self.settings.PO_COMMENT_PREFIX_MSG)
    self.msgctxt_lines = _wrap(self.msgctxt, max_len, init_len)

    init_len = len(self.settings.PO_MSGID) + 1
    if self.is_commented:
        init_len += len(self.settings.PO_COMMENT_PREFIX_MSG)
    self.msgid_lines = _wrap(self.msgid, max_len, init_len)

    init_len = len(self.settings.PO_MSGSTR) + 1
    if self.is_commented:
        init_len += len(self.settings.PO_COMMENT_PREFIX_MSG)
    self.msgstr_lines = _wrap(self.msgstr, max_len, init_len)

    # Be sure comment lines are not duplicated (can happen with sources...).
    for l in self.comment_lines:
    self.comment_lines = tmp
# Match a double quote that is not already escaped, capturing any preceding run of
# escaped backslashes so it can be re-emitted unchanged by the sub() replacement.
# Fix: the original patterns used '(?!<\\)', a negative *lookahead* for the two
# literal characters '<\' (true almost everywhere, i.e. a no-op); the intent —
# "not preceded by a lone backslash" — requires a negative *lookbehind* '(?<!\\)'.
_esc_quotes = re.compile(r'(?<!\\)((?:\\\\)*)"')
_unesc_quotes = re.compile(r'(?<!\\)((?:\\\\)*)\\"')
# Line-list attributes subject to (un)escaping; comment lines only when do_all is set.
_esc_names = ("msgctxt_lines", "msgid_lines", "msgstr_lines")
_esc_names_all = _esc_names + ("comment_lines",)
@classmethod
def do_escape(cls, txt):
    """Replace some chars by their escaped versions!

    Escapes newlines, tabs and unescaped double quotes, returning the new string.
    """
    txt = txt.replace("\n", r"\n")
    # Fix: str.replace returns a new string (str is immutable); the original
    # discarded the result, so tabs were never actually escaped.
    txt = txt.replace("\t", r"\t")
    txt = cls._esc_quotes.sub(r'\1\"', txt)
    return txt
@classmethod
def do_unescape(cls, txt):
    """Replace escaped chars by real ones!

    Inverse of do_escape(); returns the unescaped string.
    """
    txt = txt.replace(r"\n", "\n")
    txt = txt.replace(r"\t", "\t")
    txt = cls._unesc_quotes.sub(r'\1"', txt)
    # The result must be returned: callers such as parse_messages_from_po() use it
    # (e.g. `I18nMessage.do_unescape("".join(msgid_lines))`).
    return txt
def escape(self, do_all=False):
    # Escape all line lists in place (comment lines too when do_all is set).
    # NOTE(review): sampled fragment — the `for name in names:` header is missing.
    names = self._esc_names_all if do_all else self._esc_names
    setattr(self, name, [self.do_escape(l) for l in getattr(self, name)])

def unescape(self, do_all=True):
    # Unescape all line lists in place (comment lines too when do_all is set).
    # NOTE(review): sampled fragment — the `for name in names:` header is missing.
    names = self._esc_names_all if do_all else self._esc_names
    setattr(self, name, [self.do_unescape(l) for l in getattr(self, name)])
# NOTE(review): the `class I18nMessages:` header line is missing from this sampled
# view; the line below is its class docstring.
Internal representation of messages for one language (iso code), with additional stats info.

# Avoid parsing again!
# Keys should be (pseudo) file-names, values are tuples (hash, I18nMessages)
# Note: only used by po parser currently!

def __init__(self, uid=None, kind=None, key=None, src=None, settings=settings):
    # NOTE(review): sampled fragment — the zero-initializers for nbr_msgs/nbr_ttips/
    # nbr_signs and the `if kind and src:` guard around parse() are missing here.
    self.settings = settings
    self.uid = uid if uid is not None else settings.PARSER_TEMPLATE_ID
    self.msgs = self._new_messages()
    # Key sets classifying messages; (re)filled by update_info().
    self.trans_msgs = set()
    self.fuzzy_msgs = set()
    self.comm_msgs = set()
    self.ttip_msgs = set()
    self.contexts = set()
    self.nbr_trans_msgs = 0
    self.nbr_trans_ttips = 0
    self.nbr_comm_msgs = 0
    self.nbr_trans_signs = 0
    # List of (line_number, message) tuples filled by the parser.
    self.parsing_errors = []
    self.parse(kind, key, src)
    # Lazy reverse lookup tables, built by invalidate_reverse_cache(True).
    self._reverse_cache = None

    # Body of _new_messages (its `def` line is missing from this view): an ordered
    # mapping keyed by (msgctxt, msgid) tuples.
    return getattr(collections, 'OrderedDict', dict)()
def gen_empty_messages(cls, uid, blender_ver, blender_hash, time, year, default_copyright=True, settings=settings):
    """Generate an empty I18nMessages object (only header is present!)."""
    # NOTE(review): sampled fragment — the final `return msgs` (and probably an
    # update_info() call) are missing from this view.
    fmt = settings.PO_HEADER_MSGSTR
    msgstr = fmt.format(blender_ver=str(blender_ver), blender_hash=blender_hash, time=str(time), uid=str(uid))
    if default_copyright:
        comment = settings.PO_HEADER_COMMENT_COPYRIGHT.format(year=str(year))
    comment = comment + settings.PO_HEADER_COMMENT
    msgs = cls(uid=uid, settings=settings)
    key = settings.PO_HEADER_KEY
    # The header is stored as a regular message under the special PO_HEADER_KEY.
    msgs.msgs[key] = I18nMessage([key[0]], [key[1]], msgstr.split("\n"), comment.split("\n"),
                                 False, False, settings=settings)
def normalize(self, max_len=80):
    """Normalize (re-wrap) every stored message to the given maximum line length."""
    for message in self.msgs.values():
        message.normalize(max_len)
def escape(self, do_all=False):
    # Escape every message in place.
    # NOTE(review): sampled fragment — the `msg.escape(do_all)` body line is missing.
    for msg in self.msgs.values():

def unescape(self, do_all=True):
    # Unescape every message in place.
    # NOTE(review): sampled fragment — the `msg.unescape(do_all)` body line is missing.
    for msg in self.msgs.values():
def check(self, fix=False):
    """
    Check consistency between messages and their keys!
    Check messages using format stuff are consistent between msgid and msgstr!
    If fix is True, tries to fix the issues.
    Return a list of found errors (empty if everything went OK!).
    """
    # NOTE(review): sampled fragment — the `ret`/`tmp` initializers, the key
    # comparison guard and the final return are missing from this view.
    default_context = self.settings.DEFAULT_CONTEXT
    # findall-based extractor for printf-like format entities.
    _format = re.compile(self.settings.CHECK_PRINTF_FORMAT).findall
    for key, msg in self.msgs.items():
        msgctxt, msgid, msgstr = msg.msgctxt, msg.msgid, msg.msgstr
        real_key = (msgctxt or default_context, msgid)
        ret.append("Error! msg's context/message do not match its key ({} / {})".format(real_key, key))
        if real_key in self.msgs:
            ret.append("Error! msg's real_key already used!")
        # Translated messages must use the exact same format entities as the original.
        if '%' in msgid and msgstr and _format(msgid) != _format(msgstr):
            ret.append("Error! msg's format entities are not matched in msgid and msgstr ({} / \"{}\")"
                       "".format(real_key, msgstr))
    self.msgs.update(tmp)
def clean_commented(self):
    # Remove all commented messages; presumably returns how many were removed
    # (the deletion body and return are missing from this sampled view — TODO confirm).
    nbr = len(self.comm_msgs)
    for k in self.comm_msgs:
def rtl_process(self):
    # Convert all translations through the RTL (right-to-left) preprocessor in one
    # batch, then write the processed strings back.
    # NOTE(review): sampled fragment — the `keys`/`trans` list initializers and the
    # `keys.append(k)` line are missing from this view.
    for k, m in self.msgs.items():
        trans.append(m.msgstr)
    trans = utils_rtl.log2vis(trans, self.settings)
    for k, t in zip(keys, trans):
        self.msgs[k].msgstr = t
def merge(self, msgs, replace=False):
    """
    Merge translations from msgs into self, following those rules:
        * If a msg is in self and not in msgs, keep self untouched.
        * If a msg is in msgs and not in self, skip it.
        * Else (msg both in self and msgs):
            * If self is not translated and msgs is translated or fuzzy, replace by msgs.
            * If self is fuzzy, and msgs is translated, replace by msgs.
            * If self is fuzzy, and msgs is fuzzy, and replace is True, replace by msgs.
            * If self is translated, and msgs is translated, and replace is True, replace by msgs.
    """
    # NOTE(review): sampled fragment — the `continue` statements, the `sm = self.msgs[k]`
    # binding and the `sm.msgstr = m.msgstr` assignment are missing from this view.
    for k, m in msgs.msgs.items():
        if k not in self.msgs:
        # Commented messages (on either side) and empty incoming translations are skipped.
        if (sm.is_commented or m.is_commented or not m.msgstr):
        if (not sm.msgstr or replace or (sm.is_fuzzy and (not m.is_fuzzy or replace))):
            sm.is_fuzzy = m.is_fuzzy
def update(self, ref, use_similar=None, keep_old_commented=True):
    """
    Update this I18nMessage with the ref one. Translations from ref are never used. Source comments from ref
    completely replace current ones. If use_similar is not 0.0, it will try to match new messages in ref with an
    existing one. Messages no more found in ref will be marked as commented if keep_old_commented is True
    (otherwise they are dropped).
    """
    # NOTE(review): sampled fragment — similar_pool initializer, a few fuzzy-flag
    # assignments, some loop headers and the final `self.msgs = msgs` /
    # `self.update_info()` are missing from this view.
    if use_similar is None:
        use_similar = self.settings.SIMILAR_MSGID_THRESHOLD

    # Map each translated msgid to the set of full keys using it, for fuzzy matching.
    if use_similar > 0.0:
        for key, msg in self.msgs.items():
            if msg.msgstr: # No need to waste time with void translations!
                similar_pool.setdefault(key[1], set()).add(key)

    msgs = self._new_messages().fromkeys(ref.msgs.keys())
    ref_keys = set(ref.msgs.keys())
    org_keys = set(self.msgs.keys())
    new_keys = ref_keys - org_keys
    removed_keys = org_keys - ref_keys

    # First process keys present in both org and ref messages.
    for key in ref_keys - new_keys:
        msg, refmsg = self.msgs[key], ref.msgs[key]
        msg.sources = refmsg.sources
        msg.is_commented = refmsg.is_commented

    # Next process new keys.
    if use_similar > 0.0:
        # Fuzzy matching is CPU-heavy, so it is farmed out to worker processes.
        with concurrent.futures.ProcessPoolExecutor() as exctr:
            for key, msgid in exctr.map(get_best_similar,
                                        tuple((nk, use_similar, tuple(similar_pool.keys())) for nk in new_keys)):
                    # Try to get the same context, else just get one...
                    skey = (key[0], msgid)
                    if skey not in similar_pool[msgid]:
                        skey = tuple(similar_pool[msgid])[0]
                    # We keep org translation and comments, and mark message as fuzzy.
                    msg, refmsg = self.msgs[skey].copy(), ref.msgs[key]
                    msg.msgctxt = refmsg.msgctxt
                    msg.msgid = refmsg.msgid
                    msg.sources = refmsg.sources
                    msg.is_commented = refmsg.is_commented
                    # No similar message found: take the (untranslated) ref one as-is.
                    msgs[key] = ref.msgs[key]
        # use_similar disabled: all new keys come straight from ref.
        msgs[key] = ref.msgs[key]

    # Add back all "old" and already commented messages as commented ones, if required
    # (and translation was not void!).
    if keep_old_commented:
        for key in removed_keys:
            msgs[key] = self.msgs[key]
            msgs[key].is_commented = True
            msgs[key].sources = []

    # Special 'meta' message, change project ID version and pot creation date...
    key = self.settings.PO_HEADER_KEY
    markers = ("Project-Id-Version:", "POT-Creation-Date:")
    # Copy the marker lines from ref's header into our header, in place.
    for rl in ref.msgs[key].msgstr_lines:
        if rl.startswith(mrk):
            for idx, ml in enumerate(msgs[key].msgstr_lines):
                if ml.startswith(mrk):
                    rep.append((idx, rl))
        msgs[key].msgstr_lines[idx] = txt

    # And finalize the update!
def update_info(self):
    # Recompute all classification sets and statistics counters from self.msgs.
    # NOTE(review): sampled fragment — nbr_signs initializer, the `continue` for the
    # header key and the is_commented/msgstr/is_fuzzy branch headers are missing here.
    self.trans_msgs.clear()
    self.fuzzy_msgs.clear()
    self.comm_msgs.clear()
    self.ttip_msgs.clear()
    self.contexts.clear()
    self.nbr_trans_signs = 0
    for key, msg in self.msgs.items():
        # The header pseudo-message is excluded from all stats.
        if key == self.settings.PO_HEADER_KEY:
        self.comm_msgs.add(key)
        self.trans_msgs.add(key)
        self.fuzzy_msgs.add(key)
        self.ttip_msgs.add(key)
        self.contexts.add(key[0])
        self.nbr_signs += len(msg.msgid)
        self.nbr_trans_signs += len(msg.msgstr)
    self.nbr_msgs = len(self.msgs)
    # Fuzzy messages do not count as translated.
    self.nbr_trans_msgs = len(self.trans_msgs - self.fuzzy_msgs)
    self.nbr_ttips = len(self.ttip_msgs)
    self.nbr_trans_ttips = len(self.ttip_msgs & (self.trans_msgs - self.fuzzy_msgs))
    self.nbr_comm_msgs = len(self.comm_msgs)
def print_info(self, prefix="", output=print, print_stats=True, print_errors=True):
    """
    Print out some info about an I18nMessages object.
    """
    # NOTE(review): sampled fragment — the other ratio initializers (lvl, lvl_ttips,
    # lvl_comm) and the `lines = [...]` opener are missing from this view.
    lvl_trans_ttips = 0.0
    lvl_ttips_in_trans = 0.0
    if self.nbr_msgs > 0:
        lvl = float(self.nbr_trans_msgs) / float(self.nbr_msgs)
        lvl_ttips = float(self.nbr_ttips) / float(self.nbr_msgs)
        lvl_comm = float(self.nbr_comm_msgs) / float(self.nbr_msgs + self.nbr_comm_msgs)
        if self.nbr_ttips > 0:
            lvl_trans_ttips = float(self.nbr_trans_ttips) / float(self.nbr_ttips)
        if self.nbr_trans_msgs > 0:
            lvl_ttips_in_trans = float(self.nbr_trans_ttips) / float(self.nbr_trans_msgs)

        "{:>6.1%} done! ({} translated messages over {}).\n"
        "".format(lvl, self.nbr_trans_msgs, self.nbr_msgs),
        "{:>6.1%} of messages are tooltips ({} over {}).\n"
        "".format(lvl_ttips, self.nbr_ttips, self.nbr_msgs),
        "{:>6.1%} of tooltips are translated ({} over {}).\n"
        "".format(lvl_trans_ttips, self.nbr_trans_ttips, self.nbr_ttips),
        "{:>6.1%} of translated messages are tooltips ({} over {}).\n"
        "".format(lvl_ttips_in_trans, self.nbr_trans_ttips, self.nbr_trans_msgs),
        "{:>6.1%} of messages are commented ({} over {}).\n"
        "".format(lvl_comm, self.nbr_comm_msgs, self.nbr_comm_msgs + self.nbr_msgs),
        "This translation is currently made of {} signs.\n".format(self.nbr_trans_signs)
    if print_errors and self.parsing_errors:
        lines += ["WARNING! Errors during parsing:\n"]
        lines += ["    Around line {}: {}\n".format(line, error) for line, error in self.parsing_errors]
    output(prefix.join(lines))
def invalidate_reverse_cache(self, rebuild_now=False):
    """
    Invalidate the reverse cache used by find_best_messages_matches.
    """
    # NOTE(review): sampled fragment — the early return when rebuild_now is False
    # and the `ctxt, msgid = key` unpacking (plus commented-msg skip) are missing here.
    self._reverse_cache = None
    # Build four reverse lookup tables: source/context/msgid/msgstr -> set of keys.
    src_to_msg, ctxt_to_msg, msgid_to_msg, msgstr_to_msg = {}, {}, {}, {}
    for key, msg in self.msgs.items():
        ctxt_to_msg.setdefault(ctxt, set()).add(key)
        msgid_to_msg.setdefault(msgid, set()).add(key)
        msgstr_to_msg.setdefault(msg.msgstr, set()).add(key)
        for src in msg.sources:
            src_to_msg.setdefault(src, set()).add(key)
    self._reverse_cache = (src_to_msg, ctxt_to_msg, msgid_to_msg, msgstr_to_msg)
def find_best_messages_matches(self, msgs, msgmap, rna_ctxt, rna_struct_name, rna_prop_name, rna_enum_name):
    """
    Try to find the best I18nMessages (i.e. context/msgid pairs) for the given UI messages:
        msgs: an object containing properties listed in msgmap's values.
        msgmap: a dict of various messages to use for search:
                    {"but_label": subdict, "rna_label": subdict, "enum_label": subdict,
                     "but_tip": subdict, "rna_tip": subdict, "enum_tip": subdict}
                each subdict being like that:
                    {"msgstr": id, "msgid": id, "msg_flags": id, "key": set()}
                where msgstr and msgid are identifiers of string props in msgs (resp. translated and org message),
                msg_flags is not used here, and key is a set of matching (msgctxt, msgid) keys for the item.
    The other parameters are about the RNA element from which the strings come from, if it could be determined:
        rna_ctxt: the labels' i18n context.
        rna_struct_name, rna_prop_name, rna_enum_name: should be self-explanatory!
    """
    # NOTE(review): sampled fragment — several `if`/`for`/`else` headers (e.g. the
    # `for bl in blbls:` loop and the `else: k = set()` fallbacks) are missing here.
    # Build helper mappings.
    # Note it's user responsibility to know when to invalidate (and hence force rebuild) this cache!
    if self._reverse_cache is None:
        self.invalidate_reverse_cache(True)
    src_to_msg, ctxt_to_msg, msgid_to_msg, msgstr_to_msg = self._reverse_cache

    # print(len(src_to_msg), len(ctxt_to_msg), len(msgid_to_msg), len(msgstr_to_msg))

    # Build RNA data-path candidates for the source-comment lookup.
    src, src_rna, src_enum = bpy.utils.make_rna_paths(rna_struct_name, rna_prop_name, rna_enum_name)
    print("src: ", src_rna, src_enum)

    elbl = getattr(msgs, msgmap["enum_label"]["msgstr"])
    # Enum items' labels have no i18n context...
    k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
    if elbl in msgid_to_msg:
        k &= msgid_to_msg[elbl]
    elif elbl in msgstr_to_msg:
        k &= msgstr_to_msg[elbl]
    # We assume if we already have only one key, it's the good one!
    if len(k) > 1 and src_enum in src_to_msg:
        k &= src_to_msg[src_enum]
    msgmap["enum_label"]["key"] = k
    rlbl = getattr(msgs, msgmap["rna_label"]["msgstr"])
    #print("rna label: " + rlbl, rlbl in msgid_to_msg, rlbl in msgstr_to_msg)
    k = ctxt_to_msg[rna_ctxt].copy()
    if k and rlbl in msgid_to_msg:
        k &= msgid_to_msg[rlbl]
    elif k and rlbl in msgstr_to_msg:
        k &= msgstr_to_msg[rlbl]
    # We assume if we already have only one key, it's the good one!
    if len(k) > 1 and src_rna in src_to_msg:
        k &= src_to_msg[src_rna]
    msgmap["rna_label"]["key"] = k
    blbl = getattr(msgs, msgmap["but_label"]["msgstr"])
    if blbl.endswith(self.settings.NUM_BUTTON_SUFFIX):
        # Num buttons report their label with a trailing ': '...
        blbls.append(blbl[:-len(self.settings.NUM_BUTTON_SUFFIX)])
    print("button label: " + blbl)
    if blbl and elbl not in blbls and (rlbl not in blbls or rna_ctxt != self.settings.DEFAULT_CONTEXT):
        # Always Default context for button label :/
        k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
        if bl in msgid_to_msg:
            k &= msgid_to_msg[bl]
        elif bl in msgstr_to_msg:
            k &= msgstr_to_msg[bl]
        # XXX No need to check against RNA path here, if blabel is different
        #     from rlabel, should not match anyway!
        msgmap["but_label"]["key"] = k

    # Tips (they never have a specific context).
    etip = getattr(msgs, msgmap["enum_tip"]["msgstr"])
    #print("enum tip: " + etip)
    k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
    if etip in msgid_to_msg:
        k &= msgid_to_msg[etip]
    elif etip in msgstr_to_msg:
        k &= msgstr_to_msg[etip]
    # We assume if we already have only one key, it's the good one!
    if len(k) > 1 and src_enum in src_to_msg:
        k &= src_to_msg[src_enum]
    msgmap["enum_tip"]["key"] = k
    rtip = getattr(msgs, msgmap["rna_tip"]["msgstr"])
    #print("rna tip: " + rtip)
    k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
    if k and rtip in msgid_to_msg:
        k &= msgid_to_msg[rtip]
    elif k and rtip in msgstr_to_msg:
        k &= msgstr_to_msg[rtip]
    # We assume if we already have only one key, it's the good one!
    if len(k) > 1 and src_rna in src_to_msg:
        k &= src_to_msg[src_rna]
    msgmap["rna_tip"]["key"] = k

    btip = getattr(msgs, msgmap["but_tip"]["msgstr"])
    #print("button tip: " + btip)
    if btip and btip not in {rtip, etip}:
        k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
        if btip in msgid_to_msg:
            k &= msgid_to_msg[btip]
        elif btip in msgstr_to_msg:
            k &= msgstr_to_msg[btip]
        # XXX No need to check against RNA path here, if btip is different from rtip, should not match anyway!
        msgmap["but_tip"]["key"] = k
def parse(self, kind, key, src):
    """Parse *src* with the parser registered for *kind*, resetting previous errors first."""
    del self.parsing_errors[:]
    parser = self.parsers[kind]
    parser(self, src, key)
    if not self.parsing_errors:
        return
    # Report (non-fatal) parsing issues.
    print("{} ({}):".format(key, src))
    self.print_info(print_stats=False)
    print("The parser solved them as well as it could...")
def parse_messages_from_po(self, src, key=None):
    """
    Parse a po file given as a path (or directly as content) into self.msgs.
    Note: This function will silently "arrange" mis-formated entries, thus using afterward write_messages() should
    always produce a po-valid file, though not correct!
    """
    # NOTE(review): sampled fragment — several branch headers of the line-dispatch
    # state machine (blank-line handling, `else:` branches, the per-message line
    # list initializers, `src = f.read()`, etc.) are missing from this view.
    reading_msgid = False
    reading_msgstr = False
    reading_msgctxt = False
    reading_comment = False
    default_context = self.settings.DEFAULT_CONTEXT

    def finalize_message(self, line_nr):
        # Flush the accumulated lines into one I18nMessage and reset the state machine.
        nonlocal reading_msgid, reading_msgstr, reading_msgctxt, reading_comment
        nonlocal is_commented, is_fuzzy, msgid_lines, msgstr_lines, msgctxt_lines, comment_lines

        msgid = I18nMessage.do_unescape("".join(msgid_lines))
        msgctxt = I18nMessage.do_unescape("".join(msgctxt_lines))
        msgkey = (msgctxt or default_context, msgid)

        # Never allow overriding existing msgid/msgctxt pairs!
        if msgkey in self.msgs:
            self.parsing_errors.append((line_nr, "{} context/msgid is already in current messages!".format(msgkey)))
        self.msgs[msgkey] = I18nMessage(msgctxt_lines, msgid_lines, msgstr_lines, comment_lines,
                                        is_commented, is_fuzzy, settings=self.settings)

        # Let's clean up and get ready for next message!
        reading_msgid = reading_msgstr = reading_msgctxt = reading_comment = False
        is_commented = is_fuzzy = False

    # try to use src as file name...
    if os.path.isfile(src):
        if os.stat(src).st_size > self.settings.PARSER_MAX_FILE_SIZE:
            # Security, else we could read arbitrary huge files!
            print("WARNING: skipping file {}, too huge!".format(src))
        with open(src, 'r', encoding="utf-8") as f:

    # Pre-computed prefixes and their lengths ('+1' accounts for the opening quote).
    _msgctxt = self.settings.PO_MSGCTXT
    _comm_msgctxt = self.settings.PO_COMMENT_PREFIX_MSG + _msgctxt
    _len_msgctxt = len(_msgctxt + '"')
    _len_comm_msgctxt = len(_comm_msgctxt + '"')
    _msgid = self.settings.PO_MSGID
    _comm_msgid = self.settings.PO_COMMENT_PREFIX_MSG + _msgid
    _len_msgid = len(_msgid + '"')
    _len_comm_msgid = len(_comm_msgid + '"')
    _msgstr = self.settings.PO_MSGSTR
    _comm_msgstr = self.settings.PO_COMMENT_PREFIX_MSG + _msgstr
    _len_msgstr = len(_msgstr + '"')
    _len_comm_msgstr = len(_comm_msgstr + '"')
    _comm_str = self.settings.PO_COMMENT_PREFIX_MSG
    _comm_fuzzy = self.settings.PO_COMMENT_FUZZY
    _len_comm_str = len(_comm_str + '"')

    # Main loop over all lines in src...
    for line_nr, line in enumerate(src.splitlines()):
        finalize_message(self, line_nr)

        elif line.startswith(_msgctxt) or line.startswith(_comm_msgctxt):
            reading_comment = False
            if line.startswith(_comm_str):
                line = line[_len_comm_msgctxt:-1]
                line = line[_len_msgctxt:-1]
            msgctxt_lines.append(line)

        elif line.startswith(_msgid) or line.startswith(_comm_msgid):
            reading_comment = False
            if line.startswith(_comm_str):
                if not is_commented and reading_ctxt:
                    self.parsing_errors.append((line_nr, "commented msgid following regular msgctxt"))
                line = line[_len_comm_msgid:-1]
                line = line[_len_msgid:-1]
            msgid_lines.append(line)

        elif line.startswith(_msgstr) or line.startswith(_comm_msgstr):
            if not reading_msgid:
                self.parsing_errors.append((line_nr, "msgstr without a prior msgid"))
            reading_msgid = False
            reading_msgstr = True
            if line.startswith(_comm_str):
                line = line[_len_comm_msgstr:-1]
                self.parsing_errors.append((line_nr, "commented msgstr following regular msgid"))
                line = line[_len_msgstr:-1]
                self.parsing_errors.append((line_nr, "regular msgstr following commented msgid"))
            msgstr_lines.append(line)

        elif line.startswith(_comm_str[0]):
            if line.startswith(_comm_str):
                # Commented continuation lines belong to whichever field is being read.
                msgctxt_lines.append(line[_len_comm_str:-1])
                msgctxt_lines.append(line)
                self.parsing_errors.append((line_nr, "commented string while reading regular msgctxt"))
                msgid_lines.append(line[_len_comm_str:-1])
                msgid_lines.append(line)
                self.parsing_errors.append((line_nr, "commented string while reading regular msgid"))
                msgstr_lines.append(line[_len_comm_str:-1])
                msgstr_lines.append(line)
                self.parsing_errors.append((line_nr, "commented string while reading regular msgstr"))
                if reading_msgctxt or reading_msgid or reading_msgstr:
                    self.parsing_errors.append((line_nr,
                                                "commented string within msgctxt, msgid or msgstr scope, ignored"))
            elif line.startswith(_comm_fuzzy):
                comment_lines.append(line)
            reading_comment = True

            # Bare quoted continuation lines for the field currently being read.
            msgctxt_lines.append(line[1:-1])
            msgid_lines.append(line[1:-1])
            msgstr_lines.append(line)
            self.parsing_errors.append((line_nr, "regular string outside msgctxt, msgid or msgstr scope"))
    #self.parsing_errors += (str(comment_lines), str(msgctxt_lines), str(msgid_lines), str(msgstr_lines))

    # If no final empty line, last message is not finalized!
    finalize_message(self, line_nr)
def write(self, kind, dest):
    """Write all messages to *dest* using the writer registered for *kind*."""
    writer = self.writers[kind]
    writer(self, dest)
def write_messages_to_po(self, fname, compact=False):
    """
    Write messages in fname po file.
    """
    # NOTE(review): sampled fragment — the `continue` for skipped compact messages,
    # the `chunks = [...]` initializer, the multi-line `chunks +=` bodies and the
    # `else:` branches are missing from this view.
    default_context = self.settings.DEFAULT_CONTEXT

    def _write(self, f, compact):
        _msgctxt = self.settings.PO_MSGCTXT
        _msgid = self.settings.PO_MSGID
        _msgstr = self.settings.PO_MSGSTR
        _comm = self.settings.PO_COMMENT_PREFIX_MSG
        for num, msg in enumerate(self.msgs.values()):
            # In compact mode, commented/fuzzy/untranslated messages are skipped.
            if compact and (msg.is_commented or msg.is_fuzzy or not msg.msgstr_lines):
            f.write("\n".join(msg.comment_lines))
            # Only mark as fuzzy if msgstr is not empty!
            if msg.is_fuzzy and msg.msgstr_lines:
                f.write("\n" + self.settings.PO_COMMENT_FUZZY)
            # _p prefixes every emitted line when the whole message is commented out.
            _p = _comm if msg.is_commented else ""
            if msg.msgctxt and msg.msgctxt != default_context:
                if len(msg.msgctxt_lines) > 1:
                        "\n" + _p + _msgctxt + "\"\"\n" + _p + "\"",
                        ("\"\n" + _p + "\"").join(msg.msgctxt_lines),
                    chunks += ["\n" + _p + _msgctxt + "\"" + msg.msgctxt + "\""]
            if len(msg.msgid_lines) > 1:
                    "\n" + _p + _msgid + "\"\"\n" + _p + "\"",
                    ("\"\n" + _p + "\"").join(msg.msgid_lines),
                chunks += ["\n" + _p + _msgid + "\"" + msg.msgid + "\""]
            if len(msg.msgstr_lines) > 1:
                    "\n" + _p + _msgstr + "\"\"\n" + _p + "\"",
                    ("\"\n" + _p + "\"").join(msg.msgstr_lines),
                chunks += ["\n" + _p + _msgstr + "\"" + msg.msgstr + "\""]
            f.write("".join(chunks))

    self.normalize(max_len=0) # No wrapping for now...
    if isinstance(fname, str):
        with open(fname, 'w', encoding="utf-8") as f:
            _write(self, f, compact)
    # Else assume fname is already a file(like) object!
    _write(self, fname, compact)
def write_messages_to_mo(self, fname):
    """
    Write messages in fname mo file.
    """
    # NOTE(review): sampled fragment — the `cmd = (...)` assembly, the msgfmt
    # arguments, the header constants (format_rev, N, O, T, S, H, msgid_start),
    # `_msgid_offset`/`_msgstr_offset` initializers and the `return` after the
    # subprocess path are missing from this view.
    # XXX Temp solution, until I can make own mo generator working...
    with tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8") as tmp_po_f:
        self.write_messages_to_po(tmp_po_f)
            self.settings.GETTEXT_MSGFMT_EXECUTABLE,
            "--statistics", # show stats
        print("Running ", " ".join(cmd))
        ret = subprocess.call(cmd)

    # XXX Code below is currently broken (generates corrupted mo files it seems :( )!
    # Using http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html notation.
    # Not generating hash table!
    # Only translated, unfuzzy messages are taken into account!
    default_context = self.settings.DEFAULT_CONTEXT
    msgs = tuple(v for v in self.msgs.values() if not (v.is_fuzzy or v.is_commented) and v.msgstr and v.msgid)
    # NOTE(review): `msgs[:2]` keeps only two messages — looks like leftover debug
    # truncation; confirm before relying on this code path.
    msgs = sorted(msgs[:2],
                  key=lambda e: (e.msgctxt + e.msgid) if (e.msgctxt and e.msgctxt != default_context) else e.msgid)
    magic_nbr = 0x950412de
    # Prepare our data! we need key (optional context and msgid), translation, and offset and length of both.
    # Offset are relative to start of their own list.
    # NOTE(review): b"0x04" is the four ASCII bytes '0','x','0','4', not the single
    # EOT byte the mo format uses as ctxt/msgid separator — probably should be
    # b"\x04"; likely one reason the generated mo files are corrupted.
    EOT = b"0x04" # Used to concatenate context and msgid
        nonlocal _msgid_offset, _msgstr_offset
        msgid = v.msgid.encode("utf-8")
        msgstr = v.msgstr.encode("utf-8")
        if v.msgctxt and v.msgctxt != default_context:
            msgctxt = v.msgctxt.encode("utf-8")
            msgid = msgctxt + EOT + msgid
        # Don't forget the final NULL char!
        _msgid_len = len(msgid) + 1
        _msgstr_len = len(msgstr) + 1
        ret = ((msgid, _msgid_len, _msgid_offset), (msgstr, _msgstr_len, _msgstr_offset))
        _msgid_offset += _msgid_len
        _msgstr_offset += _msgstr_len
    msgs = tuple(_gen(v) for v in msgs)
    msgstr_start = msgid_start + _msgid_offset
    print(N, msgstr_start + _msgstr_offset)
    with open(fname, 'wb') as f:
        # Header: magic, revision, counts and table offsets (see gettext mo format docs).
        f.write(struct.pack("=8I", magic_nbr, format_rev, N, O, T, S, H, 0))
        # Msgid's length and offset.
        f.write(b"".join(struct.pack("=2I", length, msgid_start + offset) for (_1, length, offset), _2 in msgs))
        # Msgstr's length and offset.
        f.write(b"".join(struct.pack("=2I", length, msgstr_start + offset) for _1, (_2, length, offset) in msgs))
        f.write(b"\0".join(msgid for (msgid, _1, _2), _3 in msgs) + b"\0")
        f.write(b"\0".join(msgstr for _1, (msgstr, _2, _3) in msgs) + b"\0")
1131 "PO": parse_messages_from_po,
1135 "PO": write_messages_to_po,
1136 "PO_COMPACT": lambda s, fn: s.write_messages_to_po(fn, True),
1137 "MO": write_messages_to_mo,
1143 Internal representation of a whole translation set.
def _parser_check_file(path, maxsize=settings.PARSER_MAX_FILE_SIZE,
                       _begin_marker=settings.PARSER_PY_MARKER_BEGIN,
                       _end_marker=settings.PARSER_PY_MARKER_END):
    """
    Split the text of the given py file around the auto-generated-section markers.

    Returns a 4-tuple (in_txt, txt, out_txt, has_translations_tuple) where txt is the
    text between the markers (or the whole file when no markers are found) and the last
    element tells whether that text contains a "translations_tuple" assignment.
    """
    if os.stat(path).st_size > maxsize:
        # Security, else we could read arbitrary huge files!
        print("WARNING: skipping file {}, too huge!".format(path))
        return None, None, None, False
    with open(path) as f:
    # Locate the begin/end markers, if present; _in is the index just past the begin marker.
    if _begin_marker in txt:
        _in = txt.index(_begin_marker) + len(_begin_marker)
    if _end_marker in txt:
        _out = txt.index(_end_marker)
    if _in is not None and _out is not None:
        in_txt, txt, out_txt = txt[:_in], txt[_in:_out], txt[_out:]
    elif _in is not None:
        in_txt, txt, out_txt = txt[:_in], txt[_in:], None
    elif _out is not None:
        in_txt, txt, out_txt = None, txt[:_out], txt[_out:]
        # NOTE(review): this line looks like it belongs to a final `else:` (no markers at
        # all) branch — confirm.
        in_txt, txt, out_txt = None, txt, None
    return in_txt, txt, out_txt, (True if "translations_tuple" in txt else False)
def _dst(self, path, uid, kind):
    """
    Default dst callable: map a source path, translation uid and kind to a destination path.
    """
    if isinstance(path, str):
        if uid == self.settings.PARSER_TEMPLATE_ID:
            if not path.endswith(".pot"):
                # The template always goes to a blender.pot next to the source.
                return os.path.join(os.path.dirname(path), "blender.pot")
        if not path.endswith(".po"):
            # Regular translations go to <uid>.po next to the source.
            return os.path.join(os.path.dirname(path), uid + ".po")
        if not path.endswith(".py"):
            if self.src.get(self.settings.PARSER_PY_ID):
                # Reuse the already-registered py file, if any.
                return self.src[self.settings.PARSER_PY_ID]
            return os.path.join(os.path.dirname(path), "translations.py")
def __init__(self, kind=None, src=None, langs=set(), settings=settings):
    """
    Create a new I18n data set, parsing it from the given kind/src.
    """
    # NOTE(review): mutable default `langs=set()` — safe only as long as it is never mutated.
    self.settings = settings
    self.src = {}  # Should have the same keys as self.trans (plus PARSER_PY_ID for py file)!
    self.dst = self._dst  # A callable that transforms src_path into dst_path!
    self.parse(kind, src, langs)
1202 def _py_file_get(self):
1203 return self.src.get(self.settings.PARSER_PY_ID)
1205 def _py_file_set(self, value):
1206 self.src[self.settings.PARSER_PY_ID] = value
1207 py_file = property(_py_file_get, _py_file_set)
def escape(self, do_all=False):
    """Escape all messages of every translation held by this object."""
    for messages in self.trans.values():
        messages.escape(do_all)
def unescape(self, do_all=True):
    """Unescape all messages of every translation held by this object."""
    for messages in self.trans.values():
        messages.unescape(do_all)
def update_info(self):
    """
    Recompute the aggregated statistics (translation levels, sign counts, contexts)
    from the translations currently held in self.trans.
    """
    self.lvl_ttips = 0.0
    self.lvl_trans_ttips = 0.0
    self.lvl_ttips_in_trans = 0.0
    self.nbr_trans_signs = 0
    self.contexts = set()

    if self.settings.PARSER_TEMPLATE_ID in self.trans:
        # The pot template is not a real translation — exclude it from the count,
        # and take the reference sign count from it.
        self.nbr_trans = len(self.trans) - 1
        self.nbr_signs = self.trans[self.settings.PARSER_TEMPLATE_ID].nbr_signs
        # NOTE(review): this re-assignment looks like it belongs to an `else:` branch
        # (no template present) — confirm.
        self.nbr_trans = len(self.trans)
    for msgs in self.trans.values():
        if msgs.nbr_msgs > 0:
            # Accumulate per-translation ratios; callers divide by nbr_trans for averages.
            self.lvl += float(msgs.nbr_trans_msgs) / float(msgs.nbr_msgs)
            self.lvl_ttips += float(msgs.nbr_ttips) / float(msgs.nbr_msgs)
            self.lvl_comm += float(msgs.nbr_comm_msgs) / float(msgs.nbr_msgs + msgs.nbr_comm_msgs)
        if msgs.nbr_ttips > 0:
            self.lvl_trans_ttips = float(msgs.nbr_trans_ttips) / float(msgs.nbr_ttips)
        if msgs.nbr_trans_msgs > 0:
            self.lvl_ttips_in_trans = float(msgs.nbr_trans_ttips) / float(msgs.nbr_trans_msgs)
        if self.nbr_signs == 0:
            # Fall back to the first translation's sign count when no template gave one.
            self.nbr_signs = msgs.nbr_signs
        self.nbr_trans_signs += msgs.nbr_trans_signs
        self.contexts |= msgs.contexts
def print_stats(self, prefix="", print_msgs=True):
    """
    Print out some stats about an I18n object.
    If print_msgs is True, it will also print all its translations' stats.
    """
    msgs_prefix = prefix + "    "
    for key, msgs in self.trans.items():
        # The template entry is skipped from per-translation reporting.
        if key == self.settings.PARSER_TEMPLATE_ID:
        print(prefix + key + ":")
        msgs.print_stats(prefix=msgs_prefix)

    # Number of non-default contexts in use (used to pluralize the summary line).
    nbr_contexts = len(self.contexts - {bpy.app.translations.contexts.default})
    if nbr_contexts != 1:
        if nbr_contexts == 0:
    # Aggregated summary lines (averages over self.nbr_trans translations).
    "Average stats for all {} translations:\n".format(self.nbr_trans),
    "    {:>6.1%} done!\n".format(self.lvl / self.nbr_trans),
    "    {:>6.1%} of messages are tooltips.\n".format(self.lvl_ttips / self.nbr_trans),
    "    {:>6.1%} of tooltips are translated.\n".format(self.lvl_trans_ttips / self.nbr_trans),
    "    {:>6.1%} of translated messages are tooltips.\n".format(self.lvl_ttips_in_trans / self.nbr_trans),
    "    {:>6.1%} of messages are commented.\n".format(self.lvl_comm / self.nbr_trans),
    "    The org msgids are currently made of {} signs.\n".format(self.nbr_signs),
    "    All processed translations are currently made of {} signs.\n".format(self.nbr_trans_signs),
    "    {} specific context{} present:\n".format(self.nbr_contexts, _ctx_txt)) +
    tuple("            " + c + "\n" for c in self.contexts - {bpy.app.translations.contexts.default}) +
    print(prefix.join(lines))
def check_py_module_has_translations(clss, src, settings=settings):
    """
    Check whether a given src (a py module, either a directory or a py file) has some i18n translation data,
    and returns a tuple (src_file, translations_tuple) if yes, else (None, None).
    """
    if os.path.isdir(src):
        # Walk the module directory, collecting the marked text of every py file.
        for root, dnames, fnames in os.walk(src):
            for fname in fnames:
                if not fname.endswith(".py"):
                path = os.path.join(root, fname)
                _1, txt, _2, has_trans = clss._parser_check_file(path)
                    txts.append((path, txt))
    elif src.endswith(".py") and os.path.isfile(src):
        # Single py file case.
        _1, txt, _2, has_trans = clss._parser_check_file(src)
            txts.append((src, txt))
    for path, txt in txts:
        # Evaluate the candidate text and look for a "translations_tuple" binding;
        # first file defining it wins.
        tuple_id = "translations_tuple"
        env = globals().copy()
            return path, env[tuple_id]
    return None, None  # No data...
def parse(self, kind, src, langs=set()):
    """Parse src with the parser registered under the given kind (e.g. 'PO', 'PY')."""
    handler = self.parsers[kind]
    handler(self, src, langs)
def parse_from_po(self, src, langs=set()):
    """
    src must be a tuple (dir_of_pos, pot_file), where:
        * dir_of_pos may either contains iso_CODE.po files, and/or iso_CODE/iso_CODE.po files.
        * pot_file may be None (in which case there will be no ref messages).
    If langs set is void, all languages found are loaded.
    """
    root_dir, pot_file = src
    if pot_file and os.path.isfile(pot_file):
        # The pot template is stored under the reserved PARSER_TEMPLATE_ID key.
        self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(self.settings.PARSER_TEMPLATE_ID, 'PO',
                                                                    pot_file, pot_file, settings=self.settings)
        # NOTE(review): writes to self.src_po — confirm this attribute is initialized elsewhere.
        self.src_po[self.settings.PARSER_TEMPLATE_ID] = pot_file
    for uid, po_file in get_po_files_from_dir(root_dir, langs):
        self.trans[uid] = I18nMessages(uid, 'PO', po_file, po_file, settings=self.settings)
        self.src_po[uid] = po_file
def parse_from_py(self, src, langs=set()):
    """
    src must be a valid path, either a py file or a module directory (in which case all py files inside it
    will be checked, first file matching will win!).
    If langs set is void, all languages found are loaded.
    """
    default_context = self.settings.DEFAULT_CONTEXT
    self.src[self.settings.PARSER_PY_ID], msgs = self.check_py_module_has_translations(src, self.settings)
    self.src[self.settings.PARSER_PY_ID] = src
    for key, (sources, gen_comments), *translations in msgs:
        if self.settings.PARSER_TEMPLATE_ID not in self.trans:
            # Lazily create the template entry on first message.
            self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(self.settings.PARSER_TEMPLATE_ID,
                                                                        settings=self.settings)
            self.src[self.settings.PARSER_TEMPLATE_ID] = self.src[self.settings.PARSER_PY_ID]
        if key in self.trans[self.settings.PARSER_TEMPLATE_ID].msgs:
            # NOTE(review): missing .format(key) — the "{}" placeholder is printed literally.
            print("ERROR! key {} is defined more than once! Skipping re-definitions!")
        # Split sources between bpy.* ones and the rest.
        custom_src = [c for c in sources if c.startswith("bpy.")]
        src = [c for c in sources if not c.startswith("bpy.")]
        # Rebuild the po-style comment lines (generated / custom-source / source prefixes).
        common_comment_lines = [self.settings.PO_COMMENT_PREFIX_GENERATED + c for c in gen_comments] + \
                               [self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + c for c in custom_src] + \
                               [self.settings.PO_COMMENT_PREFIX_SOURCE + c for c in src]
        ctxt = [key[0]] if key[0] else [default_context]
        self.trans[self.settings.PARSER_TEMPLATE_ID].msgs[key] = I18nMessage(ctxt, [key[1]], [""],
                                                                             common_comment_lines, False, False,
                                                                             settings=self.settings)
        for uid, msgstr, (is_fuzzy, user_comments) in translations:
            if uid not in self.trans:
                self.trans[uid] = I18nMessages(uid, settings=self.settings)
                self.src[uid] = self.src[self.settings.PARSER_PY_ID]
            comment_lines = [self.settings.PO_COMMENT_PREFIX + c for c in user_comments] + common_comment_lines
            self.trans[uid].msgs[key] = I18nMessage(ctxt, [key[1]], [msgstr], comment_lines, False, is_fuzzy,
                                                    settings=self.settings)
    # key = self.settings.PO_HEADER_KEY
    # for uid, trans in self.trans.items():
    #     if key not in trans.msgs:
def write(self, kind, langs=set()):
    """Write out translations with the writer registered under the given kind."""
    handler = self.writers[kind]
    handler(self, langs)
def write_to_po(self, langs=set()):
    """
    Write all translations into po files. By default, write in the same files (or dir) as the source, specify
    a custom self.dst function to write somewhere else!
    Note: If langs is set and you want to export the pot template as well, langs must contain PARSER_TEMPLATE_ID
    """.format(self.settings.PARSER_TEMPLATE_ID)
    keys = self.trans.keys()
    # Compute the destination path and write each selected translation as a po file.
    dst = self.dst(self, self.src.get(uid, ""), uid, 'PO')
    self.trans[uid].write('PO', dst)
def write_to_py(self, langs=set()):
    """
    Write all translations as python code, either in a "translations.py" file under same dir as source(s), or in
    specified file if self.py_file is set (default, as usual can be customized with self.dst callable!).
    Note: If langs is set and you want to export the pot template as well, langs must contain PARSER_TEMPLATE_ID
    """.format(self.settings.PARSER_TEMPLATE_ID)
    default_context = self.settings.DEFAULT_CONTEXT

    def _gen_py(self, langs, tab="    "):
        # Lengths of the po comment prefixes, used below to strip them from comment lines.
        _lencomm = len(self.settings.PO_COMMENT_PREFIX)
        _lengen = len(self.settings.PO_COMMENT_PREFIX_GENERATED)
        _lensrc = len(self.settings.PO_COMMENT_PREFIX_SOURCE)
        _lencsrc = len(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM)
        # Header lines of the generated python section.
        "# NOTE: You can safely move around this auto-generated block (with the begin/end markers!),",
        "#       and edit the translations by hand.",
        "#       Just carefully respect the format of the tuple!",
        "# Tuple of tuples "
        "((msgctxt, msgid), (sources, gen_comments), (lang, translation, (is_fuzzy, comments)), ...)",
        "translations_tuple = (",
        # First gather all keys (msgctxt, msgid) - theoretically, all translations should share the same, but...
        # Note: using an ordered dict if possible (stupid sets cannot be ordered :/ ).
        keys = I18nMessages._new_messages()
        for trans in self.trans.values():
            keys.update(trans.msgs)
        # Get the ref translation (ideally, PARSER_TEMPLATE_ID one, else the first one that pops up!
        # Ref translation will be used to generate sources "comments"
        ref = self.trans.get(self.settings.PARSER_TEMPLATE_ID) or self.trans[list(self.trans.keys())[0]]
        # Get all languages (uids) and sort them (PARSER_TEMPLATE_ID and PARSER_PY_ID excluded!)
        translations = self.trans.keys() - {self.settings.PARSER_TEMPLATE_ID, self.settings.PARSER_PY_ID}
        translations &= langs
        translations = [('"' + lng + '"', " " * (len(lng) + 6), self.trans[lng]) for lng in sorted(translations)]
        # NOTE(review): this prints a generator object, not the keys — debug leftover; confirm and remove.
        print(k for k in keys.keys())
        for key in keys.keys():
            # Commented-out reference messages are not exported.
            if ref.msgs[key].is_commented:
            # Key (context + msgid).
            msgctxt, msgid = ref.msgs[key].msgctxt, ref.msgs[key].msgid
            msgctxt = default_context
            ret.append(tab + "(({}, \"{}\"),".format('"' + msgctxt + '"' if msgctxt else "None", msgid))
            # Common comments (mostly sources!).
            for comment in ref.msgs[key].comment_lines:
                if comment.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM):
                    sources.append(comment[_lencsrc:])
                elif comment.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE):
                    sources.append(comment[_lensrc:])
                elif comment.startswith(self.settings.PO_COMMENT_PREFIX_GENERATED):
                    gen_comments.append(comment[_lengen:])
            if not (sources or gen_comments):
                ret.append(tab + " ((), ()),")
            # Multi-line formatting for the sources tuple.
            if len(sources) > 1:
                ret.append(tab + ' (("' + sources[0] + '",')
                ret += [tab + '   "' + s + '",' for s in sources[1:-1]]
                ret.append(tab + '   "' + sources[-1] + '"),')
            ret.append(tab + " ((" + ('"' + sources[0] + '",' if sources else "") + "),")
            # Multi-line formatting for the generated-comments tuple.
            if len(gen_comments) > 1:
                ret.append(tab + '  ("' + gen_comments[0] + '",')
                ret += [tab + '   "' + s + '",' for s in gen_comments[1:-1]]
                ret.append(tab + '   "' + gen_comments[-1] + '")),')
            ret.append(tab + "  (" + ('"' + gen_comments[0] + '",' if gen_comments else "") + ")),")
            # One entry per language: code, translation, then (is_fuzzy, comments).
            for lngstr, lngsp, trans in translations:
                if trans.msgs[key].is_commented:
                # Language code and translation.
                ret.append(tab + " (" + lngstr + ', "' + trans.msgs[key].msgstr + '",')
                # User comments and fuzzy.
                for comment in trans.msgs[key].comment_lines:
                    if comment.startswith(self.settings.PO_COMMENT_PREFIX):
                        comments.append(comment[_lencomm:])
                ret.append(tab + lngsp + "(" + ("True" if trans.msgs[key].is_fuzzy else "False") + ",")
                if len(comments) > 1:
                    ret.append(tab + lngsp + ' ("' + comments[0] + '",')
                    ret += [tab + lngsp + '  "' + s + '",' for s in comments[1:-1]]
                    ret.append(tab + lngsp + '  "' + comments[-1] + '"))),')
                ret[-1] = ret[-1] + " (" + (('"' + comments[0] + '",') if comments else "") + "))),"
            ret.append(tab + "),")
        # Trailer: turn the tuple into the runtime translations_dict.
        "translations_dict = {}",
        "for msg in translations_tuple:",
        tab + "key = msg[0]",
        tab + "for lang, trans, (is_fuzzy, comments) in msg[2:]:",
        tab * 2 + "if trans and not is_fuzzy:",
        tab * 3 + "translations_dict.setdefault(lang, {})[key] = trans",

    dst = self.dst(self, self.src.get(self.settings.PARSER_PY_ID, ""), self.settings.PARSER_PY_ID, 'PY')
    prev = txt = nxt = ""
    if os.path.exists(dst):
        if not os.path.isfile(dst):
            print("WARNING: trying to write as python code into {}, which is not a file! Aborting.".format(dst))
        prev, txt, nxt, has_trans = self._parser_check_file(dst)
        if prev is None and nxt is None:
            # No markers yet: append a fresh marked section at the end of the file.
            print("WARNING: Looks like given python file {} has no auto-generated translations yet, will be added "
                  "at the end of the file, you can move that section later if needed...".format(dst))
            txt = ([txt, "", self.settings.PARSER_PY_MARKER_BEGIN] +
                   _gen_py(self, langs) +
                   ["", self.settings.PARSER_PY_MARKER_END])
            # We completely replace the text found between start and end markers...
            txt = _gen_py(self, langs)
    # NOTE(review): `printf` is not a python builtin — this raises NameError; should be `print`.
    printf("Creating python file {} containing translations.".format(dst))
    # License header for a newly created file.
    "# ***** BEGIN GPL LICENSE BLOCK *****",
    "# This program is free software; you can redistribute it and/or",
    "# modify it under the terms of the GNU General Public License",
    "# as published by the Free Software Foundation; either version 2",
    "# of the License, or (at your option) any later version.",
    "# This program is distributed in the hope that it will be useful,",
    "# but WITHOUT ANY WARRANTY; without even the implied warranty of",
    "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the",
    "# GNU General Public License for more details.",
    "# You should have received a copy of the GNU General Public License",
    "# along with this program; if not, write to the Free Software Foundation,",
    "# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.",
    "# ***** END GPL LICENSE BLOCK *****",
    self.settings.PARSER_PY_MARKER_BEGIN,
    txt += _gen_py(self, langs)
    self.settings.PARSER_PY_MARKER_END,
    with open(dst, 'w') as f:
        f.write((prev or "") + "\n".join(txt) + (nxt or ""))
1546 "PO": parse_from_po,
1547 "PY": parse_from_py,