diff --git a/django/core/management/commands/makemessages.py b/django/core/management/commands/makemessages.py
128 | 128 | os.unlink(os.path.join(dirpath, thefile)) |
129 | 129 | elif domain == 'django' and (file_ext == '.py' or file_ext in extensions): |
130 | 130 | thefile = file |
| 131 | orig_file = os.path.join(dirpath, file) |
131 | 132 | if file_ext in extensions: |
132 | | src = open(os.path.join(dirpath, file), "rU").read() |
| 133 | src = open(orig_file, "rU").read() |
133 | 134 | thefile = '%s.py' % file |
134 | | try: |
135 | | open(os.path.join(dirpath, thefile), "w").write(templatize(src)) |
136 | | except SyntaxError, msg: |
137 | | msg = "%s (file: %s)" % (msg, os.path.join(dirpath, file)) |
138 | | raise SyntaxError(msg) |
| 135 | open(os.path.join(dirpath, thefile), "w").write(templatize(src, orig_file[2:])) |
139 | 136 | if verbosity > 1: |
140 | 137 | sys.stdout.write('processing file %s in %s\n' % (file, dirpath)) |
141 | 138 | cmd = 'xgettext -d %s -L Python --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy:1,2 --keyword=ugettext_noop --keyword=ugettext_lazy --keyword=ungettext_lazy:1,2 --from-code UTF-8 -o - "%s"' % ( |
… |
… |
148 | 145 | |
149 | 146 | if thefile != file: |
150 | 147 | old = '#: '+os.path.join(dirpath, thefile)[2:] |
151 | | new = '#: '+os.path.join(dirpath, file)[2:] |
| 148 | new = '#: '+orig_file[2:] |
152 | 149 | msgs = msgs.replace(old, new) |
153 | 150 | if os.path.exists(potfile): |
154 | 151 | # Strip the header |
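
With this hunk makemessages stops wrapping the call in its own try/except and instead hands templatize() the template's original path (minus the leading './' that walking '.' prefixes, which is what the [2:] slice strips), so a broken template can be reported with its file name and line number from inside templatize() itself. A minimal sketch of the new call shape, run inside a configured Django project and using a hypothetical path:

    from django.utils.translation import templatize

    # Hypothetical template path; in makemessages orig_file is built with os.path.join()
    # while walking '.', so it starts with './' and the [2:] slice drops that prefix.
    orig_file = './templates/example.html'
    src = open(orig_file, 'rU').read()
    open('%s.py' % orig_file, 'w').write(templatize(src, orig_file[2:]))
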
diff --git a/django/template/__init__.py b/django/template/__init__.py
193 | 193 | else: |
194 | 194 | lexer_class, parser_class = Lexer, Parser |
195 | 195 | lexer = lexer_class(template_string, origin) |
196 | | parser = parser_class(lexer.tokenize()) |
| 196 | tokens = lexer.tokenize() |
| 197 | tokens = zip(*tokens) |
| 198 | if tokens: |
| 199 | tokens = tokens[0] |
| 200 | parser = parser_class(list(tokens)) |
197 | 201 | return parser.parse() |
198 | 202 | |
199 | 203 | class Token(object): |
… |
… |
225 | 229 | def __init__(self, template_string, origin): |
226 | 230 | self.template_string = template_string |
227 | 231 | self.origin = origin |
| 232 | self.lineno = 1 |
228 | 233 | |
229 | 234 | def tokenize(self): |
230 | 235 | "Return a list of tokens from a given template_string." |
… |
… |
232 | 237 | result = [] |
233 | 238 | for bit in tag_re.split(self.template_string): |
234 | 239 | if bit: |
235 | | result.append(self.create_token(bit, in_tag)) |
| 240 | result.append((self.create_token(bit, in_tag), self.lineno)) |
236 | 241 | in_tag = not in_tag |
237 | 242 | return result |
238 | 243 | |
… |
… |
251 | 256 | token = Token(TOKEN_COMMENT, '') |
252 | 257 | else: |
253 | 258 | token = Token(TOKEN_TEXT, token_string) |
| 259 | self.lineno += token_string.count('\n') |
254 | 260 | return token |
255 | 261 | |
256 | 262 | class Parser(object): |
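
The Lexer now tracks a running self.lineno, counting newlines in the source as it creates tokens, and pairs every token with the current count. Since the plain Parser only needs the tokens themselves, compile_string() transposes the (token, lineno) pairs with zip(*tokens) and keeps the first row. A standalone illustration of that unpacking, with strings standing in for Token objects (under Python 2, where zip() returns a plain list, which is what the `if tokens:` guard relies on for an empty template):

    # Stand-ins for the Token objects that Lexer.tokenize() now pairs with line numbers.
    pairs = [('text token', 1), ('block token', 3)]

    rows = zip(*pairs)                       # [('text token', 'block token'), (1, 3)]
    tokens = list(rows[0]) if rows else []   # first row = the tokens; line numbers are dropped
    print tokens                             # ['text token', 'block token']
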
diff --git a/django/template/debug.py b/django/template/debug.py
14 | 14 | for match in tag_re.finditer(self.template_string): |
15 | 15 | start, end = match.span() |
16 | 16 | if start > upto: |
17 | | result.append(self.create_token(self.template_string[upto:start], (upto, start), False)) |
| 17 | result.append((self.create_token(self.template_string[upto:start], (upto, start), False), None)) |
18 | 18 | upto = start |
19 | | result.append(self.create_token(self.template_string[start:end], (start, end), True)) |
| 19 | result.append((self.create_token(self.template_string[start:end], (start, end), True), None)) |
20 | 20 | upto = end |
21 | 21 | last_bit = self.template_string[upto:] |
22 | 22 | if last_bit: |
23 | | result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False)) |
| 23 | result.append((self.create_token(last_bit, (upto, upto + len(last_bit)), False), None)) |
24 | 24 | return result |
25 | 25 | |
26 | 26 | def create_token(self, token_string, source, in_tag): |
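
DebugLexer keeps the same two-item shape but fills the second slot with None: it already records (start, end) source spans on each token, and in this changeset the line numbers matter only to templatize(), which uses the plain Lexer; compile_string() simply discards them. Purely illustrative unpacking:

    # Stand-ins for DebugLexer output; the position slot is always None here.
    pairs = [('text token', None), ('block token', None)]
    for token, lineno in pairs:
        assert lineno is None   # DebugLexer never fills in a line number
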
diff --git a/django/utils/translation/__init__.py b/django/utils/translation/__init__.py
96 | 96 | def get_language_from_request(request): |
97 | 97 | return real_get_language_from_request(request) |
98 | 98 | |
99 | | def templatize(src): |
100 | | return real_templatize(src) |
| 99 | def templatize(src, origin=None): |
| 100 | return real_templatize(src, origin) |
101 | 101 | |
102 | 102 | def deactivate_all(): |
103 | 103 | return real_deactivate_all() |
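
The lazy module-level wrapper mirrors the new signature, and the default of origin=None keeps existing one-argument callers working. A minimal sketch, assuming a configured Django project and a hypothetical template path:

    from django.utils.translation import templatize

    out = templatize('{% trans "Hello" %}')                     # old one-argument form still works
    out = templatize('{% trans "Hello" %}', 'pages/home.html')  # new form with an origin
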
diff --git a/django/utils/translation/trans_real.py b/django/utils/translation/trans_real.py
244 | 244 | True = right-to-left layout |
245 | 245 | """ |
246 | 246 | from django.conf import settings |
247 | | |
| 247 | |
248 | 248 | base_lang = get_language().split('-')[0] |
249 | 249 | return base_lang in settings.LANGUAGES_BIDI |
250 | 250 | |
… |
… |
408 | 408 | plural_re = re.compile(r"""^\s*plural$""") |
409 | 409 | constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""") |
410 | 410 | |
411 | | def templatize(src): |
| 411 | def templatize(src, origin=None): |
412 | 412 | """ |
413 | 413 | Turns a Django template into something that is understood by xgettext. It |
414 | 414 | does so by translating the Django translation tags into standard gettext |
… |
… |
420 | 420 | inplural = False |
421 | 421 | singular = [] |
422 | 422 | plural = [] |
423 | | for t in Lexer(src, None).tokenize(): |
| 423 | for t, lineno in Lexer(src, origin).tokenize(): |
424 | 424 | if intrans: |
425 | 425 | if t.token_type == TOKEN_BLOCK: |
426 | 426 | endbmatch = endblock_re.match(t.contents) |
… |
… |
443 | 443 | elif pluralmatch: |
444 | 444 | inplural = True |
445 | 445 | else: |
446 | | raise SyntaxError("Translation blocks must not include other block tags: %s" % t.contents) |
| 446 | filemsg = '' |
| 447 | if origin: |
| 448 | filemsg = 'file %s, ' % origin |
| 449 | raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, lineno)) |
447 | 450 | elif t.token_type == TOKEN_VAR: |
448 | 451 | if inplural: |
449 | 452 | plural.append('%%(%s)s' % t.contents) |
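
Taken together, the origin handed down from makemessages and the lexer's running line count make this SyntaxError self-describing. A minimal sketch of the improved message, assuming a configured Django project and a hypothetical template path; the wording follows the raise shown above:

    from django.utils.translation import templatize

    src = 'first line\n{% blocktrans %}{% if x %}{% endblocktrans %}'
    try:
        templatize(src, 'templates/broken.html')
    except SyntaxError, e:
        print e   # Translation blocks must not include other block tags: if x (file templates/broken.html, line 2)
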