Ticket #648: comment_tag.diff

File comment_tag.diff, 2.3 KB (added by Hawkeye, 18 years ago)

Patch to implement {# #} style comments

  • __init__.py

     
    @@ -66,11 +66,14 @@
     TOKEN_TEXT = 0
     TOKEN_VAR = 1
     TOKEN_BLOCK = 2
    +TOKEN_COMMENT = 3
     
     # template syntax constants
     FILTER_SEPARATOR = '|'
     FILTER_ARGUMENT_SEPARATOR = ':'
     VARIABLE_ATTRIBUTE_SEPARATOR = '.'
    +COMMENT_TAG_START = '{#'
    +COMMENT_TAG_END = '#}'
     BLOCK_TAG_START = '{%'
     BLOCK_TAG_END = '%}'
     VARIABLE_TAG_START = '{{'
     
    @@ -85,8 +88,9 @@
     UNKNOWN_SOURCE="<unknown source>"
     
     # match a variable or block tag and capture the entire tag, including start/end delimiters
    -tag_re = re.compile('(%s.*?%s|%s.*?%s)' % (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
    -                                          re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END)))
    +tag_re = re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' % (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
    +                                          re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
    +                                          re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END)))
     
     # global dictionary of libraries that have been loaded using get_library
     libraries = {}
     
    @@ -163,12 +167,12 @@
     
     class Token(object):
         def __init__(self, token_type, contents):
    -        "The token_type must be TOKEN_TEXT, TOKEN_VAR or TOKEN_BLOCK"
    +        "The token_type must be TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_COMMENT"
             self.token_type, self.contents = token_type, contents
     
         def __str__(self):
             return '<%s token: "%s...">' % \
    -            ({TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block'}[self.token_type],
    +            ({TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block', TOKEN_COMMENT: 'Comment'}[self.token_type],
                 self.contents[:20].replace('\n', ''))
     
         def split_contents(self):
     
    @@ -191,6 +195,8 @@
                 token = Token(TOKEN_VAR, token_string[len(VARIABLE_TAG_START):-len(VARIABLE_TAG_END)].strip())
             elif token_string.startswith(BLOCK_TAG_START):
                 token = Token(TOKEN_BLOCK, token_string[len(BLOCK_TAG_START):-len(BLOCK_TAG_END)].strip())
    +        elif token_string.startswith(COMMENT_TAG_START):
    +            token = Token(TOKEN_COMMENT, '')
             else:
                 token = Token(TOKEN_TEXT, token_string)
             return token
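
To see the effect of the change without applying the patch, the sketch below reproduces the patched constants, tag_re and create_token branching as a standalone script and tokenizes a template string containing a {# ... #} comment. It is illustrative only: the tuple-based tokens, the sample template and the print call are not part of the patch or of the module it touches.

    # Standalone sketch: mirrors the patched tag_re and create_token branching.
    # Tokens are plain (type, contents) tuples here purely for demonstration.
    import re

    TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT = 0, 1, 2, 3

    BLOCK_TAG_START, BLOCK_TAG_END = '{%', '%}'
    VARIABLE_TAG_START, VARIABLE_TAG_END = '{{', '}}'
    COMMENT_TAG_START, COMMENT_TAG_END = '{#', '#}'

    # Comment tags become a third alternative in the regex, so the lexer
    # splits them out just like variable and block tags.
    tag_re = re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' % (
        re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
        re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
        re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END)))

    def create_token(token_string):
        # Same branching as the patched create_token: a comment token keeps
        # no contents, so the comment text is discarded at lexing time.
        if token_string.startswith(VARIABLE_TAG_START):
            return (TOKEN_VAR, token_string[len(VARIABLE_TAG_START):-len(VARIABLE_TAG_END)].strip())
        elif token_string.startswith(BLOCK_TAG_START):
            return (TOKEN_BLOCK, token_string[len(BLOCK_TAG_START):-len(BLOCK_TAG_END)].strip())
        elif token_string.startswith(COMMENT_TAG_START):
            return (TOKEN_COMMENT, '')
        return (TOKEN_TEXT, token_string)

    template = 'Hello {{ name }}! {# not rendered #}{% if x %}x{% endif %}'
    print([create_token(bit) for bit in tag_re.split(template) if bit])
    # [(0, 'Hello '), (1, 'name'), (0, '! '), (3, ''), (2, 'if x'), (0, 'x'), (2, 'endif')]

Note that the hunks above only cover lexing: comment tags are recognised and emitted as TOKEN_COMMENT tokens with empty contents. How those tokens are (or are not) consumed further downstream is not visible in this attachment.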