Index: django/template/__init__.py
===================================================================
--- django/template/__init__.py	(revision 3600)
+++ django/template/__init__.py	(working copy)
@@ -66,6 +66,7 @@
 TOKEN_TEXT = 0
 TOKEN_VAR = 1
 TOKEN_BLOCK = 2
+TOKEN_ENDLINE = 3
 
 # template syntax constants
 FILTER_SEPARATOR = '|'
@@ -85,8 +86,9 @@
 UNKNOWN_SOURCE="<unknown source>"
 
 # match a variable or block tag and capture the entire tag, including start/end delimiters
-tag_re = re.compile('(%s.*?%s|%s.*?%s)' % (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
+tag_re = re.compile('(%s.*?%s|%s.*?%s|\n)' % (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
                                           re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END)))
+whitespace_only_re = re.compile(r'^\s+$')
 
 # global dictionary of libraries that have been loaded using get_library
 libraries = {}
@@ -157,17 +159,71 @@
 def compile_string(template_string, origin):
     "Compiles template_string into NodeList ready for rendering"
     lexer = lexer_factory(template_string, origin)
-    parser = parser_factory(lexer.tokenize())
+    tokens = lexer.tokenize()
+    tokens = trim_whitespace(tokens)  # drop lines holding only block tags and whitespace
+    parser = parser_factory(tokens)
     return parser.parse()
 
+def trim_whitespace(tokens):
+    "Strips whitespace around block-tag-only lines; returns the kept tokens."
+    num_tokens = len(tokens)
+    keep_tokens = []
+    line_start = 0
+    for pos, token in enumerate(tokens):
+        # Use == rather than `is`: identity tests between ints rely on
+        # CPython's small-int cache and silently fail above 256.
+        is_endline = token.token_type == TOKEN_ENDLINE
+        is_last_token = pos == (num_tokens - 1)
+        # Process the line once we reach its end (or the end of the file).
+        if is_endline or is_last_token:
+            # pos+1 so the slice includes the endline token we are at.
+            keep_tokens += get_line_tokens(tokens[line_start:pos+1])
+            line_start = pos + 1
+    return keep_tokens
+
+def get_line_tokens(tokens):
+    "Returns one line's tokens, trimmed when the line holds only block tags."
+    line_has_block = False
+    # The final token is the TOKEN_ENDLINE terminator; skip it here.
+    # NOTE(review): the file's last line may have no endline token, so this
+    # also skips a real token there -- confirm that edge case is intended.
+    for token in tokens[:-1]:
+        if token.token_type == TOKEN_TEXT:
+            # Any non-whitespace text means the line is kept verbatim.
+            if not whitespace_only_re.match(token.contents):
+                return tokens
+        elif token.token_type == TOKEN_BLOCK:
+            line_has_block = True
+    # Only whitespace, blocks and variables remain.  A line with no block
+    # tags (possibly just a lone TOKEN_ENDLINE) is returned unchanged.
+    if not line_has_block:
+        return tokens
+    # Block-only line: drop the endline plus leading/trailing whitespace.
+    del tokens[-1]
+    # (Safe to mutate: the caller passes a fresh slice of the token list.)
+    start = first_non_whitespace_token(tokens)
+    tokens.reverse()
+    from_end = first_non_whitespace_token(tokens)
+    tokens.reverse()
+    return tokens[start:len(tokens)-from_end]
+
+def first_non_whitespace_token(tokens):
+    "Returns the index of the first token that isn't whitespace-only text."
+    pos = 0
+    for token in tokens:
+        if token.token_type != TOKEN_TEXT or not whitespace_only_re.match(token.contents):
+            break
+        pos += 1
+    return pos
+
 class Token(object):
     def __init__(self, token_type, contents):
-        "The token_type must be TOKEN_TEXT, TOKEN_VAR or TOKEN_BLOCK"
+        "The token_type must be TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_ENDLINE."
         self.token_type, self.contents = token_type, contents
 
     def __str__(self):
         return '<%s token: "%s...">' % \
-            ({TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block'}[self.token_type],
+            ({TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block', TOKEN_ENDLINE: 'Endline'}[self.token_type],
             self.contents[:20].replace('\n', ''))
 
     def split_contents(self):
@@ -190,6 +246,8 @@
             token = Token(TOKEN_VAR, token_string[len(VARIABLE_TAG_START):-len(VARIABLE_TAG_END)].strip())
         elif token_string.startswith(BLOCK_TAG_START):
             token = Token(TOKEN_BLOCK, token_string[len(BLOCK_TAG_START):-len(BLOCK_TAG_END)].strip())
+        elif token_string == '\n':
+            token = Token(TOKEN_ENDLINE, token_string)
         else:
             token = Token(TOKEN_TEXT, token_string)
         return token
@@ -233,6 +291,8 @@
             token = self.next_token()
             if token.token_type == TOKEN_TEXT:
                 self.extend_nodelist(nodelist, TextNode(token.contents), token)
+            elif token.token_type == TOKEN_ENDLINE:
+                self.extend_nodelist(nodelist, EndlineNode(token.contents), token)
             elif token.token_type == TOKEN_VAR:
                 if not token.contents:
                     self.empty_variable(token)
@@ -726,6 +786,16 @@
     def render(self, context):
         return self.s
 
+class EndlineNode(Node):
+    def __init__(self, s):
+        self.s = s  # the literal newline text captured by the lexer
+
+    def __repr__(self):
+        return "<Endline Node: %r>" % self.s
+
+    def render(self, context):
+        return self.s  # endlines render verbatim, mirroring TextNode
+
 class VariableNode(Node):
     def __init__(self, filter_expression):
         self.filter_expression = filter_expression
