Ticket #2333: runtest.patch

File runtest.patch, 16.6 KB (added by Russell Keith-Magee, 18 years ago)

Addendum to first group of patches; revised runtest.py

  • tests/runtests.py

     
    11#!/usr/bin/env python
    22
    3 import os, re, sys, time, traceback
     3import os, sys, traceback
     4import unittest
    45
    5 # doctest is included in the same package as this module, because this testing
    6 # framework uses features only available in the Python 2.4 version of doctest,
    7 # and Django aims to work with Python 2.3+.
    8 import doctest
    9 
    106MODEL_TESTS_DIR_NAME = 'modeltests'
    11 OTHER_TESTS_DIR = "othertests"
    127REGRESSION_TESTS_DIR_NAME = 'regressiontests'
    13 TEST_DATABASE_NAME = 'django_test_db'
    148
    15 error_list = []
    16 def log_error(model_name, title, description):
    17     error_list.append({
    18         'title': "%r module: %s" % (model_name, title),
    19         'description': description,
    20     })
    21 
    229MODEL_TEST_DIR = os.path.join(os.path.dirname(__file__), MODEL_TESTS_DIR_NAME)
    2310REGRESSION_TEST_DIR = os.path.join(os.path.dirname(__file__), REGRESSION_TESTS_DIR_NAME)
    2411
     
    3724    models = []
    3825    for loc, dirpath in (MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR), (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR):
    3926        for f in os.listdir(dirpath):
    40             if f.startswith('__init__') or f.startswith('.') or f.startswith('sql'):
     27            if f.startswith('__init__') or f.startswith('.') or f.startswith('sql') or f.startswith('invalid'):
    4128                continue
    4229            models.append((loc, f))
    4330    return models
    4431
    45 class DjangoDoctestRunner(doctest.DocTestRunner):
    46     def __init__(self, verbosity_level, *args, **kwargs):
    47         self.verbosity_level = verbosity_level
    48         doctest.DocTestRunner.__init__(self, *args, **kwargs)
    49         self._checker = DjangoDoctestOutputChecker()
    50         self.optionflags = doctest.ELLIPSIS
    51 
    52     def report_start(self, out, test, example):
    53         if self.verbosity_level > 1:
    54             out("  >>> %s\n" % example.source.strip())
    55 
    56     def report_failure(self, out, test, example, got):
    57         log_error(test.name, "API test failed",
    58             "Code: %r\nLine: %s\nExpected: %r\nGot: %r" % (example.source.strip(), example.lineno, example.want, got))
    59 
    60     def report_unexpected_exception(self, out, test, example, exc_info):
    61         from django.db import transaction
    62         tb = ''.join(traceback.format_exception(*exc_info)[1:])
    63         log_error(test.name, "API test raised an exception",
    64             "Code: %r\nLine: %s\nException: %s" % (example.source.strip(), example.lineno, tb))
    65         # Rollback, in case of database errors. Otherwise they'd have
    66         # side effects on other tests.
    67         transaction.rollback_unless_managed()
    68 
    69 normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
    70 
    71 class DjangoDoctestOutputChecker(doctest.OutputChecker):
    72     def check_output(self, want, got, optionflags):
    73         ok = doctest.OutputChecker.check_output(self, want, got, optionflags)
    74 
    75         # Doctest does an exact string comparison of output, which means long
    76         # integers aren't equal to normal integers ("22L" vs. "22"). The
    77         # following code normalizes long integers so that they equal normal
    78         # integers.
    79         if not ok:
    80             return normalize_long_ints(want) == normalize_long_ints(got)
    81         return ok
    82 
# Old-style test driver: collects doctest/validation failures in error_list
# and prints a summary at the end.  NOTE(review): this class appears on the
# removed side of the diff only — presumably superseded by the unittest-based
# driver added elsewhere in this patch; confirm against the applied revision.
    83 class TestRunner:
    84     def __init__(self, verbosity_level=0, which_tests=None):
        # verbosity_level: int, 0..2.  which_tests: model names from argv,
        # or an empty/None value meaning "run everything".
    85         self.verbosity_level = verbosity_level
    86         self.which_tests = which_tests
    87 
    88     def output(self, required_level, message):
        # Print `message` only when verbosity is at least `required_level`
        # (Python 2 print statement).
    89         if self.verbosity_level > required_level - 1:
    90             print message
    91 
    92     def run_tests(self):
    93         from django.conf import settings
    94 
    95         # An empty access of the settings to force the default options to be
    96         # installed prior to assigning to them.
    97         settings.INSTALLED_APPS
    98 
    99         # Manually set INSTALLED_APPS to point to the test models.
    100         settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS + ['.'.join(a) for a in get_test_models()]
    101 
    102         # Manually set DEBUG and USE_I18N.
    103         settings.DEBUG = False
    104         settings.USE_I18N = True
    105 
    106         from django.db import connection
     32def get_invalid_models():
     33    models = []
     34    for loc, dirpath in (MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR), (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR):
     35        for f in os.listdir(dirpath):
     36            if f.startswith('__init__') or f.startswith('.') or f.startswith('sql'):
     37                continue
     38            if f.startswith('invalid'):
     39                models.append((loc, f))
     40    return models
     41               
     42class InvalidModelTestCase(unittest.TestCase):
    def __init__(self, model_label):
        """Build a test case for the invalid-model app named by *model_label*
        (a dotted app path, loaded later by runTest via load_app)."""
        unittest.TestCase.__init__(self)
        self.model_label = model_label
     46       
     47    def runTest(self):
    10748        from django.core import management
    108         import django.db.models
    109 
    110         # Determine which models we're going to test.
    111         test_models = get_test_models()
    112         if 'othertests' in self.which_tests:
    113             self.which_tests.remove('othertests')
    114             run_othertests = True
    115             if not self.which_tests:
    116                 test_models = []
    117         else:
    118             run_othertests = not self.which_tests
    119 
    120         if self.which_tests:
    121             # Only run the specified tests.
    122             bad_models = [m for m in self.which_tests if (MODEL_TESTS_DIR_NAME, m) not in test_models and (REGRESSION_TESTS_DIR_NAME, m) not in test_models]
    123             if bad_models:
    124                 sys.stderr.write("Models not found: %s\n" % bad_models)
    125                 sys.exit(1)
    126             else:
    127                 all_tests = []
    128                 for test in self.which_tests:
    129                     for loc in MODEL_TESTS_DIR_NAME, REGRESSION_TESTS_DIR_NAME:
    130                         if (loc, test) in test_models:
    131                             all_tests.append((loc, test))
    132                 test_models = all_tests
    133 
    134         self.output(0, "Running tests with database %r" % settings.DATABASE_ENGINE)
    135 
    136         # If we're using SQLite, it's more convenient to test against an
    137         # in-memory database.
    138         if settings.DATABASE_ENGINE == "sqlite3":
    139             global TEST_DATABASE_NAME
    140             TEST_DATABASE_NAME = ":memory:"
    141         else:
    142             # Create the test database and connect to it. We need to autocommit
    143             # if the database supports it because PostgreSQL doesn't allow
    144             # CREATE/DROP DATABASE statements within transactions.
    145             cursor = connection.cursor()
    146             self._set_autocommit(connection)
    147             self.output(1, "Creating test database")
    148             try:
    149                 cursor.execute("CREATE DATABASE %s" % TEST_DATABASE_NAME)
    150             except Exception, e:
    151                 sys.stderr.write("Got an error creating the test database: %s\n" % e)
    152                 confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_DATABASE_NAME)
    153                 if confirm == 'yes':
    154                     cursor.execute("DROP DATABASE %s" % TEST_DATABASE_NAME)
    155                     cursor.execute("CREATE DATABASE %s" % TEST_DATABASE_NAME)
    156                 else:
    157                     print "Tests cancelled."
    158                     return
    159         connection.close()
    160         old_database_name = settings.DATABASE_NAME
    161         settings.DATABASE_NAME = TEST_DATABASE_NAME
    162 
    163         # Initialize the test database.
    164         cursor = connection.cursor()
    165 
    16649        from django.db.models.loading import load_app
    167         # Install the core always installed apps
    168         for app in ALWAYS_INSTALLED_APPS:
    169             self.output(1, "Installing contrib app %s" % app)
    170             mod = load_app(app)
    171             management.install(mod)
     50        from cStringIO import StringIO
    17251
    173         # Run the tests for each test model.
    174         self.output(1, "Running app tests")
    175         for model_dir, model_name in test_models:
    176             self.output(1, "%s model: Importing" % model_name)
    177             try:
    178                 mod = load_app(model_dir + '.' + model_name)
    179             except Exception, e:
    180                 log_error(model_name, "Error while importing", ''.join(traceback.format_exception(*sys.exc_info())[1:]))
    181                 continue
     52        try:
     53            module = load_app(self.model_label)
     54        except Exception, e:
     55            self.fail('Unable to load invalid model module')
     56       
     57        s = StringIO()
     58        count = management.get_validation_errors(s, module)
     59        s.seek(0)
     60        error_log = s.read()
     61        actual = error_log.split('\n')
     62        expected = module.model_errors.split('\n')
    18263
    183             if not getattr(mod, 'error_log', None):
    184                 # Model is not marked as an invalid model
    185                 self.output(1, "%s.%s model: Installing" % (model_dir, model_name))
    186                 management.install(mod)
     64        unexpected = [err for err in actual if err not in expected]
     65        missing = [err for err in expected if err not in actual]
    18766
    188                 # Run the API tests.
    189                 p = doctest.DocTestParser()
    190                 test_namespace = dict([(m._meta.object_name, m) \
    191                                         for m in django.db.models.get_models(mod)])
    192                 dtest = p.get_doctest(mod.API_TESTS, test_namespace, model_name, None, None)
    193                 # Manually set verbose=False, because "-v" command-line parameter
    194                 # has side effects on doctest TestRunner class.
    195                 runner = DjangoDoctestRunner(verbosity_level=verbosity_level, verbose=False)
    196                 self.output(1, "%s.%s model: Running tests" % (model_dir, model_name))
    197                 runner.run(dtest, clear_globs=True, out=sys.stdout.write)
    198             else:
    199                 # Check that model known to be invalid is invalid for the right reasons.
    200                 self.output(1, "%s.%s model: Validating" % (model_dir, model_name))
     67        self.assert_(not unexpected, "Unexpected Errors: " + '\n'.join(unexpected))
     68        self.assert_(not missing, "Missing Errors: " + '\n'.join(missing))
    20169
    202                 from cStringIO import StringIO
    203                 s = StringIO()
    204                 count = management.get_validation_errors(s, mod)
    205                 s.seek(0)
    206                 error_log = s.read()
    207                 actual = error_log.split('\n')
    208                 expected = mod.error_log.split('\n')
    20970
    210                 unexpected = [err for err in actual if err not in expected]
    211                 missing = [err for err in expected if err not in actual]
     71def django_tests(verbosity, tests_to_run):
     72    from django.conf import settings
     73    from django.db.models.loading import get_apps, load_app
     74    old_installed_apps = settings.INSTALLED_APPS
     75   
     76    # load all the ALWAYS_INSTALLED_APPS
     77    settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
     78    get_apps()
     79   
     80    test_models = []
     81    # Load all the test model apps
     82    for model_dir, model_name in get_test_models():
     83        model_label = '.'.join([model_dir, model_name])
     84        try:
     85            # if the model was named on the command line, or
     86            # no models were named (i.e., run all), import
     87            # this model and add it to the list to test.
     88            if not tests_to_run or model_name in tests_to_run:
     89                if verbosity >= 1:
     90                    print "Importing model %s" % model_name
     91                mod = load_app(model_label)
     92                settings.INSTALLED_APPS.append(model_label)       
     93                test_models.append(mod)
     94        except Exception, e:
     95            sys.stderr.write("Error while importing %s:" % model_name + ''.join(traceback.format_exception(*sys.exc_info())[1:]))
     96            continue   
    21297
    213                 if unexpected or missing:
    214                     unexpected_log = '\n'.join(unexpected)
    215                     missing_log = '\n'.join(missing)
    216                     log_error(model_name,
    217                         "Validator found %d validation errors, %d expected" % (count, len(expected) - 1),
    218                         "Missing errors:\n%s\n\nUnexpected errors:\n%s" % (missing_log, unexpected_log))
    219 
    220         if run_othertests:
    221             # Run the non-model tests in the other tests dir
    222             self.output(1, "Running other tests")
    223             other_tests_dir = os.path.join(os.path.dirname(__file__), OTHER_TESTS_DIR)
    224             test_modules = [f[:-3] for f in os.listdir(other_tests_dir) if f.endswith('.py') and not f.startswith('__init__')]
    225             for module in test_modules:
    226                 self.output(1, "%s module: Importing" % module)
    227                 try:
    228                     mod = __import__("othertests." + module, '', '', [''])
    229                 except Exception, e:
    230                     log_error(module, "Error while importing", ''.join(traceback.format_exception(*sys.exc_info())[1:]))
    231                     continue
    232                 if mod.__doc__:
    233                     p = doctest.DocTestParser()
    234                     dtest = p.get_doctest(mod.__doc__, mod.__dict__, module, None, None)
    235                     runner = DjangoDoctestRunner(verbosity_level=verbosity_level, verbose=False)
    236                     self.output(1, "%s module: running tests" % module)
    237                     runner.run(dtest, clear_globs=True, out=sys.stdout.write)
    238                 if hasattr(mod, "run_tests") and callable(mod.run_tests):
    239                     self.output(1, "%s module: running tests" % module)
    240                     try:
    241                         mod.run_tests(verbosity_level)
    242                     except Exception, e:
    243                         log_error(module, "Exception running tests", ''.join(traceback.format_exception(*sys.exc_info())[1:]))
    244                         continue
    245 
    246         # Unless we're using SQLite, remove the test database to clean up after
    247         # ourselves. Connect to the previous database (not the test database)
    248         # to do so, because it's not allowed to delete a database while being
    249         # connected to it.
    250         if settings.DATABASE_ENGINE != "sqlite3":
    251             connection.close()
    252             settings.DATABASE_NAME = old_database_name
    253             cursor = connection.cursor()
    254             self.output(1, "Deleting test database")
    255             self._set_autocommit(connection)
    256             time.sleep(1) # To avoid "database is being accessed by other users" errors.
    257             cursor.execute("DROP DATABASE %s" % TEST_DATABASE_NAME)
    258 
    259         # Display output.
    260         if error_list:
    261             for d in error_list:
    262                 print
    263                 print d['title']
    264                 print "=" * len(d['title'])
    265                 print d['description']
    266             print "%s error%s:" % (len(error_list), len(error_list) != 1 and 's' or '')
    267         else:
    268             print "All tests passed."
    269 
    270     def _set_autocommit(self, connection):
    271         """
    272         Make sure a connection is in autocommit mode.
    273         """
        # Operates on the raw DB-API connection wrapped by Django's
        # `connection` object.  NOTE(review): `autocommit(True)` matches
        # MySQLdb-style drivers and `set_isolation_level(0)` matches
        # psycopg — confirm for other backends; silently a no-op when the
        # driver exposes neither attribute.
    274         if hasattr(connection.connection, "autocommit"):
    275             connection.connection.autocommit(True)
    276         elif hasattr(connection.connection, "set_isolation_level"):
    277             connection.connection.set_isolation_level(0)
    278 
     98    # Add tests for invalid models
     99    extra_tests = []
     100    for model_dir, model_name in get_invalid_models():
     101        model_label = '.'.join([model_dir, model_name])
     102        if not tests_to_run or model_name in tests_to_run:
     103            extra_tests.append(InvalidModelTestCase(model_label))
     104   
     105    # Run the test suite, including the extra validation tests.
     106    from django.test.simple import run_tests
     107    run_tests(test_models, verbosity, extra_tests=extra_tests)
     108 
     109    # Restore the old INSTALLED_APPS setting
     110    settings.INSTALLED_APPS = old_installed_apps
     111     
    279112if __name__ == "__main__":
    280113    from optparse import OptionParser
    281114    usage = "%prog [options] [model model model ...]"
    282115    parser = OptionParser(usage=usage)
    283     parser.add_option('-v', help='How verbose should the output be? Choices are 0, 1 and 2, where 2 is most verbose. Default is 0.',
    284         type='choice', choices=['0', '1', '2'])
     116    parser.add_option('-v','--verbosity', action='store', dest='verbosity', default='0',
     117        type='choice', choices=['0', '1', '2'],
     118        help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')   
    285119    parser.add_option('--settings',
    286120        help='Python path to settings module, e.g. "myproject.settings". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.')
    287121    options, args = parser.parse_args()
    288     verbosity_level = 0
    289     if options.v:
    290         verbosity_level = int(options.v)
    291122    if options.settings:
    292123        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    293     t = TestRunner(verbosity_level, args)
    294     t.run_tests()
     124       
     125    django_tests(int(options.verbosity), args)
Back to Top