-class DjangoDoctestRunner(doctest.DocTestRunner):
-    def __init__(self, verbosity_level, *args, **kwargs):
-        self.verbosity_level = verbosity_level
-        doctest.DocTestRunner.__init__(self, *args, **kwargs)
-        self._checker = DjangoDoctestOutputChecker()
-        self.optionflags = doctest.ELLIPSIS
-
-    def report_start(self, out, test, example):
-        if self.verbosity_level > 1:
-            out(" >>> %s\n" % example.source.strip())
-
-    def report_failure(self, out, test, example, got):
-        log_error(test.name, "API test failed",
-            "Code: %r\nLine: %s\nExpected: %r\nGot: %r" % (example.source.strip(), example.lineno, example.want, got))
-
-    def report_unexpected_exception(self, out, test, example, exc_info):
-        from django.db import transaction
-        tb = ''.join(traceback.format_exception(*exc_info)[1:])
-        log_error(test.name, "API test raised an exception",
-            "Code: %r\nLine: %s\nException: %s" % (example.source.strip(), example.lineno, tb))
-        # Rollback, in case of database errors. Otherwise they'd have
-        # side effects on other tests.
-        transaction.rollback_unless_managed()
-
-normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
-
-class DjangoDoctestOutputChecker(doctest.OutputChecker):
-    def check_output(self, want, got, optionflags):
-        ok = doctest.OutputChecker.check_output(self, want, got, optionflags)
-
-        # Doctest does an exact string comparison of output, which means long
-        # integers aren't equal to normal integers ("22L" vs. "22"). The
-        # following code normalizes long integers so that they equal normal
-        # integers.
-        if not ok:
-            return normalize_long_ints(want) == normalize_long_ints(got)
-        return ok
-
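The checker falls back to the long-integer normalization only after the exact string comparison fails. A standalone illustration of what the regex above does and does not touch (the example strings are made up, not taken from the test suite):

import re

normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)

assert normalize_long_ints("22L") == "22"            # Python 2 long repr loses its suffix
assert normalize_long_ints("[1L, 2L]") == "[1, 2]"   # also inside repr() of containers
assert normalize_long_ints("LIMIT 5") == "LIMIT 5"   # no digits-followed-by-L, left alone
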
-class TestRunner:
-    def __init__(self, verbosity_level=0, which_tests=None):
-        self.verbosity_level = verbosity_level
-        self.which_tests = which_tests
-
-    def output(self, required_level, message):
-        if self.verbosity_level > required_level - 1:
-            print message
-
-    def run_tests(self):
-        from django.conf import settings
-
-        # An empty access of the settings to force the default options to be
-        # installed prior to assigning to them.
-        settings.INSTALLED_APPS
-
-        # Manually set INSTALLED_APPS to point to the test models.
-        settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS + ['.'.join(a) for a in get_test_models()]
-
-        # Manually set DEBUG and USE_I18N.
-        settings.DEBUG = False
-        settings.USE_I18N = True
-
-        from django.db import connection
+def get_invalid_models():
+    models = []
+    for loc, dirpath in (MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR), (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR):
+        for f in os.listdir(dirpath):
+            if f.startswith('__init__') or f.startswith('.') or f.startswith('sql'):
+                continue
+            if f.startswith('invalid'):
+                models.append((loc, f))
+    return models
+
+class InvalidModelTestCase(unittest.TestCase):
+    def __init__(self, model_label):
+        unittest.TestCase.__init__(self)
+        self.model_label = model_label
+
+    def runTest(self):
[...]
-        import django.db.models
-
-        # Determine which models we're going to test.
-        test_models = get_test_models()
-        if 'othertests' in self.which_tests:
-            self.which_tests.remove('othertests')
-            run_othertests = True
-            if not self.which_tests:
-                test_models = []
-        else:
-            run_othertests = not self.which_tests
-
-        if self.which_tests:
-            # Only run the specified tests.
-            bad_models = [m for m in self.which_tests if (MODEL_TESTS_DIR_NAME, m) not in test_models and (REGRESSION_TESTS_DIR_NAME, m) not in test_models]
-            if bad_models:
-                sys.stderr.write("Models not found: %s\n" % bad_models)
-                sys.exit(1)
-            else:
-                all_tests = []
-                for test in self.which_tests:
-                    for loc in MODEL_TESTS_DIR_NAME, REGRESSION_TESTS_DIR_NAME:
-                        if (loc, test) in test_models:
-                            all_tests.append((loc, test))
-                test_models = all_tests
-
-        self.output(0, "Running tests with database %r" % settings.DATABASE_ENGINE)
-
-        # If we're using SQLite, it's more convenient to test against an
-        # in-memory database.
-        if settings.DATABASE_ENGINE == "sqlite3":
-            global TEST_DATABASE_NAME
-            TEST_DATABASE_NAME = ":memory:"
-        else:
-            # Create the test database and connect to it. We need to autocommit
-            # if the database supports it because PostgreSQL doesn't allow
-            # CREATE/DROP DATABASE statements within transactions.
-            cursor = connection.cursor()
-            self._set_autocommit(connection)
-            self.output(1, "Creating test database")
-            try:
-                cursor.execute("CREATE DATABASE %s" % TEST_DATABASE_NAME)
-            except Exception, e:
-                sys.stderr.write("Got an error creating the test database: %s\n" % e)
-                confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_DATABASE_NAME)
-                if confirm == 'yes':
-                    cursor.execute("DROP DATABASE %s" % TEST_DATABASE_NAME)
-                    cursor.execute("CREATE DATABASE %s" % TEST_DATABASE_NAME)
-                else:
-                    print "Tests cancelled."
-                    return
-            connection.close()
-            old_database_name = settings.DATABASE_NAME
-            settings.DATABASE_NAME = TEST_DATABASE_NAME
-
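As an aside, ":memory:" is SQLite's reserved name for a private in-memory database, which is why the sqlite3 branch above needs no explicit creation or teardown. A standalone sketch with the standard library driver:

import sqlite3

conn = sqlite3.connect(":memory:")             # exists only for this connection's lifetime
conn.execute("CREATE TABLE demo (id INTEGER)")
conn.close()                                   # nothing to drop afterwards
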
-        # Initialize the test database.
-        cursor = connection.cursor()
-
[...]
-                # Run the API tests.
-                p = doctest.DocTestParser()
-                test_namespace = dict([(m._meta.object_name, m) \
-                    for m in django.db.models.get_models(mod)])
-                dtest = p.get_doctest(mod.API_TESTS, test_namespace, model_name, None, None)
-                # Manually set verbose=False, because "-v" command-line parameter
-                # has side effects on doctest TestRunner class.
-                runner = DjangoDoctestRunner(verbosity_level=verbosity_level, verbose=False)
-                self.output(1, "%s.%s model: Running tests" % (model_dir, model_name))
-                runner.run(dtest, clear_globs=True, out=sys.stdout.write)
-            else:
-                # Check that model known to be invalid is invalid for the right reasons.
-                self.output(1, "%s.%s model: Validating" % (model_dir, model_name))
[...]
+        self.assert_(not unexpected, "Unexpected Errors: " + '\n'.join(unexpected))
+        self.assert_(not missing, "Missing Errors: " + '\n'.join(missing))
[...]
-                unexpected = [err for err in actual if err not in expected]
-                missing = [err for err in expected if err not in actual]
+def django_tests(verbosity, tests_to_run):
+    from django.conf import settings
+    from django.db.models.loading import get_apps, load_app
+    old_installed_apps = settings.INSTALLED_APPS
+
+    # load all the ALWAYS_INSTALLED_APPS
+    settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
+    get_apps()
+
+    test_models = []
+    # Load all the test model apps
+    for model_dir, model_name in get_test_models():
+        model_label = '.'.join([model_dir, model_name])
+        try:
+            # if the model was named on the command line, or
+            # no models were named (i.e., run all), import
+            # this model and add it to the list to test.
+            if not tests_to_run or model_name in tests_to_run:
+                if verbosity >= 1:
+                    print "Importing model %s" % model_name
+                mod = load_app(model_label)
+                settings.INSTALLED_APPS.append(model_label)
+                test_models.append(mod)
+        except Exception, e:
+            sys.stderr.write("Error while importing %s:" % model_name + ''.join(traceback.format_exception(*sys.exc_info())[1:]))
+            continue
-                if unexpected or missing:
-                    unexpected_log = '\n'.join(unexpected)
-                    missing_log = '\n'.join(missing)
-                    log_error(model_name,
-                        "Validator found %d validation errors, %d expected" % (count, len(expected) - 1),
-                        "Missing errors:\n%s\n\nUnexpected errors:\n%s" % (missing_log, unexpected_log))
-
-        if run_othertests:
-            # Run the non-model tests in the other tests dir
-            self.output(1, "Running other tests")
-            other_tests_dir = os.path.join(os.path.dirname(__file__), OTHER_TESTS_DIR)
-            test_modules = [f[:-3] for f in os.listdir(other_tests_dir) if f.endswith('.py') and not f.startswith('__init__')]
-            for module in test_modules:
-                self.output(1, "%s module: Importing" % module)
-                try:
-                    mod = __import__("othertests." + module, '', '', [''])
-                except Exception, e:
-                    log_error(module, "Error while importing", ''.join(traceback.format_exception(*sys.exc_info())[1:]))
-                    continue
-                if mod.__doc__:
-                    p = doctest.DocTestParser()
-                    dtest = p.get_doctest(mod.__doc__, mod.__dict__, module, None, None)
-                    runner = DjangoDoctestRunner(verbosity_level=verbosity_level, verbose=False)
-                    self.output(1, "%s module: running tests" % module)
-                    runner.run(dtest, clear_globs=True, out=sys.stdout.write)
-                if hasattr(mod, "run_tests") and callable(mod.run_tests):
-                    self.output(1, "%s module: running tests" % module)
-                    try:
-                        mod.run_tests(verbosity_level)
-                    except Exception, e:
-                        log_error(module, "Exception running tests", ''.join(traceback.format_exception(*sys.exc_info())[1:]))
-                        continue
-
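Both the API tests above and these othertests modules go through the same doctest sequence: parse a string of examples into a DocTest with DocTestParser.get_doctest(), then hand it to a runner. A minimal standalone sketch of that sequence using the stock doctest runner (the sample docstring and its contents are made up for illustration):

import doctest, sys

sample_tests = """
>>> 2 + 2
4
>>> repr(object())
'<object object at 0x...>'
"""

parser = doctest.DocTestParser()
dtest = parser.get_doctest(sample_tests, {}, "sample_tests", None, None)
runner = doctest.DocTestRunner(verbose=False, optionflags=doctest.ELLIPSIS)
runner.run(dtest, clear_globs=True, out=sys.stdout.write)
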
-        # Unless we're using SQLite, remove the test database to clean up after
-        # ourselves. Connect to the previous database (not the test database)
-        # to do so, because it's not allowed to delete a database while being
-        # connected to it.
-        if settings.DATABASE_ENGINE != "sqlite3":
-            connection.close()
-            settings.DATABASE_NAME = old_database_name
-            cursor = connection.cursor()
-            self.output(1, "Deleting test database")
-            self._set_autocommit(connection)
-            time.sleep(1) # To avoid "database is being accessed by other users" errors.
-            cursor.execute("DROP DATABASE %s" % TEST_DATABASE_NAME)
-
-        # Display output.
-        if error_list:
-            for d in error_list:
-                print
-                print d['title']
-                print "=" * len(d['title'])
-                print d['description']
-            print "%s error%s:" % (len(error_list), len(error_list) != 1 and 's' or '')
-        else:
-            print "All tests passed."
-
-    def _set_autocommit(self, connection):
-        """
-        Make sure a connection is in autocommit mode.
-        """
-        if hasattr(connection.connection, "autocommit"):
-            connection.connection.autocommit(True)
-        elif hasattr(connection.connection, "set_isolation_level"):
-            connection.connection.set_isolation_level(0)
-
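For context, the TestRunner class above was presumably driven by command-line handling elsewhere in the script, which is not part of this excerpt; a hypothetical invocation (test names made up) would look like:

runner = TestRunner(verbosity_level=1, which_tests=['somemodel', 'othertests'])
runner.run_tests()
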
+    # Add tests for invalid models
+    extra_tests = []
+    for model_dir, model_name in get_invalid_models():
+        model_label = '.'.join([model_dir, model_name])
+        if not tests_to_run or model_name in tests_to_run:
+            extra_tests.append(InvalidModelTestCase(model_label))
+
+    # Run the test suite, including the extra validation tests.
+    from django.test.simple import run_tests
+    run_tests(test_models, verbosity, extra_tests=extra_tests)
+
+    # Restore the old INSTALLED_APPS setting
+    settings.INSTALLED_APPS = old_installed_apps
+
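The extra_tests hook leans on a plain unittest convention: a TestCase subclass that defines runTest() can be instantiated directly, with no method name argument, and added to any suite. A standalone sketch of that pattern, with a dummy case standing in for InvalidModelTestCase and a made-up label:

import unittest

class DummyValidationTest(unittest.TestCase):       # hypothetical stand-in
    def __init__(self, model_label):
        unittest.TestCase.__init__(self)
        self.model_label = model_label

    def runTest(self):
        self.assertTrue(self.model_label)           # real validation checks would go here

suite = unittest.TestSuite()
suite.addTest(DummyValidationTest("somedir.some_invalid_model"))
unittest.TextTestRunner(verbosity=1).run(suite)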