diff --git a/django/test/simple.py b/django/test/simple.py
a
|
b
|
|
73 | 73 | return suite |
74 | 74 | |
75 | 75 | def build_test(label): |
76 | | """Construct a test case a test with the specified label. Label should |
77 | | be of the form model.TestClass or model.TestClass.test_method. Returns |
| 76 | """Construct a test case with the specified label. Label should |
| 77 | be of the form app.TestClass or app.TestClass.test_method. Returns |
78 | 78 | an instantiated test or test suite corresponding to the label provided. |
79 | 79 | |
80 | 80 | """ |
… |
… |
|
99 | 99 | else: # label is app.TestClass.test_method |
100 | 100 | return TestClass(parts[2]) |
101 | 101 | |
102 | | def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]): |
| 102 | def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=None, exclude_labels=None): |
103 | 103 | """ |
104 | 104 | Run the unit tests for all the test labels in the provided list. |
105 | 105 | Labels must be of the form: |
… |
… |
|
115 | 115 | |
116 | 116 | A list of 'extra' tests may also be provided; these tests |
117 | 117 | will be added to the test suite. |
118 | | |
| 118 | |
| 119 | It's also possible to specify a list of labels to exclude from the |
| 120 | test suite by using the exclude_labels parameter. |
| 121 | |
119 | 122 | Returns the number of tests that failed. |
120 | 123 | """ |
121 | 124 | setup_test_environment() |
122 | | |
123 | | settings.DEBUG = False |
| 125 | |
| 126 | if extra_tests is None: |
| 127 | extra_tests = [] |
| 128 | if exclude_labels is None: |
| 129 | exclude_labels = [] |
| 130 | |
| 131 | settings.DEBUG = False |
124 | 132 | suite = unittest.TestSuite() |
125 | 133 | |
126 | 134 | if test_labels: |
127 | 135 | for label in test_labels: |
| 136 | if label in exclude_labels: |
| 137 | if verbosity >= 1: |
| 138 | print 'Skipping test %s' % label |
| 139 | continue |
128 | 140 | if '.' in label: |
129 | 141 | suite.addTest(build_test(label)) |
130 | 142 | else: |
diff --git a/tests/runtests.py b/tests/runtests.py
a
|
b
|
|
32 | 32 | 'django.contrib.admin', |
33 | 33 | ] |
34 | 34 | |
35 | | def get_test_models(): |
36 | | models = [] |
| 35 | def get_test_modules(): |
| 36 | modules = [] |
37 | 37 | for loc, dirpath in (MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR), (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR), (CONTRIB_DIR_NAME, CONTRIB_DIR): |
38 | 38 | for f in os.listdir(dirpath): |
39 | 39 | if f.startswith('__init__') or f.startswith('.') or f.startswith('sql') or f.startswith('invalid'): |
40 | 40 | continue |
41 | | models.append((loc, f)) |
42 | | return models |
| 41 | modules.append((loc, f)) |
| 42 | return modules |
43 | 43 | |
44 | | def get_invalid_models(): |
45 | | models = [] |
| 44 | def get_invalid_tests_modules(): |
| 45 | modules = [] |
46 | 46 | for loc, dirpath in (MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR), (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR), (CONTRIB_DIR_NAME, CONTRIB_DIR): |
47 | 47 | for f in os.listdir(dirpath): |
48 | 48 | if f.startswith('__init__') or f.startswith('.') or f.startswith('sql'): |
49 | 49 | continue |
50 | 50 | if f.startswith('invalid'): |
51 | | models.append((loc, f)) |
52 | | return models |
| 51 | modules.append((loc, f)) |
| 52 | return modules |
53 | 53 | |
54 | 54 | class InvalidModelTestCase(unittest.TestCase): |
55 | 55 | def __init__(self, model_label): |
… |
… |
|
85 | 85 | self.assert_(not unexpected, "Unexpected Errors: " + '\n'.join(unexpected)) |
86 | 86 | self.assert_(not missing, "Missing Errors: " + '\n'.join(missing)) |
87 | 87 | |
88 | | def django_tests(verbosity, interactive, test_labels): |
| 88 | def django_tests(verbosity, interactive, test_labels, exclude_labels=None): |
89 | 89 | from django.conf import settings |
| 90 | |
| 91 | if exclude_labels is None: |
| 92 | exclude_labels = [] |
90 | 93 | |
91 | 94 | old_installed_apps = settings.INSTALLED_APPS |
92 | 95 | old_test_database_name = settings.TEST_DATABASE_NAME |
… |
… |
|
118 | 121 | get_apps() |
119 | 122 | |
120 | 123 | # Load all the test model apps. |
121 | | for model_dir, model_name in get_test_models(): |
122 | | model_label = '.'.join([model_dir, model_name]) |
| 124 | test_labels_set = set([label.split('.')[0] for label in test_labels if label not in exclude_labels]) |
| 125 | for module_dir, module_name in get_test_modules(): |
| 126 | module_label = '.'.join([module_dir, module_name]) |
123 | 127 | try: |
124 | | # if the model was named on the command line, or |
125 | | # no models were named (i.e., run all), import |
126 | | # this model and add it to the list to test. |
127 | | if not test_labels or model_name in set([label.split('.')[0] for label in test_labels]): |
| 128 | # if the module was named on the command line, or |
| 129 | # no modules were named (i.e., run all), import |
| 130 | # this module and add it to the list to test. |
| 131 | if not test_labels or module_name in test_labels_set: |
| 132 | if module_name in exclude_labels: |
| 133 | if verbosity >= 1: |
| 134 | print "Skipping app %s" % module_name |
| 135 | continue |
128 | 136 | if verbosity >= 1: |
129 | | print "Importing model %s" % model_name |
130 | | mod = load_app(model_label) |
| 137 | print "Importing app %s" % module_name |
| 138 | mod = load_app(module_label) |
131 | 139 | if mod: |
132 | | if model_label not in settings.INSTALLED_APPS: |
133 | | settings.INSTALLED_APPS.append(model_label) |
| 140 | if module_label not in settings.INSTALLED_APPS: |
| 141 | settings.INSTALLED_APPS.append(module_label) |
134 | 142 | except Exception, e: |
135 | | sys.stderr.write("Error while importing %s:" % model_name + ''.join(traceback.format_exception(*sys.exc_info())[1:])) |
| 143 | sys.stderr.write("Error while importing %s:" % module_name + ''.join(traceback.format_exception(*sys.exc_info())[1:])) |
136 | 144 | continue |
137 | 145 | |
138 | | # Add tests for invalid models. |
| 146 | # Add tests for invalid model apps. |
139 | 147 | extra_tests = [] |
140 | | for model_dir, model_name in get_invalid_models(): |
141 | | model_label = '.'.join([model_dir, model_name]) |
142 | | if not test_labels or model_name in test_labels: |
143 | | extra_tests.append(InvalidModelTestCase(model_label)) |
| 148 | for module_dir, module_name in get_invalid_tests_modules(): |
| 149 | module_label = '.'.join([module_dir, module_name]) |
| 150 | if not test_labels or module_name in test_labels: |
| 151 | extra_tests.append(InvalidModelTestCase(module_label)) |
144 | 152 | try: |
145 | | # Invalid models are not working apps, so we cannot pass them into |
146 | | # the test runner with the other test_labels |
147 | | test_labels.remove(model_name) |
| 153 | # Invalid model apps are not working apps, so we cannot pass |
| 154 | # them into the test runner with the other test_labels |
| 155 | test_labels.remove(module_name) |
148 | 156 | except ValueError: |
149 | 157 | pass |
150 | 158 | |
151 | | # Run the test suite, including the extra validation tests. |
| 159 | # Run the test suite, including the extra validation tests and skipping |
| 160 | # the tests explicitly excluded. |
152 | 161 | from django.test.simple import run_tests |
153 | | failures = run_tests(test_labels, verbosity=verbosity, interactive=interactive, extra_tests=extra_tests) |
| 162 | failures = run_tests(test_labels, verbosity=verbosity, interactive=interactive, |
| 163 | extra_tests=extra_tests, exclude_labels=exclude_labels) |
154 | 164 | if failures: |
155 | 165 | sys.exit(failures) |
156 | 166 | |
… |
… |
|
165 | 175 | |
166 | 176 | if __name__ == "__main__": |
167 | 177 | from optparse import OptionParser |
168 | | usage = "%prog [options] [model model model ...]" |
| 178 | usage = "%prog [options] [module module module ...]" |
169 | 179 | parser = OptionParser(usage=usage) |
170 | 180 | parser.add_option('-v','--verbosity', action='store', dest='verbosity', default='0', |
171 | 181 | type='choice', choices=['0', '1', '2'], |
… |
… |
|
174 | 184 | help='Tells Django to NOT prompt the user for input of any kind.') |
175 | 185 | parser.add_option('--settings', |
176 | 186 | help='Python path to settings module, e.g. "myproject.settings". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.') |
| 187 | parser.add_option('-e', '--exclude', action='append', dest='exclude', default=[], |
| 188 | help='Test to exclude (use multiple times to exclude multiple tests).') |
177 | 189 | options, args = parser.parse_args() |
178 | 190 | if options.settings: |
179 | 191 | os.environ['DJANGO_SETTINGS_MODULE'] = options.settings |
180 | 192 | elif "DJANGO_SETTINGS_MODULE" not in os.environ: |
181 | 193 | parser.error("DJANGO_SETTINGS_MODULE is not set in the environment. " |
182 | 194 | "Set it or use --settings.") |
183 | | django_tests(int(options.verbosity), options.interactive, args) |
| 195 | django_tests(int(options.verbosity), options.interactive, args, options.exclude) |