diff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py
index 6c516e2..4177405 100644
--- a/django/db/models/sql/compiler.py
+++ b/django/db/models/sql/compiler.py
class SQLCompiler(object):
100 | 100 | result.append('WHERE %s' % where) |
101 | 101 | params.extend(w_params) |
102 | 102 | |
103 | | grouping, gb_params = self.get_grouping() |
| 103 | grouping, gb_params = self.get_grouping(ordering_group_by) |
104 | 104 | if grouping: |
105 | 105 | if distinct_fields: |
106 | 106 | raise NotImplementedError( |
107 | 107 | "annotate() + distinct(fields) not implemented.") |
108 | | if ordering: |
109 | | # If the backend can't group by PK (i.e., any database |
110 | | # other than MySQL), then any fields mentioned in the |
111 | | # ordering clause needs to be in the group by clause. |
112 | | if not self.connection.features.allows_group_by_pk: |
113 | | for col, col_params in ordering_group_by: |
114 | | if col not in grouping: |
115 | | grouping.append(str(col)) |
116 | | gb_params.extend(col_params) |
117 | | else: |
| 108 | if not ordering: |
118 | 109 | ordering = self.connection.ops.force_no_ordering() |
119 | 110 | result.append('GROUP BY %s' % ', '.join(grouping)) |
120 | 111 | params.extend(gb_params) |
… |
… |
class SQLCompiler(object):
378 | 369 | else: |
379 | 370 | order = asc |
380 | 371 | result.append('%s %s' % (field, order)) |
381 | | group_by.append((field, [])) |
| 372 | group_by.append((str(field), [])) |
382 | 373 | continue |
383 | 374 | col, order = get_order_dir(field, asc) |
384 | 375 | if col in self.query.aggregate_select: |
… |
… |
class SQLCompiler(object):
536 | 527 | first = False |
537 | 528 | return result, [] |
538 | 529 | |
539 | | def get_grouping(self): |
| 530 | def get_grouping(self, ordering_group_by): |
540 | 531 | """ |
541 | 532 | Returns a tuple representing the SQL elements in the "group by" clause. |
542 | 533 | """ |
543 | 534 | qn = self.quote_name_unless_alias |
544 | 535 | result, params = [], [] |
545 | 536 | if self.query.group_by is not None: |
546 | | if (len(self.query.model._meta.fields) == len(self.query.select) and |
547 | | self.connection.features.allows_group_by_pk): |
| 537 | select_cols = self.query.select + self.query.related_select_cols |
| 538 | if (len(self.query.model._meta.fields) == len(self.query.select) |
| 539 | and self.connection.features.allows_group_by_pk): |
548 | 540 | self.query.group_by = [ |
549 | | (self.query.model._meta.db_table, self.query.model._meta.pk.column) |
| 541 | (self.query.model._meta.db_table, |
| 542 | self.query.model._meta.pk.column) |
550 | 543 | ] |
551 | | |
552 | | group_by = self.query.group_by or [] |
553 | | |
554 | | extra_selects = [] |
555 | | for extra_select, extra_params in self.query.extra_select.itervalues(): |
556 | | extra_selects.append(extra_select) |
557 | | params.extend(extra_params) |
558 | | cols = (group_by + self.query.select + |
559 | | self.query.related_select_cols + extra_selects) |
| 544 | select_cols = [] |
560 | 545 | seen = set() |
| 546 | cols = self.query.group_by + select_cols |
561 | 547 | for col in cols: |
562 | | if col in seen: |
563 | | continue |
564 | | seen.add(col) |
565 | 548 | if isinstance(col, (list, tuple)): |
566 | | result.append('%s.%s' % (qn(col[0]), qn(col[1]))) |
| 549 | sql = '%s.%s' % (qn(col[0]), qn(col[1])) |
567 | 550 | elif hasattr(col, 'as_sql'): |
568 | | result.append(col.as_sql(qn, self.connection)) |
| 551 | sql = col.as_sql(qn, self.connection) |
569 | 552 | else: |
570 | | result.append('(%s)' % str(col)) |
| 553 | # Not needed any more, but kept for cases where users pass |
| 554 | # strings directly into query.group_by. (refs #17998). |
| 555 | sql = '(%s)' % str(col) |
| 556 | if sql not in seen: |
| 557 | result.append(sql) |
| 558 | seen.add(sql) |
| 559 | # Still, we need to add all stuff in ordering (except if the backend can |
| 560 | # group by just by PK). |
| 561 | if ordering_group_by and not self.connection.features.allows_group_by_pk: |
| 562 | for order, order_params in ordering_group_by: |
| 563 | # Even if we have seen the same SQL string, it might have different params, |
| 564 | # so, we add same SQL in "has params" case. |
| 565 | if order not in seen or params: |
| 566 | result.append(order) |
| 567 | params.extend(order_params) |
| 568 | seen.add(order) |
| 569 | |
| 570 | # Unconditionally add the extra_select items. |
| 571 | for extra_select, extra_params in self.query.extra_select.itervalues(): |
| 572 | sql = '(%s)' % str(extra_select) |
| 573 | result.append(sql) |
| 574 | params.extend(extra_params) |
| 575 | |
571 | 576 | return result, params |
572 | 577 | |
573 | 578 | def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1, |
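
Note: the reworked get_grouping() above deduplicates GROUP BY expressions by their rendered SQL, and only folds the ordering columns in when the backend cannot group by the primary key alone. The following is a minimal standalone sketch of that idea, not the Django code itself; the function name and the simplified parameter handling are illustrative only:

    def build_group_by(select_cols, ordering_group_by, extra_select, allows_group_by_pk):
        """Simplified model of SQLCompiler.get_grouping() after this patch."""
        result, params = [], []
        seen = set()
        for sql in select_cols:                  # already-rendered SQL fragments
            if sql not in seen:
                result.append(sql)
                seen.add(sql)
        # Ordering columns must also be grouped on, unless the backend is
        # allowed to group by the primary key alone (MySQL).
        if ordering_group_by and not allows_group_by_pk:
            for sql, order_params in ordering_group_by:
                # The same SQL string may carry different params, so re-add it then.
                if sql not in seen or order_params:
                    result.append(sql)
                    params.extend(order_params)
                    seen.add(sql)
        # extra(select=...) expressions are always added, with their params.
        for sql, extra_params in extra_select:
            result.append('(%s)' % sql)
            params.extend(extra_params)
        return result, params

    # Duplicate columns collapse to a single GROUP BY entry:
    # build_group_by(['"author"."id"', '"author"."id"'], [], [], True)
    # -> (['"author"."id"'], [])
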
diff --git a/django/db/models/sql/query.py b/django/db/models/sql/query.py
index ce11716..63f3da7 100644
--- a/django/db/models/sql/query.py
+++ b/django/db/models/sql/query.py
class Query(object):
105 | 105 | self.table_map = {} # Maps table names to list of aliases. |
106 | 106 | self.join_map = {} |
107 | 107 | self.rev_join_map = {} # Reverse of join_map. |
108 | | self.quote_cache = {} |
109 | 108 | self.default_cols = True |
110 | 109 | self.default_ordering = True |
111 | 110 | self.standard_ordering = True |
… |
… |
class Query(object):
245 | 244 | obj.table_map = self.table_map.copy() |
246 | 245 | obj.join_map = self.join_map.copy() |
247 | 246 | obj.rev_join_map = self.rev_join_map.copy() |
248 | | obj.quote_cache = {} |
249 | 247 | obj.default_cols = self.default_cols |
250 | 248 | obj.default_ordering = self.default_ordering |
251 | 249 | obj.standard_ordering = self.standard_ordering |
diff --git a/tests/regressiontests/aggregation_regress/tests.py b/tests/regressiontests/aggregation_regress/tests.py
index 36a54c0..006bdd3 100644
--- a/tests/regressiontests/aggregation_regress/tests.py
+++ b/tests/regressiontests/aggregation_regress/tests.py
class AggregationTests(TestCase):
469 | 469 | # Regression for #15709 - Ensure each group_by field only exists once |
470 | 470 | # per query |
471 | 471 | qs = Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by() |
472 | | grouping, gb_params = qs.query.get_compiler(qs.db).get_grouping() |
| 472 | grouping, gb_params = qs.query.get_compiler(qs.db).get_grouping([]) |
473 | 473 | self.assertEqual(len(grouping), 1) |
474 | 474 | |
475 | 475 | def test_duplicate_alias(self): |
… |
… |
class AggregationTests(TestCase):
865 | 865 | ['Peter Norvig'], |
866 | 866 | lambda b: b.name |
867 | 867 | ) |
| 868 | |
| 869 | @skipUnlessDBFeature("allows_group_by_pk") |
| 870 | def test_aggregate_duplicate_columns(self): |
| 871 | # Regression test for #17144 |
| 872 | |
| 873 | results = Author.objects.annotate(num_contacts=Count('book_contact_set')) |
| 874 | |
| 875 | # There should only be one GROUP BY clause, for the `id` column. |
| 876 | # `name` and `age` should not be grouped on. |
| 877 | grouping, gb_params = results.query.get_compiler(using='default').get_grouping([]) |
| 878 | self.assertEqual(len(grouping), 1) |
| 879 | assert 'id' in grouping[0] |
| 880 | assert 'name' not in grouping[0] |
| 881 | assert 'age' not in grouping[0] |
| 882 | |
| 883 | # The query group_by property should also only show the `id`. |
| 884 | self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')]) |
| 885 | |
| 886 | # Ensure that we get correct results. |
| 887 | self.assertEqual( |
| 888 | [(a.name, a.num_contacts) for a in results.order_by('name')], |
| 889 | [ |
| 890 | ('Adrian Holovaty', 1), |
| 891 | ('Brad Dayley', 1), |
| 892 | ('Jacob Kaplan-Moss', 0), |
| 893 | ('James Bennett', 1), |
| 894 | ('Jeffrey Forcier', 1), |
| 895 | ('Paul Bissex', 0), |
| 896 | ('Peter Norvig', 2), |
| 897 | ('Stuart Russell', 0), |
| 898 | ('Wesley J. Chun', 0), |
| 899 | ] |
| 900 | ) |
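
For reference, the fix can also be observed at the ORM level with the models from the aggregation_regress test app above. This is only an illustrative sketch (it assumes a configured Django settings module and the test app on the import path):

    from django.db.models import Count
    from regressiontests.aggregation_regress.models import Author

    qs = Author.objects.annotate(num_contacts=Count('book_contact_set'))
    # On a backend with allows_group_by_pk (MySQL), the compiler now emits a
    # GROUP BY on the primary key only, e.g.
    #   GROUP BY `aggregation_regress_author`.`id`
    # instead of repeating the id, name and age columns.
    print(qs.query)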