diff --git a/django/db/backends/postgresql/compiler.py b/django/db/backends/postgresql/compiler.py
new file mode 100644
index 0000000..a562e34
--- /dev/null
+++ b/django/db/backends/postgresql/compiler.py
@@ -0,0 +1,36 @@
+from django.db.models.sql import compiler
+
+
+SQLCompiler = compiler.SQLCompiler
+SQLDeleteCompiler = compiler.SQLDeleteCompiler
+SQLUpdateCompiler = compiler.SQLUpdateCompiler
+SQLAggregateCompiler = compiler.SQLAggregateCompiler
+
+
+class SQLInsertCompiler(compiler.SQLInsertCompiler):
+    def as_sql(self):
+        """
+        Create queries that work like "INSERT INTO .. ON CONFLICT DO NOTHING RETURNING *"
+        but return the same number of rows as in the input, setting NULL on already existing
+        rows.  The cited query does not return anything for rows that were already in the
+        database.  The drawback is that the pg-sequence counter is incremented every time by
+        the number of rows in the input, irrespective of the rows actually inserted.
+        Works only with PostgreSQL >= 9.5.
+        """
+        fields = self.query.fields
+        if fields and self.connection.pg_version >= 90500 and getattr(self.query, 'on_conflict', '') == 'ignore':
+            qn = self.quote_name_unless_alias
+            opts = self.query.get_meta()
+            return [("WITH r AS (SELECT * FROM(VALUES (" + "),(".join(
+                ",".join("%s" for f in fields) for obj in self.query.objs
+            ) + ")) AS g(" + ",".join(qn(field.column) for field in fields) + "))," +
+                " s AS (INSERT INTO " + qn(opts.db_table) + " (" + ", ".join(
+                    qn(field.column) for field in fields) +
+                ") SELECT * FROM r ON CONFLICT DO NOTHING RETURNING *) SELECT s." +
+                qn(opts.pk.column) + " FROM r LEFT JOIN s USING (" + ", ".join(
+                    qn(field.column) for field in fields) + ")",
+                tuple(p for ps in self.assemble_as_sql(fields, [
+                    [self.prepare_value(field, self.pre_save_val(
+                        field, obj)) for field in fields] for obj in self.query.objs
+                ])[1] for p in ps))]
+        return super().as_sql()
diff --git a/django/db/backends/postgresql/operations.py b/django/db/backends/postgresql/operations.py
index 6f48cfa..b698b50 100644
--- a/django/db/backends/postgresql/operations.py
+++ b/django/db/backends/postgresql/operations.py
@@ -7,6 +7,7 @@ from django.db.backends.base.operations import BaseDatabaseOperations
 
 class DatabaseOperations(BaseDatabaseOperations):
     cast_char_field_without_max_length = 'varchar'
+    compiler_module = "django.db.backends.postgresql.compiler"
 
     def unification_cast_sql(self, output_field):
         internal_type = output_field.get_internal_type()
diff --git a/django/db/models/query.py b/django/db/models/query.py
index 71ebf66..7c571a2 100644
--- a/django/db/models/query.py
+++ b/django/db/models/query.py
@@ -1,8 +1,8 @@
 """
 The main QuerySet implementation. This provides the public API for the ORM.
 """
-
 import copy
+import functools
 import operator
 import warnings
 from collections import OrderedDict, namedtuple
@@ -10,11 +10,12 @@ from functools import lru_cache
 
 from django.conf import settings
 from django.core import exceptions
+from django.contrib.postgres.fields import CIText
 from django.db import (
     DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
     transaction,
 )
-from django.db.models import DateField, DateTimeField, sql
+from django.db.models import DateField, DateTimeField, signals, sql
 from django.db.models.constants import LOOKUP_SEP
 from django.db.models.deletion import Collector
 from django.db.models.expressions import F
@@ -417,13 +418,24 @@ class QuerySet:
             if obj.pk is None:
                 obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
 
-    def bulk_create(self, objs, batch_size=None):
+    def bulk_create(self, objs, batch_size=None, on_conflict=None, send_signal=True, all_ids=False):
         """
         Insert each of the instances into the database. Do *not* call
-        save() on each of the instances, do not send any pre/post_save
+        save() on each of the instances, do not send any pre_save
         signals, and do not set the primary key attribute if it is an
         autoincrement field (except if features.can_return_ids_from_bulk_insert=True).
         Multi-table models are not supported.
+
+        With PostgreSQL >= 9.5:
+          * It is possible for objs to contain both instances already in the database and
+            new instances.  However, the underlying PG sequence is incremented unnecessarily
+            for each object that was already in the database.
+          * post_save signals are sent for the new instances if send_signal is set.
+          * If all_ids is True, a second query is sent to the database which retrieves
+            the IDs of those objs which existed prior to calling bulk_create.  The query
+            matches all provided fields of the supplied objs.
+          * If all_ids is a list or a tuple, the fields mentioned in that list are ignored
+            in the latter query when considering objects for equality.
         """
         # When you bulk insert you don't get the primary keys back (if it's an
         # autoincrement, except if can_return_ids_from_bulk_insert=True), so
@@ -445,6 +457,8 @@ class QuerySet:
         for parent in self.model._meta.get_parent_list():
             if parent._meta.concrete_model is not self.model._meta.concrete_model:
                 raise ValueError("Can't bulk create a multi-table inherited model")
+        if on_conflict and on_conflict.lower() != 'ignore':
+            raise ValueError("'%s' is an invalid value for on_conflict. Allowed values: 'ignore'" % on_conflict)
         if not objs:
             return objs
         self._for_write = True
@@ -455,10 +469,10 @@ class QuerySet:
         with transaction.atomic(using=self.db, savepoint=False):
             objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
             if objs_with_pk:
-                self._batched_insert(objs_with_pk, fields, batch_size)
+                self._batched_insert(objs_with_pk, fields, batch_size, on_conflict=on_conflict)
             if objs_without_pk:
                 fields = [f for f in fields if not isinstance(f, AutoField)]
-                ids = self._batched_insert(objs_without_pk, fields, batch_size)
+                ids = self._batched_insert(objs_without_pk, fields, batch_size, on_conflict=on_conflict)
                 if connection.features.can_return_ids_from_bulk_insert:
                     assert len(ids) == len(objs_without_pk)
                 for obj_without_pk, pk in zip(objs_without_pk, ids):
@@ -466,6 +480,28 @@ class QuerySet:
                     obj_without_pk._state.adding = False
                     obj_without_pk._state.db = self.db
 
+        if (send_signal or all_ids) and connection.features.can_return_ids_from_bulk_insert:
+            objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs_without_pk)
+            if send_signal:
+                for obj in objs_with_pk:
+                    signals.post_save.send(sender=obj.__class__, instance=obj, created=True, using=self.db)
+
+            if all_ids and objs_without_pk and getattr(connection, 'pg_version', 0) >= 90500:
+                all_ids = [] if all_ids is True else all_ids
+                # f.attname in obj.__dict__ and f != obj._meta.pk means the field is neither deferred nor the primary key
+                obj0 = objs_without_pk[0]
+                fields = [f.attname for f in obj0._meta.concrete_fields if f.attname
+                          in obj0.__dict__ and f != obj0._meta.pk and f.attname not in all_ids]
+                q = [Q(**{f.attname: getattr(obj, f.attname) for f in obj._meta.concrete_fields if f.attname in
+                          obj.__dict__ and f != obj._meta.pk and f.attname not in all_ids}) for obj in objs_without_pk]
+                if q:
+                    output = self.filter(functools.reduce(Q.__or__, q)).values(*fields, obj0._meta.pk.attname)
+                    for obj in objs_without_pk:
+                        for o in output:
+                            if all((getattr(obj, f).lower() == o[f].lower()) if isinstance(
+                                    obj._meta.get_field(f), CIText) else (getattr(obj, f) == o[f]) for f in fields):
+                                obj.pk = o[obj0._meta.pk.attname]
+                                break
         return objs
 
     def get_or_create(self, defaults=None, **kwargs):
@@ -1108,7 +1144,7 @@ class QuerySet:
     # PRIVATE METHODS #
     ###################
 
-    def _insert(self, objs, fields, return_id=False, raw=False, using=None):
+    def _insert(self, objs, fields, return_id=False, raw=False, using=None, on_conflict=None):
         """
         Insert a new record for the given model. This provides an interface to
         the InsertQuery class and is how Model.save() is implemented.
@@ -1117,12 +1153,14 @@ class QuerySet:
         if using is None:
             using = self.db
         query = sql.InsertQuery(self.model)
+        if on_conflict:
+            query.on_conflict = on_conflict.lower()
         query.insert_values(fields, objs, raw=raw)
         return query.get_compiler(using=using).execute_sql(return_id)
     _insert.alters_data = True
     _insert.queryset_only = False
 
-    def _batched_insert(self, objs, fields, batch_size):
+    def _batched_insert(self, objs, fields, batch_size, on_conflict=None):
         """
         Helper method for bulk_create() to insert objs one batch at a time.
         """
@@ -1131,7 +1169,7 @@ class QuerySet:
         inserted_ids = []
         for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
             if connections[self.db].features.can_return_ids_from_bulk_insert:
-                inserted_id = self._insert(item, fields=fields, using=self.db, return_id=True)
+                inserted_id = self._insert(item, fields=fields, using=self.db, return_id=True, on_conflict=on_conflict)
                 if isinstance(inserted_id, list):
                     inserted_ids.extend(inserted_id)
                 else:
diff --git a/docs/ref/models/querysets.txt b/docs/ref/models/querysets.txt
index f2abf1c..08167d9 100644
--- a/docs/ref/models/querysets.txt
+++ b/docs/ref/models/querysets.txt
@@ -1995,7 +1995,7 @@ exists in the database, an :exc:`~django.db.IntegrityError` is raised.
 ``bulk_create()``
 ~~~~~~~~~~~~~~~~~
 
-.. method:: bulk_create(objs, batch_size=None)
+.. method:: bulk_create(objs, batch_size=None, on_conflict=None, send_signal=True, all_ids=False)
 
 This method inserts the provided list of objects into the database in an
 efficient manner (generally only 1 query, no matter how many objects there
@@ -2009,7 +2009,8 @@ are)::
 This has a number of caveats though:
 
 * The model's ``save()`` method will not be called, and the ``pre_save`` and
-  ``post_save`` signals will not be sent.
+  ``post_save`` signals will not be sent by default.
+* If PostgreSQL is used and ``send_signal`` is True, the ``post_save`` signal will be sent.
 * It does not work with child models in a multi-table inheritance scenario.
 * If the model's primary key is an :class:`~django.db.models.AutoField` it
   does not retrieve and set the primary key attribute, as ``save()`` does,
@@ -2035,6 +2036,37 @@ The ``batch_size`` parameter controls how many objects are created in a single
 query. The default is to create all objects in one batch, except for SQLite
 where the default is such that at most 999 variables per query are used.
 
+If PostgreSQL >= 9.5 and ``on_conflict='ignore'`` are used, contrary to the above statements:
+
+* It is possible for objs to contain both instances that were already in the database
+  prior to the call and new instances.  The underlying PostgreSQL sequence
+  is incremented for each object that was already in the database.
+* If ``all_ids`` is True, a second query is sent to the database which retrieves the IDs
+  of those objs that existed prior to calling ``bulk_create()``.  The query matches all
+  provided fields of the supplied objs.
+* If ``all_ids`` is a non-empty list, all fields mentioned in that list are
+  ignored in the latter query, when considering objects for equality::
+
+    >>> from django.db import models
+
+    >>> class T(models.Model):
+    ...    d = models.DateTimeField(default=django.utils.timezone.now)
+    ...    n = models.IntegerField(unique=True)
+
+    >>> T.objects.bulk_create([T(n=1), T(n=1)], on_conflict='ignore', all_ids=True)
+    # Now the database contains one object with n=1 and a timestamp when the first
+    # constructor was called.  The returned list has two objects, and the second object
+    # has no pk set.  The cause is that the second T(n=1) has d with a timestamp that
+    # is different from the timestamp of the first T(n=1), and querying the database
+    # for the second T-object returned no results.  Even if the second object is not
+    # inserted into the database, but only the first one, the corresponding PostgreSQL
+    # sequence is increased by two.
+
+    >>> T.objects.bulk_create([T(n=1), T(n=1)], on_conflict='ignore', all_ids=['d'])
+    # Now the database will check if there is an object with n=1 and ignore the d field.
+    # The pk field of each element in the list will be set. The PostgreSQL sequence
+    # is increased by two.
+
 ``count()``
 ~~~~~~~~~~~
 
