diff -x .svn -rN trunk/AUTHORS schema-evolution/AUTHORS 47a48 > Derek Anderson diff -x .svn -rN trunk/django/core/management.py schema-evolution/django/core/management.py 7a8 > from django.conf import settings 172c173 < if f.unique and (not f.primary_key or backend.allows_unique_and_pk): --- > if (f.unique and (not f.primary_key or backend.allows_unique_and_pk)) or (f.primary_key and backend.pk_requires_unique): 482a484,623 > > def get_sql_fingerprint(app): > "Returns the fingerprint of the current schema, used in schema evolution." > from django.db import get_creation_module, models, backend, get_introspection_module, connection > # This should work even if a connecton isn't available > try: > cursor = connection.cursor() > except: > cursor = None > introspection = get_introspection_module() > app_name = app.__name__.split('.')[-2] > schema_fingerprint = introspection.get_schema_fingerprint(cursor, app) > try: > # is this a schema we recognize? > app_se = __import__(app_name +'.schema_evolution').schema_evolution > schema_recognized = schema_fingerprint in app_se.fingerprints > if schema_recognized: > sys.stderr.write(style.NOTICE("Notice: Current schema fingerprint for '%s' is '%s' (recognized)\n" % (app_name, schema_fingerprint))) > else: > sys.stderr.write(style.NOTICE("Notice: Current schema fingerprint for '%s' is '%s' (unrecognized)\n" % (app_name, schema_fingerprint))) > except: > sys.stderr.write(style.NOTICE("Notice: Current schema fingerprint for '%s' is '%s' (no schema_evolution module found)\n" % (app_name, schema_fingerprint))) > return > get_sql_fingerprint.help_doc = "Returns the fingerprint of the current schema, used in schema evolution." > get_sql_fingerprint.args = APP_ARGS > > def get_sql_evolution(app): > "Returns SQL to update an existing schema to match the existing models." > return get_sql_evolution_detailed(app)[2] > > def get_sql_evolution_detailed(app): > "Returns SQL to update an existing schema to match the existing models." 
> import schema_evolution > from django.db import get_creation_module, models, backend, get_introspection_module, connection > data_types = get_creation_module().DATA_TYPES > > if not data_types: > # This must be the "dummy" database backend, which means the user > # hasn't set DATABASE_ENGINE. > sys.stderr.write(style.ERROR("Error: Django doesn't know which syntax to use for your SQL statements,\n" + > "because you haven't specified the DATABASE_ENGINE setting.\n" + > "Edit your settings file and change DATABASE_ENGINE to something like 'postgresql' or 'mysql'.\n")) > sys.exit(1) > > try: > backend.get_add_column_sql > except: > # This must be an unsupported database backend > sys.stderr.write(style.ERROR("Error: Django doesn't know which syntax to use for your SQL statements, " + > "because schema evolution support isn't built into your database backend yet. Sorry!\n")) > sys.exit(1) > > # First, try validating the models. > _check_for_validation_errors() > > # This should work even if a connecton isn't available > try: > cursor = connection.cursor() > except: > cursor = None > > introspection = get_introspection_module() > app_name = app.__name__.split('.')[-2] > > final_output = [] > > schema_fingerprint = introspection.get_schema_fingerprint(cursor, app) > try: > # is this a schema we recognize? 
> app_se = __import__(app_name +'.schema_evolution').schema_evolution > schema_recognized = schema_fingerprint in app_se.fingerprints > if schema_recognized: > sys.stderr.write(style.NOTICE("Notice: Current schema fingerprint for '%s' is '%s' (recognized)\n" % (app_name, schema_fingerprint))) > available_upgrades = [] > for (vfrom, vto), upgrade in app_se.evolutions.iteritems(): > if vfrom == schema_fingerprint: > try: > distance = app_se.fingerprints.index(vto)-app_se.fingerprints.index(vfrom) > available_upgrades.append( ( vfrom, vto, upgrade, distance ) ) > sys.stderr.write(style.NOTICE("\tan upgrade from %s to %s is available (distance: %i)\n" % ( vfrom, vto, distance ))) > except: > available_upgrades.append( ( vfrom, vto, upgrade, -1 ) ) > sys.stderr.write(style.NOTICE("\tan upgrade from %s to %s is available, but %s is not in schema_evolution.fingerprints\n" % ( vfrom, vto, vto ))) > if len(available_upgrades): > best_upgrade = available_upgrades[0] > for an_upgrade in available_upgrades: > if an_upgrade[3] > best_upgrade[3]: > best_upgrade = an_upgrade > final_output.extend( best_upgrade[2] ) > return schema_fingerprint, False, final_output > else: > sys.stderr.write(style.NOTICE("Notice: Current schema fingerprint for '%s' is '%s' (unrecognized)\n" % (app_name, schema_fingerprint))) > except: > # sys.stderr.write(style.NOTICE("Notice: Current schema fingerprint for '%s' is '%s' (no schema_evolution module found)\n" % (app_name, schema_fingerprint))) > pass # ^^^ lets not be chatty > > # stolen and trimmed from syncdb so that we know which models are about > # to be created (so we don't check them for updates) > table_list = _get_table_list() > seen_models = _get_installed_models(table_list) > created_models = set() > pending_references = {} > > model_list = models.get_models(app) > for model in model_list: > # Create the model's database table, if it doesn't already exist. 
> if model._meta.db_table in table_list or model._meta.aka in table_list or len(set(model._meta.aka) & set(table_list))>0: > continue > sql, references = _get_sql_model_create(model, seen_models) > seen_models.add(model) > created_models.add(model) > table_list.append(model._meta.db_table) > > # get the existing models, minus the models we've just created > app_models = models.get_models(app) > for model in created_models: > if model in app_models: > app_models.remove(model) > > for klass in app_models: > > output, new_table_name = schema_evolution.get_sql_evolution_check_for_changed_model_name(klass) > final_output.extend(output) > > output = schema_evolution.get_sql_evolution_check_for_changed_field_flags(klass, new_table_name) > final_output.extend(output) > > output = schema_evolution.get_sql_evolution_check_for_changed_field_name(klass, new_table_name) > final_output.extend(output) > > output = schema_evolution.get_sql_evolution_check_for_new_fields(klass, new_table_name) > final_output.extend(output) > > output = schema_evolution.get_sql_evolution_check_for_dead_fields(klass, new_table_name) > final_output.extend(output) > > return schema_fingerprint, True, final_output > > get_sql_evolution.help_doc = "Returns SQL to update an existing schema to match the existing models." 
> get_sql_evolution.args = APP_ARGS 543c684 < if table_name_converter(model._meta.db_table) in table_list: --- > if table_name_converter(model._meta.db_table) in table_list or table_name_converter(model._meta.aka) in table_list or len(set(model._meta.aka) & set(table_list))>0: 570a712,724 > # keep evolving until there is nothing left to do > schema_fingerprint, introspected_upgrade, evolution = get_sql_evolution_detailed(app) > last_schema_fingerprint = None > while evolution and schema_fingerprint!=last_schema_fingerprint: > for sql in evolution: > if introspected_upgrade: > print sql > else: > cursor.execute(sql) > last_schema_fingerprint = schema_fingerprint > if not introspected_upgrade: # only do one round of introspection generated upgrades > schema_fingerprint, introspected_upgrade, evolution = get_sql_evolution_detailed(app) > 1524a1679,1680 > 'sqlevolve': get_sql_evolution, > 'sqlfingerprint': get_sql_fingerprint, diff -x .svn -rN trunk/django/core/schema_evolution.py schema-evolution/django/core/schema_evolution.py 0a1,153 > import django > from django.core.exceptions import ImproperlyConfigured > from optparse import OptionParser > from django.utils import termcolors > from django.conf import settings > import os, re, shutil, sys, textwrap > import management > > > def get_sql_evolution_check_for_new_fields(klass, new_table_name): > "checks for model fields that are not in the existing data structure" > from django.db import backend, get_creation_module, models, get_introspection_module, connection > data_types = get_creation_module().DATA_TYPES > cursor = connection.cursor() > introspection = get_introspection_module() > opts = klass._meta > output = [] > db_table = klass._meta.db_table > if new_table_name: > db_table = new_table_name > for f in opts.fields: > existing_fields = introspection.get_columns(cursor,db_table) > if f.column not in existing_fields and (not f.aka or f.aka not in existing_fields and len(set(f.aka) & set(existing_fields))==0): > 
rel_field = f > data_type = f.get_internal_type() > col_type = data_types.get(data_type) > if col_type is not None: > output.extend( backend.get_add_column_sql( klass._meta.db_table, f.column, management.style.SQL_COLTYPE(col_type % rel_field.__dict__), f.null, f.unique, f.primary_key, f.default ) ) > return output > > def get_sql_evolution_check_for_changed_model_name(klass): > from django.db import backend, get_creation_module, models, get_introspection_module, connection > cursor = connection.cursor() > introspection = get_introspection_module() > table_list = introspection.get_table_list(cursor) > if klass._meta.db_table in table_list: > return [], None > if klass._meta.aka in table_list: > return backend.get_change_table_name_sql( klass._meta.db_table, klass._meta.aka), klass._meta.aka > elif len(set(klass._meta.aka) & set(table_list))==1: > return backend.get_change_table_name_sql( klass._meta.db_table, klass._meta.aka[0]), klass._meta.aka[0] > else: > return [], None > > def get_sql_evolution_check_for_changed_field_name(klass, new_table_name): > from django.db import backend, get_creation_module, models, get_introspection_module, connection > data_types = get_creation_module().DATA_TYPES > cursor = connection.cursor() > introspection = get_introspection_module() > opts = klass._meta > output = [] > db_table = klass._meta.db_table > if new_table_name: > db_table = new_table_name > for f in opts.fields: > existing_fields = introspection.get_columns(cursor,db_table) > if f.column not in existing_fields and f.aka and (f.aka in existing_fields or len(set(f.aka) & set(existing_fields)))==1: > old_col = None > if isinstance( f.aka, str ): > old_col = f.aka > else: > old_col = f.aka[0] > rel_field = f > data_type = f.get_internal_type() > col_type = data_types[data_type] > if col_type is not None: > col_def = management.style.SQL_COLTYPE(col_type % rel_field.__dict__) +' '+ management.style.SQL_KEYWORD('%sNULL' % (not f.null and 'NOT ' or '')) > if f.unique: > 
col_def += management.style.SQL_KEYWORD(' UNIQUE') > if f.primary_key: > col_def += management.style.SQL_KEYWORD(' PRIMARY KEY') > output.extend( backend.get_change_column_name_sql( klass._meta.db_table, introspection.get_indexes(cursor,db_table), old_col, f.column, col_def ) ) > return output > > def get_sql_evolution_check_for_changed_field_flags(klass, new_table_name): > from django.db import backend, get_creation_module, models, get_introspection_module, connection > from django.db.models.fields import CharField, SlugField > from django.db.models.fields.related import RelatedField, ForeignKey > data_types = get_creation_module().DATA_TYPES > cursor = connection.cursor() > introspection = get_introspection_module() > opts = klass._meta > output = [] > db_table = klass._meta.db_table > if new_table_name: > db_table = new_table_name > for f in opts.fields: > existing_fields = introspection.get_columns(cursor,db_table) > # print existing_fields > cf = None # current field, ie what it is before any renames > if f.column in existing_fields: > cf = f.column > elif f.aka in existing_fields: > cf = f.aka > elif f.aka and len(set(f.aka) & set(existing_fields))==1: > cf = f.aka[0] > else: > continue # no idea what column you're talking about - should be handled by get_sql_evolution_check_for_new_fields()) > data_type = f.get_internal_type() > if data_types.has_key(data_type): > column_flags = introspection.get_known_column_flags(cursor, db_table, cf) > # print db_table, cf, column_flags > if column_flags['allow_null']!=f.null or \ > ( not f.primary_key and isinstance(f, CharField) and column_flags['maxlength']!=str(f.maxlength) ) or \ > ( not f.primary_key and isinstance(f, SlugField) and column_flags['maxlength']!=str(f.maxlength) ) or \ > ( column_flags['unique']!=f.unique and ( settings.DATABASE_ENGINE!='postgresql' or not f.primary_key ) ) or \ > column_flags['primary_key']!=f.primary_key: > #column_flags['foreign_key']!=f.foreign_key: > # print 'need to change' > # 
print db_table, f.column, column_flags > # print "column_flags['allow_null']!=f.null", column_flags['allow_null']!=f.null > # print "not f.primary_key and isinstance(f, CharField) and column_flags['maxlength']!=str(f.maxlength)", not f.primary_key and isinstance(f, CharField) and column_flags['maxlength']!=str(f.maxlength) > # print "not f.primary_key and isinstance(f, SlugField) and column_flags['maxlength']!=str(f.maxlength)", not f.primary_key and isinstance(f, SlugField) and column_flags['maxlength']!=str(f.maxlength) > # print "column_flags['unique']!=f.unique", column_flags['unique']!=f.unique > # print "column_flags['primary_key']!=f.primary_key", column_flags['primary_key']!=f.primary_key > col_type = data_types[data_type] > col_type_def = management.style.SQL_COLTYPE(col_type % f.__dict__) > # col_def = style.SQL_COLTYPE(col_type % f.__dict__) +' '+ style.SQL_KEYWORD('%sNULL' % (not f.null and 'NOT ' or '')) > # if f.unique: > # col_def += ' '+ style.SQL_KEYWORD('UNIQUE') > # if f.primary_key: > # col_def += ' '+ style.SQL_KEYWORD('PRIMARY KEY') > output.extend( backend.get_change_column_def_sql( klass._meta.db_table, cf, col_type_def, f.null, f.unique, f.primary_key, f.default ) ) > #print db_table, cf, f.maxlength, introspection.get_known_column_flags(cursor, db_table, cf) > return output > > def get_sql_evolution_check_for_dead_fields(klass, new_table_name): > from django.db import backend, get_creation_module, models, get_introspection_module, connection > from django.db.models.fields import CharField, SlugField > from django.db.models.fields.related import RelatedField, ForeignKey > data_types = get_creation_module().DATA_TYPES > cursor = connection.cursor() > introspection = get_introspection_module() > opts = klass._meta > output = [] > db_table = klass._meta.db_table > if new_table_name: > db_table = new_table_name > suspect_fields = set(introspection.get_columns(cursor,db_table)) > # print 'suspect_fields = ', suspect_fields > for f in 
opts.fields: > # print 'f = ', f > # print 'f.aka = ', f.aka > suspect_fields.discard(f.column) > suspect_fields.discard(f.aka) > if f.aka: suspect_fields.difference_update(f.aka) > if len(suspect_fields)>0: > output.append( '-- warning: the following may cause data loss' ) > for suspect_field in suspect_fields: > output.extend( backend.get_drop_column_sql( klass._meta.db_table, suspect_field ) ) > output.append( '-- end warning' ) > return output > diff -x .svn -rN trunk/django/db/backends/ado_mssql/base.py schema-evolution/django/db/backends/ado_mssql/base.py 93a94 > pk_requires_unique = False diff -x .svn -rN trunk/django/db/backends/mysql/base.py schema-evolution/django/db/backends/mysql/base.py 138a139 > pk_requires_unique = False 244a246,295 > def get_change_table_name_sql( table_name, old_table_name ): > return ['ALTER TABLE '+ quote_name(old_table_name) +' RENAME TO '+ quote_name(table_name) + ';'] > > def get_change_column_name_sql( table_name, indexes, old_col_name, new_col_name, col_def ): > # mysql doesn't support column renames (AFAIK), so we fake it > # TODO: only supports a single primary key so far > pk_name = None > for key in indexes.keys(): > if indexes[key]['primary_key']: pk_name = key > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' CHANGE COLUMN '+ quote_name(old_col_name) +' '+ quote_name(new_col_name) +' '+ col_def + ';' ) > return output > > def get_change_column_def_sql( table_name, col_name, col_type, null, unique, primary_key, default ): > output = [] > col_def = col_type +' '+ ('%sNULL' % (not null and 'NOT ' or '')) > if unique: > col_def += ' '+ 'UNIQUE' > if primary_key: > col_def += ' '+ 'PRIMARY KEY' > if default and str(default) != 'django.db.models.fields.NOT_PROVIDED': > col_def += ' '+ 'DEFAULT '+ quote_name(str(default)) > output.append( 'ALTER TABLE '+ quote_name(table_name) +' MODIFY COLUMN '+ quote_name(col_name) +' '+ col_def + ';' ) > return output > > def get_add_column_sql( table_name, col_name, 
col_type, null, unique, primary_key, default ): > output = [] > field_output = [] > field_output.append('ALTER TABLE') > field_output.append(quote_name(table_name)) > field_output.append('ADD COLUMN') > field_output.append(quote_name(col_name)) > field_output.append(col_type) > field_output.append(('%sNULL' % (not null and 'NOT ' or ''))) > if unique: > field_output.append(('UNIQUE')) > if primary_key: > field_output.append(('PRIMARY KEY')) > if default and str(default) != 'django.db.models.fields.NOT_PROVIDED': > field_output.append(('DEFAULT')) > field_output.append((quote_name(str(default)))) > output.append(' '.join(field_output) + ';') > return output > > def get_drop_column_sql( table_name, col_name ): > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' DROP COLUMN '+ quote_name(col_name) + ';' ) > return output > > diff -x .svn -rN trunk/django/db/backends/mysql/introspection.py schema-evolution/django/db/backends/mysql/introspection.py 75a76,142 > def get_columns(cursor, table_name): > try: > cursor.execute("describe %s" % quote_name(table_name)) > return [row[0] for row in cursor.fetchall()] > except: > return [] > > def get_known_column_flags( cursor, table_name, column_name ): > cursor.execute("describe %s" % quote_name(table_name)) > dict = {} > for row in cursor.fetchall(): > if row[0] == column_name: > > # maxlength check goes here > if row[1][0:7]=='varchar': > dict['maxlength'] = row[1][8:len(row[1])-1] > > # default flag check goes here > if row[2]=='YES': dict['allow_null'] = True > else: dict['allow_null'] = False > > # primary/foreign/unique key flag check goes here > if row[3]=='PRI': dict['primary_key'] = True > else: dict['primary_key'] = False > if row[3]=='FOR': dict['foreign_key'] = True > else: dict['foreign_key'] = False > if row[3]=='UNI': dict['unique'] = True > else: dict['unique'] = False > > # default value check goes here > # if row[4]=='NULL': dict['default'] = None > # else: dict['default'] = row[4] > 
dict['default'] = row[4] > > # print table_name, column_name, dict > return dict > > def get_schema_fingerprint(cursor, app): > """it's important that the output of these methods don't change, otherwise the hashes they > produce will be inconsistent (and detection of existing schemas will fail. unless you are > absolutely sure the outout for ALL valid inputs will remain the same, you should bump the version by creating a new method""" > return get_schema_fingerprint_fv1(cursor, app) > > def get_schema_fingerprint_fv1(cursor, app): > from django.db import models > app_name = app.__name__.split('.')[-2] > > schema = ['app_name := '+ app_name] > > cursor.execute('SHOW TABLES;') > for table_name in [row[0] for row in cursor.fetchall()]: > if not table_name.startswith(app_name): > continue # skip tables not in this app > schema.append('table_name := '+ table_name) > cursor.execute("describe %s" % quote_name(table_name)) > for row in cursor.fetchall(): > tmp = [] > for x in row: > tmp.append(str(x)) > schema.append( '\t'.join(tmp) ) > cursor.execute("SHOW INDEX FROM %s" % quote_name(table_name)) > for row in cursor.fetchall(): > schema.append( '\t'.join([ str(row[0]), str(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]), str(row[9]), ]) ) > > return 'fv1:'+ str('\n'.join(schema).__hash__()) > > diff -x .svn -rN trunk/django/db/backends/mysql_old/base.py schema-evolution/django/db/backends/mysql_old/base.py 153a154 > pk_requires_unique = False 259a261,305 > def get_change_table_name_sql( table_name, old_table_name ): > return ['ALTER TABLE '+ quote_name(old_table_name) +' RENAME TO '+ quote_name(table_name) + ';'] > > def get_change_column_name_sql( table_name, indexes, old_col_name, new_col_name, col_def ): > # mysql doesn't support column renames (AFAIK), so we fake it > # TODO: only supports a single primary key so far > pk_name = None > for key in indexes.keys(): > if indexes[key]['primary_key']: pk_name = key > output = [] > output.append( 'ALTER TABLE '+ 
quote_name(table_name) +' CHANGE COLUMN '+ quote_name(old_col_name) +' '+ quote_name(new_col_name) +' '+ col_def + ';' ) > return output > > def get_change_column_def_sql( table_name, col_name, col_type, null, unique, primary_key ): > output = [] > col_def = col_type +' '+ ('%sNULL' % (not null and 'NOT ' or '')) > if unique: > col_def += ' '+ 'UNIQUE' > if primary_key: > col_def += ' '+ 'PRIMARY KEY' > output.append( 'ALTER TABLE '+ quote_name(table_name) +' MODIFY COLUMN '+ quote_name(col_name) +' '+ col_def + ';' ) > return output > > def get_add_column_sql( table_name, col_name, col_type, null, unique, primary_key ): > output = [] > field_output = [] > field_output.append('ALTER TABLE') > field_output.append(quote_name(table_name)) > field_output.append('ADD COLUMN') > field_output.append(quote_name(col_name)) > field_output.append(col_type) > field_output.append(('%sNULL' % (not null and 'NOT ' or ''))) > if unique: > field_output.append(('UNIQUE')) > if primary_key: > field_output.append(('PRIMARY KEY')) > output.append(' '.join(field_output) + ';') > return output > > def get_drop_column_sql( table_name, col_name ): > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' DROP COLUMN '+ quote_name(col_name) + ';' ) > return output > > diff -x .svn -rN trunk/django/db/backends/mysql_old/introspection.py schema-evolution/django/db/backends/mysql_old/introspection.py 75a76,112 > def get_columns(cursor, table_name): > try: > cursor.execute("describe %s" % quote_name(table_name)) > return [row[0] for row in cursor.fetchall()] > except: > return [] > > def get_known_column_flags( cursor, table_name, column_name ): > cursor.execute("describe %s" % quote_name(table_name)) > dict = {} > for row in cursor.fetchall(): > if row[0] == column_name: > > # maxlength check goes here > if row[1][0:7]=='varchar': > dict['maxlength'] = row[1][8:len(row[1])-1] > > # default flag check goes here > if row[2]=='YES': dict['allow_null'] = True > else: 
dict['allow_null'] = False > > # primary/foreign/unique key flag check goes here > if row[3]=='PRI': dict['primary_key'] = True > else: dict['primary_key'] = False > if row[3]=='FOR': dict['foreign_key'] = True > else: dict['foreign_key'] = False > if row[3]=='UNI': dict['unique'] = True > else: dict['unique'] = False > > # default value check goes here > # if row[4]=='NULL': dict['default'] = None > # else: dict['default'] = row[4] > dict['default'] = row[4] > > # print table_name, column_name, dict > return dict > diff -x .svn -rN trunk/django/db/backends/postgresql/base.py schema-evolution/django/db/backends/postgresql/base.py 119a120 > pk_requires_unique = False 284a286,330 > def get_change_table_name_sql( table_name, old_table_name ): > output = [] > output.append('ALTER TABLE '+ quote_name(old_table_name) +' RENAME TO '+ quote_name(table_name) + ';') > return output > > def get_change_column_name_sql( table_name, indexes, old_col_name, new_col_name, col_def ): > # TODO: only supports a single primary key so far > pk_name = None > for key in indexes.keys(): > if indexes[key]['primary_key']: pk_name = key > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' RENAME COLUMN '+ quote_name(old_col_name) +' TO '+ quote_name(new_col_name) +';' ) > return output > > def get_change_column_def_sql( table_name, col_name, col_type, null, unique, primary_key, default ): > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ADD COLUMN '+ quote_name(col_name+'_tmp') +' '+ col_type + ';' ) > output.append( 'UPDATE '+ quote_name(table_name) +' SET '+ quote_name(col_name+'_tmp') +' = '+ quote_name(col_name) + ';' ) > output.append( 'ALTER TABLE '+ quote_name(table_name) +' DROP COLUMN '+ quote_name(col_name) +';' ) > output.append( 'ALTER TABLE '+ quote_name(table_name) +' RENAME COLUMN '+ quote_name(col_name+'_tmp') +' TO '+ quote_name(col_name) + ';' ) > if default and str(default) != 'django.db.models.fields.NOT_PROVIDED': > 
output.append( 'ALTER TABLE '+ quote_name(table_name) +' ALTER COLUMN '+ quote_name(col_name) +' SET DEFAULT '+ quote_name(str(default)) +';' ) > if not null: > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ALTER COLUMN '+ quote_name(col_name) +' SET NOT NULL;' ) > if unique: > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ADD CONSTRAINT '+ table_name +'_'+ col_name +'_unique_constraint UNIQUE('+ col_name +');' ) > > return output > > def get_add_column_sql( table_name, col_name, col_type, null, unique, primary_key, default ): > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ADD COLUMN '+ quote_name(col_name) +' '+ col_type + ';' ) > if default and str(default) != 'django.db.models.fields.NOT_PROVIDED': > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ALTER COLUMN '+ quote_name(col_name) +' SET DEFAULT '+ quote_name(str(default)) +';' ) > if not null: > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ALTER COLUMN '+ quote_name(col_name) +' SET NOT NULL;' ) > if unique: > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ADD CONSTRAINT '+ table_name +'_'+ col_name +'_unique_constraint UNIQUE('+ col_name +');' ) > return output > > def get_drop_column_sql( table_name, col_name ): > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' DROP COLUMN '+ quote_name(col_name) + ';' ) > return output > diff -x .svn -rN trunk/django/db/backends/postgresql/introspection.py schema-evolution/django/db/backends/postgresql/introspection.py 68a69,121 > def get_columns(cursor, table_name): > try: > cursor.execute("SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod), (SELECT substring(d.adsrc for 128) FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef), a.attnotnull, a.attnum, pg_catalog.col_description(a.attrelid, a.attnum) FROM pg_catalog.pg_attribute a WHERE a.attrelid = (SELECT c.oid from pg_catalog.pg_class c where c.relname ~ 
'^%s$') AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum" % table_name) > return [row[0] for row in cursor.fetchall()] > except: > return [] > > def get_known_column_flags( cursor, table_name, column_name ): > # print "SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod), (SELECT substring(d.adsrc for 128) FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef), a.attnotnull, a.attnum, pg_catalog.col_description(a.attrelid, a.attnum) FROM pg_catalog.pg_attribute a WHERE a.attrelid = (SELECT c.oid from pg_catalog.pg_class c where c.relname ~ '^%s$') AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum" % table_name > cursor.execute("SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod), (SELECT substring(d.adsrc for 128) FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef), a.attnotnull, a.attnum, pg_catalog.col_description(a.attrelid, a.attnum) FROM pg_catalog.pg_attribute a WHERE a.attrelid = (SELECT c.oid from pg_catalog.pg_class c where c.relname ~ '^%s$') AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum" % table_name) > dict = {} > dict['primary_key'] = False > dict['foreign_key'] = False > dict['unique'] = False > dict['default'] = '' > dict['allow_null'] = False > > for row in cursor.fetchall(): > if row[0] == column_name: > > # maxlength check goes here > if row[1][0:17]=='character varying': > dict['maxlength'] = row[1][18:len(row[1])-1] > > # null flag check goes here > dict['allow_null'] = not row[3] > > # pk, fk and unique checks go here > # print "select pg_constraint.conname, pg_constraint.contype, pg_attribute.attname from pg_constraint, pg_attribute where pg_constraint.conrelid=pg_attribute.attrelid and pg_attribute.attnum=any(pg_constraint.conkey) and pg_constraint.conname~'^%s'" % table_name > unique_conname = None > shared_unique_connames = set() > cursor.execute("select pg_constraint.conname, 
pg_constraint.contype, pg_attribute.attname from pg_constraint, pg_attribute, pg_class where pg_constraint.conrelid=pg_class.oid and pg_constraint.conrelid=pg_attribute.attrelid and pg_attribute.attnum=any(pg_constraint.conkey) and pg_class.relname='%s'" % table_name ) > for row in cursor.fetchall(): > # print row > if row[2] == column_name: > if row[1]=='p': dict['primary_key'] = True > if row[1]=='f': dict['foreign_key'] = True > if row[1]=='u': unique_conname = row[0] > else: > if row[1]=='u': shared_unique_connames.add( row[0] ) > if unique_conname and unique_conname not in shared_unique_connames: > dict['unique'] = True > > # default value check goes here > cursor.execute("select pg_attribute.attname, adsrc from pg_attrdef, pg_attribute WHERE pg_attrdef.adrelid=pg_attribute.attrelid and pg_attribute.attnum=pg_attrdef.adnum and pg_attrdef.adrelid = (SELECT c.oid from pg_catalog.pg_class c where c.relname ~ '^%s$')" % table_name ) > for row in cursor.fetchall(): > if row[0] == column_name: > if row[1][0:7] == 'nextval': continue > dict['default'] = row[1][1:row[1].index("'",1)] > > # print table_name, column_name, dict > return dict > diff -x .svn -rN trunk/django/db/backends/postgresql_psycopg2/base.py schema-evolution/django/db/backends/postgresql_psycopg2/base.py 81a82 > pk_requires_unique = False 226a228,272 > def get_change_table_name_sql( table_name, old_table_name ): > output = [] > output.append('ALTER TABLE '+ quote_name(old_table_name) +' RENAME TO '+ quote_name(table_name) + ';') > return output > > def get_change_column_name_sql( table_name, indexes, old_col_name, new_col_name, col_def ): > # TODO: only supports a single primary key so far > pk_name = None > for key in indexes.keys(): > if indexes[key]['primary_key']: pk_name = key > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' RENAME COLUMN '+ quote_name(old_col_name) +' TO '+ quote_name(new_col_name) +';' ) > return output > > def get_change_column_def_sql( table_name, 
col_name, col_type, null, unique, primary_key, default ): > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ADD COLUMN '+ quote_name(col_name+'_tmp') +' '+ col_type + ';' ) > output.append( 'UPDATE '+ quote_name(table_name) +' SET '+ quote_name(col_name+'_tmp') +' = '+ quote_name(col_name) + ';' ) > output.append( 'ALTER TABLE '+ quote_name(table_name) +' DROP COLUMN '+ quote_name(col_name) +';' ) > output.append( 'ALTER TABLE '+ quote_name(table_name) +' RENAME COLUMN '+ quote_name(col_name+'_tmp') +' TO '+ quote_name(col_name) + ';' ) > if default and str(default) != 'django.db.models.fields.NOT_PROVIDED': > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ALTER COLUMN '+ quote_name(col_name) +' SET DEFAULT '+ quote_name(str(default)) +';' ) > if not null: > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ALTER COLUMN '+ quote_name(col_name) +' SET NOT NULL;' ) > if unique: > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ADD CONSTRAINT '+ table_name +'_'+ col_name +'_unique_constraint UNIQUE('+ col_name +');' ) > > return output > > def get_add_column_sql( table_name, col_name, col_type, null, unique, primary_key, default ): > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ADD COLUMN '+ quote_name(col_name) +' '+ col_type + ';' ) > if default and str(default) != 'django.db.models.fields.NOT_PROVIDED': > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ALTER COLUMN '+ quote_name(col_name) +' SET DEFAULT '+ quote_name(str(default)) +';' ) > if not null: > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ALTER COLUMN '+ quote_name(col_name) +' SET NOT NULL;' ) > if unique: > output.append( 'ALTER TABLE '+ quote_name(table_name) +' ADD CONSTRAINT '+ table_name +'_'+ col_name +'_unique_constraint UNIQUE('+ col_name +');' ) > return output > > def get_drop_column_sql( table_name, col_name ): > output = [] > output.append( 'ALTER TABLE '+ quote_name(table_name) +' DROP COLUMN '+ 
quote_name(col_name) + ';' ) > return output > diff -x .svn -rN trunk/django/db/backends/postgresql_psycopg2/introspection.py schema-evolution/django/db/backends/postgresql_psycopg2/introspection.py 65a66,118 > def get_columns(cursor, table_name): > try: > cursor.execute("SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod), (SELECT substring(d.adsrc for 128) FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef), a.attnotnull, a.attnum, pg_catalog.col_description(a.attrelid, a.attnum) FROM pg_catalog.pg_attribute a WHERE a.attrelid = (SELECT c.oid from pg_catalog.pg_class c where c.relname ~ '^%s$') AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum" % table_name) > return [row[0] for row in cursor.fetchall()] > except: > return [] > > def get_known_column_flags( cursor, table_name, column_name ): > # print "SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod), (SELECT substring(d.adsrc for 128) FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef), a.attnotnull, a.attnum, pg_catalog.col_description(a.attrelid, a.attnum) FROM pg_catalog.pg_attribute a WHERE a.attrelid = (SELECT c.oid from pg_catalog.pg_class c where c.relname ~ '^%s$') AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum" % table_name > cursor.execute("SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod), (SELECT substring(d.adsrc for 128) FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef), a.attnotnull, a.attnum, pg_catalog.col_description(a.attrelid, a.attnum) FROM pg_catalog.pg_attribute a WHERE a.attrelid = (SELECT c.oid from pg_catalog.pg_class c where c.relname ~ '^%s$') AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum" % table_name) > dict = {} > dict['primary_key'] = False > dict['foreign_key'] = False > dict['unique'] = False > dict['default'] = '' > dict['allow_null'] = False > > for row in 
cursor.fetchall(): > if row[0] == column_name: > > # maxlength check goes here > if row[1][0:17]=='character varying': > dict['maxlength'] = row[1][18:len(row[1])-1] > > # null flag check goes here > dict['allow_null'] = not row[3] > > # pk, fk and unique checks go here > # print "select pg_constraint.conname, pg_constraint.contype, pg_attribute.attname from pg_constraint, pg_attribute where pg_constraint.conrelid=pg_attribute.attrelid and pg_attribute.attnum=any(pg_constraint.conkey) and pg_constraint.conname~'^%s'" % table_name > unique_conname = None > shared_unique_connames = set() > cursor.execute("select pg_constraint.conname, pg_constraint.contype, pg_attribute.attname from pg_constraint, pg_attribute, pg_class where pg_constraint.conrelid=pg_class.oid and pg_constraint.conrelid=pg_attribute.attrelid and pg_attribute.attnum=any(pg_constraint.conkey) and pg_class.relname='%s'" % table_name ) > for row in cursor.fetchall(): > # print row > if row[2] == column_name: > if row[1]=='p': dict['primary_key'] = True > if row[1]=='f': dict['foreign_key'] = True > if row[1]=='u': unique_conname = row[0] > else: > if row[1]=='u': shared_unique_connames.add( row[0] ) > if unique_conname and unique_conname not in shared_unique_connames: > dict['unique'] = True > > # default value check goes here > cursor.execute("select pg_attribute.attname, adsrc from pg_attrdef, pg_attribute WHERE pg_attrdef.adrelid=pg_attribute.attrelid and pg_attribute.attnum=pg_attrdef.adnum and pg_attrdef.adrelid = (SELECT c.oid from pg_catalog.pg_class c where c.relname ~ '^%s$')" % table_name ) > for row in cursor.fetchall(): > if row[0] == column_name: > if row[1][0:7] == 'nextval': continue > dict['default'] = row[1][1:row[1].index("'",1)] > > # print table_name, column_name, dict > return dict > diff -x .svn -rN trunk/django/db/backends/sqlite3/base.py schema-evolution/django/db/backends/sqlite3/base.py 4a5 > from django.core import management 104a106 > pk_requires_unique = True # or else the 
constraint is never created 216a219,308 > def get_change_table_name_sql( table_name, old_table_name ): > return ['ALTER TABLE '+ quote_name(old_table_name) +' RENAME TO '+ quote_name(table_name) + ';'] > > def get_change_column_name_sql( table_name, indexes, old_col_name, new_col_name, col_def ): > # sqlite doesn't support column renames, so we fake it > model = get_model_from_table_name(table_name) > output = [] > output.append( '-- FYI: sqlite does not support renaming columns, so we create a new '+ quote_name(table_name) +' and delete the old (ie, this could take a while)' ) > > tmp_table_name = table_name + '_1337_TMP' # unlikely to produce a namespace conflict > output.extend( get_change_table_name_sql( tmp_table_name, table_name ) ) > output.extend( management._get_sql_model_create(model, set())[0] ) > > old_cols = [] > for f in model._meta.fields: > if f.column != new_col_name: > old_cols.append( quote_name(f.column) ) > else: > old_cols.append( quote_name(old_col_name) ) > > output.append( 'INSERT INTO '+ quote_name(table_name) +' SELECT '+ ','.join(old_cols) +' FROM '+ quote_name(tmp_table_name) +';' ) > output.append( 'DROP TABLE '+ quote_name(tmp_table_name) +';' ) > > return output > > def get_change_column_def_sql( table_name, col_name, col_type, null, unique, primary_key, default ): > # sqlite doesn't support column modifications, so we fake it > > model = get_model_from_table_name(table_name) > if not model: return ['-- model not found'] > output = [] > output.append( '-- FYI: sqlite does not support changing columns, so we create a new '+ quote_name(table_name) +' and delete the old (ie, this could take a while)' ) > > tmp_table_name = table_name + '_1337_TMP' # unlikely to produce a namespace conflict > output.extend( get_change_table_name_sql( tmp_table_name, table_name ) ) > output.extend( management._get_sql_model_create(model, set())[0] ) > > old_cols = [] > for f in model._meta.fields: > old_cols.append( quote_name(f.column) ) > > 
output.append( 'INSERT INTO '+ quote_name(table_name) +' SELECT '+ ','.join(old_cols) +' FROM '+ quote_name(tmp_table_name) +';' ) > output.append( 'DROP TABLE '+ quote_name(tmp_table_name) +';' ) > > return output > > def get_add_column_sql( table_name, col_name, col_type, null, unique, primary_key, default ): > output = [] > field_output = [] > field_output.append('ALTER TABLE') > field_output.append(quote_name(table_name)) > field_output.append('ADD COLUMN') > field_output.append(quote_name(col_name)) > field_output.append(col_type) > field_output.append(('%sNULL' % (not null and 'NOT ' or ''))) > if unique or primary_key: > field_output.append(('UNIQUE')) > if primary_key: > field_output.append(('PRIMARY KEY')) > if default and str(default) != 'django.db.models.fields.NOT_PROVIDED': > field_output.append(('DEFAULT')) > field_output.append((quote_name(str(default)))) > output.append(' '.join(field_output) + ';') > return output > > def get_drop_column_sql( table_name, col_name ): > model = get_model_from_table_name(table_name) > output = [] > output.append( '-- FYI: sqlite does not support deleting columns, so we create a new '+ quote_name(col_name) +' and delete the old (ie, this could take a while)' ) > tmp_table_name = table_name + '_1337_TMP' # unlikely to produce a namespace conflict > output.extend( get_change_table_name_sql( tmp_table_name, table_name ) ) > output.extend( management._get_sql_model_create(model, set())[0] ) > new_cols = [] > for f in model._meta.fields: > new_cols.append( quote_name(f.column) ) > output.append( 'INSERT INTO '+ quote_name(table_name) +' SELECT '+ ','.join(new_cols) +' FROM '+ quote_name(tmp_table_name) +';' ) > output.append( 'DROP TABLE '+ quote_name(tmp_table_name) +';' ) > return output > > def get_model_from_table_name(table_name): > from django.db import models > for app in models.get_apps(): > app_name = app.__name__.split('.')[-2] > if app_name == table_name.split('_')[0] or app_name == 
'_'.join(table_name.split('_')[0:1]) or app_name == '_'.join(table_name.split('_')[0:2]): > for model in models.get_models(app): > if model._meta.db_table == table_name: > return model > return None > > diff -x .svn -rN trunk/django/db/backends/sqlite3/introspection.py schema-evolution/django/db/backends/sqlite3/introspection.py 29,31d28 < for info in _table_info(cursor, table_name): < indexes[info['name']] = {'primary_key': info['pk'] != 0, < 'unique': False} 39,43c36,45 < # Skip indexes across multiple fields < if len(info) != 1: < continue < name = info[0][2] # seqno, cid, name < indexes[name]['unique'] = True --- > for x in info: > name = x[2] # seqno, cid, name > cursor.execute('PRAGMA table_info(%s)' % quote_name(table_name)) > for row in cursor.fetchall(): > if row[1]==name: > indexes[name] = {'primary_key': False, 'unique': False} > if row[2]=='integer': > indexes[name]['primary_key'] = True > else: > indexes[name]['unique'] = True 45a48,101 > def get_columns(cursor, table_name): > try: > cursor.execute("PRAGMA table_info(%s)" % quote_name(table_name)) > return [row[1] for row in cursor.fetchall()] > except: > return [] > > def get_known_column_flags( cursor, table_name, column_name ): > cursor.execute("PRAGMA table_info(%s)" % quote_name(table_name)) > dict = {} > dict['primary_key'] = False > dict['foreign_key'] = False > dict['unique'] = False > dict['default'] = '' > dict['allow_null'] = True > > for row in cursor.fetchall(): > # print row > if row[1] == column_name: > col_type = row[2] > > # maxlength check goes here > if row[2][0:7]=='varchar': > dict['maxlength'] = row[2][8:len(row[2])-1] > > # default flag check goes here > dict['allow_null'] = row[3]==0 > > # default value check goes here > dict['default'] = row[4] > > cursor.execute("PRAGMA index_list(%s)" % quote_name(table_name)) > index_names = [] > for row in cursor.fetchall(): > index_names.append(row[1]) > for index_name in index_names: > cursor.execute("PRAGMA index_info(%s)" % 
quote_name(index_name)) > for row in cursor.fetchall(): > if row[2]==column_name: > if col_type=='integer': dict['primary_key'] = True # sqlite3 does not distinguish between unique and pk; all > else: dict['unique'] = True # unique integer columns are treated as part of the pk. > > # primary/foreign/unique key flag check goes here > #if row[3]=='PRI': dict['primary_key'] = True > #else: dict['primary_key'] = False > #if row[3]=='FOR': dict['foreign_key'] = True > #else: dict['foreign_key'] = False > #if row[3]=='UNI': dict['unique'] = True > #else: dict['unique'] = False > > > # print dict > return dict > diff -x .svn -rN trunk/django/db/models/fields/__init__.py schema-evolution/django/db/models/fields/__init__.py 83c83 < help_text='', db_column=None, db_tablespace=None): --- > help_text='', db_column=None, aka=None, db_tablespace=None): 103a104 > self.aka = aka diff -x .svn -rN trunk/django/db/models/options.py schema-evolution/django/db/models/options.py 18c18 < 'order_with_respect_to', 'app_label', 'db_tablespace') --- > 'order_with_respect_to', 'app_label', 'aka', 'db_tablespace') 25a26 > self.aka = '' 78a80,87 > if isinstance(self.aka, str): > self.aka = "%s_%s" % (self.app_label, self.aka.lower()) > if isinstance(self.aka, tuple): > real_aka = [] > for some_aka in self.aka: > real_aka.append( "%s_%s" % (self.app_label, some_aka.lower()) ) > self.aka = tuple(real_aka) > diff -x .svn -rN trunk/django/test/simple.py schema-evolution/django/test/simple.py 149c149 < \ No newline at end of file --- > diff -x .svn -rN trunk/docs/schema-evolution.txt schema-evolution/docs/schema-evolution.txt 0a1,476 > = Schema Evolution Documentation = > > == Introduction == > > Schema evolution is the function of updating an existing Django generated database schema to a newer/modified version based upon a newer/modified set of Django models, and/or a set of developer written upgrade scripts. 
> > It's important to note that different developers wish to approach schema evolution in different ways. As detailed in the original SchemaEvolution document (and elsewhere), there are four basic categories of developers: > > 1. users who trust introspection and never want to touch/see SQL (Malcolm) > 1. users who mostly trust introspection but want the option of auto-applied upgrades for specific situations (Wash) > 1. users who use introspection-generated SQL, but don't trust it (they want it generated at development and stored for use in production - Kaylee) > 1. users who hate introspection and just want auto-application of their own scripts (Zoe) > > who wish to perform different combinations of the two basic subtasks of schema evolution: > > 1. generation of SQL via magical introspection > 1. storage and auto-application of upgrade SQL > > This implementation of schema evolution should satisfy all four groups, while keeping the complexities of the parts you don't use out of sight. Scroll down to the usage sections to see examples of how each developer would approach their jobs. > > == Downloading / Installing == > > This functionality is not yet in Django/trunk, but in a separate schema-evolution branch. 
To download this branch, run the following: > > {{{ > svn co http://code.djangoproject.com/svn/django/branches/schema-evolution/ django_se_src > ln -s `pwd`/django_se_src/django SITE-PACKAGES-DIR/django > }}} > > Or, if you're currently running Django v0.96, run the following: > > {{{ > cd //site-packages/django/ > wget http://kered.org/blog/wp-content/uploads/2007/07/django_schema_evolution-v096patch.txt > patch -p1 < django_schema_evolution-v096patch.txt > }}} > > The last command will produce the following output: > > {{{ > patching file core/management.py > patching file db/backends/mysql/base.py > patching file db/backends/mysql/introspection.py > patching file db/backends/postgresql/base.py > patching file db/backends/postgresql/introspection.py > patching file db/backends/sqlite3/base.py > patching file db/backends/sqlite3/introspection.py > patching file db/models/fields/__init__.py > patching file db/models/options.py > }}} > > == How To Use: Malcolm == > > For the most part, schema evolution can be performed via introspection, as long as you're not doing anything too radical. If you have an established application the ''vast'' majority of changes are either additions or renames (either tables or columns). Or if you're new to SQL, introspection keeps things very simple for you. To use schema evolution as Malcolm just make changes to your models, run syncdb, and you're done. But like all schema changes, it's wise to preview what is going to be run. To do this, run the following: > > {{{ > $ ./manage sqlevolve app_name > }}} > > This will output to the command line the SQL to be run to bring your database schema up to date with your model structure. > > However not everything can be handled through introspection. A small amount of metadata is used in the cases of model or field renames, so that the introspection code can match up the old field to the new field. 
(therefore preserving your data) > > For renaming a column, use an "aka" attribute: > > {{{ > # this field used to be called pub_date > publish_date = models.DateTimeField('date published', aka='pub_date') > }}} > > If you have renamed this twice and still wish to support migration from both older schemas, "aka"s can be tuples: > > {{{ > # this field used to be called pub_date > publish_date = models.DateTimeField('date published', aka=('pub_date','other_old_field_name')) > }}} > > For renaming a model, add an "aka" field to the Meta section: > > {{{ > # the original name for this model was 'Choice' > class Option(models.Model): > [...] > class Meta: > aka = 'Choice' > }}} > > And after time you make a series of changes, run sqlevolve or syncdb and your schema changes will be either shown to you or applied for you. > > For further examples, scroll down to the Introspection Examples section. > > == How To Use: Wash == > > Note that most Malcolm developers (likely new developers) will eventually run up against a limitation inherent in introspection. They love their incredibly intuitive tool but it can't do everything. But they don't want to give it up, because it's a great 90% solution. If only they can add a simple script without having to throw away all the convenient candy, past or future. > > All Wash has to do is store a little bit of extra metadata. Namely two things: > > 1. a fingerprint of the known schema > 1. an sql script > > in the file 'app_name/schema_evolution.py'. 
(conveniently next to models.py) > > This module looks as follows: > > {{{ > # list of all known schema fingerprints, in order > fingerprints = [ > 'fv1:1742830097', > 'fv1:907953071', > # add future fingerprints here > ] > > # all of your evolution scripts, mapping the from_version and to_version > # to a list if sql commands > evolutions = { > # ('from_fingerprint','to_fingerprint'): ['-- some sql'], > ('fv1:1742830097','fv1:907953071'): [ > '-- some list of sql statements, constituting an upgrade', > '-- some list of sql statements, constituting an upgrade', > ], > } > }}} > > To create this file, he would first fingerprint his schema with the following command: > > {{{ > $ ./manage sqlfingerprint app_name > Notice: Current schema fingerprint for 'app_name' is 'fv1:1742830097' (unrecognized) > }}} > > He would add this fingerprint to the end of the 'fingerprints' list in the schema_evolution module, and it would become an automatically recognized schema, ripe for the upgrade. And then he would write an upgrade script, placing it in the 'evolutions' dictionary object, mapped against the current fingerprint and some fake/temporary fingerprint ('fv1:xxxxxxxx'). Finally, he would run his script (either manually or via syncdb), re-fingerprint and save it in both the fingerprints list and the 'to_fingerprint' part of the mapping. > > Later, when he runs sqlevolve (or syncdb) against his production database, sqlevolve will detect his current schema and attempt an upgrade using the upgrade script, and then verify it. If it succeeds, will continue applying all available upgrade scripts until one either fails or it reaches the latest database schema version. (more technically, syncdb will recursively apply all available scripts...sqlevolve since it simply prints to the console, only prints the next available script) > > '''Note:''' Manually defined upgrade scripts always are prioritized over introspected scripts. And introspected scripts are never applied recursively. 
> > This way Wash can continue using introspections for the majority of his tasks, only stopping to define fingerprints/scripts on those rare occasions he needs them. > > == How To Use: Kaylee == > > Kaylee, like Wash and Malcolm, likes the time-saving features of automatic introspection, but likes much more control over deployments to "her baby". So she typically still uses introspection during development, but never in production. What she does is instead of saving the occasional "hard" migration scripts like Wash, she saves them all. This builds a neat chain of upgrades in her schema_evolution module which are then applied in series. Additionally, she likes the ability to automatically back out changes as well, so she stores revert scripts (also usually automatically generated at development) in the same module. > > == How To Use: Zoe == > > Zoe simply doesn't like the whole idea of introspection. She's an expert SQL swinger and never wants to see it generated for her (much less have those ugly "aka" fields buggering up her otherwise pristine models. She simply writes her own SQL scripts and stores them all in her schema_evolution module. > > == Introspection Examples == > > The following documentation will take you through several common model changes and show you how Django's schema evolution introspection handles them. Each example provides the pre and post model source code, as well as the SQL output. 
> > === Adding / Removing Fields === > > Model: version 1 > > {{{ > from django.db import models > > class Poll(models.Model): > question = models.CharField(maxlength=200) > pub_date = models.DateTimeField('date published') > author = models.CharField(maxlength=200) > def __str__(self): > return self.question > > class Choice(models.Model): > poll = models.ForeignKey(Poll) > choice = models.CharField(maxlength=200) > votes = models.IntegerField() > def __str__(self): > return self.choice > }}} > > Model: version 2 > > {{{ > from django.db import models > > class Poll(models.Model): > question = models.CharField(maxlength=200) > pub_date = models.DateTimeField('date published') > author = models.CharField(maxlength=200) > def __str__(self): > return self.question > > # new fields > pub_date2 = models.DateTimeField('date published') > > class Choice(models.Model): > poll = models.ForeignKey(Poll) > choice = models.CharField(maxlength=200) > votes = models.IntegerField() > def __str__(self): > return self.choice > > # new fields > votes2 = models.IntegerField() > hasSomething = models.BooleanField() > creatorIp = models.IPAddressField() > }}} > > Output: v1⇒v2 > > {{{ > BEGIN; > ALTER TABLE `case01_add_field_poll` ADD COLUMN `pub_date2` datetime NOT NULL; > ALTER TABLE `case01_add_field_choice` ADD COLUMN `votes2` integer NOT NULL; > ALTER TABLE `case01_add_field_choice` ADD COLUMN `hasSomething` bool NOT NULL; > ALTER TABLE `case01_add_field_choice` ADD COLUMN `creatorIp` char(15) NOT NULL; > COMMIT; > }}} > > Output: v2⇒v1 > > {{{ > -- warning: as the following may cause data loss, it/they must be run manually > -- ALTER TABLE `case01_add_field_poll` DROP COLUMN `pub_date2`; > -- end warning > -- warning: as the following may cause data loss, it/they must be run manually > -- ALTER TABLE `case01_add_field_choice` DROP COLUMN `votes2`; > -- ALTER TABLE `case01_add_field_choice` DROP COLUMN `creatorIp`; > -- ALTER TABLE `case01_add_field_choice` DROP COLUMN 
`hasSomething`; > -- end warning > }}} > > === Renaming Fields === > > Model: version 1 > > {{{ > from django.db import models > > class Poll(models.Model): > """this model originally had fields named pub_date and the_author. you can use > either a str or a tuple for the aka value. (tuples are used if you have changed > its name more than once)""" > question = models.CharField(maxlength=200) > pub_date = models.DateTimeField('date published', aka='publish_date') > the_author = models.CharField(maxlength=200, aka='the_author') > def __str__(self): > return self.question > > class Choice(models.Model): > poll = models.ForeignKey(Poll) > choice = models.CharField(maxlength=200) > votes = models.IntegerField(aka='votes') > def __str__(self): > return self.choice > }}} > > Model: version 2 > > {{{ > from django.db import models > > class Poll(models.Model): > """this model originally had fields named pub_date and the_author. you can use > either a str or a tuple for the aka value. (tuples are used if you have changed > its name more than once)""" > question = models.CharField(maxlength=200) > published_date = models.DateTimeField('date published', aka=('pub_date', 'publish_date')) > author = models.CharField(maxlength=200, aka='the_author') > def __str__(self): > return self.question > > class Choice(models.Model): > poll = models.ForeignKey(Poll) > choice = models.CharField(maxlength=200) > number_of_votes = models.IntegerField(aka='votes') > def __str__(self): > return self.choice > }}} > > Output: v1⇒v2 > > {{{ > BEGIN; > ALTER TABLE `case02_rename_field_poll` CHANGE COLUMN `pub_date` `published_date` datetime NOT NULL; > ALTER TABLE `case02_rename_field_poll` CHANGE COLUMN `the_author` `author` varchar(200) NOT NULL; > ALTER TABLE `case02_rename_field_choice` CHANGE COLUMN `votes` `number_of_votes` integer NOT NULL; > COMMIT; > }}} > > === Renaming Models === > > Model: version 1 > > {{{ > from django.db import models > > class Poll(models.Model): > question = 
models.CharField(maxlength=200) > pub_date = models.DateTimeField('date published') > author = models.CharField(maxlength=200) > def __str__(self): > return self.question > > class Choice(models.Model): > "the original name for this model was 'Choice'" > poll = models.ForeignKey(Poll) > choice = models.CharField(maxlength=200) > number_of_votes = models.IntegerField() > def __str__(self): > return self.choice > class Meta: > aka = ('Choice', 'OtherBadName') > }}} > > Model: version 2 > > {{{ > from django.db import models > > class Poll(models.Model): > question = models.CharField(maxlength=200) > pub_date = models.DateTimeField('date published') > author = models.CharField(maxlength=200) > def __str__(self): > return self.question > > class Option(models.Model): > "the original name for this model was 'Choice'" > poll = models.ForeignKey(Poll) > choice = models.CharField(maxlength=200) > # show that field name changes work too > votes = models.IntegerField(aka='number_of_votes') > def __str__(self): > return self.choice > class Meta: > aka = ('Choice', 'BadName') > }}} > > Output: v1⇒v2 > > {{{ > BEGIN; > ALTER TABLE `case03_rename_model_choice` RENAME TO `case03_rename_model_option`; > ALTER TABLE `case03_rename_model_option` CHANGE COLUMN `number_of_votes` `votes` integer NOT NULL; > COMMIT; > }}} > > === Changing Flags === > > Model: version 1 > > {{{ > from django.db import models > > class Poll(models.Model): > question = models.CharField(maxlength=200) > pub_date = models.DateTimeField('date published') > author = models.CharField(maxlength=200) > def __str__(self): > return self.question > > class Choice(models.Model): > "the original name for this model was 'Choice'" > poll = models.ForeignKey(Poll) > choice = models.CharField(maxlength=200) > votes = models.IntegerField() > def __str__(self): > return self.choice > > class Foo(models.Model): > GENDER_CHOICES = ( > ('M', 'Male'), > ('F', 'Female'), > ) > gender = models.CharField(maxlength=1, 
choices=GENDER_CHOICES) > }}} > > Model: version 2 > > {{{ > from django.db import models > > class Poll(models.Model): > question = models.CharField(maxlength=100) > pub_date = models.DateTimeField('date published') > author = models.CharField(maxlength=200) > def __str__(self): > return self.question > > class Choice(models.Model): > "the original name for this model was 'Choice'" > poll = models.ForeignKey(Poll) > # make sure aka still works with a flag change > option = models.CharField(maxlength=400, aka='choice') > votes = models.IntegerField() > votes2 = models.IntegerField() # make sure column adds still work > def __str__(self): > return self.choice > > class Foo(models.Model): > GENDER_CHOICES = ( > ('M', 'Male'), > ('F', 'Female'), > ) > gender = models.CharField(maxlength=1, choices=GENDER_CHOICES, db_index=True) > gender2 = models.CharField(maxlength=1, null=True, unique=True) > > }}} > > Output: v1⇒v2 > > {{{ > BEGIN; > ALTER TABLE `case04_change_flag_poll` MODIFY COLUMN `question` varchar(100) NOT NULL; > ALTER TABLE `case04_change_flag_foo` ADD COLUMN `gender2` varchar(1) NULL UNIQUE; > ALTER TABLE `case04_change_flag_choice` MODIFY COLUMN `choice` varchar(400) NOT NULL; > ALTER TABLE `case04_change_flag_choice` CHANGE COLUMN `choice` `option` varchar(400) NOT NULL; > ALTER TABLE `case04_change_flag_choice` ADD COLUMN `votes2` integer NOT NULL; > COMMIT; > }}} > > == Criticisms == > > ''I _really_ don't like the aka representations in the model. The models file should always be a clean statement of the current state of the model. Migration is about > getting an old database to match the currently required models - if I don't have a legacy database, I don't really want the cruft hanging around in my models. Migration plans or historical models really should be kept out of the core model file, IMHO.'' > > We currently store all sorts of non-DB related metadata in the model that arguably should not be there, including presentation information. 
We do this for clarity and convenience - you would have to duplicate a lot of information otherwise in multiple locations without any obvious direct connection. So which paradigm is more critical here, DRY or MVC? Or do we continue with the status-quo of a common-sense balance? As far as cruft, if you don't have a legacy database, you wouldn't have any aka fields to begin with. And as you phase out legacy support, simply delete them. > > ''Unless I'm missing something, the aka approach doesn't track exactly which name versions correlate with other versions. Consider; your aka chain for one field could indicate a rename from 'foo' to 'bar' to 'foo' to 'bar'; a second field could indicate renames from 'whiz' to 'foo' to 'whiz', etc. A tuple of historical names doesn't tell me which sets of names were in use concurrently; so if I find a database with a field called 'foo' which requires migration, is 'foo' the first field or the second?'' > > Correct, however I thought this to be a highly unlikely scenario, not warranting the extra notational complexity. But just as we support strings and tuples, there is nothing to say we can't extend it to support say a mapping of historical names to date ranges, if the need arises. > > ''The 'aka' approach is ambiguous for all but trivial use cases. It doesn't capture the idea that database changes occur in bulk, in sequence. For example, On Monday, I add two fields, remove 1 field, rename a table. That creates v2 of the database. On Tuesday, I bring back the deleted field, and remove one of the added fields, creating v3 of the database. This approach doesn't track which state a given database is in, and doesn't apply changes in blocks appropriate to versioned changes.'' > > It does not matter how you get from v1 => v3, as long as you get there with minimum theoretical information loss. The following: > > '''v1 => v2 => v3''' > 1. // v1 t1:{A} > 1. add_field(B); > 1. add_field(C); > 1. del_field(A); > 1. rename_table(t1,t2); > 1. 
// v2 t2:{B,C} > 1. add_field(A); > 1. del_field(C); > 1. // v3 t2:{A,B} > > is functionally equivalent to: > > '''v1 => v3''' > 1. // v1 t1:{A} > 1. add_field(B); > 1. rename_table(t1,t2); > 1. // v3 t2:{A,B} > > And this can be supported completely through introspection + metadata about what tables and columns used to be called. If you load v2, the available information can get you there from v1; and if you load v3, it can get you there from either v1 or v2. > > A more detailed breakdown of this critique is available [http://kered.org/blog/2007-08-03/schema-evolution-confusion-example-case/ here], complete with working code examples. > > == Future Work == > > The biggest missing piece, I believe, is changing column types. For instance, say you currently have: > > {{{ > ssn = models.IntegerField() > }}} > > Which you want to change into: > > {{{ > ssn = models.CharField(maxlength=12) > }}} > > Schema evolution should generate SQL to add the new column, push the data from the old to the new column, then delete the old column. Warnings should be provided for completely incompatible types or other loss-of-information scenarios. > > The second biggest missing piece is foreign/m2m key support. > > Lastly, for the migration scripts, sometimes it's easier to write Python than it is to write SQL. I intend for you to be able to interleave function calls in with the SQL statements and have the schema evolution code just Do The Right Thing(tm). But this isn't coded yet. > > == Conclusion == > > That's pretty much it. If you can suggest additional examples or test cases you think would be of value, please email me at public@kered.org. 
> diff -x .svn -rN trunk/tests/modeltests/schema_evolution/models.py schema-evolution/tests/modeltests/schema_evolution/models.py 0a1,313 > """ > Schema Evolution Tests > """ > > from django.db import models > from django.conf import settings > > GENDER_CHOICES = ( > ('M', 'Male'), > ('F', 'Female'), > ) > > class Person(models.Model): > name = models.CharField(maxlength=20) > gender = models.CharField(maxlength=1, choices=GENDER_CHOICES) > gender2 = models.CharField(maxlength=1, choices=GENDER_CHOICES, aka='gender_old') > > def __unicode__(self): > return self.name > > class Meta: > aka = ('PersonOld', 'OtherBadName') > > class Muebles(models.Model): > tipo = models.CharField(maxlength=40, default="woot") > # new fields > fecha_publicacion = models.DateTimeField('date published') > > __test__ = {'API_TESTS':""" > >>> import django > >>> from django.core import management > >>> from django.db import backend, models > >>> from django.db import connection, get_introspection_module > >>> app = models.get_apps()[-1] > >>> cursor = connection.cursor() > """} > > if settings.DATABASE_ENGINE == 'mysql': > __test__['API_TESTS'] += """ > # the table as it is supposed to be > >>> create_table_sql = management.get_sql_all(app) > > # make sure we don't evolve an unedited table > >>> management.get_sql_evolution(app) > [] > > # delete a column, so it looks like we've recently added a field > >>> sql = backend.get_drop_column_sql( 'schema_evolution_person', 'gender' ) > >>> print sql > ['ALTER TABLE `schema_evolution_person` DROP COLUMN `gender`;'] > >>> for s in sql: cursor.execute(s) > 0L > >>> management.get_sql_evolution(app) > ['ALTER TABLE `schema_evolution_person` ADD COLUMN `gender` varchar(1) NOT NULL;'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;'); cursor.execute(create_table_sql[0]) > 0L\n0L > > # add a column, so it looks like we've recently deleted a field > >>> cursor.execute('ALTER TABLE `schema_evolution_person` ADD COLUMN 
`gender_nothere` varchar(1) NOT NULL;') > 0L > >>> management.get_sql_evolution(app) > ['-- warning: the following may cause data loss', u'ALTER TABLE `schema_evolution_person` DROP COLUMN `gender_nothere`;', '-- end warning'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;'); cursor.execute(create_table_sql[0]) > 0L\n0L > > # rename column, so it looks like we've recently renamed a field > >>> cursor.execute('ALTER TABLE `schema_evolution_person` CHANGE COLUMN `gender2` `gender_old` varchar(1) NOT NULL;') > 0L > >>> management.get_sql_evolution(app) > ['ALTER TABLE `schema_evolution_person` CHANGE COLUMN `gender_old` `gender2` varchar(1) NOT NULL;'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;'); cursor.execute(create_table_sql[0]) > 0L\n0L > > # rename table, so it looks like we've recently renamed a model > >>> cursor.execute('ALTER TABLE `schema_evolution_person` RENAME TO `schema_evolution_personold`') > 0L > >>> management.get_sql_evolution(app) > ['ALTER TABLE `schema_evolution_personold` RENAME TO `schema_evolution_person`;'] > > # reset the db > >>> cursor.execute(create_table_sql[0]) > 0L > > # change column flags, so it looks like we've recently changed a column flag > >>> cursor.execute('ALTER TABLE `schema_evolution_person` MODIFY COLUMN `name` varchar(10) NULL;') > 0L > >>> management.get_sql_evolution(app) > ['ALTER TABLE `schema_evolution_person` MODIFY COLUMN `name` varchar(20) NOT NULL;'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;'); cursor.execute(create_table_sql[0]) > 0L\n0L > > # delete a datetime column, so it looks like we've recently added a datetime field > >>> for sql in backend.get_drop_column_sql( 'schema_evolution_muebles', 'fecha_publicacion' ): print sql; cursor.execute(sql) > ALTER TABLE `schema_evolution_muebles` DROP COLUMN `fecha_publicacion`; > 0L > >>> management.get_sql_evolution(app) > ['ALTER TABLE `schema_evolution_muebles` ADD 
COLUMN `fecha_publicacion` datetime NOT NULL;'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_muebles;'); cursor.execute(create_table_sql[1]) > 0L\n0L > > # delete a column with a default value, so it looks like we've recently added a column > >>> for sql in backend.get_drop_column_sql( 'schema_evolution_muebles', 'tipo' ): print sql; cursor.execute(sql) > ALTER TABLE `schema_evolution_muebles` DROP COLUMN `tipo`; > 0L > >>> management.get_sql_evolution(app) > ['ALTER TABLE `schema_evolution_muebles` ADD COLUMN `tipo` varchar(40) NOT NULL DEFAULT `woot`;'] > > """ > > if settings.DATABASE_ENGINE == 'postgresql' or settings.DATABASE_ENGINE == 'postgresql_psycopg2' : > __test__['API_TESTS'] += """ > # the table as it is supposed to be > >>> create_table_sql = management.get_sql_all(app) > > # make sure we don't evolve an unedited table > >>> management.get_sql_evolution(app) > [] > > # delete a column, so it looks like we've recently added a field > >>> for sql in backend.get_drop_column_sql( 'schema_evolution_person', 'gender' ): cursor.execute(sql) > >>> management.get_sql_evolution(app) > ['ALTER TABLE "schema_evolution_person" ADD COLUMN "gender" varchar(1);', 'ALTER TABLE "schema_evolution_person" ALTER COLUMN "gender" SET NOT NULL;'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;'); cursor.execute(create_table_sql[0]) > > # add a column, so it looks like we've recently deleted a field > >>> for sql in backend.get_add_column_sql( 'schema_evolution_person', 'gender_nothere', 'varchar(1)', True, False, False, None ): cursor.execute(sql) > >>> management.get_sql_evolution(app) > ['-- warning: the following may cause data loss', u'ALTER TABLE "schema_evolution_person" DROP COLUMN "gender_nothere";', '-- end warning'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;'); cursor.execute(create_table_sql[0]) > > # rename column, so it looks like we've recently renamed a field > >>> for sql 
in backend.get_change_column_name_sql( 'schema_evolution_person', {}, 'gender2', 'gender_old', 'varchar(1)' ): cursor.execute(sql) > >>> management.get_sql_evolution(app) > ['ALTER TABLE "schema_evolution_person" RENAME COLUMN "gender_old" TO "gender2";'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;'); cursor.execute(create_table_sql[0]) > > # rename table, so it looks like we've recently renamed a model > >>> for sql in backend.get_change_table_name_sql( 'schema_evolution_personold', 'schema_evolution_person' ): cursor.execute(sql) > >>> management.get_sql_evolution(app) > ['ALTER TABLE "schema_evolution_personold" RENAME TO "schema_evolution_person";'] > > # reset the db > >>> cursor.execute(create_table_sql[0]) > > # change column flags, so it looks like we've recently changed a column flag > >>> for sql in backend.get_change_column_def_sql( 'schema_evolution_person', 'name', 'varchar(10)', True, False, False, None ): cursor.execute(sql) > >>> management.get_sql_evolution(app) > ['ALTER TABLE "schema_evolution_person" ADD COLUMN "name_tmp" varchar(20);', 'UPDATE "schema_evolution_person" SET "name_tmp" = "name";', 'ALTER TABLE "schema_evolution_person" DROP COLUMN "name";', 'ALTER TABLE "schema_evolution_person" RENAME COLUMN "name_tmp" TO "name";', 'ALTER TABLE "schema_evolution_person" ALTER COLUMN "name" SET NOT NULL;'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;'); cursor.execute(create_table_sql[0]) > > # delete a datetime column pair, so it looks like we've recently added a datetime field > >>> for sql in backend.get_drop_column_sql( 'schema_evolution_muebles', 'fecha_publicacion' ): print sql; cursor.execute(sql) > ALTER TABLE "schema_evolution_muebles" DROP COLUMN "fecha_publicacion"; > >>> management.get_sql_evolution(app) > ['ALTER TABLE "schema_evolution_muebles" ADD COLUMN "fecha_publicacion" timestamp with time zone;', 'ALTER TABLE "schema_evolution_muebles" ALTER COLUMN 
"fecha_publicacion" SET NOT NULL;'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_muebles;'); cursor.execute(create_table_sql[1]) > > # delete a column with a default value, so it looks like we've recently added a column > >>> for sql in backend.get_drop_column_sql( 'schema_evolution_muebles', 'tipo' ): print sql; cursor.execute(sql) > ALTER TABLE "schema_evolution_muebles" DROP COLUMN "tipo"; > >>> management.get_sql_evolution(app) > ['ALTER TABLE "schema_evolution_muebles" ADD COLUMN "tipo" varchar(40);', 'ALTER TABLE "schema_evolution_muebles" ALTER COLUMN "tipo" SET DEFAULT "woot";', 'ALTER TABLE "schema_evolution_muebles" ALTER COLUMN "tipo" SET NOT NULL;'] > """ > > if settings.DATABASE_ENGINE == 'sqlite3': > __test__['API_TESTS'] += """ > # the table as it is supposed to be > >>> create_table_sql = management.get_sql_all(app) > > # make sure we don't evolve an unedited table > >>> management.get_sql_evolution(app) > [] > > # delete a column, so it looks like we've recently added a field > >>> cursor.execute( 'DROP TABLE "schema_evolution_person";' ).__class__ > > >>> cursor.execute( 'CREATE TABLE "schema_evolution_person" ( "id" integer NOT NULL UNIQUE PRIMARY KEY, "name" varchar(20) NOT NULL, "gender" varchar(1) NOT NULL );' ).__class__ > > >>> management.get_sql_evolution(app) > ['ALTER TABLE "schema_evolution_person" ADD COLUMN "gender2" varchar(1) NOT NULL;'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;').__class__ > > >>> cursor.execute(create_table_sql[0]).__class__ > > > # add a column, so it looks like we've recently deleted a field > >>> cursor.execute( 'DROP TABLE "schema_evolution_person";' ).__class__ > > >>> cursor.execute( 'CREATE TABLE "schema_evolution_person" ( "id" integer NOT NULL UNIQUE PRIMARY KEY, "name" varchar(20) NOT NULL, "gender" varchar(1) NOT NULL, "gender2" varchar(1) NOT NULL, "gender_new" varchar(1) NOT NULL );' ).__class__ > > >>> cursor.execute( 'insert into 
"schema_evolution_person" values (1,2,3,4,5);' ).__class__ > > >>> sql = management.get_sql_evolution(app) > >>> print sql > ['-- warning: the following may cause data loss', u'-- FYI: sqlite does not support deleting columns, so we create a new "gender_new" and delete the old (ie, this could take a while)', 'ALTER TABLE "schema_evolution_person" RENAME TO "schema_evolution_person_1337_TMP";', 'CREATE TABLE "schema_evolution_person" (\\n "id" integer NOT NULL UNIQUE PRIMARY KEY,\\n "name" varchar(20) NOT NULL,\\n "gender" varchar(1) NOT NULL,\\n "gender2" varchar(1) NOT NULL\\n)\\n;', 'INSERT INTO "schema_evolution_person" SELECT "id","name","gender","gender2" FROM "schema_evolution_person_1337_TMP";', 'DROP TABLE "schema_evolution_person_1337_TMP";', '-- end warning'] > >>> for s in sql: cursor.execute(s).__class__ > > > > > > > > >>> cursor.execute('select * from "schema_evolution_person";').__class__ > > >>> cursor.fetchall()[0] > (1, u'2', u'3', u'4') > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;').__class__ > > >>> cursor.execute(create_table_sql[0]).__class__ > > > # rename column, so it looks like we've recently renamed a field > >>> cursor.execute('DROP TABLE "schema_evolution_person"').__class__ > > >>> cursor.execute('').__class__ > > >>> cursor.execute('CREATE TABLE "schema_evolution_person" ("id" integer NOT NULL UNIQUE PRIMARY KEY, "name" varchar(20) NOT NULL, "gender" varchar(1) NOT NULL, "gender_old" varchar(1) NOT NULL );').__class__ > > >>> cursor.execute( 'insert into "schema_evolution_person" values (1,2,3,4);' ).__class__ > > >>> sql = management.get_sql_evolution(app) > >>> print sql > ['-- FYI: sqlite does not support renaming columns, so we create a new "schema_evolution_person" and delete the old (ie, this could take a while)', 'ALTER TABLE "schema_evolution_person" RENAME TO "schema_evolution_person_1337_TMP";', 'CREATE TABLE "schema_evolution_person" (\\n "id" integer NOT NULL UNIQUE PRIMARY KEY,\\n "name" 
varchar(20) NOT NULL,\\n "gender" varchar(1) NOT NULL,\\n "gender2" varchar(1) NOT NULL\\n)\\n;', 'INSERT INTO "schema_evolution_person" SELECT "id","name","gender","gender_old" FROM "schema_evolution_person_1337_TMP";', 'DROP TABLE "schema_evolution_person_1337_TMP";'] > >>> for s in sql: cursor.execute(s).__class__ > > > > > > >>> cursor.execute('select * from "schema_evolution_person";').__class__ > > >>> cursor.fetchall()[0] > (1, u'2', u'3', u'4') > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;').__class__ > > >>> cursor.execute(create_table_sql[0]).__class__ > > > # rename table, so it looks like we've recently renamed a model > >>> for sql in backend.get_change_table_name_sql( 'schema_evolution_personold', 'schema_evolution_person' ): cursor.execute(sql).__class__ > > >>> management.get_sql_evolution(app) > ['ALTER TABLE "schema_evolution_personold" RENAME TO "schema_evolution_person";'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_personold;').__class__ > > >>> cursor.execute(create_table_sql[0]).__class__ > > > # change column flags, so it looks like we've recently changed a column flag > >>> cursor.execute('DROP TABLE "schema_evolution_person";').__class__ > > >>> cursor.execute('CREATE TABLE "schema_evolution_person" ( "id" integer NOT NULL UNIQUE PRIMARY KEY, "name" varchar(20) NOT NULL, "gender" varchar(1) NOT NULL, "gender2" varchar(1) NULL);').__class__ > > >>> management.get_sql_evolution(app) > ['-- FYI: sqlite does not support changing columns, so we create a new "schema_evolution_person" and delete the old (ie, this could take a while)', 'ALTER TABLE "schema_evolution_person" RENAME TO "schema_evolution_person_1337_TMP";', 'CREATE TABLE "schema_evolution_person" (\\n "id" integer NOT NULL UNIQUE PRIMARY KEY,\\n "name" varchar(20) NOT NULL,\\n "gender" varchar(1) NOT NULL,\\n "gender2" varchar(1) NOT NULL\\n)\\n;', 'INSERT INTO "schema_evolution_person" SELECT "id","name","gender","gender2" FROM 
"schema_evolution_person_1337_TMP";', 'DROP TABLE "schema_evolution_person_1337_TMP";'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_person;').__class__ > > >>> cursor.execute(create_table_sql[0]).__class__ > > > # delete a datetime column pair, so it looks like we've recently added a datetime field > >>> for sql in ['DROP TABLE schema_evolution_muebles;','CREATE TABLE "schema_evolution_muebles" ("id" integer NOT NULL UNIQUE PRIMARY KEY,"tipo" varchar(40) NOT NULL);']: cursor.execute(sql).__class__ > > > >>> management.get_sql_evolution(app) > ['ALTER TABLE "schema_evolution_muebles" ADD COLUMN "fecha_publicacion" datetime NOT NULL;'] > > # reset the db > >>> cursor.execute('DROP TABLE schema_evolution_muebles;').__class__ > > >>> cursor.execute(create_table_sql[1]).__class__ > > > # delete a column with a default value, so it looks like we've recently added a column > >>> for sql in ['DROP TABLE schema_evolution_muebles;','CREATE TABLE "schema_evolution_muebles" ("id" integer NOT NULL UNIQUE PRIMARY KEY,"fecha_publicacion" datetime NOT NULL);']: cursor.execute(sql).__class__ > > > >>> management.get_sql_evolution(app) > ['ALTER TABLE "schema_evolution_muebles" ADD COLUMN "tipo" varchar(40) NOT NULL DEFAULT "woot";'] > > """ >