
gis: Merged revisions 7772-7808,7811-7814,7816-7823,7826-7829,7831-7833,7835 via svnmerge from trunk. Modified GeoWhereNode accordingly for changes in r7835.

git-svn-id: http://code.djangoproject.com/svn/django/branches/gis@7836 bcc190cf-cafb-0310-a4f2-bffc1f526a37
Justin Bronn 2008-07-04 20:16:22 +00:00
parent aef8a8305d
commit bc3d6b4908
104 changed files with 6846 additions and 2128 deletions

View File

@ -59,7 +59,7 @@ answer newbie questions, and generally made Django that much better:
Arthur <avandorp@gmail.com>
av0000@mail.ru
David Avsajanishvili <avsd05@gmail.com>
axiak@mit.edu
Mike Axiak <axiak@mit.edu>
Niran Babalola <niran@niran.org>
Morten Bagai <m@bagai.com>
Mikaël Barbero <mikael.barbero nospam at nospam free.fr>
@ -94,6 +94,7 @@ answer newbie questions, and generally made Django that much better:
Sengtha Chay <sengtha@e-khmer.com>
ivan.chelubeev@gmail.com
Bryan Chow <bryan at verdjn dot com>
Antonis Christofides <anthony@itia.ntua.gr>
Michal Chruszcz <troll@pld-linux.org>
Can Burak Çilingir <canburak@cs.bilgi.edu.tr>
Ian Clelland <clelland@gmail.com>
@ -140,7 +141,9 @@ answer newbie questions, and generally made Django that much better:
Marc Fargas <telenieko@telenieko.com>
Szilveszter Farkas <szilveszter.farkas@gmail.com>
favo@exoweb.net
fdr <drfarina@gmail.com>
Dmitri Fedortchenko <zeraien@gmail.com>
Jonathan Feignberg <jdf@pobox.com>
Liang Feng <hutuworm@gmail.com>
Bill Fenner <fenner@gmail.com>
Stefane Fermgier <sf@fermigier.com>
@ -192,6 +195,7 @@ answer newbie questions, and generally made Django that much better:
james_027@yahoo.com
jcrasta@gmail.com
Zak Johnson <zakj@nox.cx>
Nis Jørgensen <nis@superlativ.dk>
Michael Josephson <http://www.sdjournal.com/>
jpellerin@gmail.com
junzhang.jn@gmail.com

View File

@ -231,6 +231,21 @@ MEDIA_ROOT = ''
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which streamed uploads will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# Default formatting for date objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
DATE_FORMAT = 'N j, Y'
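For context, a minimal sketch of how a project might override the new upload settings in its own settings.py (the values and the temp directory below are illustrative assumptions, not recommendations):

    # settings.py (project-level) -- illustrative values only.
    FILE_UPLOAD_HANDLERS = (
        'django.core.files.uploadhandler.TemporaryFileUploadHandler',  # always stream to disk
    )
    FILE_UPLOAD_MAX_MEMORY_SIZE = 5 * 2**20           # 5 MB instead of the 2.5 MB default
    FILE_UPLOAD_TEMP_DIR = '/var/tmp/django_uploads'  # hypothetical directory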

File diff suppressed because it is too large

View File

@ -6,7 +6,7 @@ msgstr ""
"Project-Id-Version: Django\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2007-03-14 01:38+0200\n"
"PO-Revision-Date: 2007-12-30 12:15+0200\n"
"PO-Revision-Date: 2008-07-02 23:37+0200\n"
"Last-Translator: Can Burak Çilingir <canburak@cs.bilgi.edu.tr>\n"
"Language-Team: Turkish <bahadir@pardus.org.tr>\n"
"MIME-Version: 1.0\n"
@ -2466,3 +2466,8 @@ msgstr "evet,hayır,olabilir"
#~ msgid "Have you <a href=\"/password_reset/\">forgotten your password</a>?"
#~ msgstr "<a href=\"/password_reset/\">Şifrenizi mi unuttunuz?</a>"
#: contrib/auth/forms.py:107
#, python-format
msgid "Password reset on %s"
msgstr "%s sitesindeki hesabınızın parolasının sıfırlanması"

View File

@ -1,14 +1,15 @@
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
from django.db import connection
from django.contrib.auth.models import User
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
class ModelBackend(object):
"""
Authenticate against django.contrib.auth.models.User
Authenticates against django.contrib.auth.models.User.
"""
# TODO: Model, login attribute name and password attribute name should be
# configurable.
@ -21,7 +22,10 @@ class ModelBackend(object):
return None
def get_group_permissions(self, user_obj):
"Returns a list of permission strings that this user has through his/her groups."
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if not hasattr(user_obj, '_group_perm_cache'):
cursor = connection.cursor()
# The SQL below works out to the following, after DB quoting:
@ -50,7 +54,7 @@ class ModelBackend(object):
cursor.execute(sql, [user_obj.id])
user_obj._group_perm_cache = set(["%s.%s" % (row[0], row[1]) for row in cursor.fetchall()])
return user_obj._group_perm_cache
def get_all_permissions(self, user_obj):
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set([u"%s.%s" % (p.content_type.app_label, p.codename) for p in user_obj.user_permissions.select_related()])
@ -61,7 +65,13 @@ class ModelBackend(object):
return perm in self.get_all_permissions(user_obj)
def has_module_perms(self, user_obj, app_label):
return bool(len([p for p in self.get_all_permissions(user_obj) if p[:p.index('.')] == app_label]))
"""
Returns True if user_obj has any permissions in the given app_label.
"""
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
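A standalone sketch of the rewritten has_module_perms logic above, using a plain set of "app_label.codename" strings like the one get_all_permissions() returns:

    perms = set(["auth.add_user", "auth.change_user", "blog.add_entry"])

    def has_module_perms(all_perms, app_label):
        # Compare the app_label portion (before the dot) of each permission.
        for perm in all_perms:
            if perm[:perm.index('.')] == app_label:
                return True
        return False

    assert has_module_perms(perms, 'auth')
    assert not has_module_perms(perms, 'sites')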
def get_user(self, user_id):
try:

View File

@ -0,0 +1,56 @@
[
{
"pk": "1",
"model": "auth.user",
"fields": {
"username": "testclient",
"first_name": "Test",
"last_name": "Client",
"is_active": true,
"is_superuser": false,
"is_staff": false,
"last_login": "2006-12-17 07:03:31",
"groups": [],
"user_permissions": [],
"password": "sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161",
"email": "testclient@example.com",
"date_joined": "2006-12-17 07:03:31"
}
},
{
"pk": "2",
"model": "auth.user",
"fields": {
"username": "inactive",
"first_name": "Inactive",
"last_name": "User",
"is_active": false,
"is_superuser": false,
"is_staff": false,
"last_login": "2006-12-17 07:03:31",
"groups": [],
"user_permissions": [],
"password": "sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161",
"email": "testclient@example.com",
"date_joined": "2006-12-17 07:03:31"
}
},
{
"pk": "3",
"model": "auth.user",
"fields": {
"username": "staff",
"first_name": "Staff",
"last_name": "Member",
"is_active": true,
"is_superuser": false,
"is_staff": true,
"last_login": "2006-12-17 07:03:31",
"groups": [],
"user_permissions": [],
"password": "sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161",
"email": "staffmember@example.com",
"date_joined": "2006-12-17 07:03:31"
}
}
]

View File

@ -96,10 +96,10 @@ class Group(models.Model):
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
ordering = ('name',)
class Admin:
search_fields = ('name',)
ordering = ('name',)
def __unicode__(self):
return self.name
@ -153,7 +153,6 @@ class User(models.Model):
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
ordering = ('username',)
class Admin:
fields = (
@ -166,6 +165,7 @@ class User(models.Model):
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
list_filter = ('is_staff', 'is_superuser')
search_fields = ('username', 'first_name', 'last_name', 'email')
ordering = ('username',)
def __unicode__(self):
return self.username

View File

@ -1,5 +1,5 @@
"""
>>> from models import User, AnonymousUser
>>> from django.contrib.auth.models import User, AnonymousUser
>>> u = User.objects.create_user('testuser', 'test@example.com', 'testpw')
>>> u.has_usable_password()
True
@ -52,4 +52,24 @@ Superuser created successfully.
u'joe@somewhere.org'
>>> u.password
u'!'
"""
"""
from django.test import TestCase
from django.core import mail
class PasswordResetTest(TestCase):
fixtures = ['authtestdata.json']
urls = 'django.contrib.auth.urls'
def test_email_not_found(self):
"Error is raised if the provided email address isn't currently registered"
response = self.client.get('/password_reset/')
self.assertEquals(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertContains(response, "That e-mail address doesn&#39;t have an associated user account")
self.assertEquals(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEquals(response.status_code, 302)
self.assertEquals(len(mail.outbox), 1)

View File

@ -0,0 +1,13 @@
# These URLs are normally mapped to /admin/urls.py. This URLs file is
# provided as a convenience to those who want to deploy these URLs elsewhere.
# This file is also used to provide a reliable view deployment for test purposes.
from django.conf.urls.defaults import *
urlpatterns = patterns('',
('^logout/$', 'django.contrib.auth.views.logout'),
('^password_change/$', 'django.contrib.auth.views.password_change'),
('^password_change/done/$', 'django.contrib.auth.views.password_change_done'),
('^password_reset/$', 'django.contrib.auth.views.password_reset')
)

View File

@ -8,7 +8,7 @@ class FlatPage(models.Model):
url = models.CharField(_('URL'), max_length=100, validator_list=[validators.isAlphaNumericURL], db_index=True,
help_text=_("Example: '/about/contact/'. Make sure to have leading and trailing slashes."))
title = models.CharField(_('title'), max_length=200)
content = models.TextField(_('content'))
content = models.TextField(_('content'), blank=True)
enable_comments = models.BooleanField(_('enable comments'))
template_name = models.CharField(_('template name'), max_length=70, blank=True,
help_text=_("Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use 'flatpages/default.html'."))

View File

@ -21,18 +21,14 @@ class TestForm(forms.Form):
class PreviewTests(TestCase):
urls = 'django.contrib.formtools.test_urls'
def setUp(self):
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = 'django.contrib.formtools.test_urls'
# Create a FormPreview instance to share between tests
self.preview = preview.FormPreview(TestForm)
input_template = '<input type="hidden" name="%s" value="%s" />'
self.input = input_template % (self.preview.unused_name('stage'), "%d")
def tearDown(self):
settings.ROOT_URLCONF = self._old_root_urlconf
def test_unused_name(self):
"""
Verifies name mangling to get a unique field name.

View File

@ -38,12 +38,12 @@ MISC_TERMS = ['isnull']
# Acceptable lookup types for MySQL spatial.
MYSQL_GIS_TERMS = MYSQL_GIS_FUNCTIONS.keys()
MYSQL_GIS_TERMS += MISC_TERMS
MYSQL_GIS_TERMS = tuple(MYSQL_GIS_TERMS) # Making immutable
MYSQL_GIS_TERMS = dict((term, None) for term in MYSQL_GIS_TERMS) # Making dictionary
def get_geo_where_clause(lookup_type, table_prefix, field, value):
def get_geo_where_clause(table_alias, name, lookup_type, geo_annot):
"Returns the SQL WHERE clause for use in MySQL spatial SQL construction."
# Getting the quoted field as `geo_col`.
geo_col = '%s.%s' % (qn(table_prefix), qn(field.column))
geo_col = '%s.%s' % (qn(table_alias), qn(name))
# See if a MySQL Geometry function matches the lookup type next
lookup_info = MYSQL_GIS_FUNCTIONS.get(lookup_type, False)
@ -54,6 +54,6 @@ def get_geo_where_clause(lookup_type, table_prefix, field, value):
# TODO: Is this needed because MySQL cannot handle NULL
# geometries in its spatial indices.
if lookup_type == 'isnull':
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
return "%s IS %sNULL" % (geo_col, (not geo_annot.value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))

View File

@ -110,10 +110,10 @@ ORACLE_SPATIAL_TERMS += MISC_TERMS
ORACLE_SPATIAL_TERMS = tuple(ORACLE_SPATIAL_TERMS) # Making immutable
#### The `get_geo_where_clause` function for Oracle ####
def get_geo_where_clause(lookup_type, table_prefix, field, value):
def get_geo_where_clause(table_alias, name, lookup_type, geo_annot):
"Returns the SQL WHERE clause for use in Oracle spatial SQL construction."
# Getting the quoted table name as `geo_col`.
geo_col = '%s.%s' % (qn(table_prefix), qn(field.column))
geo_col = '%s.%s' % (qn(table_alias), qn(name))
# See if an Oracle Geometry function matches the lookup type next
lookup_info = ORACLE_GEOMETRY_FUNCTIONS.get(lookup_type, False)
@ -126,19 +126,19 @@ def get_geo_where_clause(lookup_type, table_prefix, field, value):
sdo_op, arg_type = lookup_info
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, tuple):
if not isinstance(geo_annot.value, tuple):
raise TypeError('Tuple required for `%s` lookup type.' % lookup_type)
if len(value) != 2:
if len(geo_annot.value) != 2:
raise ValueError('2-element tuple required for %s lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise TypeError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
if not isinstance(geo_annot.value[1], arg_type):
raise TypeError('Argument type should be %s, got %s instead.' % (arg_type, type(geo_annot.value[1])))
if lookup_type == 'relate':
# The SDORelate class handles construction for these queries,
# and verifies the mask argument.
return sdo_op(value[1]).as_sql(geo_col)
return sdo_op(geo_annot.value[1]).as_sql(geo_col)
else:
# Otherwise, just call the `as_sql` method on the SDOOperation instance.
return sdo_op.as_sql(geo_col)
@ -149,6 +149,6 @@ def get_geo_where_clause(lookup_type, table_prefix, field, value):
return lookup_info.as_sql(geo_col)
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
return "%s IS %sNULL" % (geo_col, (not geo_annot.value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))

View File

@ -218,7 +218,7 @@ MISC_TERMS = ['isnull']
POSTGIS_TERMS = POSTGIS_OPERATORS.keys() # Getting the operators first
POSTGIS_TERMS += POSTGIS_GEOMETRY_FUNCTIONS.keys() # Adding on the Geometry Functions
POSTGIS_TERMS += MISC_TERMS # Adding any other miscellaneous terms (e.g., 'isnull')
POSTGIS_TERMS = tuple(POSTGIS_TERMS) # Making immutable
POSTGIS_TERMS = dict((term, None) for term in POSTGIS_TERMS) # Making a dictionary for fast lookups
# For checking tuple parameters -- not very pretty but gets job done.
def exactly_two(val): return val == 2
@ -228,10 +228,10 @@ def num_params(lookup_type, val):
else: return exactly_two(val)
#### The `get_geo_where_clause` function for PostGIS. ####
def get_geo_where_clause(lookup_type, table_prefix, field, value):
def get_geo_where_clause(table_alias, name, lookup_type, geo_annot):
"Returns the SQL WHERE clause for use in PostGIS SQL construction."
# Getting the quoted field as `geo_col`.
geo_col = '%s.%s' % (qn(table_prefix), qn(field.column))
geo_col = '%s.%s' % (qn(table_alias), qn(name))
if lookup_type in POSTGIS_OPERATORS:
# See if a PostGIS operator matches the lookup type.
return POSTGIS_OPERATORS[lookup_type].as_sql(geo_col)
@ -248,30 +248,31 @@ def get_geo_where_clause(lookup_type, table_prefix, field, value):
op, arg_type = tmp
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, (tuple, list)):
if not isinstance(geo_annot.value, (tuple, list)):
raise TypeError('Tuple required for `%s` lookup type.' % lookup_type)
# Number of valid tuple parameters depends on the lookup type.
nparams = len(value)
nparams = len(geo_annot.value)
if not num_params(lookup_type, nparams):
raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise TypeError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
if not isinstance(geo_annot.value[1], arg_type):
raise TypeError('Argument type should be %s, got %s instead.' % (arg_type, type(geo_annot.value[1])))
# For lookup type `relate`, the op instance is not yet created (has
# to be instantiated here to check the pattern parameter).
if lookup_type == 'relate':
op = op(value[1])
op = op(geo_annot.value[1])
elif lookup_type in DISTANCE_FUNCTIONS and lookup_type != 'dwithin':
if field.geodetic:
if geo_annot.geodetic:
# Geodetic distances are only available from Points to PointFields.
if field._geom != 'POINT':
if geo_annot.geom_type != 'POINT':
raise TypeError('PostGIS spherical operations are only valid on PointFields.')
if value[0].geom_typeid != 0:
if geo_annot.value[0].geom_typeid != 0:
raise TypeError('PostGIS geometry distance parameter is required to be of type Point.')
# Setting up the geodetic operation appropriately.
if nparams == 3 and value[2] == 'spheroid': op = op[2]
if nparams == 3 and geo_annot.value[2] == 'spheroid': op = op[2]
else: op = op[1]
else:
op = op[0]
@ -281,6 +282,6 @@ def get_geo_where_clause(lookup_type, table_prefix, field, value):
return op.as_sql(geo_col)
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
return "%s IS %sNULL" % (geo_col, (not geo_annot.value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))

View File

@ -111,7 +111,7 @@ class GeometryField(SpatialBackend.Field):
except SpatialBackend.GeometryException:
raise ValueError('Could not create geometry from lookup value: %s' % str(value))
else:
raise TypeError('Cannot use parameter of `%s` type as lookup parameter.' % type(value))
raise TypeError('Cannot use parameter of `%s` type as a geometry lookup parameter.' % type(value))
# Assigning the SRID value.
geom.srid = self.get_srid(geom)

View File

@ -103,7 +103,8 @@ class GeoQuery(sql.Query):
self._select_aliases = aliases
return result
def get_default_columns(self, with_aliases=False, col_aliases=None):
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False):
"""
Computes the default columns for selecting every field in the base
model.
@ -115,11 +116,14 @@ class GeoQuery(sql.Query):
geometry columns.
"""
result = []
table_alias = self.tables[0]
if opts is None:
opts = self.model._meta
if start_alias:
table_alias = start_alias
else:
table_alias = self.tables[0]
root_pk = self.model._meta.pk.column
seen = {None: table_alias}
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
for field, model in self.model._meta.get_fields_with_model():
try:
@ -128,12 +132,13 @@ class GeoQuery(sql.Query):
alias = self.join((table_alias, model._meta.db_table,
root_pk, model._meta.pk.column))
seen[model] = alias
if as_pairs:
result.append((alias, field.column))
continue
# This part of the function is customized for GeoQuery. We
# see if there was any custom selection specified in the
# dictionary, and set up the selection format appropriately.
field_sel = self.get_field_select(field, alias)
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (field_sel, c_alias))
@ -145,6 +150,8 @@ class GeoQuery(sql.Query):
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
if as_pairs:
return result, None
return result, aliases
def get_ordering(self):

View File

@ -1,21 +1,78 @@
import datetime
from django.db.models.fields import Field
from django.db.models.sql.where import WhereNode
from django.contrib.gis.db.backend import get_geo_where_clause, SpatialBackend
class GeoAnnotation(object):
"""
The annotation used for GeometryFields; basically a placeholder
for metadata needed by the `get_geo_where_clause` of the spatial
backend.
"""
def __init__(self, field, value, where):
self.geodetic = field.geodetic
self.geom_type = field._geom
self.value = value
self.where = tuple(where)
class GeoWhereNode(WhereNode):
"""
The GeoWhereNode calls the `get_geo_where_clause` from the appropriate
spatial backend in order to construct correct spatial SQL.
Used to represent the SQL where-clause for spatial databases --
these are tied to the GeoQuery class that created it.
"""
def add(self, data, connector):
"""
This is overridden from the regular WhereNode to handle the
peculiarities of GeometryFields, because they need a special
annotation object that contains the spatial metadata from the
field so that the correct spatial SQL is generated.
"""
if not isinstance(data, (list, tuple)):
super(WhereNode, self).add(data, connector)
return
alias, col, field, lookup_type, value = data
# Do we have a geographic field?
geo_field = hasattr(field, '_geom')
if field:
if geo_field:
# `GeometryField.get_db_prep_lookup` returns a where clause
# substitution array in addition to the parameters.
where, params = field.get_db_prep_lookup(lookup_type, value)
else:
params = field.get_db_prep_lookup(lookup_type, value)
db_type = field.db_type()
else:
# This is possible when we add a comparison to NULL sometimes (we
# don't really need to waste time looking up the associated field
# object).
params = Field().get_db_prep_lookup(lookup_type, value)
db_type = None
if geo_field:
# The annotation will be a `GeoAnnotation` object that
# will contain the necessary geometry field metadata for
# the `get_geo_where_clause` to construct the appropriate
# spatial SQL when `make_atom` is called.
annotation = GeoAnnotation(field, value, where)
elif isinstance(value, datetime.datetime):
annotation = datetime.datetime
else:
annotation = bool(value)
super(WhereNode, self).add((alias, col, db_type, lookup_type,
annotation, params), connector)
def make_atom(self, child, qn):
table_alias, name, field, lookup_type, value = child
if hasattr(field, '_geom'):
table_alias, name, db_type, lookup_type, value_annot, params = child
if isinstance(value_annot, GeoAnnotation):
if lookup_type in SpatialBackend.gis_terms:
# Getting the geographic where clause; substitution parameters
# will be populated in the GeoFieldSQL object returned by the
# GeometryField.
gwc = get_geo_where_clause(lookup_type, table_alias, field, value)
where, params = field.get_db_prep_lookup(lookup_type, value)
return gwc % tuple(where), params
gwc = get_geo_where_clause(table_alias, name, lookup_type, value_annot)
return gwc % value_annot.where, params
else:
raise TypeError('Invalid lookup type: %r' % lookup_type)
else:
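A condensed, hand-written sketch (not the actual Django code) of the new contract above: the field object in the where tree is replaced by a GeoAnnotation, and each backend's get_geo_where_clause now receives (table_alias, name, lookup_type, geo_annot). Only the 'isnull' branch is shown:

    class GeoAnnotation(object):
        def __init__(self, geodetic, geom_type, value, where):
            self.geodetic, self.geom_type = geodetic, geom_type
            self.value, self.where = value, tuple(where)

    def get_geo_where_clause(table_alias, name, lookup_type, geo_annot):
        geo_col = '"%s"."%s"' % (table_alias, name)  # quoting varies per backend
        if lookup_type == 'isnull':
            return "%s IS %sNULL" % (geo_col, (not geo_annot.value and 'NOT ' or ''))
        raise TypeError("Got invalid lookup_type: %r" % lookup_type)

    annot = GeoAnnotation(False, 'POINT', True, ())
    print get_geo_where_clause('geoapp_city', 'point', 'isnull', annot)
    # -> "geoapp_city"."point" IS NULL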

View File

@ -88,14 +88,14 @@ class DistanceTest(unittest.TestCase):
else: dist = dist[0]
# Creating the query set.
qs = AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).order_by('name')
qs = AustraliaCity.objects.order_by('name')
if type_error:
# A TypeError should be raised on PostGIS when trying to pass
# Distance objects into a DWithin query using a geodetic field.
self.assertRaises(TypeError, qs.count)
self.assertRaises(TypeError, AustraliaCity.objects.filter, point__dwithin=(self.au_pnt, dist))
else:
self.assertEqual(au_cities, self.get_names(qs))
self.assertEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
def test03a_distance_method(self):
"Testing the `distance` GeoQuerySet method on projected coordinate systems."
# The point for La Grange, TX
@ -217,11 +217,11 @@ class DistanceTest(unittest.TestCase):
AustraliaCity.objects.filter(point__distance_lte=(mp, D(km=100))))
# Too many params (4 in this case) should raise a ValueError.
self.assertRaises(ValueError,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')).count)
AustraliaCity.objects.filter, point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4'))
# Not enough params should raise a ValueError.
self.assertRaises(ValueError,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)).count)
AustraliaCity.objects.filter, point__distance_lte=('POINT(5 23)',))
# Getting all cities w/in 550 miles of Hobart.
hobart = AustraliaCity.objects.get(name='Hobart')

View File

@ -198,6 +198,7 @@ class GeoModelTest(unittest.TestCase):
@no_oracle
def test06_make_line(self):
"Testing the `make_line` GeoQuerySet method."
if DISABLE: return
# Ensuring that a `TypeError` is raised on models without PointFields.
self.assertRaises(TypeError, State.objects.make_line)
self.assertRaises(TypeError, Country.objects.make_line)
@ -381,14 +382,13 @@ class GeoModelTest(unittest.TestCase):
pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)
# Testing bad argument tuples that should return a TypeError or
# a ValueError.
bad_args = [((pnt1, 0), TypeError),
((pnt2, 'T*T***FF*', 0), ValueError),
((23, 'foo'), ValueError),
]
for args, e in bad_args:
qs = Country.objects.filter(mpoly__relate=args)
# Not passing in a geometry as the first param should
# raise a TypeError when initializing the GeoQuerySet.
self.assertRaises(TypeError, Country.objects.filter, mpoly__relate=(23, 'foo'))
# Making sure the right exception is raised for the given
# bad arguments.
for bad_args, e in [((pnt1, 0), TypeError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
qs = Country.objects.filter(mpoly__relate=bad_args)
self.assertRaises(e, qs.count)
# Relate works differently for the different backends.
@ -471,6 +471,7 @@ class GeoModelTest(unittest.TestCase):
def test19_centroid(self):
"Testing the `centroid` GeoQuerySet method."
if DISABLE: return
qs = State.objects.exclude(poly__isnull=True).centroid()
if oracle: tol = 0.1
else: tol = 0.000000001
@ -479,6 +480,7 @@ class GeoModelTest(unittest.TestCase):
def test20_pointonsurface(self):
"Testing the `point_on_surface` GeoQuerySet method."
if DISABLE: return
# Reference values.
if SpatialBackend.oracle:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05)) FROM GEOAPP_COUNTRY;
@ -497,6 +499,7 @@ class GeoModelTest(unittest.TestCase):
@no_oracle
def test21_scale(self):
"Testing the `scale` GeoQuerySet method."
if DISABLE: return
xfac, yfac = 2, 3
qs = Country.objects.scale(xfac, yfac, model_att='scaled')
for c in qs:
@ -509,6 +512,7 @@ class GeoModelTest(unittest.TestCase):
@no_oracle
def test22_translate(self):
"Testing the `translate` GeoQuerySet method."
if DISABLE: return
xfac, yfac = 5, -23
qs = Country.objects.translate(xfac, yfac, model_att='translated')
for c in qs:
@ -520,6 +524,7 @@ class GeoModelTest(unittest.TestCase):
def test23_numgeom(self):
"Testing the `num_geom` GeoQuerySet method."
if DISABLE: return
# Both 'countries' only have two geometries.
for c in Country.objects.num_geom(): self.assertEqual(2, c.num_geom)
for c in City.objects.filter(point__isnull=False).num_geom():
@ -530,6 +535,7 @@ class GeoModelTest(unittest.TestCase):
def test24_numpoints(self):
"Testing the `num_points` GeoQuerySet method."
if DISABLE: return
for c in Country.objects.num_points(): self.assertEqual(c.mpoly.num_points, c.num_points)
if postgis:
# Oracle cannot count vertices in Point geometries.
@ -538,6 +544,7 @@ class GeoModelTest(unittest.TestCase):
@no_oracle
def test25_geoset(self):
"Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
if DISABLE: return
geom = Point(5, 23)
for c in Country.objects.all().intersection(geom).difference(geom).sym_difference(geom).union(geom):
self.assertEqual(c.mpoly.difference(geom), c.difference)

View File

@ -0,0 +1,14 @@
from django.core.management.base import BaseCommand
from django.contrib.sitemaps import ping_google
class Command(BaseCommand):
help = "Ping google with an updated sitemap, pass optional url of sitemap"
def execute(self, *args, **options):
if len(args) == 1:
sitemap_url = args[0]
else:
sitemap_url = None
ping_google(sitemap_url=sitemap_url)
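A hedged usage sketch calling the function this command wraps (the sitemap URL below is a hypothetical example):

    from django.contrib.sitemaps import ping_google
    ping_google(sitemap_url='/sitemap.xml')  # or ping_google() to use the default lookup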

View File

View File

@ -0,0 +1,66 @@
"""
Portable file locking utilities.
Based partially on example by Jonathan Feignberg <jdf@pobox.com> in the Python
Cookbook, licensed under the Python Software License.
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
Example Usage::
>>> from django.core.files import locks
>>> f = open('./file', 'wb')
>>> locks.lock(f, locks.LOCK_EX)
>>> f.write('Django')
>>> f.close()
"""
__all__ = ('LOCK_EX','LOCK_SH','LOCK_NB','lock','unlock')
system_type = None
try:
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
__overlapped = pywintypes.OVERLAPPED()
system_type = 'nt'
except (ImportError, AttributeError):
pass
try:
import fcntl
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
system_type = 'posix'
except (ImportError, AttributeError):
pass
if system_type == 'nt':
def lock(file, flags):
hfile = win32file._get_osfhandle(file.fileno())
win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped)
def unlock(file):
hfile = win32file._get_osfhandle(file.fileno())
win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped)
elif system_type == 'posix':
def lock(file, flags):
fcntl.flock(file.fileno(), flags)
def unlock(file):
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
else:
# File locking is not supported.
LOCK_EX = LOCK_SH = LOCK_NB = None
# Dummy functions that don't do anything.
def lock(file, flags):
pass
def unlock(file):
pass

django/core/files/move.py Normal file
View File

@ -0,0 +1,59 @@
"""
Move a file in the safest way possible::
>>> from django.core.files.move import file_move_safe
>>> file_move_safe("/tmp/old_file", "/tmp/new_file")
"""
import os
from django.core.files import locks
__all__ = ['file_move_safe']
try:
import shutil
file_move = shutil.move
except ImportError:
file_move = os.rename
def file_move_safe(old_file_name, new_file_name, chunk_size = 1024*64, allow_overwrite=False):
"""
Moves a file from one location to another in the safest way possible.
First, try using ``shutil.move``, which is OS-dependent but doesn't break
if moving across filesystems. Then, try ``os.rename``, which will break
across filesystems. Finally, stream manually from one file to another in
pure Python.
If the destination file exists and ``allow_overwrite`` is ``False``, this
function will throw an ``IOError``.
"""
# There's no reason to move if we don't have to.
if old_file_name == new_file_name:
return
if not allow_overwrite and os.path.exists(new_file_name):
raise IOError("Cannot overwrite existing file '%s'." % new_file_name)
try:
file_move(old_file_name, new_file_name)
return
except OSError:
# This will happen with os.rename if moving to another filesystem
pass
# If the built-in didn't work, do it the hard way.
new_file = open(new_file_name, 'wb')
locks.lock(new_file, locks.LOCK_EX)
old_file = open(old_file_name, 'rb')
current_chunk = None
while current_chunk != '':
current_chunk = old_file.read(chunk_size)
new_file.write(current_chunk)
new_file.close()
old_file.close()
os.remove(old_file_name)

View File

@ -0,0 +1,190 @@
"""
Classes representing uploaded files.
"""
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile')
class UploadedFile(object):
"""
An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2**10
def __init__(self, file_name=None, content_type=None, file_size=None, charset=None):
self.file_name = file_name
self.file_size = file_size
self.content_type = content_type
self.charset = charset
def __repr__(self):
return "<%s: %s (%s)>" % (self.__class__.__name__, self.file_name, self.content_type)
def _set_file_name(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
name = name[:255 - len(ext)] + ext
self._file_name = name
def _get_file_name(self):
return self._file_name
file_name = property(_get_file_name, _set_file_name)
def chunk(self, chunk_size=None):
"""
Read the file and yield chunks of ``chunk_size`` bytes (defaults to
``UploadedFile.DEFAULT_CHUNK_SIZE``).
"""
if not chunk_size:
chunk_size = UploadedFile.DEFAULT_CHUNK_SIZE
if hasattr(self, 'seek'):
self.seek(0)
# Assume the pointer is at zero...
counter = self.file_size
while counter > 0:
yield self.read(chunk_size)
counter -= chunk_size
def multiple_chunks(self, chunk_size=None):
"""
Returns ``True`` if you can expect multiple chunks.
NB: If a particular file representation is in memory, subclasses should
always return ``False`` -- there's no good reason to read from memory in
chunks.
"""
if not chunk_size:
chunk_size = UploadedFile.DEFAULT_CHUNK_SIZE
return self.file_size > chunk_size
# Abstract methods; subclasses *must* define read() and probably should
# define open/close.
def read(self, num_bytes=None):
raise NotImplementedError()
def open(self):
pass
def close(self):
pass
# Backwards-compatible support for uploaded-files-as-dictionaries.
def __getitem__(self, key):
import warnings
warnings.warn(
message = "The dictionary access of uploaded file objects is deprecated. Use the new object interface instead.",
category = DeprecationWarning,
stacklevel = 2
)
backwards_translate = {
'filename': 'file_name',
'content-type': 'content_type',
}
if key == 'content':
return self.read()
elif key == 'filename':
return self.file_name
elif key == 'content-type':
return self.content_type
else:
return getattr(self, key)
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, file, file_name, content_type, file_size, charset):
super(TemporaryUploadedFile, self).__init__(file_name, content_type, file_size, charset)
self.file = file
self.path = file.name
self.file.seek(0)
def temporary_file_path(self):
"""
Returns the full path of this file.
"""
return self.path
def read(self, *args, **kwargs):
return self.file.read(*args, **kwargs)
def open(self):
self.seek(0)
def seek(self, *args, **kwargs):
self.file.seek(*args, **kwargs)
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, field_name, file_name, content_type, file_size, charset):
super(InMemoryUploadedFile, self).__init__(file_name, content_type, file_size, charset)
self.file = file
self.field_name = field_name
self.file.seek(0)
def seek(self, *args, **kwargs):
self.file.seek(*args, **kwargs)
def open(self):
self.seek(0)
def read(self, *args, **kwargs):
return self.file.read(*args, **kwargs)
def chunk(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, content_type='text/plain'):
self.file = StringIO(content or '')
self.file_name = name
self.field_name = None
self.file_size = len(content or '')
self.content_type = content_type
self.charset = None
self.file.seek(0)
def from_dict(cls, file_dict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(file_dict['filename'],
file_dict['content'],
file_dict.get('content-type', 'text/plain'))
from_dict = classmethod(from_dict)
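A short usage sketch for the class above (Python 2 era, so file content is a plain byte string):

    from django.core.files.uploadedfile import SimpleUploadedFile

    f = SimpleUploadedFile("hello.txt", "file contents here")
    f.file_name           # 'hello.txt'
    f.multiple_chunks()   # False -- the data lives in memory
    f.read()              # 'file contents here'

    # Old-style dictionary form, via the classmethod defined above:
    f2 = SimpleUploadedFile.from_dict({'filename': 'hello.txt',
                                       'content': 'file contents here'})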

View File

@ -0,0 +1,245 @@
"""
Base file upload handler classes, and the built-in concrete subclasses
"""
import os
import tempfile
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import TemporaryUploadedFile, InMemoryUploadedFile
__all__ = ['UploadFileException','StopUpload', 'SkipFile', 'FileUploadHandler',
'TemporaryFileUploadHandler', 'MemoryFileUploadHandler',
'load_handler']
class UploadFileException(Exception):
"""
Any error having to do with uploading files.
"""
pass
class StopUpload(UploadFileException):
"""
This exception is raised when an upload must abort.
"""
def __init__(self, connection_reset=False):
"""
If ``connection_reset`` is ``True``, Django will halt the upload
without consuming the rest of the upload. This will cause the browser to
show a "connection reset" error.
"""
self.connection_reset = connection_reset
def __unicode__(self):
if self.connection_reset:
return u'StopUpload: Halt current upload.'
else:
return u'StopUpload: Consume request data, then halt.'
class SkipFile(UploadFileException):
"""
This exception is raised by an upload handler that wants to skip a given file.
"""
pass
class StopFutureHandlers(UploadFileException):
"""
Upload handlers that have handled a file and do not want future handlers to
run should raise this exception instead of returning None.
"""
pass
class FileUploadHandler(object):
"""
Base class for streaming upload handlers.
"""
chunk_size = 64 * 2 ** 10 #: The default chunk size is 64 KB.
def __init__(self, request=None):
self.file_name = None
self.content_type = None
self.content_length = None
self.charset = None
self.request = request
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Handle the raw input from the client.
Parameters:
:input_data:
An object that supports reading via .read().
:META:
``request.META``.
:content_length:
The (integer) value of the Content-Length header from the
client.
:boundary: The boundary from the Content-Type header. Be sure to
prepend two '--'.
"""
pass
def new_file(self, field_name, file_name, content_type, content_length, charset=None):
"""
Signal that a new file has been started.
Warning: As with any data from the client, you should not trust
content_length (and sometimes won't even get it).
"""
self.field_name = field_name
self.file_name = file_name
self.content_type = content_type
self.content_length = content_length
self.charset = charset
def receive_data_chunk(self, raw_data, start):
"""
Receive data from the streamed upload parser. ``start`` is the position
in the file of the chunk.
"""
raise NotImplementedError()
def file_complete(self, file_size):
"""
Signal that a file has completed. File size corresponds to the actual
size accumulated by all the chunks.
Subclasses should return a valid ``UploadedFile`` object.
"""
raise NotImplementedError()
def upload_complete(self):
"""
Signal that the upload is complete. Subclasses should perform cleanup
that is necessary for this handler.
"""
pass
class TemporaryFileUploadHandler(FileUploadHandler):
"""
Upload handler that streams data into a temporary file.
"""
def __init__(self, *args, **kwargs):
super(TemporaryFileUploadHandler, self).__init__(*args, **kwargs)
def new_file(self, file_name, *args, **kwargs):
"""
Create the file object to append to as data is coming in.
"""
super(TemporaryFileUploadHandler, self).new_file(file_name, *args, **kwargs)
self.file = TemporaryFile(settings.FILE_UPLOAD_TEMP_DIR)
self.write = self.file.write
def receive_data_chunk(self, raw_data, start):
self.write(raw_data)
def file_complete(self, file_size):
self.file.seek(0)
return TemporaryUploadedFile(
file = self.file,
file_name = self.file_name,
content_type = self.content_type,
file_size = file_size,
charset = self.charset
)
class MemoryFileUploadHandler(FileUploadHandler):
"""
File upload handler to stream uploads into memory (used for small files).
"""
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Use the content_length to signal whether or not this handler should be in use.
"""
# Check the Content-Length header to see if this handler should be used.
# If the post is too large, we cannot use the memory handler.
if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
self.activated = False
else:
self.activated = True
def new_file(self, *args, **kwargs):
super(MemoryFileUploadHandler, self).new_file(*args, **kwargs)
if self.activated:
self.file = StringIO()
raise StopFutureHandlers()
def receive_data_chunk(self, raw_data, start):
"""
Add the data to the StringIO file.
"""
if self.activated:
self.file.write(raw_data)
else:
return raw_data
def file_complete(self, file_size):
"""
Return a file object if we're activated.
"""
if not self.activated:
return
return InMemoryUploadedFile(
file = self.file,
field_name = self.field_name,
file_name = self.file_name,
content_type = self.content_type,
file_size = file_size,
charset = self.charset
)
class TemporaryFile(object):
"""
A temporary file that tries to delete itself when garbage collected.
"""
def __init__(self, dir):
if not dir:
dir = tempfile.gettempdir()
try:
(fd, name) = tempfile.mkstemp(suffix='.upload', dir=dir)
self.file = os.fdopen(fd, 'w+b')
except (OSError, IOError):
raise OSError("Could not create temporary file for uploading, have you set settings.FILE_UPLOAD_TEMP_DIR correctly?")
self.name = name
def __getattr__(self, name):
a = getattr(self.__dict__['file'], name)
if type(a) != type(0):
setattr(self, name, a)
return a
def __del__(self):
try:
os.unlink(self.name)
except OSError:
pass
def load_handler(path, *args, **kwargs):
"""
Given a path to a handler, return an instance of that handler.
E.g.::
>>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request)
<TemporaryFileUploadHandler object at 0x...>
"""
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = __import__(module, {}, {}, [attr])
except ImportError, e:
raise ImproperlyConfigured('Error importing upload handler module %s: "%s"' % (module, e))
except ValueError, e:
raise ImproperlyConfigured('Error importing upload handler module. Is FILE_UPLOAD_HANDLERS a correctly defined list or tuple?')
try:
cls = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" upload handler backend' % (module, attr))
return cls(*args, **kwargs)
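A hedged sketch of a custom streaming handler built on the base class above (the class name is hypothetical): it counts bytes as they pass and defers storage to later handlers in FILE_UPLOAD_HANDLERS, using the convention that receive_data_chunk passes data along by returning it:

    class CountingUploadHandler(FileUploadHandler):
        def __init__(self, request=None):
            FileUploadHandler.__init__(self, request)
            self.total_bytes = 0

        def receive_data_chunk(self, raw_data, start):
            self.total_bytes += len(raw_data)
            return raw_data   # returning the data hands it to the next handler

        def file_complete(self, file_size):
            return None       # no UploadedFile produced; later handlers supply it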

View File

@ -53,7 +53,8 @@ class ModPythonRequest(http.HttpRequest):
def _load_post_and_files(self):
"Populates self._post and self._files"
if 'content-type' in self._req.headers_in and self._req.headers_in['content-type'].startswith('multipart'):
self._post, self._files = http.parse_file_upload(self._req.headers_in, self.raw_post_data)
self._raw_post_data = ''
self._post, self._files = self.parse_file_upload(self.META, self._req)
else:
self._post, self._files = http.QueryDict(self.raw_post_data, encoding=self._encoding), datastructures.MultiValueDict()

View File

@ -112,9 +112,8 @@ class WSGIRequest(http.HttpRequest):
# Populates self._post and self._files
if self.method == 'POST':
if self.environ.get('CONTENT_TYPE', '').startswith('multipart'):
header_dict = dict([(k, v) for k, v in self.environ.items() if k.startswith('HTTP_')])
header_dict['Content-Type'] = self.environ.get('CONTENT_TYPE', '')
self._post, self._files = http.parse_file_upload(header_dict, self.raw_post_data)
self._raw_post_data = ''
self._post, self._files = self.parse_file_upload(self.META, self.environ['wsgi.input'])
else:
self._post, self._files = http.QueryDict(self.raw_post_data, encoding=self._encoding), datastructures.MultiValueDict()
else:

View File

@ -21,10 +21,10 @@ class Command(LabelCommand):
for f in fields:
field_output = [qn(f.name), f.db_type()]
field_output.append("%sNULL" % (not f.null and "NOT " or ""))
if f.unique:
field_output.append("UNIQUE")
if f.primary_key:
field_output.append("PRIMARY KEY")
elif f.unique:
field_output.append("UNIQUE")
if f.db_index:
unique = f.unique and "UNIQUE " or ""
index_output.append("CREATE %sINDEX %s_%s ON %s (%s);" % \

View File

@ -162,3 +162,9 @@ class Command(BaseCommand):
else:
if verbosity > 0:
print "Installed %d object(s) from %d fixture(s)" % (object_count, fixture_count)
# Close the DB connection. This is required as a workaround for an
# edge case in MySQL: if the same connection is used to
# create tables, load data, and query, the query can return
# incorrect results. See Django #7572, MySQL #37735.
connection.close()

View File

@ -268,11 +268,11 @@ def sql_model_create(model, style, known_models=set()):
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
field_output.append(style.SQL_KEYWORD('%sNULL' % (not f.null and 'NOT ' or '')))
if f.unique and (not f.primary_key or connection.features.allows_unique_and_pk):
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
if tablespace and connection.features.supports_tablespaces and (f.unique or f.primary_key) and connection.features.autoindexes_primary_keys:
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if tablespace and connection.features.supports_tablespaces and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
field_output.append(connection.ops.tablespace_sql(tablespace, inline=True))
@ -355,7 +355,7 @@ def many_to_many_sql_for_model(model, style):
for f in opts.local_many_to_many:
if not isinstance(f.rel, generic.GenericRel):
tablespace = f.db_tablespace or opts.db_tablespace
if tablespace and connection.features.supports_tablespaces and connection.features.autoindexes_primary_keys:
if tablespace and connection.features.supports_tablespaces:
tablespace_sql = ' ' + connection.ops.tablespace_sql(tablespace, inline=True)
else:
tablespace_sql = ''
@ -468,15 +468,14 @@ def sql_indexes_for_model(model, style):
qn = connection.ops.quote_name
for f in model._meta.local_fields:
if f.db_index and not ((f.primary_key or f.unique) and connection.features.autoindexes_primary_keys):
unique = f.unique and 'UNIQUE ' or ''
if f.db_index and not f.unique:
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace and connection.features.supports_tablespaces:
tablespace_sql = ' ' + connection.ops.tablespace_sql(tablespace)
else:
tablespace_sql = ''
output.append(
style.SQL_KEYWORD('CREATE %sINDEX' % unique) + ' ' + \
style.SQL_KEYWORD('CREATE INDEX') + ' ' + \
style.SQL_TABLE(qn('%s_%s' % (model._meta.db_table, f.column))) + ' ' + \
style.SQL_KEYWORD('ON') + ' ' + \
style.SQL_TABLE(qn(model._meta.db_table)) + ' ' + \

View File

@ -173,7 +173,10 @@ class ObjectPaginator(Paginator):
if self._count is None:
try:
self._count = self.object_list.count()
except TypeError:
except (AttributeError, TypeError):
# AttributeError if object_list has no count() method.
# TypeError if object_list.count() requires arguments
# (i.e. is of type list).
self._count = len(self.object_list)
return self._count
count = property(_get_count)
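A plain-Python illustration of why both exceptions are caught above:

    class NoCount(object):
        "Hypothetical object list with a length but no count() method."
        def __len__(self):
            return 3

    for object_list in ([1, 2, 3], NoCount()):
        try:
            n = object_list.count()          # list.count() requires an argument -> TypeError
        except (AttributeError, TypeError):  # NoCount has no count() -> AttributeError
            n = len(object_list)             # both cases fall back to len()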

View File

@ -551,6 +551,9 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
from django.conf import settings
self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
# We set self.path to avoid crashes in log_message() on unsupported
# requests (like "OPTIONS").
self.path = ''
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def get_environ(self):

View File

@ -40,6 +40,7 @@ Optional Fcgi settings: (setting=value)
workdir=DIRECTORY change to this directory when daemonizing.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
@ -73,6 +74,7 @@ FASTCGI_OPTIONS = {
'maxrequests': 0,
'outlog': None,
'errlog': None,
'umask': None,
}
def fastcgi_help(message=None):
@ -159,6 +161,8 @@ def runfastcgi(argset=[], **kwargs):
daemon_kwargs['out_log'] = options['outlog']
if options['errlog']:
daemon_kwargs['err_log'] = options['errlog']
if options['umask']:
daemon_kwargs['umask'] = int(options['umask'])
if daemonize:
from django.utils.daemonize import become_daemon

View File

@ -296,3 +296,8 @@ def reverse(viewname, urlconf=None, args=None, kwargs=None):
kwargs = kwargs or {}
return iri_to_uri(u'/' + get_resolver(urlconf).reverse(viewname, *args, **kwargs))
def clear_url_caches():
global _resolver_cache
global _callable_cache
_resolver_cache.clear()
_callable_cache.clear()
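A hedged usage sketch: code that swaps ROOT_URLCONF at runtime (as the formtools tests did before this commit) would also flush these memoized resolvers; the module name below is hypothetical:

    from django.conf import settings
    from django.core.urlresolvers import clear_url_caches

    settings.ROOT_URLCONF = 'myproject.test_urls'
    clear_url_caches()  # drop the cached resolvers and view callables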

View File

@ -41,8 +41,6 @@ class BaseDatabaseWrapper(local):
class BaseDatabaseFeatures(object):
allows_group_by_ordinal = True
allows_unique_and_pk = True
autoindexes_primary_keys = True
inline_fk_references = True
needs_datetime_string_cast = True
supports_constraints = True

View File

@ -60,7 +60,6 @@ server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class DatabaseFeatures(BaseDatabaseFeatures):
autoindexes_primary_keys = False
inline_fk_references = False
empty_fetchmany_value = ()
update_can_self_select = False
@ -136,7 +135,7 @@ class DatabaseWrapper(BaseDatabaseWrapper):
features = DatabaseFeatures()
ops = DatabaseOperations()
operators = {
'exact': '= %s',
'exact': '= BINARY %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
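In effect, ``exact`` lookups become case-sensitive on MySQL (the same change appears in the mysql_old backend below): the ``BINARY`` keyword forces a byte-wise comparison, so filtering on ``username__exact='Fred'`` no longer matches ``'fred'`` under MySQL's default case-insensitive collations.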

View File

@ -64,7 +64,6 @@ class MysqlDebugWrapper:
return getattr(self.cursor, attr)
class DatabaseFeatures(BaseDatabaseFeatures):
autoindexes_primary_keys = False
inline_fk_references = False
empty_fetchmany_value = ()
update_can_self_select = False
@ -140,7 +139,7 @@ class DatabaseWrapper(BaseDatabaseWrapper):
features = DatabaseFeatures()
ops = DatabaseOperations()
operators = {
'exact': '= %s',
'exact': '= BINARY %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',

View File

@ -24,7 +24,6 @@ IntegrityError = Database.IntegrityError
class DatabaseFeatures(BaseDatabaseFeatures):
allows_group_by_ordinal = False
allows_unique_and_pk = False # Suppress UNIQUE/PK for Oracle (ORA-02259)
empty_fetchmany_value = ()
needs_datetime_string_cast = False
supports_tablespaces = True
@ -147,11 +146,11 @@ class DatabaseOperations(BaseDatabaseOperations):
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
for sequence_info in sequences:
table_name = sequence_info['table']
seq_name = get_sequence_name(table_name)
sequence_name = get_sequence_name(sequence_info['table'])
table_name = self.quote_name(sequence_info['table'])
column_name = self.quote_name(sequence_info['column'] or 'id')
query = _get_sequence_reset_sql() % {'sequence': seq_name,
'table': self.quote_name(table_name),
query = _get_sequence_reset_sql() % {'sequence': sequence_name,
'table': table_name,
'column': column_name}
sql.append(query)
return sql
@ -163,19 +162,22 @@ class DatabaseOperations(BaseDatabaseOperations):
output = []
query = _get_sequence_reset_sql()
for model in model_list:
for f in model._meta.fields:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
table_name = self.quote_name(model._meta.db_table)
sequence_name = get_sequence_name(model._meta.db_table)
column_name = self.quote_name(f.db_column or f.name)
column_name = self.quote_name(f.column)
output.append(query % {'sequence': sequence_name,
'table': model._meta.db_table,
'table': table_name,
'column': column_name})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
table_name = self.quote_name(f.m2m_db_table())
sequence_name = get_sequence_name(f.m2m_db_table())
column_name = self.quote_name('id')
output.append(query % {'sequence': sequence_name,
'table': f.m2m_db_table(),
'column': self.quote_name('id')})
'table': table_name,
'column': column_name})
return output
def start_transaction_sql(self):

View File

@ -23,7 +23,7 @@ DATA_TYPES = {
'ImageField': 'NVARCHAR2(%(max_length)s)',
'IntegerField': 'NUMBER(11)',
'IPAddressField': 'VARCHAR2(15)',
'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(column)s IS NULL))',
'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
'OneToOneField': 'NUMBER(11)',
'PhoneNumberField': 'VARCHAR2(20)',
'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',

View File

@ -97,7 +97,7 @@ class DatabaseOperations(BaseDatabaseOperations):
# Use `coalesce` to set the sequence for each model to the max pk value if there are records,
# or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
# if there are records (as the max pk value is already in use), otherwise set it to false.
for f in model._meta.fields:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append("%s setval('%s', coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
(style.SQL_KEYWORD('SELECT'),

View File

@ -19,6 +19,8 @@ from django.dispatch import dispatcher
from django.utils.datastructures import SortedDict
from django.utils.functional import curry
from django.utils.encoding import smart_str, force_unicode, smart_unicode
from django.core.files.move import file_move_safe
from django.core.files import locks
from django.conf import settings
try:
@ -50,7 +52,15 @@ class ModelBase(type):
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
new_class.add_to_class('_meta', Options(meta))
if getattr(meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
kwargs = {"app_label": model_module.__name__.split('.')[-2]}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class('DoesNotExist',
subclass_exception('DoesNotExist', ObjectDoesNotExist, module))
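A standalone sketch of the app_label derivation moved above, grounded in the comment's own example:

    module_name = 'django.contrib.sites.models'
    app_label = module_name.split('.')[-2]  # take the package one level above models
    assert app_label == 'sites'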
@ -71,11 +81,6 @@ class ModelBase(type):
if new_class._default_manager.model._meta.abstract:
old_default_mgr = new_class._default_manager
new_class._default_manager = None
if getattr(new_class._meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
new_class._meta.app_label = model_module.__name__.split('.')[-2]
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name, False)
@ -392,6 +397,21 @@ class Model(object):
for sub_obj in getattr(self, rel_opts_name).all():
sub_obj._collect_sub_objects(seen_objs, self.__class__, related.field.null)
# Handle any ancestors (for the model-inheritance case). We do this by
# traversing to the most remote parent classes -- those with no parents
# themselves -- and then adding those instances to the collection. That
# will include all the child instances down to "self".
parent_stack = self._meta.parents.values()
while parent_stack:
link = parent_stack.pop()
parent_obj = getattr(self, link.name)
if parent_obj._meta.parents:
parent_stack.extend(parent_obj._meta.parents.values())
continue
# At this point, parent_obj is a base class (no ancestor models). So
# delete it and all its descendants.
parent_obj._collect_sub_objects(seen_objs)
def delete(self):
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
@ -439,7 +459,7 @@ class Model(object):
def _get_FIELD_filename(self, field):
if getattr(self, field.attname): # value is not blank
return os.path.join(settings.MEDIA_ROOT, getattr(self, field.attname))
return os.path.normpath(os.path.join(settings.MEDIA_ROOT, getattr(self, field.attname)))
return ''
def _get_FIELD_url(self, field):
@ -451,16 +471,51 @@ class Model(object):
def _get_FIELD_size(self, field):
return os.path.getsize(self._get_FIELD_filename(field))
def _save_FIELD_file(self, field, filename, raw_contents, save=True):
def _save_FIELD_file(self, field, filename, raw_field, save=True):
directory = field.get_directory_name()
try: # Create the date-based directory if it doesn't exist.
os.makedirs(os.path.join(settings.MEDIA_ROOT, directory))
except OSError: # Directory probably already exists.
pass
#
# Check for old-style usage (files-as-dictionaries). Warn here first
# since there are multiple locations where we need to support both new
# and old usage.
#
if isinstance(raw_field, dict):
import warnings
warnings.warn(
message = "Representing uploaded files as dictionaries is"\
" deprected. Use django.core.files.SimpleUploadedFile"\
" instead.",
category = DeprecationWarning,
stacklevel = 2
)
from django.core.files.uploadedfile import SimpleUploadedFile
raw_field = SimpleUploadedFile.from_dict(raw_field)
elif isinstance(raw_field, basestring):
import warnings
warnings.warn(
message = "Representing uploaded files as strings is "\
" deprecated. Use django.core.files.SimpleUploadedFile "\
" instead.",
category = DeprecationWarning,
stacklevel = 2
)
from django.core.files.uploadedfile import SimpleUploadedFile
raw_field = SimpleUploadedFile(filename, raw_field)
if filename is None:
filename = raw_field.file_name
filename = field.get_filename(filename)
#
# If the filename already exists, keep adding an underscore to the name of
# the file until the filename doesn't exist.
#
while os.path.exists(os.path.join(settings.MEDIA_ROOT, filename)):
try:
dot_index = filename.rindex('.')
@ -468,14 +523,27 @@ class Model(object):
filename += '_'
else:
filename = filename[:dot_index] + '_' + filename[dot_index:]
#
# Save the file name on the object and write the file to disk
#
# Write the file to disk.
setattr(self, field.attname, filename)
full_filename = self._get_FIELD_filename(field)
fp = open(full_filename, 'wb')
fp.write(raw_contents)
fp.close()
if hasattr(raw_field, 'temporary_file_path'):
# This file has a file path that we can move.
raw_field.close()
file_move_safe(raw_field.temporary_file_path(), full_filename)
else:
# This is a normal uploadedfile that we can stream.
fp = open(full_filename, 'wb')
locks.lock(fp, locks.LOCK_EX)
for chunk in raw_field.chunk():
fp.write(chunk)
locks.unlock(fp)
fp.close()
# Save the width and/or height, if applicable.
if isinstance(field, ImageField) and (field.width_field or field.height_field):
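A minimal standalone sketch of the normalization this hunk performs, using a stand-in for SimpleUploadedFile (the real class lives in django.core.files.uploadedfile at this revision; names follow the diff):

    # Sketch only: mirrors the dict-vs-object dispatch in _save_FIELD_file.
    class UploadStandIn(object):
        def __init__(self, name, content):
            self.file_name = name
            self.content = content
        def from_dict(cls, d):
            # Deprecated form: {'filename': ..., 'content': ...}
            return cls(d['filename'], d['content'])
        from_dict = classmethod(from_dict)

    def normalize(raw_field, filename=None):
        if isinstance(raw_field, dict):        # old-style dict upload
            raw_field = UploadStandIn.from_dict(raw_field)
        elif isinstance(raw_field, str):       # old-style raw string
            raw_field = UploadStandIn(filename, raw_field)
        if filename is None:
            filename = raw_field.file_name
        return filename, raw_field

    print(normalize({'filename': 'a.txt', 'content': 'hi'})[0])   # a.txt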

View File

@ -91,7 +91,7 @@ class Field(object):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self.unique = max_length, unique
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
@ -168,6 +168,10 @@ class Field(object):
except KeyError:
return None
def unique(self):
return self._unique or self.primary_key
unique = property(unique)
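A standalone illustration of the property introduced above: primary-key fields now report themselves as unique (a sketch, not the real Field class):

    class FieldSketch(object):
        def __init__(self, unique=False, primary_key=False):
            self._unique = unique
            self.primary_key = primary_key
        def unique(self):
            return self._unique or self.primary_key
        unique = property(unique)

    assert FieldSketch(primary_key=True).unique   # implicit uniqueness
    assert FieldSketch(unique=True).unique
    assert not FieldSketch().unique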
def validate_full(self, field_data, all_data):
"""
Returns a list of errors for this field. This is the main interface,
@ -695,7 +699,7 @@ class DecimalField(Field):
_("This value must be a decimal number."))
def _format(self, value):
if isinstance(value, basestring):
if isinstance(value, basestring) or value is None:
return value
else:
return self.format_number(value)
@ -716,8 +720,7 @@ class DecimalField(Field):
return u"%.*f" % (self.decimal_places, value)
def get_db_prep_save(self, value):
if value is not None:
value = self._format(value)
value = self._format(value)
return super(DecimalField, self).get_db_prep_save(value)
def get_db_prep_lookup(self, lookup_type, value):
@ -812,7 +815,7 @@ class FileField(Field):
setattr(cls, 'get_%s_filename' % self.name, curry(cls._get_FIELD_filename, field=self))
setattr(cls, 'get_%s_url' % self.name, curry(cls._get_FIELD_url, field=self))
setattr(cls, 'get_%s_size' % self.name, curry(cls._get_FIELD_size, field=self))
setattr(cls, 'save_%s_file' % self.name, lambda instance, filename, raw_contents, save=True: instance._save_FIELD_file(self, filename, raw_contents, save))
setattr(cls, 'save_%s_file' % self.name, lambda instance, filename, raw_field, save=True: instance._save_FIELD_file(self, filename, raw_field, save))
dispatcher.connect(self.delete_file, signal=signals.post_delete, sender=cls)
def delete_file(self, instance):
@ -835,9 +838,19 @@ class FileField(Field):
if new_data.get(upload_field_name, False):
func = getattr(new_object, 'save_%s_file' % self.name)
if rel:
func(new_data[upload_field_name][0]["filename"], new_data[upload_field_name][0]["content"], save)
file = new_data[upload_field_name][0]
else:
func(new_data[upload_field_name]["filename"], new_data[upload_field_name]["content"], save)
file = new_data[upload_field_name]
# Backwards-compatible support for files-as-dictionaries.
# We don't need to raise a warning because Model._save_FIELD_file will
# do so for us.
try:
file_name = file.file_name
except AttributeError:
file_name = file['filename']
func(file_name, file, save)
def get_directory_name(self):
return os.path.normpath(force_unicode(datetime.datetime.now().strftime(smart_str(self.upload_to))))
@ -850,7 +863,7 @@ class FileField(Field):
def save_form_data(self, instance, data):
from django.newforms.fields import UploadedFile
if data and isinstance(data, UploadedFile):
getattr(instance, "save_%s_file" % self.name)(data.filename, data.content, save=False)
getattr(instance, "save_%s_file" % self.name)(data.filename, data.data, save=False)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField}
@ -1166,12 +1179,3 @@ class XMLField(TextField):
def get_manipulator_field_objs(self):
return [curry(oldforms.XMLLargeTextField, schema_path=self.schema_path)]
class OrderingField(IntegerField):
empty_strings_allowed=False
def __init__(self, with_respect_to, **kwargs):
self.wrt = with_respect_to
kwargs['null'] = True
IntegerField.__init__(self, **kwargs )
def get_manipulator_fields(self, opts, manipulator, change, name_prefix='', rel=False, follow=True):
return [oldforms.HiddenField(name_prefix + self.name)]

View File

@ -185,11 +185,11 @@ class SingleRelatedObjectDescriptor(object):
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
# The similarity of the code below to the code in
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null == False:
@ -197,14 +197,14 @@ class SingleRelatedObjectDescriptor(object):
(instance._meta.object_name, self.related.get_accessor_name()))
elif value is not None and not isinstance(value, self.related.model):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
(value, instance._meta.object_name,
self.related.get_accessor_name(), self.related.opts.object_name))
# Set the value of the related field
setattr(value, self.related.field.rel.get_related_field().attname, instance)
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
@ -243,7 +243,7 @@ class ReverseSingleRelatedObjectDescriptor(object):
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self._field.name
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null == False:
@ -251,9 +251,9 @@ class ReverseSingleRelatedObjectDescriptor(object):
(instance._meta.object_name, self.field.name))
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
(value, instance._meta.object_name,
self.field.name, self.field.rel.to._meta.object_name))
# Set the value of the related field
try:
val = getattr(value, self.field.rel.get_related_field().attname)
@ -262,7 +262,7 @@ class ReverseSingleRelatedObjectDescriptor(object):
setattr(instance, self.field.attname, val)
# Since we already know what the related object is, seed the related
# object cache now, too. This avoids another db hit if you get the
# object cache now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.field.get_cache_name(), value)
@ -322,7 +322,9 @@ class ForeignRelatedObjectsDescriptor(object):
clear.alters_data = True
manager = RelatedManager()
manager.core_filters = {'%s__pk' % rel_field.name: getattr(instance, rel_field.rel.get_related_field().attname)}
attname = rel_field.rel.get_related_field().name
manager.core_filters = {'%s__%s' % (rel_field.name, attname):
getattr(instance, attname)}
manager.model = self.related.model
return manager
@ -692,6 +694,11 @@ class ForeignKey(RelatedField, Field):
def contribute_to_class(self, cls, name):
super(ForeignKey, self).contribute_to_class(cls, name)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "o2m")
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
@ -829,6 +836,12 @@ class ManyToManyField(RelatedField, Field):
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "m2m")
def contribute_to_related_class(self, cls, related):
# m2m relations to self do not have a ManyRelatedObjectsDescriptor,
# as it would be redundant - unless the field is non-symmetrical.
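The core_filters change above makes the reverse manager honour ForeignKeys declared with to_field: it filters on the field the relation actually targets instead of assuming the primary key. A sketch of the resulting filter dict, with hypothetical model names:

    # Book.author = ForeignKey(Author, to_field='code'); author.code == 'KNUTH'
    rel_field_name = 'author'   # rel_field.name
    target_attname = 'code'     # rel_field.rel.get_related_field().name
    core_filters = {'%s__%s' % (rel_field_name, target_attname): 'KNUTH'}
    print(core_filters)         # {'author__code': 'KNUTH'}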

View File

@ -25,7 +25,7 @@ DEFAULT_NAMES = ('verbose_name', 'db_table', 'ordering',
'abstract')
class Options(object):
def __init__(self, meta):
def __init__(self, meta, app_label=None):
self.local_fields, self.local_many_to_many = [], []
self.module_name, self.verbose_name = None, None
self.verbose_name_plural = None
@ -33,7 +33,7 @@ class Options(object):
self.ordering = []
self.unique_together = []
self.permissions = []
self.object_name, self.app_label = None, None
self.object_name, self.app_label = None, app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
@ -44,8 +44,12 @@ class Options(object):
self.one_to_one_field = None
self.abstract = False
self.parents = SortedDict()
self.duplicate_targets = {}
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.util import truncate_name
cls._meta = self
self.installed = re.sub('\.models$', '', cls.__module__) in settings.INSTALLED_APPS
# First, construct the default values for these options.
@ -87,9 +91,13 @@ class Options(object):
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + module_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.module_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
from django.db import connection
from django.db.backends.util import truncate_name
if self.order_with_respect_to:
self.order_with_respect_to = self.get_field(self.order_with_respect_to)
self.ordering = ('_order',)
@ -108,10 +116,23 @@ class Options(object):
auto_created=True)
model.add_to_class('id', auto)
# If the db_table wasn't provided, use the app_label + module_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.module_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
# Determine any sets of fields that are pointing to the same targets
# (e.g. two ForeignKeys to the same remote model). The query
# construction code needs to know this. At the end of this,
# self.duplicate_targets will map each duplicate field column to the
# columns it duplicates.
collections = {}
for column, target in self.duplicate_targets.iteritems():
try:
collections[target].add(column)
except KeyError:
collections[target] = set([column])
self.duplicate_targets = {}
for elt in collections.itervalues():
if len(elt) == 1:
continue
for column in elt:
self.duplicate_targets[column] = elt.difference(set([column]))
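The grouping pass above, reproduced as a standalone function (the column/target pairs are hypothetical):

    def build_duplicate_targets(duplicate_targets):
        collections = {}
        for column, target in duplicate_targets.items():
            collections.setdefault(target, set()).add(column)
        result = {}
        for elt in collections.values():
            if len(elt) == 1:
                continue                # unique target: nothing to dedupe
            for column in elt:
                result[column] = elt.difference(set([column]))
        return result

    # Two ForeignKeys aimed at the same table must not share joins:
    print(build_duplicate_targets({
        'author_id': ('auth_user', 'o2m'),
        'editor_id': ('auth_user', 'o2m'),
        'site_id': ('django_site', 'o2m'),
    }))
    # {'author_id': set(['editor_id']), 'editor_id': set(['author_id'])}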
def add_field(self, field):
# Insert the given field in the order in which it was created, using

View File

@ -3,7 +3,7 @@ import warnings
from django.conf import settings
from django.db import connection, transaction, IntegrityError
from django.db.models.fields import DateField, FieldDoesNotExist
from django.db.models.query_utils import Q
from django.db.models.query_utils import Q, select_related_descend
from django.db.models import signals, sql
from django.dispatch import dispatcher
from django.utils.datastructures import SortedDict
@ -761,8 +761,7 @@ def get_cached_row(klass, row, index_start, max_depth=0, cur_depth=0,
index_end = index_start + len(klass._meta.fields)
obj = klass(*row[index_start:index_end])
for f in klass._meta.fields:
if (not f.rel or (not restricted and f.null) or
(restricted and f.name not in requested) or f.rel.parent_link):
if not select_related_descend(f, restricted, requested):
continue
if restricted:
next = requested[f.name]

View File

@ -48,3 +48,20 @@ class Q(tree.Node):
obj.negate()
return obj
def select_related_descend(field, restricted, requested):
"""
Returns True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_cached_row()).
"""
if not field.rel:
return False
if field.rel.parent_link:
return False
if restricted and field.name not in requested:
return False
if not restricted and field.null:
return False
return True
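Exercising the helper above with minimal stand-in field objects (assumes select_related_descend is in scope; attribute names are taken from the function itself):

    class Rel(object):
        def __init__(self, parent_link=False):
            self.parent_link = parent_link

    class FieldStub(object):
        def __init__(self, name, rel=None, null=False):
            self.name, self.rel, self.null = name, rel, null

    fk = FieldStub('author', rel=Rel())
    nullable_fk = FieldStub('editor', rel=Rel(), null=True)

    assert select_related_descend(fk, False, None)               # followed
    assert not select_related_descend(nullable_fk, False, None)  # skipped
    assert select_related_descend(nullable_fk, True, {'editor': {}})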

View File

@ -7,6 +7,7 @@ databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import datetime
from copy import deepcopy
from django.utils.tree import Node
@ -14,9 +15,10 @@ from django.utils.datastructures import SortedDict
from django.dispatch import dispatcher
from django.db import connection
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.where import WhereNode, EverythingNode, AND, OR
from django.db.models.sql.datastructures import Count
from django.db.models.fields import FieldDoesNotExist
from django.core.exceptions import FieldError
from datastructures import EmptyResultSet, Empty, MultiJoin
from constants import *
@ -56,6 +58,7 @@ class Query(object):
self.start_meta = None
self.select_fields = []
self.related_select_fields = []
self.dupe_avoidance = {}
# SQL-related attributes
self.select = []
@ -164,6 +167,7 @@ class Query(object):
obj.start_meta = self.start_meta
obj.select_fields = self.select_fields[:]
obj.related_select_fields = self.related_select_fields[:]
obj.dupe_avoidance = self.dupe_avoidance.copy()
obj.select = self.select[:]
obj.tables = self.tables[:]
obj.where = deepcopy(self.where)
@ -214,7 +218,7 @@ class Query(object):
obj.select_related = False
obj.related_select_cols = []
obj.related_select_fields = []
if obj.distinct and len(obj.select) > 1:
if len(obj.select) > 1:
obj = self.clone(CountQuery, _query=obj, where=self.where_class(),
distinct=False)
obj.select = []
@ -362,10 +366,21 @@ class Query(object):
item.relabel_aliases(change_map)
self.select.append(item)
self.select_fields = rhs.select_fields[:]
self.extra_select = rhs.extra_select.copy()
self.extra_tables = rhs.extra_tables
self.extra_where = rhs.extra_where
self.extra_params = rhs.extra_params
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra_select and rhs.extra_select:
raise ValueError("When merging querysets using 'or', you "
"cannot have extra(select=...) on both sides.")
if self.extra_where and rhs.extra_where:
raise ValueError("When merging querysets using 'or', you "
"cannot have extra(where=...) on both sides.")
self.extra_select.update(rhs.extra_select)
self.extra_tables += rhs.extra_tables
self.extra_where += rhs.extra_where
self.extra_params += rhs.extra_params
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
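A hedged usage sketch of the new restriction (Entry is a hypothetical model): OR-ing two querysets that both carry extra(select=...) now fails loudly instead of producing inconsistent SQL.

    qs1 = Entry.objects.extra(select={'is_new': "pub_date > '2008-01-01'"})
    qs2 = Entry.objects.extra(select={'is_old': "pub_date < '2000-01-01'"})
    try:
        combined = qs1 | qs2
    except ValueError, e:
        print(e)   # cannot have extra(select=...) on both sides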
@ -439,28 +454,39 @@ class Query(object):
self._select_aliases = aliases
return result
def get_default_columns(self, with_aliases=False, col_aliases=None):
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False):
"""
Computes the default columns for selecting every field in the base
model.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement.
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
table_alias = self.tables[0]
root_pk = self.model._meta.pk.column
if opts is None:
opts = self.model._meta
if start_alias:
table_alias = start_alias
else:
table_alias = self.tables[0]
root_pk = opts.pk.column
seen = {None: table_alias}
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
for field, model in self.model._meta.get_fields_with_model():
for field, model in opts.get_fields_with_model():
try:
alias = seen[model]
except KeyError:
alias = self.join((table_alias, model._meta.db_table,
root_pk, model._meta.pk.column))
seen[model] = alias
if as_pairs:
result.append((alias, field.column))
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
@ -473,6 +499,8 @@ class Query(object):
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
if as_pairs:
return result, None
return result, aliases
def get_from_clause(self):
@ -609,6 +637,11 @@ class Query(object):
alias, False)
alias = joins[-1]
col = target.column
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.ref_alias(alias)
# Must use left outer joins for nullable fields.
for join in joins:
@ -829,8 +862,8 @@ class Query(object):
if reuse and always_create and table in self.table_map:
# Convert the 'reuse' case to be "exclude everything but the
# reusable set for this table".
exclusions = set(self.table_map[table]).difference(reuse)
# reusable set, minus exclusions, for this table".
exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
always_create = False
t_ident = (lhs_table, table, lhs_col, col)
if not always_create:
@ -865,7 +898,8 @@ class Query(object):
return alias
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
used=None, requested=None, restricted=None, nullable=None):
used=None, requested=None, restricted=None, nullable=None,
dupe_set=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
@ -875,6 +909,7 @@ class Query(object):
if not restricted and self.max_depth and cur_depth > self.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.get_meta()
root_alias = self.get_initial_alias()
@ -882,6 +917,10 @@ class Query(object):
self.related_select_fields = []
if not used:
used = set()
if dupe_set is None:
dupe_set = set()
orig_dupe_set = dupe_set
orig_used = used
# Setup for the case when only particular related fields should be
# included in the related selection.
@ -893,9 +932,10 @@ class Query(object):
restricted = False
for f, model in opts.get_fields_with_model():
if (not f.rel or (restricted and f.name not in requested) or
(not restricted and f.null) or f.rel.parent_link):
if not select_related_descend(f, restricted, requested):
continue
dupe_set = orig_dupe_set.copy()
used = orig_used.copy()
table = f.rel.to._meta.db_table
if nullable or f.null:
promote = True
@ -906,18 +946,32 @@ class Query(object):
alias = root_alias
for int_model in opts.get_base_chain(model):
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
used.update(self.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.join((alias, int_opts.db_table, lhs_col,
int_opts.pk.column), exclusions=used,
promote=promote)
for (dupe_opts, dupe_col) in dupe_set:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
else:
alias = root_alias
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
used.update(self.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.join((alias, table, f.column,
f.rel.get_related_field().column), exclusions=used,
promote=promote)
used.add(alias)
self.related_select_cols.extend([(alias, f2.column)
for f2 in f.rel.to._meta.fields])
self.related_select_cols.extend(self.get_default_columns(
start_alias=alias, opts=f.rel.to._meta, as_pairs=True)[0])
self.related_select_fields.extend(f.rel.to._meta.fields)
if restricted:
next = requested.get(f.name, {})
@ -927,8 +981,10 @@ class Query(object):
new_nullable = f.null
else:
new_nullable = None
for dupe_opts, dupe_col in dupe_set:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
used, next, restricted, new_nullable)
used, next, restricted, new_nullable, dupe_set)
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
can_reuse=None):
@ -1058,15 +1114,17 @@ class Query(object):
for alias in join_list:
if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
j_col = self.alias_map[alias][RHS_JOIN_COL]
entry = Node([(alias, j_col, None, 'isnull', True)])
entry = self.where_class()
entry.add((alias, j_col, None, 'isnull', True), AND)
entry.negate()
self.where.add(entry, AND)
break
elif not (lookup_type == 'in' and not value):
elif not (lookup_type == 'in' and not value) and field.null:
# Leaky abstraction artifact: We have to specifically
# exclude the "foo__in=[]" case from this handling, because
# it's short-circuited in the Where class.
entry = Node([(alias, col, field, 'isnull', True)])
entry = self.where_class()
entry.add((alias, col, None, 'isnull', True), AND)
entry.negate()
self.where.add(entry, AND)
@ -1114,7 +1172,9 @@ class Query(object):
(which gives the table we are joining to), 'alias' is the alias for the
table we are joining to. If dupe_multis is True, any many-to-many or
many-to-one joins will always create a new alias (necessary for
disjunctive filters).
disjunctive filters). If can_reuse is not None, it's a list of aliases
that can be reused in these joins (nothing else can be reused in this
case).
Returns the final field involved in the join, the target database
column (used for any 'where' constraint), the final 'opts' value and the
@ -1122,7 +1182,14 @@ class Query(object):
"""
joins = [alias]
last = [0]
dupe_set = set()
exclusions = set()
for pos, name in enumerate(names):
try:
exclusions.add(int_alias)
except NameError:
pass
exclusions.add(alias)
last.append(len(joins))
if name == 'pk':
name = opts.pk.name
@ -1141,6 +1208,7 @@ class Query(object):
names = opts.get_all_field_names()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
if not allow_many and (m2m or not direct):
for alias in joins:
self.unref_alias(alias)
@ -1150,12 +1218,27 @@ class Query(object):
alias_list = []
for int_model in opts.get_base_chain(model):
lhs_col = opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
exclusions.update(self.dupe_avoidance.get(
(id(opts), lhs_col), ()))
dupe_set.add((opts, lhs_col))
opts = int_model._meta
alias = self.join((alias, opts.db_table, lhs_col,
opts.pk.column), exclusions=joins)
opts.pk.column), exclusions=exclusions)
joins.append(alias)
exclusions.add(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
cached_data = opts._join_cache.get(name)
orig_opts = opts
dupe_col = direct and field.column or field.field.column
dedupe = dupe_col in opts.duplicate_targets
if dupe_set or dedupe:
if dedupe:
dupe_set.add((opts, dupe_col))
exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
()))
if direct:
if m2m:
@ -1177,9 +1260,11 @@ class Query(object):
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, joins, nullable=True, reuse=can_reuse)
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
alias = self.join((int_alias, table2, from_col2, to_col2),
dupe_multis, joins, nullable=True, reuse=can_reuse)
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
elif field.rel:
# One-to-one or many-to-one field
@ -1195,7 +1280,7 @@ class Query(object):
opts, target)
alias = self.join((alias, table, from_col, to_col),
exclusions=joins, nullable=field.null)
exclusions=exclusions, nullable=field.null)
joins.append(alias)
else:
# Non-relation fields.
@ -1223,9 +1308,11 @@ class Query(object):
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, joins, nullable=True, reuse=can_reuse)
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
alias = self.join((int_alias, table2, from_col2, to_col2),
dupe_multis, joins, nullable=True, reuse=can_reuse)
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
else:
# One-to-many field (ForeignKey defined on the target model)
@ -1243,14 +1330,34 @@ class Query(object):
opts, target)
alias = self.join((alias, table, from_col, to_col),
dupe_multis, joins, nullable=True, reuse=can_reuse)
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
try:
self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
except NameError:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if pos != len(names) - 1:
raise FieldError("Join on field %r not permitted." % name)
return field, target, opts, joins, last
def update_dupe_avoidance(self, opts, col, alias):
"""
For a column that is one of multiple pointing to the same table, update
the internal data structures to note that this alias shouldn't be used
for those other columns.
"""
ident = id(opts)
for name in opts.duplicate_targets[col]:
try:
self.dupe_avoidance[ident, name].add(alias)
except KeyError:
self.dupe_avoidance[ident, name] = set([alias])
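The bookkeeping above in isolation: dupe_avoidance maps (id(opts), column) pairs to the set of join aliases that must not be reused for the sibling columns (the values here are hypothetical):

    dupe_avoidance = {}
    duplicate_targets = {'author_id': set(['editor_id']),
                         'editor_id': set(['author_id'])}

    def update_dupe_avoidance(opts_ident, col, alias):
        for name in duplicate_targets[col]:
            dupe_avoidance.setdefault((opts_ident, name), set()).add(alias)

    update_dupe_avoidance(1, 'author_id', 'T3')
    print(dupe_avoidance)   # {(1, 'editor_id'): set(['T3'])}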
def split_exclude(self, filter_expr, prefix):
"""
When doing an exclude against any kind of N-to-many relation, we need

View File

@ -21,12 +21,42 @@ class WhereNode(tree.Node):
the correct SQL).
The children in this tree are usually either Q-like objects or lists of
[table_alias, field_name, field_class, lookup_type, value]. However, a
child could also be any class with as_sql() and relabel_aliases() methods.
[table_alias, field_name, db_type, lookup_type, value_annotation,
params]. However, a child could also be any class with as_sql() and
relabel_aliases() methods.
"""
default = AND
def as_sql(self, node=None, qn=None):
def add(self, data, connector):
"""
Add a node to the where-tree. If the data is a list or tuple, it is
expected to be of the form (alias, col_name, field_obj, lookup_type,
value), which is then slightly munged before being stored (to avoid
storing any reference to field objects). Otherwise, the 'data' is
stored unchanged and can be anything with an 'as_sql()' method.
"""
if not isinstance(data, (list, tuple)):
super(WhereNode, self).add(data, connector)
return
alias, col, field, lookup_type, value = data
if field:
params = field.get_db_prep_lookup(lookup_type, value)
db_type = field.db_type()
else:
# This is possible when we add a comparison to NULL sometimes (we
# don't really need to waste time looking up the associated field
# object).
params = Field().get_db_prep_lookup(lookup_type, value)
db_type = None
if isinstance(value, datetime.datetime):
annotation = datetime.datetime
else:
annotation = bool(value)
super(WhereNode, self).add((alias, col, db_type, lookup_type,
annotation, params), connector)
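The value annotation performed above, shown standalone: the where tree now stores a cheap marker instead of the raw value or field object.

    import datetime

    def annotate(value):
        if isinstance(value, datetime.datetime):
            return datetime.datetime
        return bool(value)

    print(annotate(datetime.datetime.now()))  # the datetime type itself
    print(annotate([]))   # False; this is what short-circuits 'foo__in=[]'
    print(annotate('x'))  # True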
def as_sql(self, qn=None):
"""
Returns the SQL version of the where clause and the value to be
substituted in. Returns None, None if this node is empty.
@ -35,82 +65,73 @@ class WhereNode(tree.Node):
(generally not needed except by the internal implementation for
recursion).
"""
if node is None:
node = self
if not qn:
qn = connection.ops.quote_name
if not node.children:
if not self.children:
return None, []
result = []
result_params = []
empty = True
for child in node.children:
for child in self.children:
try:
if hasattr(child, 'as_sql'):
sql, params = child.as_sql(qn=qn)
format = '(%s)'
elif isinstance(child, tree.Node):
sql, params = self.as_sql(child, qn)
if child.negated:
format = 'NOT (%s)'
elif len(child.children) == 1:
format = '%s'
else:
format = '(%s)'
else:
# A leaf node in the tree.
sql, params = self.make_atom(child, qn)
format = '%s'
except EmptyResultSet:
if node.connector == AND and not node.negated:
if self.connector == AND and not self.negated:
# We can bail out early in this particular case (only).
raise
elif node.negated:
elif self.negated:
empty = False
continue
except FullResultSet:
if self.connector == OR:
if node.negated:
if self.negated:
empty = True
break
# We match everything. No need for any constraints.
return '', []
if node.negated:
if self.negated:
empty = True
continue
empty = False
if sql:
result.append(format % sql)
result.append(sql)
result_params.extend(params)
if empty:
raise EmptyResultSet
conn = ' %s ' % node.connector
return conn.join(result), result_params
conn = ' %s ' % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
sql_string = 'NOT (%s)' % sql_string
elif len(self.children) != 1:
sql_string = '(%s)' % sql_string
return sql_string, result_params
def make_atom(self, child, qn):
"""
Turn a tuple (table_alias, field_name, field_class, lookup_type, value)
into valid SQL.
Turn a tuple (table_alias, column_name, db_type, lookup_type,
value_annot, params) into valid SQL.
Returns the string for the SQL fragment and the parameters to use for
it.
"""
table_alias, name, field, lookup_type, value = child
table_alias, name, db_type, lookup_type, value_annot, params = child
if table_alias:
lhs = '%s.%s' % (qn(table_alias), qn(name))
else:
lhs = qn(name)
db_type = field and field.db_type() or None
field_sql = connection.ops.field_cast_sql(db_type) % lhs
if isinstance(value, datetime.datetime):
if value_annot is datetime.datetime:
cast_sql = connection.ops.datetime_cast_sql()
else:
cast_sql = '%s'
if field:
params = field.get_db_prep_lookup(lookup_type, value)
else:
params = Field().get_db_prep_lookup(lookup_type, value)
if isinstance(params, QueryWrapper):
extra, params = params.data
else:
@ -123,11 +144,11 @@ class WhereNode(tree.Node):
connection.operators[lookup_type] % cast_sql), params)
if lookup_type == 'in':
if not value:
if not value_annot:
raise EmptyResultSet
if extra:
return ('%s IN %s' % (field_sql, extra), params)
return ('%s IN (%s)' % (field_sql, ', '.join(['%s'] * len(value))),
return ('%s IN (%s)' % (field_sql, ', '.join(['%s'] * len(params))),
params)
elif lookup_type in ('range', 'year'):
return ('%s BETWEEN %%s and %%s' % field_sql, params)
@ -135,8 +156,8 @@ class WhereNode(tree.Node):
return ('%s = %%s' % connection.ops.date_extract_sql(lookup_type,
field_sql), params)
elif lookup_type == 'isnull':
return ('%s IS %sNULL' % (field_sql, (not value and 'NOT ' or '')),
params)
return ('%s IS %sNULL' % (field_sql,
(not value_annot and 'NOT ' or '')), ())
elif lookup_type == 'search':
return (connection.ops.fulltext_search_sql(field_sql), params)
elif lookup_type in ('regex', 'iregex'):

View File

@ -196,7 +196,10 @@ def commit_on_success(func):
managed(True)
try:
res = func(*args, **kw)
except Exception, e:
except (Exception, KeyboardInterrupt, SystemExit):
# (We handle KeyboardInterrupt and SystemExit specially, since
# they don't inherit from Exception in Python 2.5, but we
# should treat them uniformly here.)
if is_dirty():
rollback()
raise
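A hedged usage sketch of the decorator this hunk hardens (MyModel is a hypothetical model): a Ctrl-C during the loop now rolls the transaction back instead of leaving it half-committed.

    from django.db import transaction

    def import_rows(rows):
        for row in rows:
            MyModel.objects.create(**row)
    import_rows = transaction.commit_on_success(import_rows)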

View File

@ -9,14 +9,15 @@ try:
except ImportError:
from cgi import parse_qsl
from django.utils.datastructures import MultiValueDict, FileDict
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import smart_str, iri_to_uri, force_unicode
from django.http.multipartparser import MultiPartParser
from django.conf import settings
from django.core.files import uploadhandler
from utils import *
RESERVED_CHARS="!*'();:@&=+$,/?%#[]"
class Http404(Exception):
pass
@ -25,6 +26,7 @@ class HttpRequest(object):
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
@ -102,39 +104,31 @@ class HttpRequest(object):
encoding = property(_get_encoding, _set_encoding)
def parse_file_upload(header_dict, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
import email, email.Message
from cgi import parse_header
raw_message = '\r\n'.join(['%s:%s' % pair for pair in header_dict.items()])
raw_message += '\r\n\r\n' + post_data
msg = email.message_from_string(raw_message)
POST = QueryDict('', mutable=True)
FILES = MultiValueDict()
for submessage in msg.get_payload():
if submessage and isinstance(submessage, email.Message.Message):
name_dict = parse_header(submessage['Content-Disposition'])[1]
# name_dict is something like {'name': 'file', 'filename': 'test.txt'} for file uploads
# or {'name': 'blah'} for POST fields
# We assume all uploaded files have a 'filename' set.
if 'filename' in name_dict:
assert type([]) != type(submessage.get_payload()), "Nested MIME messages are not supported"
if not name_dict['filename'].strip():
continue
# IE submits the full path, so trim everything but the basename.
# (We can't use os.path.basename because that uses the server's
# directory separator, which may not be the same as the
# client's one.)
filename = name_dict['filename'][name_dict['filename'].rfind("\\")+1:]
FILES.appendlist(name_dict['name'], FileDict({
'filename': filename,
'content-type': 'Content-Type' in submessage and submessage['Content-Type'] or None,
'content': submessage.get_payload(),
}))
else:
POST.appendlist(name_dict['name'], submessage.get_payload())
return POST, FILES
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
def _set_upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def _get_upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
upload_handlers = property(_get_upload_handlers, _set_upload_handlers)
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning = "You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
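A sketch of the intended use in a view (view code assumed, not part of this diff): handlers must be swapped before the first access to request.POST or request.FILES, otherwise the setter above raises AttributeError.

    from django.core.files.uploadhandler import TemporaryFileUploadHandler

    def upload_view(request):
        # Force every upload in this view to be streamed to disk.
        request.upload_handlers = [TemporaryFileUploadHandler(request)]
        files = request.FILES   # parsing happens here, with our handler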
class QueryDict(MultiValueDict):
"""

View File

@ -0,0 +1,658 @@
"""
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import cgi
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_unicode
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser','MultiPartParserError','InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser(object):
"""
An RFC 2388 multipart/form-data parser.
``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(POST QueryDict, FILES MultiValueDict)``, feeding
uploaded file content to the configured upload handlers as it is parsed.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a bytestring.
:upload_handlers:
A list of UploadHandler instances that perform operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
# Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type)
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
#
# Content-Length should contain the length of the body we are about
# to receive.
#
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH',0)))
except (ValueError, TypeError):
# For now set it to 0; the check below will then raise an error.
content_length = 0
if content_length <= 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
self._chunk_size = min(2**31-4, *[x.chunk_size for x in upload_handlers if x.chunk_size])
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
limited_input_data = LimitBytes(self._input_data, self._content_length)
# See if the handler will want to take care of the parsing.
# This allows overriding everything if somebody wants it.
for handler in handlers:
result = handler.handle_raw_input(limited_input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(limited_input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
field_name = force_unicode(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = str(raw_data).decode('base64')
except:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_unicode(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_successful = True
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_unicode(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type = meta_data.get('content-type', ('',))[0].strip()
try:
charset = meta_data.get('content-type', (0,{}))[1].get('charset', None)
except:
charset = None
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
try:
chunk = str(chunk).decode('base64')
except Exception, e:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data: %r" % e)
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile, e:
file_successful = False
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD nor a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload, e:
if not e.connection_reset:
exhaust(limited_input_data)
else:
# Make sure that the request data is all fed
exhaust(limited_input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signalling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_unicode(old_field_name,
self._encoding,
errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\")+1:].strip()
class LazyStream(object):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = ''
self.length = length
self._position = 0
self._remaining = length
# These fields are to do sanity checking to make sure we don't
# have infinite loops getting/ungetting from the stream. The
# purpose overall is to raise an exception if we perform lots
# of stream get/unget gymnastics without getting
# anywhere. Naturally this is not sound, but most probably
# would indicate a bug if the exception is raised.
# The largest position tells us how far this lazy stream has ever
# been advanced.
self._largest_position = 0
# "modifications since" will start at zero and increment every
# time the position is modified but a new largest position is
# not achieved.
self._modifications_since = 0
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = (size is not None and [size] or [self._remaining])[0]
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield ''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = self.next()
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = ''.join(parts())
return out
def next(self):
"""
Used when the exact number of bytes to read is unimportant.
This procedure just returns whatever chunk is conveniently returned
from the iterator. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = ''
else:
output = self._producer.next()
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
self.position -= len(bytes)
self._leftover = ''.join([bytes, self._leftover])
def _set_position(self, value):
if value > self._largest_position:
self._modifications_since = 0
self._largest_position = value
else:
self._modifications_since += 1
if self._modifications_since > 500:
raise SuspiciousOperation(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
self._position = value
position = property(lambda self: self._position, _set_position)
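The get/unget contract above, exercised with a trivial producer in place of a network stream (LazyStream as defined in this file):

    def producer():
        for chunk in ['abc', 'def']:
            yield chunk

    stream = LazyStream(producer())
    assert stream.read(2) == 'ab'     # 'c' is stashed as leftover
    stream.unget('ab')                # rewind; position drops back to 0
    assert stream.read() == 'abcdef'  # leftover first, then the producer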
class ChunkIter(object):
"""
An iterable that will yield chunks of data. Given a file-like object as
the constructor argument, this object will yield the results of successive
read() calls on that object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def next(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class LimitBytes(object):
""" Limit bytes for a file object. """
def __init__(self, fileobject, length):
self._file = fileobject
self.remaining = length
def read(self, num_bytes=None):
"""
Read data from the underlying file.
Requests larger than the remaining byte count are clamped; once nothing
is left, this raises an InputStreamExhausted error.
"""
if self.remaining <= 0:
raise InputStreamExhausted()
if num_bytes is None:
num_bytes = self.remaining
else:
num_bytes = min(num_bytes, self.remaining)
self.remaining -= num_bytes
return self._file.read(num_bytes)
class InterBoundaryIter(object):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def next(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(object):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
Future calls to .next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
try:
from mx.TextTools import FS
self._fs = FS(boundary).find
except ImportError:
self._fs = lambda data: data.find(boundary)
def __iter__(self):
return self
def next(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = ''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]:# and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof = False):
"""
Finds a multipart boundary in data.
Should no boundary exist in the data, None is returned. Otherwise,
a tuple containing the indices of the following is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = self._fs(data)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
data_len = len(data) - 1
# backup over CRLF
if data[max(0,end-1)] == '\n':
end -= 1
if data[max(0,end-1)] == '\r':
end -= 1
# skip over --CRLF
#if data[min(data_len,next)] == '-':
# next += 1
#if data[min(data_len,next)] == '-':
# next += 1
#if data[min(data_len,next)] == '\r':
# next += 1
#if data[min(data_len,next)] == '\n':
# next += 1
return end, next
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the start of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find('\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split('\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = '--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value. """
plist = _parse_header_params(';' + line)
key = plist.pop(0).lower()
pdict = {}
for p in plist:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
def _parse_header_params(s):
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
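The header parser above applied to a typical multipart header value:

    key, pdict = parse_header('form-data; name="avatar"; filename="me.png"')
    print(key)     # 'form-data'
    print(pdict)   # {'name': 'avatar', 'filename': 'me.png'}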

View File

@ -19,14 +19,14 @@ class ConditionalGetMiddleware(object):
# Setting the status is enough here. The response handling path
# automatically removes content for this status code (in
# http.conditional_content_removal()).
response.status = 304
response.status_code = 304
if response.has_header('Last-Modified'):
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE', None)
if if_modified_since == response['Last-Modified']:
# Setting the status code is enough here (same reasons as
# above).
response.status = 304
response.status_code = 304
return response

View File

@ -7,6 +7,11 @@ import datetime
import os
import re
import time
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Python 2.3 fallbacks
try:
from decimal import Decimal, DecimalException
@ -416,9 +421,9 @@ except ImportError:
class UploadedFile(StrAndUnicode):
"A wrapper for files uploaded in a FileField"
def __init__(self, filename, content):
def __init__(self, filename, data):
self.filename = filename
self.content = content
self.data = data
def __unicode__(self):
"""
@ -444,15 +449,34 @@ class FileField(Field):
return None
elif not data and initial:
return initial
if isinstance(data, dict):
# We warn once, then support both ways below.
import warnings
warnings.warn(
message = "Representing uploaded files as dictionaries is"\
" deprecated. Use django.core.files.SimpleUploadedFile "\
" instead.",
category = DeprecationWarning,
stacklevel = 2
)
try:
f = UploadedFile(data['filename'], data['content'])
except TypeError:
file_name = data.file_name
file_size = data.file_size
except AttributeError:
try:
file_name = data.get('filename')
file_size = bool(data['content'])
except (AttributeError, KeyError):
raise ValidationError(self.error_messages['invalid'])
if not file_name:
raise ValidationError(self.error_messages['invalid'])
except KeyError:
raise ValidationError(self.error_messages['missing'])
if not f.content:
if not file_size:
raise ValidationError(self.error_messages['empty'])
return f
return UploadedFile(file_name, data)
class ImageField(FileField):
default_error_messages = {
@ -470,15 +494,31 @@ class ImageField(FileField):
elif not data and initial:
return initial
from PIL import Image
from cStringIO import StringIO
# We need to get a file object for PIL. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = StringIO(data.read())
else:
file = StringIO(data['content'])
try:
# load() is the only method that can spot a truncated JPEG,
# but it cannot be called sanely after verify()
trial_image = Image.open(StringIO(f.content))
trial_image = Image.open(file)
trial_image.load()
# Since we're about to use the file again we have to reset the
# file object if possible.
if hasattr(file, 'reset'):
file.reset()
# verify() is the only method that can spot a corrupt PNG,
# but it must be called immediately after the constructor
trial_image = Image.open(StringIO(f.content))
trial_image = Image.open(file)
trial_image.verify()
except Exception: # Python Imaging Library doesn't recognize it as an image
raise ValidationError(self.error_messages['invalid_image'])
@ -535,13 +575,17 @@ class BooleanField(Field):
def clean(self, value):
"""Returns a Python boolean object."""
super(BooleanField, self).clean(value)
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Because bool("True") == True, we don't need to
# handle that explicitly.
if value == 'False':
return False
return bool(value)
value = False
else:
value = bool(value)
super(BooleanField, self).clean(value)
if not value and self.required:
raise ValidationError(self.error_messages['required'])
return value
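A hedged sketch of the behavioural change (import path per this era's newforms package): the string 'False', which a hidden widget submits, now fails a required BooleanField instead of evaluating truthily.

    from django.newforms import BooleanField, ValidationError

    assert BooleanField(required=False).clean('False') is False
    try:
        BooleanField(required=True).clean('False')
    except ValidationError:
        print('required check now rejects the string False')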
class NullBooleanField(BooleanField):
"""

View File

@ -680,18 +680,27 @@ class FileUploadField(FormField):
self.field_name, self.is_required = field_name, is_required
self.validator_list = [self.isNonEmptyFile] + validator_list
def isNonEmptyFile(self, field_data, all_data):
def isNonEmptyFile(self, new_data, all_data):
if hasattr(new_data, 'upload_errors'):
upload_errors = new_data.upload_errors()
if upload_errors:
raise validators.CriticalValidationError, upload_errors
try:
content = field_data['content']
except TypeError:
raise validators.CriticalValidationError, ugettext("No file was submitted. Check the encoding type on the form.")
if not content:
file_size = new_data.file_size
except AttributeError:
file_size = len(new_data['content'])
if not file_size:
raise validators.CriticalValidationError, ugettext("The submitted file is empty.")
def render(self, data):
return mark_safe(u'<input type="file" id="%s" class="v%s" name="%s" />' % \
(self.get_id(), self.__class__.__name__, self.field_name))
def prepare(self, new_data):
if hasattr(new_data, 'upload_errors'):
upload_errors = new_data.upload_errors()
new_data[self.field_name] = { '_file_upload_error': upload_errors }
def html2python(data):
if data is None:
raise EmptyValue


@ -1,7 +1,11 @@
import urllib
import sys
import os
from cStringIO import StringIO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.core.handlers.base import BaseHandler
@ -19,6 +23,27 @@ from django.utils.itercompat import is_iterable
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
class FakePayload(object):
"""
A wrapper around StringIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
"""
def __init__(self, content):
self.__content = StringIO(content)
self.__len = len(content)
def read(self, num_bytes=None):
if num_bytes is None:
num_bytes = self.__len or 1
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
class ClientHandler(BaseHandler):
"""
A HTTP Handler that can be used for testing purposes.
@ -39,7 +64,7 @@ class ClientHandler(BaseHandler):
request = WSGIRequest(environ)
response = self.get_response(request)
# Apply response middleware.
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
@ -49,14 +74,15 @@ class ClientHandler(BaseHandler):
return response
def store_rendered_templates(store, signal, sender, template, context):
"A utility function for storing templates and contexts that are rendered"
"""
Stores templates and contexts that are rendered.
"""
store.setdefault('template',[]).append(template)
store.setdefault('context',[]).append(context)
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
@ -68,7 +94,8 @@ def encode_multipart(boundary, data):
if isinstance(value, file):
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"; filename="%s"' % (to_str(key), to_str(os.path.basename(value.name))),
'Content-Disposition: form-data; name="%s"; filename="%s"' \
% (to_str(key), to_str(os.path.basename(value.name))),
'Content-Type: application/octet-stream',
'',
value.read()
@ -122,13 +149,14 @@ class Client:
def store_exc_info(self, *args, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"Obtain the current session variables"
"""
Obtains the current session variables.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
@ -144,7 +172,6 @@ class Client:
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = {
'HTTP_COOKIE': self.cookies,
'PATH_INFO': '/',
@ -158,13 +185,13 @@ class Client:
environ.update(self.defaults)
environ.update(request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
dispatcher.connect(on_template_render, signal=signals.template_rendered)
# Capture exceptions created by the handler.
dispatcher.connect(self.store_exc_info, signal=got_request_exception)
try:
@ -187,14 +214,14 @@ class Client:
exc_info = self.exc_info
self.exc_info = None
raise exc_info[1], None, exc_info[2]
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
# If there was only one template rendered (the most likely case),
# flatten the list to a single element.
for detail in ('template', 'context'):
if data.get(detail):
if len(data[detail]) == 1:
@ -204,14 +231,16 @@ class Client:
else:
setattr(response, detail, None)
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
def get(self, path, data={}, **extra):
"Request a response from the server using GET."
"""
Requests a response from the server using GET.
"""
r = {
'CONTENT_LENGTH': None,
'CONTENT_TYPE': 'text/html; charset=utf-8',
@ -224,8 +253,9 @@ class Client:
return self.request(**r)
def post(self, path, data={}, content_type=MULTIPART_CONTENT, **extra):
"Request a response from the server using POST."
"""
Requests a response from the server using POST.
"""
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
@ -236,37 +266,43 @@ class Client:
'CONTENT_TYPE': content_type,
'PATH_INFO': urllib.unquote(path),
'REQUEST_METHOD': 'POST',
'wsgi.input': FakePayload(post_data),
}
r.update(extra)
return self.request(**r)
def login(self, **credentials):
"""Set the Client to appear as if it has sucessfully logged into a site.
"""
Sets the Client to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
user = authenticate(**credentials)
if user and user.is_active \
        and 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
# Create a fake request to store login details.
request = HttpRequest()
request.session = engine.SessionStore()
login(request, user)
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
    'max-age': None,
    'path': '/',
    'domain': settings.SESSION_COOKIE_DOMAIN,
    'secure': settings.SESSION_COOKIE_SECURE or None,
    'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
# Save the session values
# Save the session values.
request.session.save()
return True
@ -274,7 +310,8 @@ class Client:
return False
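# Illustrative usage of login() (not part of the patch; the credentials
# are hypothetical and assume a matching User exists):
#
#     c = Client()
#     if c.login(username='fred', password='secret'):
#         response = c.get('/protected/')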
def logout(self):
"""Removes the authenticated user's cookies.
"""
Removes the authenticated user's cookies.
Causes the authenticated user to be logged out.
"""


@ -4,10 +4,12 @@ from urlparse import urlsplit, urlunsplit
from django.http import QueryDict
from django.db import transaction
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.test import _doctest as doctest
from django.test.client import Client
from django.core.urlresolvers import clear_url_caches
normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
@ -54,6 +56,8 @@ class TestCase(unittest.TestCase):
* Flushing the database.
* If the Test Case class has a 'fixtures' member, installing the
named fixtures.
* If the Test Case class has a 'urls' member, replace the
ROOT_URLCONF with it.
* Clearing the mail test outbox.
"""
call_command('flush', verbosity=0, interactive=False)
@ -61,6 +65,10 @@ class TestCase(unittest.TestCase):
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures, **{'verbosity': 0})
if hasattr(self, 'urls'):
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
mail.outbox = []
def __call__(self, result=None):
@ -79,6 +87,23 @@ class TestCase(unittest.TestCase):
result.addError(self, sys.exc_info())
return
super(TestCase, self).__call__(result)
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
def _post_teardown(self):
""" Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
"""
if hasattr(self, '_old_root_urlconf'):
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None):


@ -2,7 +2,8 @@ import os
import sys
if os.name == 'posix':
def become_daemon(our_home_dir='.', out_log='/dev/null', err_log='/dev/null'):
def become_daemon(our_home_dir='.', out_log='/dev/null',
err_log='/dev/null', umask=022):
"Robustly turn into a UNIX daemon, running in our_home_dir."
# First fork
try:
@ -13,7 +14,7 @@ if os.name == 'posix':
sys.exit(1)
os.setsid()
os.chdir(our_home_dir)
os.umask(0)
os.umask(umask)
# Second fork
try:
@ -32,13 +33,13 @@ if os.name == 'posix':
# Set custom file descriptors so that they get proper buffering.
sys.stdout, sys.stderr = so, se
else:
def become_daemon(our_home_dir='.', out_log=None, err_log=None):
def become_daemon(our_home_dir='.', out_log=None, err_log=None, umask=022):
"""
If we're not running under a POSIX system, just simulate the daemon
mode by doing redirections and directory changing.
"""
os.chdir(our_home_dir)
os.umask(0)
os.umask(umask)
sys.stdin.close()
sys.stdout.close()
sys.stderr.close()


@ -332,17 +332,49 @@ class DotExpandedDict(dict):
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
class ImmutableList(tuple):
    """
    A tuple-like object that raises useful errors when it is asked to mutate.

    Example::

        >>> a = ImmutableList(range(5), warning="You cannot mutate this.")
        >>> a[3] = '4'
        Traceback (most recent call last):
            ...
        AttributeError: You cannot mutate this.
    """
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError, self.warning
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""


@ -3,6 +3,7 @@ from django.conf import settings
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.utils.translation import ugettext_lazy
from htmlentitydefs import name2codepoint
# Capitalizes the first letter of a string.
capfirst = lambda x: x and force_unicode(x)[0].upper() + force_unicode(x)[1:]
@ -222,3 +223,26 @@ def smart_split(text):
yield bit
smart_split = allow_lazy(smart_split, unicode)
def _replace_entity(match):
text = match.group(1)
if text[0] == u'#':
text = text[1:]
try:
if text[0] in u'xX':
c = int(text[1:], 16)
else:
c = int(text)
return unichr(c)
except ValueError:
return match.group(0)
else:
try:
return unichr(name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape_entities(text):
return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, unicode)
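# Illustrative behaviour of the new helper (not part of the patch):
#
#     >>> unescape_entities(u'&lt;Caf&eacute;&gt;')
#     u'<Caf\xe9>'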


@ -29,6 +29,22 @@ class Node(object):
self.subtree_parents = []
self.negated = negated
# We need this because of django.db.models.query_utils.Q. Q.__init__() is
# problematic, but it is a natural Node subclass in all other respects.
def _new_instance(cls, children=None, connector=None, negated=False):
"""
This is called to create a new instance of this class when we need new
Nodes (or subclasses) in the internal code in this class. Normally, it
just shadows __init__(). However, subclasses with an __init__ signature
that is not an extension of Node.__init__ might need to implement this
method to allow a Node to create a new instance of them (if they have
any extra setting up to do).
"""
obj = Node(children, connector, negated)
obj.__class__ = cls
return obj
_new_instance = classmethod(_new_instance)
def __str__(self):
if self.negated:
return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c
@ -82,7 +98,8 @@ class Node(object):
else:
self.children.append(node)
else:
obj = self._new_instance(self.children, self.connector, self.negated)
self.connector = conn_type
self.children = [obj, node]
@ -96,7 +113,8 @@ class Node(object):
Interpreting the meaning of this negate is up to client code. This
method is useful for implementing "not" arrangements.
"""
self.children = [self._new_instance(self.children, self.connector, not self.negated)]
self.connector = self.default
def start_subtree(self, conn_type):
@ -108,12 +126,13 @@ class Node(object):
if len(self.children) == 1:
self.connector = conn_type
elif self.connector != conn_type:
self.children = [self._new_instance(self.children, self.connector, self.negated)]
self.connector = conn_type
self.negated = False
self.subtree_parents.append(self.__class__(self.children, self.connector, self.negated))
self.connector = self.default
self.negated = False
self.children = []
@ -126,7 +145,7 @@ class Node(object):
the current instance's state to be the parent.
"""
obj = self.subtree_parents.pop()
node = self.__class__(self.children, self.connector)
self.connector = obj.connector
self.negated = obj.negated
self.children = obj.children


@ -39,9 +39,10 @@ with the standard ``Auth*`` and ``Require`` directives::
example at the bottom of this note).
You'll also need to insert configuration directives that prevent Apache
from trying to use other authentication modules, as well as specifying
the ``AuthUserFile`` directive and pointing it to ``/dev/null``. Depending
on which other authentication modules you have loaded, you might need one
or more of the following directives::
AuthBasicAuthoritative Off
AuthDefaultAuthoritative Off
@ -65,6 +66,7 @@ with the standard ``Auth*`` and ``Require`` directives::
<Location /example/>
AuthType Basic
AuthName "example.com"
**AuthUserFile /dev/null**
**AuthBasicAuthoritative Off**
Require valid-user


@ -443,6 +443,31 @@ This is roughly equivalent to::
Note, however, that the first of these will raise ``IndexError`` while the
second will raise ``DoesNotExist`` if no objects match the given criteria.
Combining QuerySets
-------------------
If you have two ``QuerySet`` instances that act on the same model, you can
combine them using ``&`` and ``|`` to get the items that are in both result
sets or in either result set, respectively. For example::
Entry.objects.filter(pubdate__gte=date1) & \
Entry.objects.filter(headline__startswith="What")
will combine the two queries into a single SQL query. Of course, in this case
you could have achieved the same result using multiple filters on the same
``QuerySet``, but sometimes the ability to combine individual ``QuerySet``
instances is useful.
Be careful, however, if you are using ``extra()`` to add custom handling to
your ``QuerySet``. All the ``extra()`` components are merged and the result
may or may not make sense. If you are using custom SQL fragments in your
``extra()`` calls, Django will not inspect these fragments to see if they need
to be rewritten because of changes in the merged query. So test the effects
carefully. Also realise that if you are combining two ``QuerySets`` with
``|``, you cannot use ``extra(select=...)`` or ``extra(where=...)`` on *both*
``QuerySets``. You can only use those calls on one or the other (Django will
raise a ``ValueError`` if you try to use this incorrectly).
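As a sketch (reusing the ``Entry`` examples from earlier in this document),
each operator gives you a new ``QuerySet``::

    morning = Entry.objects.filter(pub_date__gte=date1)
    what = Entry.objects.filter(headline__startswith="What")

    both = morning & what      # Entries in both result sets (SQL AND).
    either = morning | what    # Entries in either result set (SQL OR).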
QuerySet methods that return new QuerySets
------------------------------------------


@ -14,9 +14,14 @@ custom Django application.
A flatpage can use a custom template or a default, systemwide flatpage
template. It can be associated with one, or multiple, sites.
**New in Django development version**
The content field may optionally be left blank if you prefer to put your
content in a custom template.
Here are some examples of flatpages on Django-powered sites:
* http://www.chicagocrime.org/about/
* http://www.everyblock.com/about/
* http://www.lawrence.com/about/contact/
Installation


@ -2155,7 +2155,7 @@ still only creating one database table per child model at the database level.
When an abstract base class is created, Django makes any ``Meta`` inner class
you declared on the base class available as an attribute. If a child class
does not declare its own ``Meta`` class, it will inherit the parent's
``Meta``. If the child wants to extend the parent's ``Meta`` class, it can
subclass it. For example::
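A sketch of the pattern (field definitions elided; the model names are
illustrative)::

    class CommonInfo(models.Model):
        class Meta:
            abstract = True
            ordering = ['name']

    class Student(CommonInfo):
        class Meta(CommonInfo.Meta):
            db_table = 'student_info'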


@ -805,12 +805,12 @@ ContactForm to include an ``ImageField`` called ``mugshot``, we
need to bind the file data containing the mugshot image::
# Bound form with an image field
>>> from django.core.files.uploadedfile import SimpleUploadedFile
>>> data = {'subject': 'hello',
... 'message': 'Hi there',
... 'sender': 'foo@example.com',
... 'cc_myself': True}
>>> file_data = {'mugshot': SimpleUploadedFile('face.jpg', <file data>)}
>>> f = ContactFormWithMugshot(data, file_data)
In practice, you will usually specify ``request.FILES`` as the source


@ -80,19 +80,36 @@ All attributes except ``session`` should be considered read-only.
strings.
``FILES``
.. admonition:: Changed in Django development version
In previous versions of Django, ``request.FILES`` contained
simple ``dict`` objects representing uploaded files. This is
no longer true -- files are represented by ``UploadedFile``
objects as described below.
These ``UploadedFile`` objects will emulate the old-style ``dict``
interface, but this is deprecated and will be removed in the next
release of Django.
A dictionary-like object containing all uploaded files. Each key in
``FILES`` is the ``name`` from the ``<input type="file" name="" />``. Each
value in ``FILES`` is an ``UploadedFile`` object containing the following
attributes:

    * ``read(num_bytes=None)`` -- Read a number of bytes from the file.
    * ``file_name`` -- The name of the uploaded file.
    * ``file_size`` -- The size, in bytes, of the uploaded file.
    * ``chunk()`` -- A generator that yields sequential chunks of data.
See `File Uploads`_ for more information.
Note that ``FILES`` will only contain data if the request method was POST
and the ``<form>`` that posted to the request had
``enctype="multipart/form-data"``. Otherwise, ``FILES`` will be a blank
dictionary-like object.
.. _File Uploads: ../upload_handling/
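For example, a view might report on an upload like this (a sketch; the
``attachment`` field name is hypothetical)::

    def show_upload(request):
        f = request.FILES['attachment']
        return HttpResponse('%s is %d bytes' % (f.file_name, f.file_size))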
``META``
A standard Python dictionary containing all available HTTP headers.


@ -279,7 +279,7 @@ Default: ``''`` (Empty string)
The database backend to use. The built-in database backends are
``'postgresql_psycopg2'``, ``'postgresql'``, ``'mysql'``, ``'mysql_old'``,
``'sqlite3'`` and ``'oracle'``.
In the Django development version, you can use a database backend that doesn't
ship with Django by setting ``DATABASE_ENGINE`` to a fully-qualified path (i.e.
@ -530,6 +530,43 @@ Default: ``'utf-8'``
The character encoding used to decode any files read from disk. This includes
template files and initial SQL data files.
FILE_UPLOAD_HANDLERS
--------------------
**New in Django development version**
Default::
("django.core.files.fileuploadhandler.MemoryFileUploadHandler",
"django.core.files.fileuploadhandler.TemporaryFileUploadHandler",)
A tuple of handlers to use for uploading. See `file uploads`_ for details.
.. _file uploads: ../upload_handling/
FILE_UPLOAD_MAX_MEMORY_SIZE
---------------------------
**New in Django development version**
Default: ``2621440`` (i.e. 2.5 MB).
The maximum size (in bytes) that an upload will be before it gets streamed to
the file system. See `file uploads`_ for details.
FILE_UPLOAD_TEMP_DIR
--------------------
**New in Django development version**
Default: ``None``
The directory to store data temporarily while uploading files. If ``None``,
Django will use the standard temporary directory for the operating system. For
example, this will default to '/tmp' on *nix-style operating systems.
See `file uploads`_ for details.
FIXTURE_DIRS
-------------


@ -317,3 +317,14 @@ A more efficient solution, however, would be to call ``ping_google()`` from a
cron script, or some other scheduled task. The function makes an HTTP request
to Google's servers, so you may not want to introduce that network overhead
each time you call ``save()``.
Pinging Google via ``manage.py``
--------------------------------
**New in Django development version**
Once the sitemaps application is added to your project, you may also
ping Google through the command-line ``manage.py`` interface::
python manage.py ping_google [/sitemap.xml]


@ -477,11 +477,11 @@ escaped when the template is written.
This means you would write ::
{{ data|default:"3 &gt; 2" }}
{{ data|default:"3 &lt; 2" }}
...rather than ::
{{ data|default:"3 > 2" }} <-- Bad! Don't do this.
{{ data|default:"3 < 2" }} <-- Bad! Don't do this.
This doesn't affect what happens to data coming from the variable itself.
The variable's contents are still automatically escaped, if necessary, because


@ -797,6 +797,37 @@ another test, or by the order of test execution.
.. _dumpdata documentation: ../django-admin/#dumpdata-appname-appname
.. _loaddata documentation: ../django-admin/#loaddata-fixture-fixture
URLconf configuration
~~~~~~~~~~~~~~~~~~~~~
**New in Django development version**
If your application provides views, you may want to include tests that
use the test client to exercise those views. However, an end user is free
to deploy the views in your application at any URL of their choosing.
This means that your tests can't rely upon the fact that your views will
be available at a particular URL.
In order to provide a reliable URL space for your test,
``django.test.TestCase`` provides the ability to customize the URLconf
configuration for the duration of the execution of a test suite.
If your ``TestCase`` instance defines a ``urls`` attribute, the
``TestCase`` will use the value of that attribute as the ``ROOT_URLCONF``
for the duration of that test.
For example::
from django.test import TestCase
class TestMyViews(TestCase):
urls = 'myapp.test_urls'
def testIndexPageView(self):
# Here you'd test your view using ``Client``.
This test case will use the contents of ``myapp.test_urls`` as the
URLconf for the duration of the test case.
Emptying the test outbox
~~~~~~~~~~~~~~~~~~~~~~~~

docs/upload_handling.txt (new file, 346 lines)

@ -0,0 +1,346 @@
============
File Uploads
============
**New in Django development version**
Most Web sites wouldn't be complete without a way to upload files. When Django
handles a file upload, the file data ends up placed in ``request.FILES`` (for
more on the ``request`` object see the documentation for `request and response
objects`_). This document explains how files are stored on disk and in memory,
and how to customize the default behavior.
.. _request and response objects: ../request_response/#attributes
Basic file uploads
==================
Consider a simple form containing a ``FileField``::
from django import newforms as forms
class UploadFileForm(forms.Form):
title = forms.CharField(max_length=50)
file = forms.FileField()
A view handling this form will receive the file data in ``request.FILES``, which
is a dictionary containing a key for each ``FileField`` (or ``ImageField``, or
other ``FileField`` subclass) in the form. So the data from the above form would
be accessible as ``request.FILES['file']``.
Most of the time, you'll simply pass the file data from ``request`` into the
form as described in `binding uploaded files to a form`_. This would look
something like::
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
# Imaginary function to handle an uploaded file.
from somewhere import handle_uploaded_file
def upload_file(request):
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
handle_uploaded_file(request.FILES['file'])
return HttpResponseRedirect('/success/url/')
else:
form = UploadFileForm()
return render_to_response('upload.html', {'form': form})
.. _binding uploaded files to a form: ../newforms/#binding-uploaded-files-to-a-form
Notice that we have to pass ``request.FILES`` into the form's constructor; this
is how file data gets bound into a form.
Handling uploaded files
-----------------------
The final piece of the puzzle is handling the actual file data from
``request.FILES``. Each entry in this dictionary is an ``UploadedFile`` object
-- a simple wrapper around an uploaded file. You'll usually use one of these
methods to access the uploaded content:
``UploadedFile.read()``
Read the entire uploaded data from the file. Be careful with this
method: if the uploaded file is huge it can overwhelm your system if you
try to read it into memory. You'll probably want to use ``chunk()``
instead; see below.
``UploadedFile.multiple_chunks()``
Returns ``True`` if the uploaded file is big enough to require
reading in multiple chunks. By default this will be any file
larger than 2.5 megabytes, but that's configurable; see below.
``UploadedFile.chunk()``
A generator returning chunks of the file. If ``multiple_chunks()`` is
``True``, you should use this method in a loop instead of ``read()``.
In practice, it's often easiest simply to use ``chunk()`` all the time;
see the example below.
``UploadedFile.file_name``
The name of the uploaded file (e.g. ``my_file.txt``).
``UploadedFile.file_size``
The size, in bytes, of the uploaded file.
There are a few other methods and attributes available on ``UploadedFile``
objects; see `UploadedFile objects`_ for a complete reference.
Putting it all together, here's a common way you might handle an uploaded file::
    def handle_uploaded_file(f):
        destination = open('some/file/name.txt', 'wb')
        for chunk in f.chunk():
            destination.write(chunk)
        destination.close()

Looping over ``UploadedFile.chunk()`` instead of using ``read()`` ensures that
large files don't overwhelm your system's memory.
Where uploaded data is stored
-----------------------------
Before you save uploaded files, the data needs to be stored somewhere.
By default, if an uploaded file is smaller than 2.5 megabytes, Django will hold
the entire contents of the upload in memory. This means that saving the file
involves only a read from memory and a write to disk and thus is very fast.
However, if an uploaded file is too large, Django will write the uploaded file
to a temporary file stored in your system's temporary directory. On a Unix-like
platform this means you can expect Django to generate a file called something
like ``/tmp/tmpzfp6I6.upload``. If an upload is large enough, you can watch this
file grow in size as Django streams the data onto disk.
These specifics -- 2.5 megabytes; ``/tmp``; etc. -- are simply "reasonable
defaults". Read on for details on how you can customize or completely replace
upload behavior.
Changing upload handler behavior
--------------------------------
Three `settings`_ control Django's file upload behavior:
``FILE_UPLOAD_MAX_MEMORY_SIZE``
The maximum size, in bytes, for files that will be uploaded
into memory. Files larger than ``FILE_UPLOAD_MAX_MEMORY_SIZE``
will be streamed to disk.
Defaults to 2.5 megabytes.
``FILE_UPLOAD_TEMP_DIR``
The directory where uploaded files larger than
``FILE_UPLOAD_MAX_MEMORY_SIZE`` will be stored.
Defaults to your system's standard temporary directory (i.e. ``/tmp`` on
most Unix-like systems).
``FILE_UPLOAD_HANDLERS``
The actual handlers for uploaded files. Changing this setting
allows complete customization -- even replacement -- of
Django's upload process. See `upload handlers`_, below,
for details.
Defaults to::
("django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",)
Which means "try to upload to memory first, then fall back to temporary
files."
.. _settings: ../settings/
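For instance, a project could raise the in-memory threshold and pin down the
temporary directory in its settings file; the values below are purely
illustrative::

    FILE_UPLOAD_MAX_MEMORY_SIZE = 10 * 2**20    # Stream to disk above 10 MB.
    FILE_UPLOAD_TEMP_DIR = '/var/tmp/uploads'   # A hypothetical directory.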
``UploadedFile`` objects
========================
All ``UploadedFile`` objects define the following methods/attributes:
``UploadedFile.read(self, num_bytes=None)``
Returns a byte string of length ``num_bytes``, or the complete file if
``num_bytes`` is ``None``.
``UploadedFile.chunk(self, chunk_size=None)``
A generator yielding small chunks from the file. If ``chunk_size`` isn't
given, chunks will be 64 KB.
``UploadedFile.multiple_chunks(self, chunk_size=None)``
Returns ``True`` if you can expect more than one chunk when calling
``UploadedFile.chunk(self, chunk_size)``.
``UploadedFile.file_size``
The size, in bytes, of the uploaded file.
``UploadedFile.file_name``
The name of the uploaded file as provided by the user.
``UploadedFile.content_type``
The content-type header uploaded with the file (e.g. ``text/plain`` or
``application/pdf``). Like any data supplied by the user, you shouldn't
trust that the uploaded file is actually this type. You'll still need to
validate that the file contains the content that the content-type header
claims -- "trust but verify."
``UploadedFile.charset``
For ``text/*`` content-types, the character set (i.e. ``utf8``) supplied
by the browser. Again, "trust but verify" is the best policy here.
``UploadedFile.temporary_file_path()``
Only files uploaded onto disk will have this method; it returns the full
path to the temporary uploaded file.
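Tying the reference together, a helper that writes any upload to a
destination of your choosing might look like this (a sketch)::

    def save_upload(f, destination_path):
        destination = open(destination_path, 'wb')
        if f.multiple_chunks():
            # Large upload: stream it piece by piece.
            for chunk in f.chunk():
                destination.write(chunk)
        else:
            # Small upload: a single read is fine.
            destination.write(f.read())
        destination.close()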
Upload Handlers
===============
When a user uploads a file, Django passes off the file data to an *upload
handler* -- a small class that handles file data as it gets uploaded. Upload
handlers are initially defined in the ``FILE_UPLOAD_HANDLERS`` setting, which
defaults to::
("django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",)
Together the ``MemoryFileUploadHandler`` and ``TemporaryFileUploadHandler``
provide Django's default file upload behavior of reading small files into memory
and large ones onto disk.
You can write custom handlers that customize how Django handles files. You
could, for example, use custom handlers to enforce user-level quotas, compress
data on the fly, render progress bars, and even send data to another storage
location directly without storing it locally.
Modifying upload handlers on the fly
------------------------------------
Sometimes particular views require different upload behavior. In these cases,
you can override upload handlers on a per-request basis by modifying
``request.upload_handlers``. By default, this list will contain the upload
handlers given by ``FILE_UPLOAD_HANDLERS``, but you can modify the list as you
would any other list.
For instance, suppose you've written a ``ProgressBarUploadHandler`` that
provides feedback on upload progress to some sort of AJAX widget. You'd add this
handler to your upload handlers like this::
request.upload_handlers.insert(0, ProgressBarUploadHandler())
You'd probably want to use ``list.insert()`` in this case (instead of
``append()``) because a progress bar handler would need to run *before* any
other handlers. Remember, the upload handlers are processed in order.
If you want to replace the upload handlers completely, you can just assign a new
list::
request.upload_handlers = [ProgressBarUploadHandler()]
.. note::
You can only modify upload handlers *before* accessing ``request.FILES`` --
it doesn't make sense to change upload handlers after upload handling has
already started. If you try to modify ``request.upload_handlers`` after
reading from ``request.FILES``, Django will throw an error.
Thus, you should always modify upload handlers as early in your view as
possible.
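For instance, a view using the hypothetical ``ProgressBarUploadHandler`` from
above would install it first thing (a sketch)::

    def upload_with_progress(request):
        # Must run before request.POST or request.FILES is accessed.
        request.upload_handlers.insert(0, ProgressBarUploadHandler())
        files = request.FILES  # Upload handling happens here.
        return HttpResponse('Received %d file(s).' % len(files))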
Writing custom upload handlers
------------------------------
All file upload handlers should be subclasses of
``django.core.files.uploadhandler.FileUploadHandler``. You can define upload
handlers wherever you wish.
Required methods
~~~~~~~~~~~~~~~~
Custom file upload handlers **must** define the following methods:
``FileUploadHandler.receive_data_chunk(self, raw_data, start)``
Receives a "chunk" of data from the file upload.
``raw_data`` is a byte string containing the uploaded data.
``start`` is the position in the file where this ``raw_data`` chunk
begins.
The data you return will get fed into the subsequent upload handlers'
``receive_data_chunk`` methods. In this way, one handler can be a
"filter" for other handlers.
Return ``None`` from ``receive_data_chunk`` to short-circuit remaining
upload handlers from getting this chunk. This is useful if you're
storing the uploaded data yourself and don't want future handlers to
store a copy of the data.
If you raise a ``StopUpload`` exception, the upload will abort; if you raise
a ``SkipFile`` exception, the file will be skipped entirely.
``FileUploadHandler.file_complete(self, file_size)``
Called when a file has finished uploading.
The handler should return an ``UploadedFile`` object that will be stored
in ``request.FILES``. Handlers may also return ``None`` to indicate that
the ``UploadedFile`` object should come from subsequent upload handlers.
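For a concrete (if deliberately boring) illustration, a minimal pass-through
handler needs only these two methods -- a sketch, not something that ships
with Django::

    from django.core.files.uploadhandler import FileUploadHandler

    class PassThroughUploadHandler(FileUploadHandler):
        """Hands every chunk, untouched, to the remaining handlers."""
        def receive_data_chunk(self, raw_data, start):
            return raw_data    # Let later handlers see the data.

        def file_complete(self, file_size):
            return None        # Let a later handler build the UploadedFile.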
Optional methods
~~~~~~~~~~~~~~~~
Custom upload handlers may also define any of the following optional methods or
attributes:
``FileUploadHandler.chunk_size``
Size, in bytes, of the "chunks" Django should store into memory and feed
into the handler. That is, this attribute controls the size of chunks
fed into ``FileUploadHandler.receive_data_chunk``.
For maximum performance the chunk sizes should be divisible by ``4`` and
should not exceed 2 GB (2\ :sup:`31` bytes) in size. When there are
multiple chunk sizes provided by multiple handlers, Django will use the
smallest chunk size defined by any handler.
The default is 64*2\ :sup:`10` bytes, or 64 KB.
``FileUploadHandler.new_file(self, field_name, file_name, content_type, content_length, charset)``
Callback signaling that a new file upload is starting. This is called
before any data has been fed to any upload handlers.
``field_name`` is a string name of the file ``<input>`` field.
``file_name`` is the unicode filename that was provided by the browser.
``content_type`` is the MIME type provided by the browser -- e.g.
``'image/jpeg'``.

``content_length`` is the length of the file given by the browser, or
``None`` if it wasn't provided.
``charset`` is the character set (i.e. ``utf8``) given by the browser.
Like ``content_length``, this sometimes won't be provided.
This method may raise a ``StopFutureHandlers`` exception to prevent
future handlers from handling this file.
``FileUploadHandler.upload_complete(self)``
Callback signaling that the entire upload (all files) has completed.
``FileUploadHandler.handle_raw_input(self, input_data, META, content_length, boundary, encoding)``
Allows the handler to completely override the parsing of the raw
HTTP input.
``input_data`` is a file-like object that supports ``read()``-ing.
``META`` is the same object as ``request.META``.
``content_length`` is the length of the data in ``input_data``. Don't
read more than ``content_length`` bytes from ``input_data``.
``boundary`` is the MIME boundary for this request.
``encoding`` is the encoding of the request.
Return ``None`` if you want upload handling to continue, or a tuple of
``(POST, FILES)`` if you want to return the new data structures suitable
for the request directly.


@ -67,7 +67,13 @@ class TextFile(models.Model):
class ImageFile(models.Model):
description = models.CharField(max_length=20)
try:
    # If PIL is available, try testing PIL.
    # Otherwise, it's equivalent to TextFile above.
    import Image
    image = models.ImageField(upload_to=tempfile.gettempdir())
except ImportError:
    image = models.FileField(upload_to=tempfile.gettempdir())
def __unicode__(self):
return self.description
@ -75,6 +81,7 @@ class ImageFile(models.Model):
__test__ = {'API_TESTS': """
>>> from django import newforms as forms
>>> from django.newforms.models import ModelForm
>>> from django.core.files.uploadedfile import SimpleUploadedFile
The bare bones, absolutely nothing custom, basic case.
@ -792,7 +799,18 @@ False
# Upload a file and ensure it all works as expected.
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test1.txt', 'hello world')})
>>> f.is_valid()
True
>>> type(f.cleaned_data['file'])
<class 'django.newforms.fields.UploadedFile'>
>>> instance = f.save()
>>> instance.file
u'...test1.txt'
>>> os.unlink(instance.get_file_filename())
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test1.txt', 'hello world')})
>>> f.is_valid()
True
>>> type(f.cleaned_data['file'])
@ -814,18 +832,30 @@ u'...test1.txt'
u'...test1.txt'
# Delete the current file since this is not done by Django.
>>> os.unlink(instance.get_file_filename())
# Override the file by uploading a new one.
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test2.txt', 'hello world')}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
u'...test2.txt'
# Delete the current file since this is not done by Django.
>>> os.unlink(instance.get_file_filename())
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test2.txt', 'hello world')})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
u'...test2.txt'
# Delete the current file since this is not done by Django.
>>> os.unlink(instance.get_file_filename())
>>> instance.delete()
# Test the non-required FileField
@ -838,12 +868,26 @@ True
>>> instance.file
''
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test3.txt', 'hello world')}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
u'...test3.txt'
# Delete the current file since this is not done by Django.
>>> os.unlink(instance.get_file_filename())
>>> instance.delete()
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test3.txt', 'hello world')})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
u'...test3.txt'
# Delete the current file since this is not done by Django.
>>> os.unlink(instance.get_file_filename())
>>> instance.delete()
# ImageField ###################################################################
@ -858,7 +902,19 @@ u'...test3.txt'
>>> image_data = open(os.path.join(os.path.dirname(__file__), "test.png")).read()
>>> f = ImageFileForm(data={'description': u'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)})
>>> f.is_valid()
True
>>> type(f.cleaned_data['image'])
<class 'django.newforms.fields.UploadedFile'>
>>> instance = f.save()
>>> instance.image
u'...test.png'
# Delete the current file since this is not done by Django.
>>> os.unlink(instance.get_image_filename())
>>> f = ImageFileForm(data={'description': u'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)})
>>> f.is_valid()
True
>>> type(f.cleaned_data['image'])
@ -885,13 +941,26 @@ u'...test.png'
# Override the file by uploading a new one.
>>> f = ImageFileForm(data={'description': u'Changed it'}, files={'image': SimpleUploadedFile('test2.png', image_data)}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
u'...test2.png'
# Delete the current file since this is not done by Django.
>>> os.unlink(instance.get_image_filename())
>>> instance.delete()
>>> f = ImageFileForm(data={'description': u'Changed it'}, files={'image': SimpleUploadedFile('test2.png', image_data)})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
u'...test2.png'
# Delete the current file since this is not done by Django.
>>> os.unlink(instance.get_image_filename())
>>> instance.delete()
# Test the non-required ImageField
@ -904,7 +973,18 @@ True
>>> instance.image
''
>>> f = ImageFileForm(data={'description': u'And a final one'}, files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
u'...test3.png'
# Delete the current file since this is not done by Django.
>>> os.unlink(instance.get_image_filename())
>>> instance.delete()
>>> f = ImageFileForm(data={'description': u'And a final one'}, files={'image': SimpleUploadedFile('test3.png', image_data)})
>>> f.is_valid()
True
>>> instance = f.save()


@ -200,6 +200,29 @@ InvalidPage: ...
>>> paginator.page_range
[1]
# ObjectPaginator can be passed lists too.
>>> paginator = ObjectPaginator([1, 2, 3], 5)
>>> paginator.hits
3
>>> paginator.pages
1
>>> paginator.page_range
[1]
# ObjectPaginator can be passed other objects with a __len__() method.
>>> class Container:
... def __len__(self):
... return 42
>>> paginator = ObjectPaginator(Container(), 10)
>>> paginator.hits
42
>>> paginator.pages
5
>>> paginator.page_range
[1, 2, 3, 4, 5]
##################
# Orphan support #
##################


@ -9,6 +9,7 @@ import unittest
from regressiontests.bug639.models import Photo
from django.http import QueryDict
from django.utils.datastructures import MultiValueDict
from django.core.files.uploadedfile import SimpleUploadedFile
class Bug639Test(unittest.TestCase):
@ -21,12 +22,8 @@ class Bug639Test(unittest.TestCase):
# Fake a request query dict with the file
qd = QueryDict("title=Testing&image=", mutable=True)
qd["image_file"] = {
"filename" : "test.jpg",
"content-type" : "image/jpeg",
"content" : img
}
qd["image_file"] = SimpleUploadedFile('test.jpg', img, 'image/jpeg')
manip = Photo.AddManipulator()
manip.do_html2python(qd)
p = manip.save(qd)
@ -39,4 +36,4 @@ class Bug639Test(unittest.TestCase):
Make sure to delete the "uploaded" file to avoid clogging /tmp.
"""
p = Photo.objects.get()
os.unlink(p.get_image_filename())


@ -117,14 +117,25 @@ Init from sequence of tuples
>>> d['person']['2']['firstname']
['Adrian']
### ImmutableList ################################################################

>>> d = ImmutableList(range(10))
>>> d.sort()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/var/lib/python-support/python2.5/django/utils/datastructures.py", line 359, in complain
raise AttributeError, self.warning
AttributeError: ImmutableList object is immutable.
>>> repr(d)
'(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)'
>>> d = ImmutableList(range(10), warning="Object is immutable!")
>>> d[1]
1
>>> d[1] = 'test'
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/var/lib/python-support/python2.5/django/utils/datastructures.py", line 359, in complain
raise AttributeError, self.warning
AttributeError: Object is immutable!
### DictWrapper #############################################################


@ -0,0 +1,55 @@
import copy
from django.db import models
from django.db.models.query import Q
class RevisionableModel(models.Model):
base = models.ForeignKey('self', null=True)
title = models.CharField(blank=True, max_length=255)
def __unicode__(self):
return u"%s (%s, %s)" % (self.title, self.id, self.base.id)
def save(self):
super(RevisionableModel, self).save()
if not self.base:
self.base = self
super(RevisionableModel, self).save()
def new_revision(self):
new_revision = copy.copy(self)
new_revision.pk = None
return new_revision
__test__ = {"API_TESTS": """
### Regression tests for #7314 and #7372
>>> rm = RevisionableModel.objects.create(title='First Revision')
>>> rm.pk, rm.base.pk
(1, 1)
>>> rm2 = rm.new_revision()
>>> rm2.title = "Second Revision"
>>> rm2.save()
>>> print u"%s of %s" % (rm2.title, rm2.base.title)
Second Revision of First Revision
>>> rm2.pk, rm2.base.pk
(2, 1)
Queryset to match most recent revision:
>>> qs = RevisionableModel.objects.extra(where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % {'table': RevisionableModel._meta.db_table,}],)
>>> qs
[<RevisionableModel: Second Revision (2, 1)>]
Queryset to search for string in title:
>>> qs2 = RevisionableModel.objects.filter(title__contains="Revision")
>>> qs2
[<RevisionableModel: First Revision (1, 1)>, <RevisionableModel: Second Revision (2, 1)>]
Following queryset should return the most recent revision:
>>> qs & qs2
[<RevisionableModel: Second Revision (2, 1)>]
"""}


@ -0,0 +1,2 @@
# This file unintentionally left blank.
# Oops.


@ -0,0 +1,158 @@
import os
import sha
import tempfile
from django.test import TestCase, client
from django.utils import simplejson
class FileUploadTests(TestCase):
def test_simple_upload(self):
post_data = {
'name': 'Ringo',
'file_field': open(__file__),
}
response = self.client.post('/file_uploads/upload/', post_data)
self.assertEqual(response.status_code, 200)
def test_large_upload(self):
tdir = tempfile.gettempdir()
file1 = tempfile.NamedTemporaryFile(suffix=".file1", dir=tdir)
file1.write('a' * (2 ** 21))
file1.seek(0)
file2 = tempfile.NamedTemporaryFile(suffix=".file2", dir=tdir)
file2.write('a' * (10 * 2 ** 20))
file2.seek(0)
# This file contains Chinese characters in its name.
file3 = open(os.path.join(tdir, u'test_\u4e2d\u6587_Orl\u00e9ans.jpg'), 'w+b')
file3.write('b' * (2 ** 10))
file3.seek(0)
post_data = {
'name': 'Ringo',
'file_field1': open(file1.name),
'file_field2': open(file2.name),
'file_unicode': file3,
}
for key in post_data.keys():
try:
post_data[key + '_hash'] = sha.new(post_data[key].read()).hexdigest()
post_data[key].seek(0)
except AttributeError:
post_data[key + '_hash'] = sha.new(post_data[key]).hexdigest()
response = self.client.post('/file_uploads/verify/', post_data)
try:
    os.unlink(file3.name)
except OSError:
    pass
self.assertEqual(response.status_code, 200)
def test_dangerous_file_names(self):
"""Uploaded file names should be sanitized before ever reaching the view."""
# This test simulates possible directory traversal attacks by a
# malicious uploader. We have to do some monkeybusiness here to construct
# a malicious payload with an invalid file name (containing os.sep or
# os.pardir). This is similar to what an attacker would need to do when
# trying such an attack.
scary_file_names = [
"/tmp/hax0rd.txt", # Absolute path, *nix-style.
"C:\\Windows\\hax0rd.txt", # Absolute path, win-syle.
"C:/Windows/hax0rd.txt", # Absolute path, broken-style.
"\\tmp\\hax0rd.txt", # Absolute path, broken in a different way.
"/tmp\\hax0rd.txt", # Absolute path, broken by mixing.
"subdir/hax0rd.txt", # Descendant path, *nix-style.
"subdir\\hax0rd.txt", # Descendant path, win-style.
"sub/dir\\hax0rd.txt", # Descendant path, mixed.
"../../hax0rd.txt", # Relative path, *nix-style.
"..\\..\\hax0rd.txt", # Relative path, win-style.
"../..\\hax0rd.txt" # Relative path, mixed.
]
payload = []
for i, name in enumerate(scary_file_names):
payload.extend([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.'
])
payload.extend([
'--' + client.BOUNDARY + '--',
'',
])
payload = "\r\n".join(payload)
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/file_uploads/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(payload),
}
response = self.client.request(**r)
# The filenames should have been sanitized by the time it got to the view.
received = simplejson.loads(response.content)
for i, name in enumerate(scary_file_names):
got = recieved["file%s" % i]
self.assertEqual(got, "hax0rd.txt")
def test_filename_overflow(self):
"""File names over 256 characters (dangerous on some platforms) get fixed up."""
name = "%s.txt" % ("f"*500)
payload = "\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="%s"' % name,
'Content-Type: application/octet-stream',
'',
'Oops.',
'--' + client.BOUNDARY + '--',
'',
])
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/file_uploads/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(payload),
}
got = simplejson.loads(self.client.request(**r).content)
self.assert_(len(got['file']) < 256, "Got a long file name (%s characters)." % len(got['file']))
def test_custom_upload_handler(self):
# A small file (under the 5M quota)
smallfile = tempfile.NamedTemporaryFile()
smallfile.write('a' * (2 ** 21))
# A big file (over the quota)
bigfile = tempfile.NamedTemporaryFile()
bigfile.write('a' * (10 * 2 ** 20))
# Small file posting should work.
response = self.client.post('/file_uploads/quota/', {'f': open(smallfile.name)})
got = simplejson.loads(response.content)
self.assert_('f' in got)
# Large files don't go through.
response = self.client.post("/file_uploads/quota/", {'f': open(bigfile.name)})
got = simplejson.loads(response.content)
self.assert_('f' not in got)
def test_broken_custom_upload_handler(self):
f = tempfile.NamedTemporaryFile()
f.write('a' * (2 ** 21))
# AttributeError: You cannot alter upload handlers after the upload has been processed.
self.assertRaises(
AttributeError,
self.client.post,
'/file_uploads/quota/broken/',
{'f': open(f.name)}
)


@ -0,0 +1,26 @@
"""
Upload handlers to test the upload API.
"""
from django.core.files.uploadhandler import FileUploadHandler, StopUpload
class QuotaUploadHandler(FileUploadHandler):
"""
This test upload handler terminates the connection if more than a quota
(5MB) is uploaded.
"""
QUOTA = 5 * 2**20 # 5 MB
def __init__(self, request=None):
super(QuotaUploadHandler, self).__init__(request)
self.total_upload = 0
def receive_data_chunk(self, raw_data, start):
self.total_upload += len(raw_data)
if self.total_upload >= self.QUOTA:
raise StopUpload(connection_reset=True)
return raw_data
def file_complete(self, file_size):
return None


@ -0,0 +1,10 @@
from django.conf.urls.defaults import *
import views
urlpatterns = patterns('',
(r'^upload/$', views.file_upload_view),
(r'^verify/$', views.file_upload_view_verify),
(r'^echo/$', views.file_upload_echo),
(r'^quota/$', views.file_upload_quota),
(r'^quota/broken/$', views.file_upload_quota_broken),
)


@ -0,0 +1,70 @@
import os
import sha
from django.core.files.uploadedfile import UploadedFile
from django.http import HttpResponse, HttpResponseServerError
from django.utils import simplejson
from uploadhandler import QuotaUploadHandler
def file_upload_view(request):
"""
Check that a file upload can be updated into the POST dictionary without
going pear-shaped.
"""
form_data = request.POST.copy()
form_data.update(request.FILES)
if isinstance(form_data.get('file_field'), UploadedFile) and isinstance(form_data['name'], unicode):
# If a file is posted, the dummy client should only post the file name,
# not the full path.
if os.path.dirname(form_data['file_field'].file_name) != '':
return HttpResponseServerError()
return HttpResponse('')
else:
return HttpResponseServerError()
def file_upload_view_verify(request):
"""
Use the sha digest hash to verify the uploaded contents.
"""
form_data = request.POST.copy()
form_data.update(request.FILES)
# Check to see if unicode names worked out.
if not request.FILES['file_unicode'].file_name.endswith(u'test_\u4e2d\u6587_Orl\xe9ans.jpg'):
return HttpResponseServerError()
for key, value in form_data.items():
if key.endswith('_hash'):
continue
if key + '_hash' not in form_data:
continue
submitted_hash = form_data[key + '_hash']
if isinstance(value, UploadedFile):
new_hash = sha.new(value.read()).hexdigest()
else:
new_hash = sha.new(value).hexdigest()
if new_hash != submitted_hash:
return HttpResponseServerError()
return HttpResponse('')
def file_upload_echo(request):
"""
Simple view to echo back info about uploaded files for tests.
"""
r = dict([(k, f.file_name) for k, f in request.FILES.items()])
return HttpResponse(simplejson.dumps(r))
def file_upload_quota(request):
"""
Dynamically add in an upload handler.
"""
request.upload_handlers.insert(0, QuotaUploadHandler())
return file_upload_echo(request)
def file_upload_quota_broken(request):
"""
You can't change handlers after reading FILES; this view shouldn't work.
"""
response = file_upload_echo(request)
request.upload_handlers.insert(0, QuotaUploadHandler())
return response


@ -0,0 +1,83 @@
[
{
"pk": 6,
"model": "fixtures_regress.channel",
"fields": {
"name": "Business"
}
},
{
"pk": 1,
"model": "fixtures_regress.article",
"fields": {
"title": "Article Title 1",
"channels": [6]
}
},
{
"pk": 2,
"model": "fixtures_regress.article",
"fields": {
"title": "Article Title 2",
"channels": [6]
}
},
{
"pk": 3,
"model": "fixtures_regress.article",
"fields": {
"title": "Article Title 3",
"channels": [6]
}
},
{
"pk": 4,
"model": "fixtures_regress.article",
"fields": {
"title": "Article Title 4",
"channels": [6]
}
},
{
"pk": 5,
"model": "fixtures_regress.article",
"fields": {
"title": "Article Title 5",
"channels": [6]
}
},
{
"pk": 6,
"model": "fixtures_regress.article",
"fields": {
"title": "Article Title 6",
"channels": [6]
}
},
{
"pk": 7,
"model": "fixtures_regress.article",
"fields": {
"title": "Article Title 7",
"channels": [6]
}
},
{
"pk": 8,
"model": "fixtures_regress.article",
"fields": {
"title": "Article Title 8",
"channels": [6]
}
},
{
"pk": 9,
"model": "fixtures_regress.article",
"fields": {
"title": "Yet Another Article",
"channels": [6]
}
}
]

View File

@@ -0,0 +1,4 @@
[
{"pk": 1, "model": "fixtures_regress.parent", "fields": {"name": "fred"}},
{"pk": 1, "model": "fixtures_regress.child", "fields": {"data": "apple"}}
]

View File

@@ -20,7 +20,7 @@ class Plant(models.Model):
class Stuff(models.Model):
name = models.CharField(max_length=20, null=True)
owner = models.ForeignKey(User, null=True)
def __unicode__(self):
# Oracle doesn't distinguish between None and the empty string.
# This hack makes the test case pass using Oracle.
@@ -38,13 +38,29 @@ class Absolute(models.Model):
super(Absolute, self).__init__(*args, **kwargs)
Absolute.load_count += 1
class Parent(models.Model):
name = models.CharField(max_length=10)
class Child(Parent):
data = models.CharField(max_length=10)
# Models to regression-test ticket #7572
class Channel(models.Model):
name = models.CharField(max_length=255)
class Article(models.Model):
title = models.CharField(max_length=255)
channels = models.ManyToManyField(Channel)
class Meta:
ordering = ('id',)
__test__ = {'API_TESTS':"""
>>> from django.core import management
# Load a fixture that uses PK=1
>>> management.call_command('loaddata', 'sequence', verbosity=0)
# Create a new animal. Without a sequence reset, this new object
# will take a PK of 1 (on Postgres), and the save will fail.
# This is a regression test for ticket #3790.
@@ -61,9 +77,9 @@ __test__ = {'API_TESTS':"""
[<Stuff: None is owned by None>]
###############################################
# Regression test for ticket #6436 --
# os.path.join will throw away the initial parts of a path if it encounters
# an absolute path. This means that if a fixture is specified as an absolute path,
# we need to make sure we don't discover the absolute path in every fixture directory.
>>> load_absolute_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'absolute.json')
@@ -94,4 +110,28 @@ No fixture data found for 'bad_fixture2'. (File format may be invalid.)
>>> sys.stderr = savestderr
###############################################
# Test for ticket #7565 -- PostgreSQL sequence resetting checks shouldn't
# ascend to parent models when inheritance is used (since they are treated
# individually).
>>> management.call_command('loaddata', 'model-inheritance.json', verbosity=0)
###############################################
# Test for ticket #7572 -- MySQL has a problem if the same connection is
# used to create tables, load data, and then query over that data.
# To compensate, we close the connection after running loaddata.
# This ensures that a new connection is opened when test queries are issued.
>>> management.call_command('loaddata', 'big-fixture.json', verbosity=0)
>>> articles = Article.objects.exclude(id=9)
>>> articles.values_list('id', flat=True)
[1, 2, 3, 4, 5, 6, 7, 8]
# Just for good measure, run the same query again. Without the fix for
# ticket #7572, this would give a different result from the previous call.
>>> articles.values_list('id', flat=True)
[1, 2, 3, 4, 5, 6, 7, 8]
"""}

View File

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
tests = r"""
>>> from django.newforms import *
>>> from django.core.files.uploadedfile import SimpleUploadedFile
# CharField ###################################################################
@@ -214,11 +215,11 @@ ValidationError: [u'INVALID']
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
>>> f.clean({})
>>> f.clean(SimpleUploadedFile('name', None))
Traceback (most recent call last):
...
ValidationError: [u'MISSING']
>>> f.clean({'filename': 'name', 'content':''})
ValidationError: [u'EMPTY FILE']
>>> f.clean(SimpleUploadedFile('name', ''))
Traceback (most recent call last):
...
ValidationError: [u'EMPTY FILE']
@@ -237,7 +238,7 @@ ValidationError: [u'REQUIRED']
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
>>> f.clean('http://www.jfoiwjfoi23jfoijoaijfoiwjofiwjefewl.com')
>>> f.clean('http://www.broken.djangoproject.com')
Traceback (most recent call last):
...
ValidationError: [u'INVALID LINK']

View File

@@ -2,6 +2,7 @@
tests = r"""
>>> from django.newforms import *
>>> from django.newforms.widgets import RadioFieldRenderer
>>> from django.core.files.uploadedfile import SimpleUploadedFile
>>> import datetime
>>> import time
>>> import re
@@ -770,17 +771,17 @@ ValidationError: [u'This field is required.']
>>> f.clean(None, 'files/test2.pdf')
'files/test2.pdf'
>>> f.clean({})
>>> f.clean(SimpleUploadedFile('', ''))
Traceback (most recent call last):
...
ValidationError: [u'No file was submitted.']
ValidationError: [u'No file was submitted. Check the encoding type on the form.']
>>> f.clean({}, '')
>>> f.clean(SimpleUploadedFile('', ''), '')
Traceback (most recent call last):
...
ValidationError: [u'No file was submitted.']
ValidationError: [u'No file was submitted. Check the encoding type on the form.']
>>> f.clean({}, 'files/test3.pdf')
>>> f.clean(None, 'files/test3.pdf')
'files/test3.pdf'
>>> f.clean('some content that is not a file')
@@ -788,20 +789,20 @@ Traceback (most recent call last):
...
ValidationError: [u'No file was submitted. Check the encoding type on the form.']
>>> f.clean({'filename': 'name', 'content': None})
>>> f.clean(SimpleUploadedFile('name', None))
Traceback (most recent call last):
...
ValidationError: [u'The submitted file is empty.']
>>> f.clean({'filename': 'name', 'content': ''})
>>> f.clean(SimpleUploadedFile('name', ''))
Traceback (most recent call last):
...
ValidationError: [u'The submitted file is empty.']
>>> type(f.clean({'filename': 'name', 'content': 'Some File Content'}))
>>> type(f.clean(SimpleUploadedFile('name', 'Some File Content')))
<class 'django.newforms.fields.UploadedFile'>
>>> type(f.clean({'filename': 'name', 'content': 'Some File Content'}, 'files/test4.pdf'))
>>> type(f.clean(SimpleUploadedFile('name', 'Some File Content'), 'files/test4.pdf'))
<class 'django.newforms.fields.UploadedFile'>
# URLField ##################################################################
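The FileField changes above all lean on SimpleUploadedFile, which wraps raw content in the uploaded-file interface; a hedged illustration (attribute usage follows the conventions elsewhere in this diff):

from django.core.files.uploadedfile import SimpleUploadedFile

upload = SimpleUploadedFile('name.txt', 'Some File Content')
print upload.file_name  # 'name.txt'
print upload.read()     # 'Some File Content'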
@@ -887,7 +888,7 @@ u'http://www.google.com'
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid URL.']
>>> f.clean('http://www.jfoiwjfoi23jfoijoaijfoiwjofiwjefewl.com') # bad domain
>>> f.clean('http://www.broken.djangoproject.com') # bad domain
Traceback (most recent call last):
...
ValidationError: [u'This URL appears to be a broken link.']
@@ -937,18 +938,24 @@ ValidationError: [u'This field is required.']
>>> f.clean(True)
True
>>> f.clean(False)
False
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(1)
True
>>> f.clean(0)
False
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean('Django rocks')
True
>>> f.clean('True')
True
>>> f.clean('False')
False
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f = BooleanField(required=False)
>>> f.clean('')

View File

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
tests = r"""
>>> from django.newforms import *
>>> from django.core.files.uploadedfile import SimpleUploadedFile
>>> import datetime
>>> import time
>>> import re
@@ -1465,7 +1466,7 @@ not request.POST.
>>> print f
<tr><th>File1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="file" name="file1" /></td></tr>
>>> f = FileForm(data={}, files={'file1': {'filename': 'name', 'content':''}}, auto_id=False)
>>> f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', '')}, auto_id=False)
>>> print f
<tr><th>File1:</th><td><ul class="errorlist"><li>The submitted file is empty.</li></ul><input type="file" name="file1" /></td></tr>
@@ -1473,7 +1474,7 @@ not request.POST.
>>> print f
<tr><th>File1:</th><td><ul class="errorlist"><li>No file was submitted. Check the encoding type on the form.</li></ul><input type="file" name="file1" /></td></tr>
>>> f = FileForm(data={}, files={'file1': {'filename': 'name', 'content':'some content'}}, auto_id=False)
>>> f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', 'some content')}, auto_id=False)
>>> print f
<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>
>>> f.is_valid()

View File

@@ -28,6 +28,24 @@ class Child(models.Model):
parent = models.ForeignKey(Parent)
# Multiple paths to the same model (#7110, #7125)
class Category(models.Model):
name = models.CharField(max_length=20)
def __unicode__(self):
return self.name
class Record(models.Model):
category = models.ForeignKey(Category)
class Relation(models.Model):
left = models.ForeignKey(Record, related_name='left_set')
right = models.ForeignKey(Record, related_name='right_set')
def __unicode__(self):
return u"%s - %s" % (self.left.category.name, self.right.category.name)
__test__ = {'API_TESTS':"""
>>> Third.objects.create(id='3', name='An example')
<Third: Third object>
@@ -73,4 +91,26 @@ Traceback (most recent call last):
...
ValueError: Cannot assign "<First: First object>": "Child.parent" must be a "Parent" instance.
# Test of multiple ForeignKeys to the same model (bug #7125)
>>> c1 = Category.objects.create(name='First')
>>> c2 = Category.objects.create(name='Second')
>>> c3 = Category.objects.create(name='Third')
>>> r1 = Record.objects.create(category=c1)
>>> r2 = Record.objects.create(category=c1)
>>> r3 = Record.objects.create(category=c2)
>>> r4 = Record.objects.create(category=c2)
>>> r5 = Record.objects.create(category=c3)
>>> r = Relation.objects.create(left=r1, right=r2)
>>> r = Relation.objects.create(left=r3, right=r4)
>>> r = Relation.objects.create(left=r1, right=r3)
>>> r = Relation.objects.create(left=r5, right=r2)
>>> r = Relation.objects.create(left=r3, right=r2)
>>> Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
[<Relation: First - Second>]
>>> Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
[<Category: First>, <Category: Second>]
"""}

View File

@@ -15,4 +15,21 @@ Decimal("3.14")
Traceback (most recent call last):
...
ValidationError: [u'This value must be a decimal number.']
>>> f = DecimalField(max_digits=5, decimal_places=1)
>>> x = f.to_python(2)
>>> y = f.to_python('2.6')
>>> f.get_db_prep_save(x)
u'2.0'
>>> f.get_db_prep_save(y)
u'2.6'
>>> f.get_db_prep_save(None)
>>> f.get_db_prep_lookup('exact', x)
[u'2.0']
>>> f.get_db_prep_lookup('exact', y)
[u'2.6']
>>> f.get_db_prep_lookup('exact', None)
[None]
"""

View File

@@ -131,4 +131,26 @@ __test__ = {'API_TESTS':"""
>>> Child.objects.dates('created', 'month')
[datetime.datetime(2008, 6, 1, 0, 0)]
# Regression test for #7276: calling delete() on a model with multi-table
# inheritance should delete the associated rows from any ancestor tables, as
# well as any descendant objects.
>>> ident = ItalianRestaurant.objects.all()[0].id
>>> Place.objects.get(pk=ident)
<Place: Guido's All New House of Pasta the place>
>>> xx = Restaurant.objects.create(name='a', address='xx', serves_hot_dogs=True, serves_pizza=False)
# This should delete both Restaurants, plus the related places, plus the ItalianRestaurant.
>>> Restaurant.objects.all().delete()
>>> Place.objects.get(pk=ident)
Traceback (most recent call last):
...
DoesNotExist: Place matching query does not exist.
>>> ItalianRestaurant.objects.get(pk=ident)
Traceback (most recent call last):
...
DoesNotExist: ItalianRestaurant matching query does not exist.
"""}

View File

@@ -0,0 +1,47 @@
"""
Regression tests for the interaction between model inheritance and
select_related().
"""
from django.db import models
class Place(models.Model):
name = models.CharField(max_length=50)
class Meta:
ordering = ('name',)
def __unicode__(self):
return u"%s the place" % self.name
class Restaurant(Place):
serves_sushi = models.BooleanField()
serves_steak = models.BooleanField()
def __unicode__(self):
return u"%s the restaurant" % self.name
class Person(models.Model):
name = models.CharField(max_length=50)
favorite_restaurant = models.ForeignKey(Restaurant)
def __unicode__(self):
return self.name
__test__ = {'API_TESTS':"""
Regression test for #7246
>>> r1 = Restaurant.objects.create(name="Nobu", serves_sushi=True, serves_steak=False)
>>> r2 = Restaurant.objects.create(name="Craft", serves_sushi=False, serves_steak=True)
>>> p1 = Person.objects.create(name="John", favorite_restaurant=r1)
>>> p2 = Person.objects.create(name="Jane", favorite_restaurant=r2)
>>> Person.objects.order_by('name').select_related()
[<Person: Jane>, <Person: John>]
>>> jane = Person.objects.order_by('name').select_related('favorite_restaurant')[0]
>>> jane.favorite_restaurant.name
u'Craft'
"""}

View File

@@ -3,13 +3,15 @@ Various complex queries that have been problematic in the past.
"""
import datetime
import pickle
from django.db import models
from django.db.models.query import Q
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children')
def __unicode__(self):
return self.name
@@ -24,6 +26,14 @@ class Note(models.Model):
def __unicode__(self):
return self.note
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
notes = models.ManyToManyField(Note)
def __unicode__(self):
return self.name
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
@@ -162,85 +172,67 @@ class Child(models.Model):
person = models.OneToOneField(Member, primary_key=True)
parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk)
__test__ = {'API_TESTS':"""
>>> t1 = Tag(name='t1')
>>> t1.save()
>>> t2 = Tag(name='t2', parent=t1)
>>> t2.save()
>>> t3 = Tag(name='t3', parent=t1)
>>> t3.save()
>>> t4 = Tag(name='t4', parent=t3)
>>> t4.save()
>>> t5 = Tag(name='t5', parent=t3)
>>> t5.save()
>>> t1 = Tag.objects.create(name='t1')
>>> t2 = Tag.objects.create(name='t2', parent=t1)
>>> t3 = Tag.objects.create(name='t3', parent=t1)
>>> t4 = Tag.objects.create(name='t4', parent=t3)
>>> t5 = Tag.objects.create(name='t5', parent=t3)
>>> n1 = Note(note='n1', misc='foo')
>>> n1.save()
>>> n2 = Note(note='n2', misc='bar')
>>> n2.save()
>>> n3 = Note(note='n3', misc='foo')
>>> n3.save()
>>> n1 = Note.objects.create(note='n1', misc='foo')
>>> n2 = Note.objects.create(note='n2', misc='bar')
>>> n3 = Note.objects.create(note='n3', misc='foo')
Create these out of order so that sorting by 'id' differs from sorting by
'info'. This helps detect some problems later.
>>> e2 = ExtraInfo(info='e2', note=n2)
>>> e2.save()
>>> e1 = ExtraInfo(info='e1', note=n1)
>>> e1.save()
>>> e2 = ExtraInfo.objects.create(info='e2', note=n2)
>>> e1 = ExtraInfo.objects.create(info='e1', note=n1)
>>> a1 = Author(name='a1', num=1001, extra=e1)
>>> a1.save()
>>> a2 = Author(name='a2', num=2002, extra=e1)
>>> a2.save()
>>> a3 = Author(name='a3', num=3003, extra=e2)
>>> a3.save()
>>> a4 = Author(name='a4', num=4004, extra=e2)
>>> a4.save()
>>> a1 = Author.objects.create(name='a1', num=1001, extra=e1)
>>> a2 = Author.objects.create(name='a2', num=2002, extra=e1)
>>> a3 = Author.objects.create(name='a3', num=3003, extra=e2)
>>> a4 = Author.objects.create(name='a4', num=4004, extra=e2)
>>> time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
>>> time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
>>> time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
>>> time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
>>> i1 = Item(name='one', created=time1, modified=time1, creator=a1, note=n3)
>>> i1.save()
>>> i1 = Item.objects.create(name='one', created=time1, modified=time1, creator=a1, note=n3)
>>> i1.tags = [t1, t2]
>>> i2 = Item(name='two', created=time2, creator=a2, note=n2)
>>> i2.save()
>>> i2 = Item.objects.create(name='two', created=time2, creator=a2, note=n2)
>>> i2.tags = [t1, t3]
>>> i3 = Item(name='three', created=time3, creator=a2, note=n3)
>>> i3.save()
>>> i4 = Item(name='four', created=time4, creator=a4, note=n3)
>>> i4.save()
>>> i3 = Item.objects.create(name='three', created=time3, creator=a2, note=n3)
>>> i4 = Item.objects.create(name='four', created=time4, creator=a4, note=n3)
>>> i4.tags = [t4]
>>> r1 = Report(name='r1', creator=a1)
>>> r1.save()
>>> r2 = Report(name='r2', creator=a3)
>>> r2.save()
>>> r3 = Report(name='r3')
>>> r3.save()
>>> r1 = Report.objects.create(name='r1', creator=a1)
>>> r2 = Report.objects.create(name='r2', creator=a3)
>>> r3 = Report.objects.create(name='r3')
Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by Meta.ordering
gives rank3, rank2, rank1.
>>> rank1 = Ranking(rank=2, author=a2)
>>> rank1.save()
>>> rank2 = Ranking(rank=1, author=a3)
>>> rank2.save()
>>> rank3 = Ranking(rank=3, author=a1)
>>> rank3.save()
>>> rank1 = Ranking.objects.create(rank=2, author=a2)
>>> rank2 = Ranking.objects.create(rank=1, author=a3)
>>> rank3 = Ranking.objects.create(rank=3, author=a1)
>>> c1 = Cover(title="first", item=i4)
>>> c1.save()
>>> c2 = Cover(title="second", item=i2)
>>> c2.save()
>>> c1 = Cover.objects.create(title="first", item=i4)
>>> c2 = Cover.objects.create(title="second", item=i2)
>>> n1 = Number(num=4)
>>> n1.save()
>>> n2 = Number(num=8)
>>> n2.save()
>>> n3 = Number(num=12)
>>> n3.save()
>>> num1 = Number.objects.create(num=4)
>>> num2 = Number.objects.create(num=8)
>>> num3 = Number.objects.create(num=12)
Bug #1050
>>> Item.objects.filter(tags__isnull=True)
@@ -346,6 +338,10 @@ Bug #1878, #2939
4
>>> xx.delete()
Bug #7323
>>> Item.objects.values('creator', 'name').count()
4
Bug #2253
>>> q1 = Item.objects.order_by('name')
>>> q2 = Item.objects.filter(id=i1.id)
@@ -387,6 +383,10 @@ Bug #4510
>>> Author.objects.filter(report__name='r1')
[<Author: a1>]
Bug #7378
>>> a1.report_set.all()
[<Report: r1>]
Bug #5324, #6704
>>> Item.objects.filter(tags__name='t4')
[<Item: four>]
@@ -791,5 +791,19 @@ Empty querysets can be merged with others.
>>> Note.objects.all() & Note.objects.none()
[]
Bug #7204, #7506 -- make sure querysets with related fields can be pickled. If
this doesn't crash, it's a Good Thing.
>>> out = pickle.dumps(Item.objects.all())
Bug #7277
>>> ann1 = Annotation.objects.create(name='a1', tag=t1)
>>> ann1.notes.add(n1)
>>> n1.annotation_set.filter(Q(tag=t5) | Q(tag__children=t5) | Q(tag__children__children=t5))
[<Annotation: a1>]
Bug #7371
>>> Related.objects.order_by('custom')
[]
"""}

View File

@@ -0,0 +1,60 @@
from django.db import models
class Building(models.Model):
name = models.CharField(max_length=10)
def __unicode__(self):
return u"Building: %s" % self.name
class Device(models.Model):
building = models.ForeignKey('Building')
name = models.CharField(max_length=10)
def __unicode__(self):
return u"device '%s' in building %s" % (self.name, self.building)
class Port(models.Model):
device = models.ForeignKey('Device')
port_number = models.CharField(max_length=10)
def __unicode__(self):
return u"%s/%s" % (self.device.name, self.port_number)
class Connection(models.Model):
start = models.ForeignKey(Port, related_name='connection_start',
unique=True)
end = models.ForeignKey(Port, related_name='connection_end', unique=True)
def __unicode__(self):
return u"%s to %s" % (self.start, self.end)
__test__ = {'API_TESTS': """
Regression test for bug #7110. When using select_related(), we must query the
Device and Building tables using two different aliases each, in order to
distinguish the start and end Connection fields. The net result is that both
of the "connections = ..." queries here should give the same results.
>>> b=Building.objects.create(name='101')
>>> dev1=Device.objects.create(name="router", building=b)
>>> dev2=Device.objects.create(name="switch", building=b)
>>> dev3=Device.objects.create(name="server", building=b)
>>> port1=Port.objects.create(port_number='4',device=dev1)
>>> port2=Port.objects.create(port_number='7',device=dev2)
>>> port3=Port.objects.create(port_number='1',device=dev3)
>>> c1=Connection.objects.create(start=port1, end=port2)
>>> c2=Connection.objects.create(start=port2, end=port3)
>>> connections=Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
>>> [(c.id, unicode(c.start), unicode(c.end)) for c in connections]
[(1, u'router/4', u'switch/7'), (2, u'switch/7', u'server/1')]
>>> connections=Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
>>> [(c.id, unicode(c.start), unicode(c.end)) for c in connections]
[(1, u'router/4', u'switch/7'), (2, u'switch/7', u'server/1')]
# This final query should only join seven tables (port, device and building
# twice each, plus connection once).
>>> connections.query.count_active_tables()
7
"""}

View File

@@ -97,6 +97,12 @@ __test__ = {'API_TESTS': ur"""
>>> Article.objects.get(text__exact='The quick brown fox jumps over the lazy dog.')
<Article: Article Test>
# Regression tests for #2170: test case sensitivity
>>> Article.objects.filter(text__exact='tHe qUick bRown fOx jUmps over tHe lazy dog.')
[]
>>> Article.objects.filter(text__iexact='tHe qUick bRown fOx jUmps over tHe lazy dog.')
[<Article: Article Test>]
>>> Article.objects.get(text__contains='quick brown fox')
<Article: Article Test>

Some files were not shown because too many files have changed in this diff.