2013-02-14 15:32:17 +04:00
|
|
|
#!/usr/bin/env python
|
2008-10-15 19:38:22 +04:00
|
|
|
|
|
|
|
"""
|
2014-01-13 21:24:49 +04:00
|
|
|
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
|
2010-10-15 03:18:29 +04:00
|
|
|
See the file 'doc/COPYING' for copying permission
|
2008-10-15 19:38:22 +04:00
|
|
|
"""
|
|
|
|
|
2010-06-30 16:09:33 +04:00
|
|
|
import codecs
|
2013-07-13 21:25:49 +04:00
|
|
|
import gzip
|
2011-04-20 02:54:13 +04:00
|
|
|
import logging
|
2008-10-15 19:38:22 +04:00
|
|
|
import re
|
2010-01-02 05:02:12 +03:00
|
|
|
import StringIO
|
2012-09-12 13:50:38 +04:00
|
|
|
import struct
|
2013-07-13 21:25:49 +04:00
|
|
|
import zlib
|
2008-10-15 19:38:22 +04:00
|
|
|
|
2010-12-25 13:16:20 +03:00
|
|
|
from lib.core.common import extractErrorMessage
|
2011-01-04 18:49:20 +03:00
|
|
|
from lib.core.common import extractRegexResult
|
2014-01-02 15:09:58 +04:00
|
|
|
from lib.core.common import getPublicTypeMembers
|
2010-09-09 18:03:45 +04:00
|
|
|
from lib.core.common import getUnicode
|
2012-01-11 18:28:08 +04:00
|
|
|
from lib.core.common import readInput
|
2012-03-08 14:19:34 +04:00
|
|
|
from lib.core.common import resetCookieJar
|
2011-06-08 18:42:48 +04:00
|
|
|
from lib.core.common import singleTimeLogMessage
|
2012-09-11 16:58:52 +04:00
|
|
|
from lib.core.common import singleTimeWarnMessage
|
2008-10-15 19:38:22 +04:00
|
|
|
from lib.core.data import conf
|
|
|
|
from lib.core.data import kb
|
2010-06-30 16:09:33 +04:00
|
|
|
from lib.core.data import logger
|
2013-03-20 14:10:24 +04:00
|
|
|
from lib.core.enums import HTTP_HEADER
|
2012-01-11 18:28:08 +04:00
|
|
|
from lib.core.enums import PLACE
|
2012-12-06 17:14:19 +04:00
|
|
|
from lib.core.exception import SqlmapCompressionException
|
2012-01-11 18:28:08 +04:00
|
|
|
from lib.core.settings import DEFAULT_COOKIE_DELIMITER
|
2012-10-19 13:02:14 +04:00
|
|
|
from lib.core.settings import EVENTVALIDATION_REGEX
|
2012-09-12 13:50:38 +04:00
|
|
|
from lib.core.settings import MAX_CONNECTION_TOTAL_SIZE
|
2011-04-01 20:40:28 +04:00
|
|
|
from lib.core.settings import ML
|
2011-01-04 18:49:20 +03:00
|
|
|
from lib.core.settings import META_CHARSET_REGEX
|
2011-11-22 16:18:24 +04:00
|
|
|
from lib.core.settings import PARSE_HEADERS_LIMIT
|
2012-10-19 13:02:14 +04:00
|
|
|
from lib.core.settings import VIEWSTATE_REGEX
|
2008-11-17 03:00:54 +03:00
|
|
|
from lib.parse.headers import headersParser
|
2008-10-15 19:38:22 +04:00
|
|
|
from lib.parse.html import htmlParser
|
2013-06-19 12:59:26 +04:00
|
|
|
from lib.utils.htmlentities import htmlEntities
|
2012-07-14 19:01:04 +04:00
|
|
|
from thirdparty.chardet import detect
|
2008-10-15 19:38:22 +04:00
|
|
|
|
2012-01-11 18:28:08 +04:00
|
|
|
def forgeHeaders(items=None):
    """
    Prepare HTTP Cookie, HTTP User-Agent and HTTP Referer headers to use when performing
    the HTTP requests

    @param items: optional mapping of header names to values overriding the
    configured defaults (entries whose value is None are discarded)
    @return: dict of forged headers ready to be sent with the request
    """

    items = items or {}

    # Drop explicit None values so they don't clobber configured headers
    for _ in items.keys():
        if items[_] is None:
            del items[_]

    headers = dict(conf.httpHeaders)
    headers.update(items or {})

    class _str(str):
        # dirty hack for http://bugs.python.org/issue12455 - neutering
        # capitalize()/title() prevents urllib2 from re-casing header names
        def capitalize(self):
            return _str(self)

        def title(self):
            return _str(self)

    _ = headers
    headers = {}

    for key, value in _.items():
        success = False

        # Non-standard header names keep their exact casing (via _str);
        # everything else falls through to canonical Header-Name casing below
        if key.upper() not in (_.upper() for _ in getPublicTypeMembers(HTTP_HEADER, True)):
            try:
                headers[_str(key)] = value  # dirty hack for http://bugs.python.org/issue12455
            except UnicodeEncodeError:  # don't do the hack on non-ASCII header names (they have to be properly encoded later on)
                pass
            else:
                success = True

        if not success:
            key = '-'.join(_.capitalize() for _ in key.split('-'))
            headers[key] = value

    if conf.cj:
        if HTTP_HEADER.COOKIE in headers:
            for cookie in conf.cj:
                # Skip cookies bound to a different domain than the target
                if cookie.domain_specified and not conf.hostname.endswith(cookie.domain):
                    continue

                if ("%s=" % cookie.name) in headers[HTTP_HEADER.COOKIE]:
                    if conf.loadCookies:
                        # Cookies loaded from file take precedence over the
                        # user-provided HTTP Cookie header
                        conf.httpHeaders = filter(None, ((item if item[0] != HTTP_HEADER.COOKIE else None) for item in conf.httpHeaders))
                    elif kb.mergeCookies is None:
                        message = "you provided a HTTP %s header value. " % HTTP_HEADER.COOKIE
                        message += "The target URL provided its own cookies within "
                        message += "the HTTP %s header which intersect with yours. " % HTTP_HEADER.SET_COOKIE
                        message += "Do you want to merge them in further requests? [Y/n] "  # fixed typo ("futher")
                        _ = readInput(message, default="Y")
                        kb.mergeCookies = not _ or _[0] in ("y", "Y")

                    if kb.mergeCookies:
                        # Replace the user-supplied cookie value with the one coming
                        # from the target (in the forged header, the parsed COOKIE
                        # parameters and the stored default headers)
                        # NOTE(review): cookie.name is interpolated into the regex
                        # unescaped - verify names containing regex metacharacters
                        _ = lambda x: re.sub("(?i)%s=[^%s]+" % (cookie.name, conf.cDel or DEFAULT_COOKIE_DELIMITER), "%s=%s" % (cookie.name, getUnicode(cookie.value)), x)
                        headers[HTTP_HEADER.COOKIE] = _(headers[HTTP_HEADER.COOKIE])

                        if PLACE.COOKIE in conf.parameters:
                            conf.parameters[PLACE.COOKIE] = _(conf.parameters[PLACE.COOKIE])

                        conf.httpHeaders = [(item[0], item[1] if item[0] != HTTP_HEADER.COOKIE else _(item[1])) for item in conf.httpHeaders]

                elif not kb.testMode:
                    # Cookie not present in the header yet - append it
                    headers[HTTP_HEADER.COOKIE] += "%s %s=%s" % (conf.cDel or DEFAULT_COOKIE_DELIMITER, cookie.name, getUnicode(cookie.value))

        if kb.testMode:
            resetCookieJar(conf.cj)

    return headers
|
|
|
|
|
2008-11-17 03:00:54 +03:00
|
|
|
def parseResponse(page, headers):
    """
    Feed a HTTP response to the knowledge base parsers.

    @param page: the page to parse to feed the knowledge base htmlFp
    (back-end DBMS fingerprint based upon DBMS error messages return
    through the web application) list and absFilePaths (absolute file
    paths) set.
    """

    # Run each parser only when there is content for it to consume
    # (headers first, then the page body - same order as before)
    for content, parser in ((headers, headersParser), (page, htmlParser)):
        if content:
            parser(content)
|
2008-10-15 19:38:22 +04:00
|
|
|
|
2012-09-25 12:17:25 +04:00
|
|
|
def checkCharEncoding(encoding, warn=True):
    """
    Checks encoding name, repairs common misspellings and adjusts to
    proper namings used in codecs module

    >>> checkCharEncoding('iso-8858', False)
    'iso8859-1'
    >>> checkCharEncoding('en_us', False)
    'utf8'
    """

    # Empty/None values are returned untouched
    if not encoding:
        return encoding

    encoding = encoding.lower()

    # Reference: http://www.destructor.de/charsets/index.htm
    translate = {"windows-874": "iso-8859-11", "en_us": "utf8", "macintosh": "iso-8859-1", "euc_tw": "big5_tw", "th": "tis-620", "unicode": "utf8", "utc8": "utf8", "ebcdic": "ebcdic-cp-be", "iso-8859": "iso8859-1", "ansi": "ascii"}

    # Cut off any trailing content-type residue (e.g. "utf-8;q=0.7")
    for delimiter in (';', ',', '('):
        if delimiter in encoding:
            encoding = encoding[:encoding.find(delimiter)].strip()

    # popular typos/errors - only the first matching repair is applied
    for typo, repair in (("8858", "8859"), ("8559", "8859"), ("5889", "8859"), ("5589", "8859"), ("2313", "2312")):
        if typo in encoding:
            encoding = encoding.replace(typo, repair)
            break
    else:
        if encoding.startswith("x-"):
            encoding = encoding[len("x-"):]  # x-euc-kr -> euc-kr / x-mac-turkish -> mac-turkish
        elif "windows-cp" in encoding:
            encoding = encoding.replace("windows-cp", "windows")  # windows-cp-1254 -> windows-1254

    # name adjustment for compatibility with the codecs module
    if encoding.startswith("8859"):
        encoding = "iso-" + encoding
    elif encoding.startswith("cp-"):
        encoding = "cp" + encoding[3:]
    elif encoding.startswith("euc-"):
        encoding = "euc_" + encoding[4:]
    elif encoding.startswith("windows") and not encoding.startswith("windows-"):
        encoding = "windows-" + encoding[7:]
    elif encoding.find("iso-88") > 0:
        encoding = encoding[encoding.find("iso-88"):]
    elif encoding.startswith("is0-"):
        encoding = "iso" + encoding[4:]
    elif encoding.find("ascii") > 0:
        encoding = "ascii"
    elif encoding.find("utf8") > 0:
        encoding = "utf8"

    # Reference: http://philip.html5.org/data/charsets-2.html
    encoding = translate.get(encoding, encoding)

    # Bogus placeholder values carry no usable charset information
    if encoding in ("null", "{charset}", "*"):
        return None

    # Reference: http://www.iana.org/assignments/character-sets
    # Reference: http://docs.python.org/library/codecs.html
    try:
        codecs.lookup(encoding)
    except LookupError:
        if warn:
            warnMsg = "unknown web page charset '%s'. " % encoding
            warnMsg += "Please report by e-mail to %s." % ML
            singleTimeLogMessage(warnMsg, logging.WARN, encoding)
        encoding = None

    return encoding
|
|
|
|
|
2011-04-18 17:38:46 +04:00
|
|
|
def getHeuristicCharEncoding(page):
    """
    Returns page encoding charset detected by usage of heuristics
    Reference: http://chardet.feedparser.org/docs/
    """

    # chardet returns its best guess (possibly None) for the raw content
    guess = detect(page)["encoding"]

    if guess:
        singleTimeLogMessage("heuristics detected web page charset '%s'" % guess, logging.INFO, guess)

    return guess
|
|
|
|
|
2010-06-09 18:40:36 +04:00
|
|
|
def decodePage(page, contentEncoding, contentType):
    """
    Decode compressed/charset HTTP response

    @param page: raw response body
    @param contentEncoding: value of the HTTP Content-Encoding header (if any)
    @param contentType: value of the HTTP Content-Type header (if any)
    @return: unicode page content, or None when compressed content arrives
    after page compression support has been turned off
    """

    # Empty pages (or near-empty ones when using --null-connection) as-is
    if not page or (conf.nullConnection and len(page) < 2):
        return getUnicode(page)

    if isinstance(contentEncoding, basestring) and contentEncoding.lower() in ("gzip", "x-gzip", "deflate"):
        if not kb.pageCompress:
            return None

        try:
            if contentEncoding.lower() == "deflate":
                # Reference: http://stackoverflow.com/questions/1089662/python-inflate-and-deflate-implementations
                data = StringIO.StringIO(zlib.decompress(page, -15))
            else:
                data = gzip.GzipFile("", "rb", 9, StringIO.StringIO(page))
                # gzip trailer stores the uncompressed size (modulo 2**32)
                # Reference: http://pydoc.org/get.cgi/usr/local/lib/python2.5/gzip.py
                size = struct.unpack("<l", page[-4:])[0]

                # protection against decompression bombs
                if size > MAX_CONNECTION_TOTAL_SIZE:
                    raise Exception("size too large")

            page = data.read()
        except Exception, msg:
            errMsg = "detected invalid data for declared content "
            errMsg += "encoding '%s' ('%s')" % (contentEncoding, msg)
            singleTimeLogMessage(errMsg, logging.ERROR)

            warnMsg = "turning off page compression"
            singleTimeWarnMessage(warnMsg)

            # stop advertising compression support in further requests
            kb.pageCompress = False
            raise SqlmapCompressionException

    if not conf.charset:
        httpCharset, metaCharset = None, None

        # Reference: http://stackoverflow.com/questions/1020892/python-urllib2-read-to-unicode
        if contentType and (contentType.find("charset=") != -1):
            httpCharset = checkCharEncoding(contentType.split("charset=")[-1])

        metaCharset = checkCharEncoding(extractRegexResult(META_CHARSET_REGEX, page))

        # trust the declared charset only when the HTTP header and the HTML
        # meta declaration do not contradict each other
        if (any((httpCharset, metaCharset)) and not all((httpCharset, metaCharset)))\
            or (httpCharset == metaCharset and all((httpCharset, metaCharset))):
            kb.pageEncoding = httpCharset or metaCharset
            debugMsg = "declared web page charset '%s'" % kb.pageEncoding
            singleTimeLogMessage(debugMsg, logging.DEBUG, debugMsg)
        else:
            kb.pageEncoding = None
    else:
        kb.pageEncoding = conf.charset

    # can't do for all responses because we need to support binary files too
    if contentType and not isinstance(page, unicode) and "text/" in contentType.lower():
        # decimal entities in the single-byte range, e.g. &#195;&#235;&#224;
        if "&#" in page:
            page = re.sub(r"&#(\d{1,3});", lambda _: chr(int(_.group(1))) if int(_.group(1)) < 256 else _.group(0), page)

        # URL-encoded characters, e.g. %20%28%29
        if "%" in page:
            page = re.sub(r"%([0-9a-fA-F]{2})", lambda _: _.group(1).decode("hex"), page)

        # named entities mapping to single-byte characters, e.g. &amp;
        page = re.sub(r"&([^;]+);", lambda _: chr(htmlEntities[_.group(1)]) if htmlEntities.get(_.group(1), 256) < 256 else _.group(0), page)

        # fall back to heuristic (chardet) detection when nothing was declared
        kb.pageEncoding = kb.pageEncoding or checkCharEncoding(getHeuristicCharEncoding(page))
        page = getUnicode(page, kb.pageEncoding)

        # decimal entities above the single-byte range, e.g. &#8217;&#8230;&#8482;
        if "&#" in page:
            def _(match):
                retVal = match.group(0)
                try:
                    retVal = unichr(int(match.group(1)))
                except ValueError:
                    # codepoints outside the valid range are left untouched
                    pass
                return retVal
            page = re.sub(r"&#(\d+);", _, page)

        # named entities mapping above the single-byte range, e.g. &zeta;
        page = re.sub(r"&([^;]+);", lambda _: unichr(htmlEntities[_.group(1)]) if htmlEntities.get(_.group(1), 0) > 255 else _.group(0), page)

    return page
|
2010-12-25 13:16:20 +03:00
|
|
|
|
|
|
|
def processResponse(page, responseHeaders):
    """
    Process a HTTP response: feed it to the knowledge base parsers,
    optionally report parsed DBMS error messages, and keep hidden POST
    parameters (matched by EVENTVALIDATION_REGEX/VIEWSTATE_REGEX) in sync
    with the latest values returned by the target
    """

    kb.processResponseCounter += 1

    # header parsing is skipped once PARSE_HEADERS_LIMIT responses were seen
    parseResponse(page, responseHeaders if kb.processResponseCounter < PARSE_HEADERS_LIMIT else None)

    if conf.parseErrors:
        msg = extractErrorMessage(page)

        if msg:
            logger.warning("parsed DBMS error message: '%s'" % msg)

    # only refresh hidden POST fields before the original page is recorded
    if kb.originalPage is None:
        for regex in (EVENTVALIDATION_REGEX, VIEWSTATE_REGEX):
            match = re.search(regex, page)

            if match and PLACE.POST in conf.parameters:
                name, value = match.groups()

                if PLACE.POST in conf.paramDict and name in conf.paramDict[PLACE.POST]:
                    # current stored value still present in the page - no update needed
                    if conf.paramDict[PLACE.POST][name] in page:
                        continue

                    conf.paramDict[PLACE.POST][name] = value
                    # NOTE(review): 'name' and 'value' are interpolated into the
                    # pattern/replacement unescaped - verify behavior for values
                    # containing regex metacharacters (e.g. '$' or '\')
                    conf.parameters[PLACE.POST] = re.sub("(?i)(%s=)[^&]+" % name, r"\g<1>%s" % value, conf.parameters[PLACE.POST])
|