minor update

This commit is contained in:
Miroslav Stampar 2011-12-22 20:42:57 +00:00
parent 087e29d272
commit abb401879c
2 changed files with 15 additions and 3 deletions

View File

@@ -428,3 +428,6 @@ HASHDB_FLUSH_THRESHOLD = 32
 # Warn user of possible delay due to large page dump in full UNION query injections
 LARGE_OUTPUT_THRESHOLD = 1024**2
+# On huge tables there is a considerable slowdown if every row retrieval requires ORDER BY (most noticable in table dumping using ERROR injections)
+SLOW_ORDER_COUNT_THRESHOLD = 10000

View File

@@ -24,6 +24,7 @@ from lib.core.common import initTechnique
 from lib.core.common import isNumPosStrValue
 from lib.core.common import listToStrValue
 from lib.core.common import randomInt
+from lib.core.common import readInput
 from lib.core.common import safeStringFormat
 from lib.core.convert import htmlunescape
 from lib.core.convert import safecharencode
@@ -38,6 +39,7 @@ from lib.core.exception import sqlmapConnectionException
 from lib.core.settings import FROM_TABLE
 from lib.core.settings import MYSQL_ERROR_CHUNK_LENGTH
 from lib.core.settings import MSSQL_ERROR_CHUNK_LENGTH
+from lib.core.settings import SLOW_ORDER_COUNT_THRESHOLD
 from lib.core.settings import SQL_SCALAR_REGEX
 from lib.core.settings import TURN_OFF_RESUME_INFO_LIMIT
 from lib.core.threads import getCurrentThreadData
@@ -292,9 +294,8 @@ def errorUse(expression, expected=None, resumeValue=True, dump=False):
     # Count the number of SQL query entries output
     countedExpression = expression.replace(expressionFields, queries[Backend.getIdentifiedDbms()].count.query % '*', 1)

-    if re.search(" ORDER BY ", expression, re.I):
-        untilOrderChar = countedExpression.index(" ORDER BY ")
-        countedExpression = countedExpression[:untilOrderChar]
+    if " ORDER BY " in expression:
+        countedExpression = countedExpression[:countedExpression.index(" ORDER BY ")]

     count = resume(countedExpression, None)
@@ -328,6 +329,14 @@ def errorUse(expression, expected=None, resumeValue=True, dump=False):
         return outputs

+    if " ORDER BY " in expression and (stopLimit - startLimit) > SLOW_ORDER_COUNT_THRESHOLD:
+        message = "due to huge table size do you want to remove "
+        message += "ORDER BY clause gaining speed over consistency? [y/N] "
+        output = readInput(message, default="N")
+
+        if output and output[0] in ("y", "Y"):
+            expression = expression[:expression.index(" ORDER BY ")]
+
     threadData = getCurrentThreadData()
     threadData.shared.limits = iter(xrange(startLimit, stopLimit))
     numThreads = min(conf.threads, (stopLimit - startLimit))