mirror of https://github.com/sqlmapproject/sqlmap.git
synced 2025-02-02 20:54:13 +03:00

Fix for an Issue #570

This commit is contained in:
parent 02de2aee6d
commit 7718edac9b

lib/core/settings.py

@@ -99,6 +99,9 @@ BACKDOOR_RUN_CMD_TIMEOUT = 5
 # Maximum number of techniques used in inject.py/getValue() per one value
 MAX_TECHNIQUES_PER_VALUE = 2
 
+# In case of missing piece of partial union dump, buffered array must be flushed after certain size
+MAX_BUFFERED_PARTIAL_UNION_LENGTH = 1024
+
 # Suffix used for naming meta databases in DBMS(es) without explicit database name
 METADB_SUFFIX = "_masterdb"
 
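Note on the new setting (explanatory sketch, not part of the commit): MAX_BUFFERED_PARTIAL_UNION_LENGTH caps how many out-of-order partial UNION results may sit in the shared buffer before it is force-flushed. A minimal standalone sketch of the flush rule it enables, using local names rather than sqlmap's threadData structures:

    # Sketch only: `buffered` is assumed to be a list of (row_index, row_value)
    # pairs kept sorted by row_index; `last_flushed` is the highest index already
    # written out. Rows are flushed in order when possible, or unconditionally
    # once the buffer outgrows the cap.
    MAX_BUFFERED_PARTIAL_UNION_LENGTH = 1024

    def flush_buffered(buffered, last_flushed, out):
        while buffered and (last_flushed + 1 >= buffered[0][0]
                            or len(buffered) > MAX_BUFFERED_PARTIAL_UNION_LENGTH):
            last_flushed, row = buffered[0]
            if row is not None:
                out.append(row)
            del buffered[0]
        return last_flushed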

lib/techniques/union/use.py

@@ -44,6 +44,7 @@ from lib.core.dicts import FROM_DUMMY_TABLE
 from lib.core.enums import DBMS
 from lib.core.enums import PAYLOAD
 from lib.core.exception import SqlmapSyntaxException
+from lib.core.settings import MAX_BUFFERED_PARTIAL_UNION_LENGTH
 from lib.core.settings import SQL_SCALAR_REGEX
 from lib.core.settings import TURN_OFF_RESUME_INFO_LIMIT
 from lib.core.threads import getCurrentThreadData
@@ -272,10 +273,10 @@ def unionUse(expression, unpack=True, dump=False):
                         break
 
                     if output:
-                        if all(map(lambda _: _ in output, (kb.chars.start, kb.chars.stop))):
-                            items = parseUnionPage(output)
-
-                            with kb.locks.value:
+                        with kb.locks.value:
+                            if all(map(lambda _: _ in output, (kb.chars.start, kb.chars.stop))):
+                                items = parseUnionPage(output)
+
                                 if threadData.shared.showEta:
                                     threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter)
                                 # in case that we requested N columns and we get M!=N then we have to filter a bit
@@ -286,14 +287,7 @@ def unionUse(expression, unpack=True, dump=False):
                                     if threadData.shared.buffered[index][0] >= num:
                                         break
                                 threadData.shared.buffered.insert(index or 0, (num, items))
-                                while threadData.shared.buffered and threadData.shared.lastFlushed + 1 == threadData.shared.buffered[0][0]:
-                                    threadData.shared.lastFlushed += 1
-                                    _ = threadData.shared.buffered[0][1]
-                                    if not isNoneValue(_):
-                                        threadData.shared.value.extend(arrayizeValue(_))
-                                    del threadData.shared.buffered[0]
-                        else:
-                            with kb.locks.value:
+                            else:
                                 index = None
                                 if threadData.shared.showEta:
                                     threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter)
@@ -301,7 +295,14 @@ def unionUse(expression, unpack=True, dump=False):
                                     if threadData.shared.buffered[index][0] >= num:
                                         break
                                 threadData.shared.buffered.insert(index or 0, (num, None))
                                 items = output.replace(kb.chars.start, "").replace(kb.chars.stop, "").split(kb.chars.delimiter)
 
+                            while threadData.shared.buffered and (threadData.shared.lastFlushed + 1 >= threadData.shared.buffered[0][0] or len(threadData.shared.buffered) > MAX_BUFFERED_PARTIAL_UNION_LENGTH):
+                                threadData.shared.lastFlushed, _ = threadData.shared.buffered[0]
+                                if not isNoneValue(_):
+                                    threadData.shared.value.extend(arrayizeValue(_))
+                                del threadData.shared.buffered[0]
+
                     if conf.verbose == 1 and not (threadData.resumed and kb.suppressResumeInfo) and not threadData.shared.showEta:
                         status = "[%s] [INFO] %s: %s" % (time.strftime("%X"), "resumed" if threadData.resumed else "retrieved", safecharencode(",".join("\"%s\"" % _ for _ in flattenValue(arrayizeValue(items)))))
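What the reworked flush achieves (explanatory example, not part of the commit): the old loop only advanced while lastFlushed + 1 == buffered[0][0], so a single missing row index stalled flushing and the buffer grew without bound. The new condition also drains the buffer once it exceeds MAX_BUFFERED_PARTIAL_UNION_LENGTH, trading strict ordering for bounded memory. A small self-contained illustration with local names (cap lowered to keep the demo short):

    # Simulate a partial UNION dump where row index 2 never arrives.
    MAX_BUFFERED_PARTIAL_UNION_LENGTH = 3                  # 1024 in sqlmap; lowered for the demo
    buffered = [(3, "c"), (4, "d"), (5, "e"), (6, "f")]    # sorted by row index
    last_flushed, value = 1, ["a", "b"]                    # rows 0 and 1 already flushed

    # Old condition (last_flushed + 1 == buffered[0][0]) is False here (2 != 3), so
    # nothing would ever flush; the size cap in the new condition breaks the stall.
    while buffered and (last_flushed + 1 >= buffered[0][0]
                        or len(buffered) > MAX_BUFFERED_PARTIAL_UNION_LENGTH):
        last_flushed, row = buffered[0]
        if row is not None:
            value.append(row)
        del buffered[0]

    print(value)  # ['a', 'b', 'c', 'd', 'e', 'f']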