hello big tables, this is sqlmap, sqlmap this is big tables

Miroslav Stampar 2011-07-24 09:19:33 +00:00
parent 82e1e61554
commit ec1bc0219c
8 changed files with 108 additions and 26 deletions

@@ -23,7 +23,9 @@ import socket
import string
import struct
import sys
import tempfile
import time
import types
import urlparse
import unicodedata
@@ -205,16 +207,48 @@ class BigArray(list):
self.chunks = [[]]
self.cache = None
self.length = 0
self.filenames = set()
def append(self, value):
self.chunks[-1].append(value)
if len(self.chunks[-1]) >= BIGARRAY_CHUNK_LENGTH:
fp = tempfile.TemporaryFile()
pickle.dump(self.chunks[-1], fp)
filename = self._dump(self.chunks[-1])
del(self.chunks[-1][:])
self.chunks[-1] = fp
self.chunks[-1] = filename
self.chunks.append([])
def pop(self):
if len(self.chunks[-1]) < 1:
self.chunks.pop()
fp = open(self.chunks[-1], 'rb')
self.chunks[-1] = pickle.load(fp)
fp.close()
return self.chunks[-1].pop()
def index(self, value):
for index in xrange(len(self)):
if self[index] == value:
return index
raise ValueError("%s is not in list" % value)
def _dump(self, value):
handle, filename = tempfile.mkstemp()
self.filenames.add(filename)
os.close(handle)
fp = open(filename, 'w+b')
pickle.dump(value, fp)
fp.close()
return filename
def _checkcache(self, index):
if (self.cache and self.cache[0] != index and self.cache[2]):
filename = self._dump(self.cache[1])
self.chunks[self.cache[0]] = filename
if not (self.cache and self.cache[0] == index):
fp = open(self.chunks[index], 'rb')
self.cache = [index, pickle.load(fp), False]
fp.close()
def __getitem__(self, y):
index = y / BIGARRAY_CHUNK_LENGTH
offset = y % BIGARRAY_CHUNK_LENGTH
@@ -222,14 +256,37 @@ class BigArray(list):
if isinstance(chunk, list):
return chunk[offset]
else:
if not (self.cache and self.cache[0] == index):
chunk.seek(0)
self.cache = (index, pickle.load(chunk))
self._checkcache(index)
return self.cache[1][offset]
def __setitem__(self, y, value):
index = y / BIGARRAY_CHUNK_LENGTH
offset = y % BIGARRAY_CHUNK_LENGTH
chunk = self.chunks[index]
if isinstance(chunk, list):
chunk[offset] = value
else:
self._checkcache(index)
self.cache[1][offset] = value
self.cache[2] = True # dirty flag
def __repr__(self):
return "%s%s" % ("..." if len(self.chunks) > 1 else "", self.chunks[-1].__repr__())
def __iter__(self):
for i in xrange(len(self)):
yield self[i]
def __len__(self):
return len(self.chunks[-1]) if len(self.chunks) == 1 else (len(self.chunks) - 1) * BIGARRAY_CHUNK_LENGTH + len(self.chunks[-1])
def __del__(self):
for filename in self.filenames:
try:
os.remove(filename)
except OSError:
pass
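
Taken together, BigArray keeps at most two chunks unpickled at any time: the tail chunk that append() fills, and one cached chunk that __getitem__()/__setitem__() page in on demand. Writes to the cached chunk set the dirty flag, so _checkcache() re-dumps it to disk before a different chunk is loaded, and __del__() removes the temporary files afterwards. A minimal usage sketch (assuming the class is importable as lib.core.common.BigArray, as the other files in this commit do, and BIGARRAY_CHUNK_LENGTH = 5000; Python 2, like the surrounding code):

from lib.core.common import BigArray

rows = BigArray()
for i in xrange(20000):       # fills four chunks; three are pickled to temp files
    rows.append("row-%d" % i)

print len(rows)               # 20000: (len(chunks) - 1) * 5000 + len(tail chunk)
print rows[12345]             # "row-12345": chunk 2 is unpickled into self.cache
rows[12345] = "edited"        # mutates the cached chunk and sets the dirty flag
print rows[3]                 # chunk 0 requested: dirty chunk 2 is re-dumped first
print rows.pop()              # "row-19999": tail chunk is reloaded from disk, then popped
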
class DynamicContentItem:
"""
Represents line in content page with dynamic properties (candidate
@@ -561,6 +618,15 @@ class Backend:
def isOs(os):
return Backend.getOs() is not None and Backend.getOs().lower() == os.lower()
# Reference: http://code.activestate.com/recipes/325205-cache-decorator-in-python-24/
def cachedmethod(f, cache={}):
def g(*args, **kwargs):
key = ( f, tuple(args), frozenset(kwargs.items()) )
if key not in cache:
cache[key] = f(*args, **kwargs)
return cache[key]
return g
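
The recipe memoizes on the key (function, positional args, keyword items), with the mutable default argument acting as one cache shared by every decorated function. A standalone sketch of the behavior (square() is a hypothetical example, not a sqlmap function):

def cachedmethod(f, cache={}):
    def g(*args, **kwargs):
        key = (f, tuple(args), frozenset(kwargs.items()))
        if key not in cache:
            cache[key] = f(*args, **kwargs)
        return cache[key]
    return g

@cachedmethod
def square(x):
    print "computing %d" % x
    return x * x

square(4)    # prints "computing 4" and returns 16
square(4)    # cache hit: returns 16 without calling square() again

All arguments have to be hashable for the key to be built, and entries are never evicted; both are fine for a small, finite domain such as the DBMS aliases fed to aliasToDbmsEnum() below.
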
def paramToDict(place, parameters=None):
"""
Split the parameters into names and values, check if these parameters
@@ -1266,7 +1332,7 @@ def parseUnionPage(output, expression, partial=False, condition=None, sort=True)
if output is None:
return None
data = []
data = BigArray()
outCond1 = ( output.startswith(kb.misc.start) and output.endswith(kb.misc.stop) )
outCond2 = ( output.startswith(DUMP_START_MARKER) and output.endswith(DUMP_STOP_MARKER) )
@@ -2204,6 +2270,7 @@ def isNumPosStrValue(value):
return value and isinstance(value, basestring) and value.isdigit() and value != "0"
@cachedmethod
def aliasToDbmsEnum(dbms):
"""
Returns major DBMS name from a given alias
@@ -2730,8 +2797,8 @@ def isNoneValue(value):
if len(value) == 1:
return isNoneValue(value[0])
else:
for i in xrange(len(value)):
if value[i] and value[i] != "None":
for item in value:
if item and item != "None":
return False
return True
elif isinstance(value, dict):

@@ -24,6 +24,7 @@ from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.replication import Replication
from lib.core.settings import TRIM_STDOUT_DUMP_SIZE
from lib.core.settings import UNICODE_ENCODING
class Dump:
@@ -37,8 +38,9 @@ class Dump:
self.__outputFile = None
self.__outputFP = None
def __write(self, data, n=True):
def __write(self, data, n=True, console=True):
text = "%s%s" % (data, "\n" if n else " ")
if console:
dataToStdout(text)
self.__outputFP.write(text)
@@ -407,7 +409,13 @@ class Dump:
if conf.replicate:
rtable.beginTransaction()
if count > TRIM_STDOUT_DUMP_SIZE:
warnMsg = "console output will be trimmed "
warnMsg += "due to the large table size"
logger.warning(warnMsg)
for i in range(count):
console = (i >= count - TRIM_STDOUT_DUMP_SIZE)
field = 1
values = []
@@ -429,7 +437,7 @@ class Dump:
values.append(value)
maxlength = int(info["length"])
blank = " " * (maxlength - len(value))
self.__write("| %s%s" % (value, blank), n=False)
self.__write("| %s%s" % (value, blank), n=False, console=console)
if not conf.replicate:
if not conf.multipleTargets and field == fields:
@@ -442,7 +450,7 @@ class Dump:
if conf.replicate:
rtable.insert(values)
self.__write("|")
self.__write("|", console=console)
if not conf.multipleTargets and not conf.replicate:
dataToDumpFile(dumpFP, "\n")
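
Net effect in the dump code: every row still reaches the dump file (and the replication database when --replicate is used), but once a table exceeds TRIM_STDOUT_DUMP_SIZE rows only the trailing rows are echoed to the terminal. A restatement of the rule with illustrative numbers:

TRIM_STDOUT_DUMP_SIZE = 256

count = 10000                                        # e.g. rows in a dumped table
for i in range(count):
    console = (i >= count - TRIM_STDOUT_DUMP_SIZE)   # True only for rows 9744..9999
    # __write(..., console=console) prints to stdout only when console is True,
    # but writes to the output file unconditionally
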

@@ -31,7 +31,7 @@ def profile(profileOutputFile=None, dotOutputFile=None, imageOutputFile=None):
errMsg = "profiling requires third-party libraries (%s). " % getUnicode(e, UNICODE_ENCODING)
errMsg += "Quick steps:%s" % os.linesep
errMsg += "1) Install http://code.google.com/p/pydot/%s" % os.linesep
errMsg += "2) sudo apt-get install python-profiler graphviz"
errMsg += "2) sudo apt-get install python-pyparsing python-profiler graphviz"
logger.error(errMsg)
return

@@ -383,5 +383,8 @@ IDS_WAF_CHECK_PAYLOAD = "AND 1=1 UNION ALL SELECT 1,2,3,table_name FROM informat
# Used for status representation in dictionary attack phase
ROTATING_CHARS = ('\\', '|', '|', '/', '-')
# Chunk length used in BigArray object (only last one is held in memory)
BIGARRAY_CHUNK_LENGTH = 10000
# Chunk length (in items) used by BigArray objects (only last chunk and cached one are held in memory)
BIGARRAY_CHUNK_LENGTH = 5000
# Only console display last n table rows
TRIM_STDOUT_DUMP_SIZE = 256
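
Halving the chunk length also halves BigArray's worst-case resident set: no matter how many rows are stored, only the tail chunk plus one cached chunk stay unpickled. A back-of-the-envelope check (item sizes vary, so this counts items, not bytes):

BIGARRAY_CHUNK_LENGTH = 5000

resident_chunks = 2                              # tail chunk + cached chunk
print resident_chunks * BIGARRAY_CHUNK_LENGTH    # at most 10000 items in memory
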

@@ -12,6 +12,7 @@ import time
from lib.core.agent import agent
from lib.core.common import Backend
from lib.core.common import BigArray
from lib.core.common import calculateDeltaSeconds
from lib.core.common import cleanQuery
from lib.core.common import dataToSessionFile
@@ -123,7 +124,7 @@ def __goInferenceProxy(expression, fromUser=False, expected=None, batch=False, r
count = None
startLimit = 0
stopLimit = None
outputs = []
outputs = BigArray()
test = None
untilLimitChar = None
untilOrderChar = None

@@ -13,6 +13,7 @@ import time
from lib.core.agent import agent
from lib.core.common import Backend
from lib.core.common import BigArray
from lib.core.common import calculateDeltaSeconds
from lib.core.common import dataToSessionFile
from lib.core.common import dataToStdout
@@ -321,7 +322,7 @@ def errorUse(expression, expected=None, resumeValue=True, dump=False):
threadData = getCurrentThreadData()
threadData.shared.limits = range(startLimit, stopLimit)
numThreads = min(conf.threads, len(threadData.shared.limits))
threadData.shared.outputs = []
threadData.shared.outputs = BigArray()
if stopLimit > TURN_OFF_RESUME_INFO_LIMIT:
kb.suppressResumeInfo = True

@@ -33,13 +33,12 @@ from zipfile import ZipFile
from extra.pydes.pyDes import des
from extra.pydes.pyDes import CBC
from lib.core.common import Backend
from lib.core.common import checkFile
from lib.core.common import clearConsoleLine
from lib.core.common import dataToStdout
from lib.core.common import getCompiledRegex
from lib.core.common import getFileItems
from lib.core.common import getPublicTypeMembers
from lib.core.common import normalizeUnicode
from lib.core.common import paths
@@ -252,6 +251,8 @@ def attackCachedUsersPasswords():
kb.data.cachedUsersPasswords[user][i] += "%s clear-text password: %s" % ('\n' if kb.data.cachedUsersPasswords[user][i][-1] != '\n' else '', password)
def attackDumpedTable():
isOracle, isMySQL = Backend.isDbms(DBMS.ORACLE), Backend.isDbms(DBMS.MYSQL)
if kb.data.dumpedTable:
table = kb.data.dumpedTable
columns = table.keys()
@@ -275,7 +276,7 @@ def attackDumpedTable():
value = table[column]['values'][i]
if hashRecognition(value):
if hashRecognition(value, isOracle, isMySQL):
if colUser:
if table[colUser]['values'][i] not in attack_dict:
attack_dict[table[colUser]['values'][i]] = []
@@ -310,15 +311,15 @@ def attackDumpedTable():
table[column]['values'][i] += " (%s)" % password
table[column]['length'] = max(table[column]['length'], len(table[column]['values'][i]))
def hashRecognition(value):
def hashRecognition(value, isOracle=False, isMySQL=False):
retVal = None
if isinstance(value, basestring):
for name, regex in getPublicTypeMembers(HASH):
# Hashes for Oracle and old MySQL look the same hence these checks
if Backend.isDbms(DBMS.ORACLE) and regex == HASH.MYSQL_OLD:
if isOracle and regex == HASH.MYSQL_OLD:
continue
elif Backend.isDbms(DBMS.MYSQL) and regex == HASH.ORACLE_OLD:
elif isMySQL and regex == HASH.ORACLE_OLD:
continue
elif regex == HASH.CRYPT_GENERIC:
if any([getCompiledRegex(GENERAL_IP_ADDRESS_REGEX).match(value), value.lower() == value, value.upper() == value, value.isdigit()]):
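
The hash.py change is loop-invariant hoisting: attackDumpedTable() evaluates the two Backend.isDbms() probes once per table and passes the flags down, instead of hashRecognition() re-probing the backend for every dumped value. A generic sketch of the pattern (all names here are illustrative, not sqlmap's):

def recognize(value, is_oracle=False, is_mysql=False):
    # Per-value work only reads the precomputed flags; it never probes
    # the backend itself the way Backend.isDbms() used to be called here.
    if is_oracle and value.startswith("mysql_old:"):
        return None                      # ambiguous case skipped, as in the diff
    if is_mysql and value.startswith("oracle_old:"):
        return None
    return value

def scan(values, is_oracle, is_mysql):
    # The caller computes the invariant flags once, not len(values) times.
    return [recognize(v, is_oracle, is_mysql) for v in values]

print scan(["mysql_old:x", "plain"], True, False)    # [None, 'plain']
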

@@ -13,6 +13,7 @@ import time
from lib.core.agent import agent
from lib.core.common import arrayizeValue
from lib.core.common import Backend
from lib.core.common import BigArray
from lib.core.common import clearConsoleLine
from lib.core.common import dataToStdout
from lib.core.common import getRange
@@ -1385,7 +1386,7 @@ class Enumeration:
for column in colList:
lengths[column] = 0
entries[column] = []
entries[column] = BigArray()
colList = sorted(colList, key=lambda x: len(x) if x else MAX_INT)
@@ -1706,7 +1707,7 @@ class Enumeration:
lengths[column] = 0
if column not in entries:
entries[column] = []
entries[column] = BigArray()
if Backend.getIdentifiedDbms() in ( DBMS.MYSQL, DBMS.PGSQL ):
query = rootQuery.blind.query % (column, conf.db, conf.tbl, index)