From 9da8d55128555b3ddd39c66ac8ec9d588d28e54c Mon Sep 17 00:00:00 2001
From: Miroslav Stampar
Date: Wed, 7 Jun 2017 11:22:06 +0200
Subject: [PATCH] Implements #2557

---
 lib/core/settings.py        |   2 +-
 lib/techniques/error/use.py | 143 ++++++++++++-------------
 lib/techniques/union/use.py | 203 ++++++++++++++++++------------------
 plugins/generic/entries.py  |  81 ++++++++++----
 txt/checksum.md5            |  10 +-
 xml/queries.xml             |   2 +-
 6 files changed, 241 insertions(+), 200 deletions(-)

diff --git a/lib/core/settings.py b/lib/core/settings.py
index 407a1501d..9f1e6303c 100755
--- a/lib/core/settings.py
+++ b/lib/core/settings.py
@@ -19,7 +19,7 @@ from lib.core.enums import DBMS_DIRECTORY_NAME
 from lib.core.enums import OS
 
 # sqlmap version (...)
-VERSION = "1.1.6.4"
+VERSION = "1.1.6.5"
 TYPE = "dev" if VERSION.count('.') > 2 and VERSION.split('.')[-1] != '0' else "stable"
 TYPE_COLORS = {"dev": 33, "stable": 90, "pip": 34}
 VERSION_STRING = "sqlmap/%s#%s" % ('.'.join(VERSION.split('.')[:-1]) if VERSION.count('.') > 2 and VERSION.split('.')[-1] == '0' else VERSION, TYPE)
diff --git a/lib/techniques/error/use.py b/lib/techniques/error/use.py
index 142bd2e39..fc928eadb 100644
--- a/lib/techniques/error/use.py
+++ b/lib/techniques/error/use.py
@@ -352,93 +352,94 @@ def errorUse(expression, dump=False):
                     value = []  # for empty tables
                 return value
 
-            if " ORDER BY " in expression and (stopLimit - startLimit) > SLOW_ORDER_COUNT_THRESHOLD:
-                message = "due to huge table size do you want to remove "
-                message += "ORDER BY clause gaining speed over consistency? [y/N] "
+            if isNumPosStrValue(count) and int(count) > 1:
+                if " ORDER BY " in expression and (stopLimit - startLimit) > SLOW_ORDER_COUNT_THRESHOLD:
+                    message = "due to huge table size do you want to remove "
+                    message += "ORDER BY clause gaining speed over consistency? [y/N] "
 
-                if readInput(message, default="N", boolean=True):
-                    expression = expression[:expression.index(" ORDER BY ")]
+                    if readInput(message, default="N", boolean=True):
+                        expression = expression[:expression.index(" ORDER BY ")]
 
-            numThreads = min(conf.threads, (stopLimit - startLimit))
+                numThreads = min(conf.threads, (stopLimit - startLimit))
 
-            threadData = getCurrentThreadData()
+                threadData = getCurrentThreadData()
 
-            try:
-                threadData.shared.limits = iter(xrange(startLimit, stopLimit))
-            except OverflowError:
-                errMsg = "boundary limits (%d,%d) are too large. Please rerun " % (startLimit, stopLimit)
-                errMsg += "with switch '--fresh-queries'"
-                raise SqlmapDataException(errMsg)
+                try:
+                    threadData.shared.limits = iter(xrange(startLimit, stopLimit))
+                except OverflowError:
+                    errMsg = "boundary limits (%d,%d) are too large. Please rerun " % (startLimit, stopLimit)
+                    errMsg += "with switch '--fresh-queries'"
+                    raise SqlmapDataException(errMsg)
 
-            threadData.shared.value = BigArray()
-            threadData.shared.buffered = []
-            threadData.shared.counter = 0
-            threadData.shared.lastFlushed = startLimit - 1
-            threadData.shared.showEta = conf.eta and (stopLimit - startLimit) > 1
+                threadData.shared.value = BigArray()
+                threadData.shared.buffered = []
+                threadData.shared.counter = 0
+                threadData.shared.lastFlushed = startLimit - 1
+                threadData.shared.showEta = conf.eta and (stopLimit - startLimit) > 1
 
-            if threadData.shared.showEta:
-                threadData.shared.progress = ProgressBar(maxValue=(stopLimit - startLimit))
+                if threadData.shared.showEta:
+                    threadData.shared.progress = ProgressBar(maxValue=(stopLimit - startLimit))
 
-            if kb.dumpTable and (len(expressionFieldsList) < (stopLimit - startLimit) > CHECK_ZERO_COLUMNS_THRESHOLD):
-                for field in expressionFieldsList:
-                    if _oneShotErrorUse("SELECT COUNT(%s) FROM %s" % (field, kb.dumpTable)) == '0':
-                        emptyFields.append(field)
-                        debugMsg = "column '%s' of table '%s' will not be " % (field, kb.dumpTable)
-                        debugMsg += "dumped as it appears to be empty"
-                        logger.debug(debugMsg)
+                if kb.dumpTable and (len(expressionFieldsList) < (stopLimit - startLimit) > CHECK_ZERO_COLUMNS_THRESHOLD):
+                    for field in expressionFieldsList:
+                        if _oneShotErrorUse("SELECT COUNT(%s) FROM %s" % (field, kb.dumpTable)) == '0':
+                            emptyFields.append(field)
+                            debugMsg = "column '%s' of table '%s' will not be " % (field, kb.dumpTable)
+                            debugMsg += "dumped as it appears to be empty"
+                            logger.debug(debugMsg)
 
-            if stopLimit > TURN_OFF_RESUME_INFO_LIMIT:
-                kb.suppressResumeInfo = True
-                debugMsg = "suppressing possible resume console info because of "
-                debugMsg += "large number of rows. It might take too long"
-                logger.debug(debugMsg)
+                if stopLimit > TURN_OFF_RESUME_INFO_LIMIT:
+                    kb.suppressResumeInfo = True
+                    debugMsg = "suppressing possible resume console info because of "
+                    debugMsg += "large number of rows. It might take too long"
+                    logger.debug(debugMsg)
 
-            try:
-                def errorThread():
-                    threadData = getCurrentThreadData()
+                try:
+                    def errorThread():
+                        threadData = getCurrentThreadData()
 
-                    while kb.threadContinue:
-                        with kb.locks.limit:
-                            try:
-                                valueStart = time.time()
-                                threadData.shared.counter += 1
-                                num = threadData.shared.limits.next()
-                            except StopIteration:
+                        while kb.threadContinue:
+                            with kb.locks.limit:
+                                try:
+                                    valueStart = time.time()
+                                    threadData.shared.counter += 1
+                                    num = threadData.shared.limits.next()
+                                except StopIteration:
+                                    break
+
+                            output = _errorFields(expression, expressionFields, expressionFieldsList, num, emptyFields, threadData.shared.showEta)
+
+                            if not kb.threadContinue:
                                 break
 
-                        output = _errorFields(expression, expressionFields, expressionFieldsList, num, emptyFields, threadData.shared.showEta)
+                            if output and isListLike(output) and len(output) == 1:
+                                output = output[0]
 
-                        if not kb.threadContinue:
-                            break
+                            with kb.locks.value:
+                                index = None
+                                if threadData.shared.showEta:
+                                    threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter)
+                                for index in xrange(1 + len(threadData.shared.buffered)):
+                                    if index < len(threadData.shared.buffered) and threadData.shared.buffered[index][0] >= num:
+                                        break
+                                threadData.shared.buffered.insert(index or 0, (num, output))
+                                while threadData.shared.buffered and threadData.shared.lastFlushed + 1 == threadData.shared.buffered[0][0]:
+                                    threadData.shared.lastFlushed += 1
+                                    threadData.shared.value.append(threadData.shared.buffered[0][1])
+                                    del threadData.shared.buffered[0]
 
-                        if output and isListLike(output) and len(output) == 1:
-                            output = output[0]
+                    runThreads(numThreads, errorThread)
 
-                        with kb.locks.value:
-                            index = None
-                            if threadData.shared.showEta:
-                                threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter)
-                            for index in xrange(1 + len(threadData.shared.buffered)):
-                                if index < len(threadData.shared.buffered) and threadData.shared.buffered[index][0] >= num:
-                                    break
-                            threadData.shared.buffered.insert(index or 0, (num, output))
-                            while threadData.shared.buffered and threadData.shared.lastFlushed + 1 == threadData.shared.buffered[0][0]:
-                                threadData.shared.lastFlushed += 1
-                                threadData.shared.value.append(threadData.shared.buffered[0][1])
-                                del threadData.shared.buffered[0]
+                except KeyboardInterrupt:
+                    abortedFlag = True
+                    warnMsg = "user aborted during enumeration. sqlmap "
+                    warnMsg += "will display partial output"
+                    logger.warn(warnMsg)
 
-                runThreads(numThreads, errorThread)
-
-            except KeyboardInterrupt:
-                abortedFlag = True
-                warnMsg = "user aborted during enumeration. sqlmap "
-                warnMsg += "will display partial output"
-                logger.warn(warnMsg)
-
-            finally:
-                threadData.shared.value.extend(_[1] for _ in sorted(threadData.shared.buffered))
-                value = threadData.shared.value
-                kb.suppressResumeInfo = False
+                finally:
+                    threadData.shared.value.extend(_[1] for _ in sorted(threadData.shared.buffered))
+                    value = threadData.shared.value
+                    kb.suppressResumeInfo = False
 
     if not value and not abortedFlag:
         value = _errorFields(expression, expressionFields, expressionFieldsList)
diff --git a/lib/techniques/union/use.py b/lib/techniques/union/use.py
index 6e61d933f..11a32a96f 100644
--- a/lib/techniques/union/use.py
+++ b/lib/techniques/union/use.py
@@ -284,126 +284,127 @@ def unionUse(expression, unpack=True, dump=False):
                     value = []  # for empty tables
                 return value
 
-            threadData = getCurrentThreadData()
+            if isNumPosStrValue(count) and int(count) > 1:
+                threadData = getCurrentThreadData()
 
-            try:
-                threadData.shared.limits = iter(xrange(startLimit, stopLimit))
-            except OverflowError:
-                errMsg = "boundary limits (%d,%d) are too large. Please rerun " % (startLimit, stopLimit)
-                errMsg += "with switch '--fresh-queries'"
-                raise SqlmapDataException(errMsg)
+                try:
+                    threadData.shared.limits = iter(xrange(startLimit, stopLimit))
+                except OverflowError:
+                    errMsg = "boundary limits (%d,%d) are too large. Please rerun " % (startLimit, stopLimit)
+                    errMsg += "with switch '--fresh-queries'"
+                    raise SqlmapDataException(errMsg)
 
-            numThreads = min(conf.threads, (stopLimit - startLimit))
-            threadData.shared.value = BigArray()
-            threadData.shared.buffered = []
-            threadData.shared.counter = 0
-            threadData.shared.lastFlushed = startLimit - 1
-            threadData.shared.showEta = conf.eta and (stopLimit - startLimit) > 1
+                numThreads = min(conf.threads, (stopLimit - startLimit))
+                threadData.shared.value = BigArray()
+                threadData.shared.buffered = []
+                threadData.shared.counter = 0
+                threadData.shared.lastFlushed = startLimit - 1
+                threadData.shared.showEta = conf.eta and (stopLimit - startLimit) > 1
 
-            if threadData.shared.showEta:
-                threadData.shared.progress = ProgressBar(maxValue=(stopLimit - startLimit))
+                if threadData.shared.showEta:
+                    threadData.shared.progress = ProgressBar(maxValue=(stopLimit - startLimit))
 
-            if stopLimit > TURN_OFF_RESUME_INFO_LIMIT:
-                kb.suppressResumeInfo = True
-                debugMsg = "suppressing possible resume console info because of "
-                debugMsg += "large number of rows. It might take too long"
-                logger.debug(debugMsg)
+                if stopLimit > TURN_OFF_RESUME_INFO_LIMIT:
+                    kb.suppressResumeInfo = True
+                    debugMsg = "suppressing possible resume console info because of "
+                    debugMsg += "large number of rows. It might take too long"
+                    logger.debug(debugMsg)
 
-            try:
-                def unionThread():
-                    threadData = getCurrentThreadData()
+                try:
+                    def unionThread():
+                        threadData = getCurrentThreadData()
 
-                    while kb.threadContinue:
-                        with kb.locks.limit:
-                            try:
-                                valueStart = time.time()
-                                threadData.shared.counter += 1
-                                num = threadData.shared.limits.next()
-                            except StopIteration:
+                        while kb.threadContinue:
+                            with kb.locks.limit:
+                                try:
+                                    valueStart = time.time()
+                                    threadData.shared.counter += 1
+                                    num = threadData.shared.limits.next()
+                                except StopIteration:
+                                    break
+
+                            if Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE):
+                                field = expressionFieldsList[0]
+                            elif Backend.isDbms(DBMS.ORACLE):
+                                field = expressionFieldsList
+                            else:
+                                field = None
+
+                            limitedExpr = agent.limitQuery(num, expression, field)
+                            output = _oneShotUnionUse(limitedExpr, unpack, True)
+
+                            if not kb.threadContinue:
                                 break
 
-                        if Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE):
-                            field = expressionFieldsList[0]
-                        elif Backend.isDbms(DBMS.ORACLE):
-                            field = expressionFieldsList
-                        else:
-                            field = None
+                            if output:
+                                with kb.locks.value:
+                                    if all(_ in output for _ in (kb.chars.start, kb.chars.stop)):
+                                        items = parseUnionPage(output)
 
-                        limitedExpr = agent.limitQuery(num, expression, field)
-                        output = _oneShotUnionUse(limitedExpr, unpack, True)
+                                        if threadData.shared.showEta:
+                                            threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter)
+                                        if isListLike(items):
+                                            # in case that we requested N columns and we get M!=N then we have to filter a bit
+                                            if len(items) > 1 and len(expressionFieldsList) > 1:
+                                                items = [item for item in items if isListLike(item) and len(item) == len(expressionFieldsList)]
+                                            items = [_ for _ in flattenValue(items)]
+                                            if len(items) > len(expressionFieldsList):
+                                                filtered = OrderedDict()
+                                                for item in items:
+                                                    key = re.sub(r"[^A-Za-z0-9]", "", item).lower()
+                                                    if key not in filtered or re.search(r"[^A-Za-z0-9]", item):
+                                                        filtered[key] = item
+                                                items = filtered.values()
+                                            items = [items]
+                                        index = None
+                                        for index in xrange(1 + len(threadData.shared.buffered)):
+                                            if index < len(threadData.shared.buffered) and threadData.shared.buffered[index][0] >= num:
+                                                break
+                                        threadData.shared.buffered.insert(index or 0, (num, items))
+                                    else:
+                                        index = None
+                                        if threadData.shared.showEta:
+                                            threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter)
+                                        for index in xrange(1 + len(threadData.shared.buffered)):
+                                            if index < len(threadData.shared.buffered) and threadData.shared.buffered[index][0] >= num:
+                                                break
+                                        threadData.shared.buffered.insert(index or 0, (num, None))
 
-                        if not kb.threadContinue:
-                            break
+                                        items = output.replace(kb.chars.start, "").replace(kb.chars.stop, "").split(kb.chars.delimiter)
 
-                        if output:
-                            with kb.locks.value:
-                                if all(_ in output for _ in (kb.chars.start, kb.chars.stop)):
-                                    items = parseUnionPage(output)
+                                    while threadData.shared.buffered and (threadData.shared.lastFlushed + 1 >= threadData.shared.buffered[0][0] or len(threadData.shared.buffered) > MAX_BUFFERED_PARTIAL_UNION_LENGTH):
+                                        threadData.shared.lastFlushed, _ = threadData.shared.buffered[0]
+                                        if not isNoneValue(_):
+                                            threadData.shared.value.extend(arrayizeValue(_))
+                                        del threadData.shared.buffered[0]
 
-                                    if threadData.shared.showEta:
-                                        threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter)
-                                    if isListLike(items):
-                                        # in case that we requested N columns and we get M!=N then we have to filter a bit
-                                        if len(items) > 1 and len(expressionFieldsList) > 1:
-                                            items = [item for item in items if isListLike(item) and len(item) == len(expressionFieldsList)]
-                                        items = [_ for _ in flattenValue(items)]
-                                        if len(items) > len(expressionFieldsList):
-                                            filtered = OrderedDict()
-                                            for item in items:
-                                                key = re.sub(r"[^A-Za-z0-9]", "", item).lower()
-                                                if key not in filtered or re.search(r"[^A-Za-z0-9]", item):
-                                                    filtered[key] = item
-                                            items = filtered.values()
-                                        items = [items]
-                                    index = None
-                                    for index in xrange(1 + len(threadData.shared.buffered)):
-                                        if index < len(threadData.shared.buffered) and threadData.shared.buffered[index][0] >= num:
-                                            break
-                                    threadData.shared.buffered.insert(index or 0, (num, items))
-                                else:
-                                    index = None
-                                    if threadData.shared.showEta:
-                                        threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter)
-                                    for index in xrange(1 + len(threadData.shared.buffered)):
-                                        if index < len(threadData.shared.buffered) and threadData.shared.buffered[index][0] >= num:
-                                            break
-                                    threadData.shared.buffered.insert(index or 0, (num, None))
+                            if conf.verbose == 1 and not (threadData.resumed and kb.suppressResumeInfo) and not threadData.shared.showEta:
+                                _ = ','.join("\"%s\"" % _ for _ in flattenValue(arrayizeValue(items))) if not isinstance(items, basestring) else items
+                                status = "[%s] [INFO] %s: %s" % (time.strftime("%X"), "resumed" if threadData.resumed else "retrieved", _ if kb.safeCharEncode else safecharencode(_))
 
-                                    items = output.replace(kb.chars.start, "").replace(kb.chars.stop, "").split(kb.chars.delimiter)
+                                if len(status) > width:
+                                    status = "%s..." % status[:width - 3]
 
-                                while threadData.shared.buffered and (threadData.shared.lastFlushed + 1 >= threadData.shared.buffered[0][0] or len(threadData.shared.buffered) > MAX_BUFFERED_PARTIAL_UNION_LENGTH):
-                                    threadData.shared.lastFlushed, _ = threadData.shared.buffered[0]
-                                    if not isNoneValue(_):
-                                        threadData.shared.value.extend(arrayizeValue(_))
-                                    del threadData.shared.buffered[0]
+                                dataToStdout("%s\n" % status)
 
-                        if conf.verbose == 1 and not (threadData.resumed and kb.suppressResumeInfo) and not threadData.shared.showEta:
-                            _ = ','.join("\"%s\"" % _ for _ in flattenValue(arrayizeValue(items))) if not isinstance(items, basestring) else items
-                            status = "[%s] [INFO] %s: %s" % (time.strftime("%X"), "resumed" if threadData.resumed else "retrieved", _ if kb.safeCharEncode else safecharencode(_))
+                    runThreads(numThreads, unionThread)
 
-                            if len(status) > width:
-                                status = "%s..." % status[:width - 3]
+                    if conf.verbose == 1:
+                        clearConsoleLine(True)
 
-                            dataToStdout("%s\n" % status)
+                except KeyboardInterrupt:
+                    abortedFlag = True
 
-                runThreads(numThreads, unionThread)
+                    warnMsg = "user aborted during enumeration. sqlmap "
+                    warnMsg += "will display partial output"
+                    logger.warn(warnMsg)
 
-                if conf.verbose == 1:
-                    clearConsoleLine(True)
-
-            except KeyboardInterrupt:
-                abortedFlag = True
-
-                warnMsg = "user aborted during enumeration. sqlmap "
-                warnMsg += "will display partial output"
-                logger.warn(warnMsg)
-
-            finally:
-                for _ in sorted(threadData.shared.buffered):
-                    if not isNoneValue(_[1]):
-                        threadData.shared.value.extend(arrayizeValue(_[1]))
-                value = threadData.shared.value
-                kb.suppressResumeInfo = False
+                finally:
+                    for _ in sorted(threadData.shared.buffered):
+                        if not isNoneValue(_[1]):
+                            threadData.shared.value.extend(arrayizeValue(_[1]))
+                    value = threadData.shared.value
+                    kb.suppressResumeInfo = False
 
     if not value and not abortedFlag:
         output = _oneShotUnionUse(expression, unpack)
diff --git a/plugins/generic/entries.py b/plugins/generic/entries.py
index f4515e51d..40dcf8382 100644
--- a/plugins/generic/entries.py
+++ b/plugins/generic/entries.py
@@ -170,18 +170,38 @@ class Entries:
                         if not (isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION) and kb.injection.data[PAYLOAD.TECHNIQUE.UNION].where == PAYLOAD.WHERE.ORIGINAL):
                             table = "%s.%s" % (conf.db, tbl)
 
-                            try:
-                                retVal = pivotDumpTable(table, colList, blind=False)
-                            except KeyboardInterrupt:
-                                retVal = None
-                                kb.dumpKeyboardInterrupt = True
-                                clearConsoleLine()
-                                warnMsg = "Ctrl+C detected in dumping phase"
-                                logger.warn(warnMsg)
+                            if Backend.isDbms(DBMS.MSSQL):
+                                query = rootQuery.blind.count % table
+                                query = agent.whereQuery(query)
 
-                            if retVal:
-                                entries, _ = retVal
-                                entries = zip(*[entries[colName] for colName in colList])
+                                count = inject.getValue(query, blind=False, time=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
+                                if isNumPosStrValue(count):
+                                    indexRange = getLimitRange(count, plusOne=True)
+
+                                    for index in indexRange:
+                                        row = []
+
+                                        for column in colList:
+                                            query = rootQuery.blind.query3 % (column, column, table, index)
+                                            query = agent.whereQuery(query)
+                                            value = inject.getValue(query, blind=False, time=False, dump=True) or ""
+                                            row.append(value)
+
+                                        entries.append(row)
+
+                            if not entries:
+                                try:
+                                    retVal = pivotDumpTable(table, colList, blind=False)
+                                except KeyboardInterrupt:
+                                    retVal = None
+                                    kb.dumpKeyboardInterrupt = True
+                                    clearConsoleLine()
+                                    warnMsg = "Ctrl+C detected in dumping phase"
+                                    logger.warn(warnMsg)
+
+                                if retVal:
+                                    entries, _ = retVal
+                                    entries = zip(*[entries[colName] for colName in colList])
                         else:
                             query = rootQuery.inband.query % (colString, conf.db, tbl)
                     elif Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL, DBMS.HSQLDB):
@@ -285,17 +285,36 @@ class Entries:
                     elif Backend.isDbms(DBMS.MAXDB):
                         table = "%s.%s" % (conf.db, tbl)
 
-                        try:
-                            retVal = pivotDumpTable(table, colList, count, blind=True)
-                        except KeyboardInterrupt:
-                            retVal = None
-                            kb.dumpKeyboardInterrupt = True
-                            clearConsoleLine()
-                            warnMsg = "Ctrl+C detected in dumping phase"
-                            logger.warn(warnMsg)
+                        if Backend.isDbms(DBMS.MSSQL):
+                            indexRange = getLimitRange(count, plusOne=True)
+                            for index in indexRange:
+                                for column in colList:
+                                    query = rootQuery.blind.query3 % (column, column, table, index)
+                                    query = agent.whereQuery(query)
 
-                        if retVal:
-                            entries, lengths = retVal
+                                    value = inject.getValue(query, union=False, error=False, dump=True) or ""
+
+                                    if column not in lengths:
+                                        lengths[column] = 0
+
+                                    if column not in entries:
+                                        entries[column] = BigArray()
+
+                                    lengths[column] = max(lengths[column], len(DUMP_REPLACEMENTS.get(getUnicode(value), getUnicode(value))))
+                                    entries[column].append(value)
+
+                        if not entries:
+                            try:
+                                retVal = pivotDumpTable(table, colList, count, blind=True)
+                            except KeyboardInterrupt:
+                                retVal = None
+                                kb.dumpKeyboardInterrupt = True
+                                clearConsoleLine()
+                                warnMsg = "Ctrl+C detected in dumping phase"
+                                logger.warn(warnMsg)
+
+                            if retVal:
+                                entries, lengths = retVal
                     else:
                         emptyColumns = []
diff --git a/txt/checksum.md5 b/txt/checksum.md5
index cf69c73ad..05d4f3717 100644
--- a/txt/checksum.md5
+++ b/txt/checksum.md5
@@ -46,7 +46,7 @@ a09c6ceee8dbb624cc3ca1d17749c8a5 lib/core/option.py
 d8e9250f3775119df07e9070eddccd16 lib/core/replication.py
 785f86e3f963fa3798f84286a4e83ff2 lib/core/revision.py
 40c80b28b3a5819b737a5a17d4565ae9 lib/core/session.py
-6e7731aa5bc04ef76a597c91e0701665 lib/core/settings.py
+d556e1cf87bd101aa7f0a46fdbbbdff3 lib/core/settings.py
 d91291997d2bd2f6028aaf371bf1d3b6 lib/core/shell.py
 2ad85c130cc5f2b3701ea85c2f6bbf20 lib/core/subprocessng.py
 8136241fdbdb99a5dc0e51ba72918f6e lib/core/target.py
@@ -93,11 +93,11 @@ b7dd3a2697a08108ddc9a4264922c2e8 lib/takeover/web.py
 ab1601a7f429b47637c4fb8af703d0f1 lib/techniques/dns/test.py
 d3da4c7ceaf57c4687a052d58722f6bb lib/techniques/dns/use.py
 310efc965c862cfbd7b0da5150a5ad36 lib/techniques/error/__init__.py
-c7e6589ef171819c4630ca8434f0250b lib/techniques/error/use.py
+84b729215fd00e789ed75d9c00c97761 lib/techniques/error/use.py
 310efc965c862cfbd7b0da5150a5ad36 lib/techniques/__init__.py
 310efc965c862cfbd7b0da5150a5ad36 lib/techniques/union/__init__.py
 d71e48e6fd08f75cc612bf8b260994ce lib/techniques/union/test.py
-36194e6c0a8dd14139f57ebf87bb80f9 lib/techniques/union/use.py
+db3090ff9a740ba096ba676fcf44ebfc lib/techniques/union/use.py
 67f0ad96ec2207d7e59c788b858afd6d lib/utils/api.py
 7d10ba0851da8ee9cd3c140dcd18798e lib/utils/brute.py
 ed70f1ca9113664043ec9e6778e48078 lib/utils/crawler.py
@@ -203,7 +203,7 @@ deed74334b637767fc9de8f74b37647a plugins/dbms/sybase/fingerprint.py
 be7481a96214220bcd8f51ca00239bed plugins/generic/connector.py
 5390591ca955036d492de11355b52e8f plugins/generic/custom.py
 4ad4bccc03256b8f3d21ba4f8f759404 plugins/generic/databases.py
-5eae2e0992a719bfce9cf78ed0a0ea2f plugins/generic/entries.py
+12b0420d9588828e4a83fe8e89bef162 plugins/generic/entries.py
 55802d1d5d65938414c77ccc27731cab plugins/generic/enumeration.py
 0d10a0410c416fece51c26a935e68568 plugins/generic/filesystem.py
 2e397afd83939889d1a7a07893b19ae7 plugins/generic/fingerprint.py
@@ -459,4 +459,4 @@ a279656ea3fcb85c727249b02f828383 xml/livetests.xml
 3194e2688a7576e1f877d5b137f7c260 xml/payloads/stacked_queries.xml
 c2d8dd03db5a663e79eabb4495dd0723 xml/payloads/time_blind.xml
 ac649aff0e7db413e4937e446e398736 xml/payloads/union_query.xml
-5bd467d86d7cb55fbe5f66e4ff9a6bec xml/queries.xml
+7fa7db2c2296baa5e9ea381d4880492f xml/queries.xml
diff --git a/xml/queries.xml b/xml/queries.xml
index f5c85eef8..bc36a8ad8 100644
--- a/xml/queries.xml
+++ b/xml/queries.xml
@@ -194,7 +194,7 @@
-
+
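Both the error-based and UNION hunks above rely on the same retrieval pattern: worker threads pull row indices from a shared iterator, fetch rows concurrently, park out-of-order results in threadData.shared.buffered, and flush them to threadData.shared.value only once the next expected index (lastFlushed + 1) has arrived. Below is a minimal, standalone sketch of that ordered-flush buffering; it is not sqlmap code, and fetch_row is a hypothetical stand-in for the per-row retrieval done by _errorFields() / _oneShotUnionUse().

    # Standalone sketch (assumed names, not sqlmap code) of the ordered-flush
    # buffering used by errorThread()/unionThread() in the patch above.
    import threading

    def fetch_row(num):
        # hypothetical stand-in for the real per-row retrieval (one request per row)
        return "row-%d" % num

    def ordered_retrieve(start_limit, stop_limit, num_threads=4):
        limits = iter(range(start_limit, stop_limit))   # shared row-index source
        buffered = []                                    # [(num, output)], kept sorted by num
        value = []                                       # rows flushed in original order
        last_flushed = [start_limit - 1]
        limit_lock = threading.Lock()
        value_lock = threading.Lock()

        def worker():
            while True:
                with limit_lock:
                    try:
                        num = next(limits)
                    except StopIteration:
                        break

                output = fetch_row(num)                  # slow part, runs concurrently

                with value_lock:
                    # keep the buffer ordered by row index
                    index = 0
                    while index < len(buffered) and buffered[index][0] < num:
                        index += 1
                    buffered.insert(index, (num, output))

                    # flush every row that is now contiguous with the last flushed index
                    while buffered and buffered[0][0] == last_flushed[0] + 1:
                        last_flushed[0] += 1
                        value.append(buffered.pop(0)[1])

        threads = [threading.Thread(target=worker) for _ in range(num_threads)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        return value

    # e.g. ordered_retrieve(0, 10) returns rows 0..9 in order, regardless of
    # which worker thread finished first

The same buffer also caps memory use in the UNION case, where the patch additionally flushes whenever the buffer grows past MAX_BUFFERED_PARTIAL_UNION_LENGTH.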