fix: stream request must read

Commit 9ade7fa6a5 (parent 5ecb3e75eb)
Mirror of https://github.com/Infinidat/infi.clickhouse_orm.git
pyproject.toml

@@ -20,7 +20,7 @@ dependencies = [
     "iso8601 >= 0.1.12",
     "setuptools"
 ]
-version = "0.0.5"
+version = "0.0.6"
 
 [tool.setuptools.packages.find]
 where = ["src"]
database.py

@@ -88,7 +88,7 @@ class Database(object):
     inserting data and other operations.
     """
 
-    def __init__(self, db_name, db_url='http://localhost:18123/',
+    def __init__(self, db_name, db_url='http://localhost:8123/',
                  username=None, password=None, readonly=False, autocreate=True,
                  timeout=60, verify_ssl_cert=True, log_statements=False):
        """
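The default URL now points at 8123, ClickHouse's standard HTTP-interface port; 18123 looks like a leftover non-standard local mapping. A minimal connection sketch, assuming the upstream import path and a local server (the database name is illustrative):

    from infi.clickhouse_orm.database import Database

    # db_url can now be omitted for a local ClickHouse listening on the
    # standard HTTP port 8123
    db = Database('my_db')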
@@ -155,7 +155,6 @@ class Database(object):
             raise DatabaseException("You can't create system table")
         if getattr(model_class, 'engine') is None:
             raise DatabaseException("%s class must define an engine" % model_class.__name__)
-        print(model_class.create_table_sql(self))
         self._send(model_class.create_table_sql(self))
 
     def drop_table(self, model_class: Type[MODEL]) -> None:
@@ -416,6 +415,7 @@ class Database(object):
         )
         r = self.request_session.send(request, stream=stream)
         if isinstance(r, httpx.Response) and r.status_code != 200:
+            r.read()
             raise ServerError(r.text)
         return r
 
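This one-line addition is the fix named in the commit title: when the request is sent with stream=True, httpx does not load the body eagerly, so accessing r.text raises httpx.ResponseNotRead and masks the real server error. A standalone sketch of the behaviour (the URL is a placeholder, not part of the library):

    import httpx

    client = httpx.Client()
    request = client.build_request('GET', 'https://example.com/')
    r = client.send(request, stream=True)
    try:
        _ = r.text                # raises: streamed body not read yet
    except httpx.ResponseNotRead:
        r.read()                  # load the body into memory first
        print(r.text[:60])        # now safe, e.g. to build a ServerError
    finally:
        r.close()
        client.close()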
engines.py

@@ -33,12 +33,15 @@ class Memory(Engine):
 
 class MergeTree(Engine):
 
-    def __init__(self, date_col=None, order_by=(), sampling_expr=None,
-                 index_granularity=8192, replica_table_path=None, replica_name=None, partition_key=None,
-                 primary_key=None):
+    def __init__(
+            self, date_col=None, order_by=(), sampling_expr=None,
+            index_granularity=8192, replica_table_path=None,
+            replica_name=None, partition_key=None, primary_key=None
+    ):
         assert type(order_by) in (list, tuple), 'order_by must be a list or tuple'
         assert date_col is None or isinstance(date_col, str), 'date_col must be string if present'
-        assert primary_key is None or type(primary_key) in (list, tuple), 'primary_key must be a list or tuple'
+        assert primary_key is None or type(primary_key) in (list, tuple), \
+            'primary_key must be a list or tuple'
         assert partition_key is None or type(partition_key) in (list, tuple),\
             'partition_key must be tuple or list if present'
         assert (replica_table_path is None) == (replica_name is None), \
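The constructor body is unchanged; only the signature wrapping differs. For context, a hedged sketch of how this engine is attached to a model in this ORM (class and column names are invented, and the top-level import layout is assumed from upstream):

    from infi.clickhouse_orm import models, fields, engines
    from infi.clickhouse_orm.database import Database

    class Event(models.Model):
        created = fields.DateTimeField()
        name = fields.StringField()

        # order_by and partition_key must be lists/tuples, per the asserts above
        engine = engines.MergeTree(partition_key=('toYYYYMM(created)',),
                                   order_by=('created', 'name'))

    db = Database('my_db')
    db.create_table(Event)   # sends the DDL; the stray debug print above is gone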
@@ -60,12 +63,14 @@ class MergeTree(Engine):
     # I changed field name for new reality and syntax
     @property
     def key_cols(self):
-        logger.warning('`key_cols` attribute is deprecated and may be removed in future. Use `order_by` attribute instead')
+        logger.warning('`key_cols` attribute is deprecated and may be removed in future. '
+                       'Use `order_by` attribute instead')
         return self.order_by
 
     @key_cols.setter
     def key_cols(self, value):
-        logger.warning('`key_cols` attribute is deprecated and may be removed in future. Use `order_by` attribute instead')
+        logger.warning('`key_cols` attribute is deprecated and may be removed in future. '
+                       'Use `order_by` attribute instead')
         self.order_by = value
 
     def create_table_sql(self, db):
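The deprecated alias itself behaves as before; both getter and setter simply proxy order_by while logging the warning. A short sketch, continuing the hypothetical Event example above:

    engine = engines.MergeTree(order_by=('created',))
    engine.key_cols = ('created', 'name')           # logs the deprecation warning
    assert engine.key_cols == ('created', 'name')   # reads back engine.order_by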
@@ -124,11 +129,15 @@ class MergeTree(Engine):
 
 class CollapsingMergeTree(MergeTree):
 
-    def __init__(self, date_col=None, order_by=(), sign_col='sign', sampling_expr=None,
-                 index_granularity=8192, replica_table_path=None, replica_name=None, partition_key=None,
-                 primary_key=None):
-        super(CollapsingMergeTree, self).__init__(date_col, order_by, sampling_expr, index_granularity,
-                                                  replica_table_path, replica_name, partition_key, primary_key)
+    def __init__(
+            self, date_col=None, order_by=(), sign_col='sign', sampling_expr=None,
+            index_granularity=8192, replica_table_path=None, replica_name=None,
+            partition_key=None, primary_key=None
+    ):
+        super(CollapsingMergeTree, self).__init__(
+            date_col, order_by, sampling_expr, index_granularity,
+            replica_table_path, replica_name, partition_key, primary_key
+        )
         self.sign_col = sign_col
 
     def _build_sql_params(self, db):
@@ -139,12 +148,17 @@ class CollapsingMergeTree(MergeTree):
 
 class SummingMergeTree(MergeTree):
 
-    def __init__(self, date_col=None, order_by=(), summing_cols=None, sampling_expr=None,
-                 index_granularity=8192, replica_table_path=None, replica_name=None, partition_key=None,
-                 primary_key=None):
-        super(SummingMergeTree, self).__init__(date_col, order_by, sampling_expr, index_granularity, replica_table_path,
-                                               replica_name, partition_key, primary_key)
-        assert type is None or type(summing_cols) in (list, tuple), 'summing_cols must be a list or tuple'
+    def __init__(
+            self, date_col=None, order_by=(), summing_cols=None, sampling_expr=None,
+            index_granularity=8192, replica_table_path=None, replica_name=None,
+            partition_key=None, primary_key=None
+    ):
+        super(SummingMergeTree, self).__init__(
+            date_col, order_by, sampling_expr, index_granularity,
+            replica_table_path, replica_name, partition_key, primary_key
+        )
+        assert type is None or type(summing_cols) in (list, tuple), \
+            'summing_cols must be a list or tuple'
         self.summing_cols = summing_cols
 
     def _build_sql_params(self, db):
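Worth noting: the reflow preserves a pre-existing oddity on both sides of the diff. `type is None` compares the builtin `type`, which is never None, so that half of the check never passes, and `type(None)` is not in `(list, tuple)`. The guard presumably intends to permit the default `summing_cols=None`, i.e. something like:

    assert summing_cols is None or type(summing_cols) in (list, tuple), \
        'summing_cols must be a list or tuple'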
@@ -156,11 +170,15 @@ class SummingMergeTree(MergeTree):
 
 class ReplacingMergeTree(MergeTree):
 
-    def __init__(self, date_col=None, order_by=(), ver_col=None, sampling_expr=None,
-                 index_granularity=8192, replica_table_path=None, replica_name=None, partition_key=None,
-                 primary_key=None):
-        super(ReplacingMergeTree, self).__init__(date_col, order_by, sampling_expr, index_granularity,
-                                                 replica_table_path, replica_name, partition_key, primary_key)
+    def __init__(
+            self, date_col=None, order_by=(), ver_col=None, sampling_expr=None,
+            index_granularity=8192, replica_table_path=None, replica_name=None,
+            partition_key=None, primary_key=None
+    ):
+        super(ReplacingMergeTree, self).__init__(
+            date_col, order_by, sampling_expr, index_granularity,
+            replica_table_path, replica_name, partition_key, primary_key
+        )
         self.ver_col = ver_col
 
     def _build_sql_params(self, db):
@@ -178,8 +196,8 @@ class Buffer(Engine):
     """
 
     #Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes)
-    def __init__(self, main_model, num_layers=16, min_time=10, max_time=100, min_rows=10000, max_rows=1000000,
-                 min_bytes=10000000, max_bytes=100000000):
+    def __init__(self, main_model, num_layers=16, min_time=10, max_time=100, min_rows=10000,
+                 max_rows=1000000, min_bytes=10000000, max_bytes=100000000):
         self.main_model = main_model
         self.num_layers = num_layers
         self.min_time = min_time
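The Buffer signature is only re-wrapped; parameter order and defaults are unchanged. A hedged usage sketch, assuming the upstream BufferModel pattern and reusing the hypothetical Event model from the MergeTree example:

    from infi.clickhouse_orm import models, engines

    class EventBuffer(models.BufferModel, Event):
        engine = engines.Buffer(Event)   # buffers writes, then flushes into Event's table

    db.create_table(EventBuffer)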