format code

Aryan Iyappan 2021-08-22 11:03:22 +05:30
parent 2c66e496f7
commit 2e5944eb20
9 changed files with 33 additions and 97 deletions

View File

@@ -442,13 +442,11 @@ def _field_init(f, frozen, globals, self_name):
# This field does not need initialization. Signify that
# to the caller by returning None.
return None
# Only test this now, so that we can create variables for the
# default. However, return None to signify that we're not going
# to actually do the assignment statement for InitVars.
if f._field_type == _FIELD_INITVAR:
return None
# Now, actually generate the field assignment.
return _field_assign(frozen, f.name, value, self_name)
@@ -490,7 +488,6 @@ def _init_fn(fields, frozen, has_post_init, self_name):
raise TypeError(
f"non-default argument {f.name!r} " "follows default argument"
)
globals = {"MISSING": MISSING, "_HAS_DEFAULT_FACTORY": _HAS_DEFAULT_FACTORY}
body_lines = []
@@ -500,16 +497,13 @@ def _init_fn(fields, frozen, has_post_init, self_name):
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ",".join(f.name for f in fields if f._field_type is _FIELD_INITVAR)
body_lines.append(f"{self_name}.{_POST_INIT_NAME}({params_str})")
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ["pass"]
locals = {f"_type_{f.name}": f.type for f in fields}
return _create_fn(
"__init__",
@@ -674,7 +668,6 @@ def _get_field(cls, a_name, a_type):
# This is a field in __slots__, so it has no default value.
default = MISSING
f = field(default=default)
# Only at this point do we know the name and the type. Set them.
f.name = a_name
f.type = a_type
@@ -705,7 +698,6 @@ def _get_field(cls, a_name, a_type):
and _is_type(f.type, cls, typing, typing.ClassVar, _is_classvar)
):
f._field_type = _FIELD_CLASSVAR
# If the type is InitVar, or if it's a matching string annotation,
# then it's an InitVar.
if f._field_type is _FIELD:
@@ -717,7 +709,6 @@ def _get_field(cls, a_name, a_type):
and _is_type(f.type, cls, dataclasses, dataclasses.InitVar, _is_initvar)
):
f._field_type = _FIELD_INITVAR
# Validations for individual fields. This is delayed until now,
# instead of in the Field() constructor, since only here do we
# know the field name, which allows for better error reporting.
@@ -731,14 +722,12 @@ def _get_field(cls, a_name, a_type):
# example, how about init=False (or really,
# init=<not-the-default-init-value>)? It makes no sense for
# ClassVar and InitVar to specify init=<anything>.
# For real fields, disallow mutable defaults for known types.
if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
raise ValueError(
f"mutable default {type(f.default)} for field "
f"{f.name} is not allowed: use default_factory"
)
return f
@@ -827,7 +816,6 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
fields[f.name] = f
if getattr(b, _PARAMS).frozen:
any_frozen_base = True
# Annotations that are defined in this class (not in base
# classes). If __annotations__ isn't present, then this class
# adds no new annotations. We use this to compute fields that are
@@ -866,22 +854,18 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
delattr(cls, f.name)
else:
setattr(cls, f.name, f.default)
# Do we have any Field members that don't also have annotations?
for name, value in cls.__dict__.items():
if isinstance(value, Field) and name not in cls_annotations:
raise TypeError(f"{name!r} is a field but has no type annotation")
# Check rules that apply if we are derived from any dataclasses.
if has_dataclass_bases:
# Raise an exception if any of our bases are frozen, but we're not.
if any_frozen_base and not frozen:
raise TypeError("cannot inherit non-frozen dataclass from a " "frozen one")
# Raise an exception if we're frozen, but none of our bases are.
if not any_frozen_base and frozen:
raise TypeError("cannot inherit frozen dataclass from a " "non-frozen one")
# Remember all of the fields on our class (including bases). This
# also marks this class as being a dataclass.
setattr(cls, _FIELDS, fields)
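The two frozen checks just above are the user-visible rule: a frozen dataclass and a non-frozen one cannot inherit from each other. A minimal sketch of the error, using the standard-library dataclasses (the vendored copy behaves the same):

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class Base:
    x: int

try:
    @dataclass  # not frozen, but Base is
    class Child(Base):
        y: int = 0
except TypeError as err:
    print(err)  # cannot inherit non-frozen dataclass from a frozen one
```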
@@ -900,7 +884,6 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
# eq methods.
if order and not eq:
raise ValueError("eq must be true if order is true")
if init:
# Does this class have a post-init function?
has_post_init = hasattr(cls, _POST_INIT_NAME)
@@ -920,7 +903,6 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
"__dataclass_self__" if "self" in fields else "self",
),
)
# Get the fields as a list, and include only real fields. This is
# used in all of the following methods.
field_list = [f for f in fields.values() if f._field_type is _FIELD]
@@ -928,7 +910,6 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
if repr:
flds = [f for f in field_list if f.repr]
_set_new_attribute(cls, "__repr__", _repr_fn(flds))
if eq:
# Create __eq__ method. There's no need for a __ne__ method,
# since Python will call __eq__ and negate it.
@@ -938,7 +919,6 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
_set_new_attribute(
cls, "__eq__", _cmp_fn("__eq__", "==", self_tuple, other_tuple)
)
if order:
# Create and set the ordering methods.
flds = [f for f in field_list if f.compare]
@@ -958,7 +938,6 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
f"in class {cls.__name__}. Consider using "
"functools.total_ordering"
)
if frozen:
for fn in _frozen_get_del_attr(cls, field_list):
if _set_new_attribute(cls, fn.__name__, fn):
@@ -966,7 +945,6 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
f"Cannot overwrite attribute {fn.__name__} "
f"in class {cls.__name__}"
)
# Decide if/how we're going to create a hash function.
hash_action = _hash_action[
bool(unsafe_hash), bool(eq), bool(frozen), has_explicit_hash
@@ -975,11 +953,9 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
# No need to call _set_new_attribute here, since by the time
# we're here the overwriting is unconditional.
cls.__hash__ = hash_action(cls, field_list)
if not getattr(cls, "__doc__"):
# Create a class doc-string.
cls.__doc__ = cls.__name__ + str(inspect.signature(cls)).replace(" -> None", "")
return cls
@@ -1015,7 +991,6 @@ def dataclass(
if _cls is None:
# We're called with parens.
return wrap
# We're called as @dataclass without parens.
return wrap(_cls)
@@ -1032,7 +1007,6 @@ def fields(class_or_instance):
fields = getattr(class_or_instance, _FIELDS)
except AttributeError:
raise TypeError("must be called with a dataclass type or instance")
# Exclude pseudo-fields. Note that fields is sorted by insertion
# order, so the order of the tuple is as the fields were defined.
return tuple(f for f in fields.values() if f._field_type is _FIELD)
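As the comment above notes, fields() skips pseudo-fields. A minimal sketch that also exercises the __post_init__/InitVar plumbing from the _init_fn hunk earlier:

```python
from dataclasses import InitVar, dataclass, fields
from typing import ClassVar

@dataclass
class C:
    x: int
    limit: ClassVar[int] = 10  # ClassVar pseudo-field: not returned by fields()
    seed: InitVar[int] = 0     # InitVar pseudo-field: only passed to __post_init__

    def __post_init__(self, seed):
        self.x += seed

print([f.name for f in fields(C(1, seed=2))])  # ['x']
```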
@@ -1174,7 +1148,6 @@ def make_dataclass(
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
# While we're looking through the field names, validate that they
# are identifiers, are not keywords, and not duplicates.
seen = set()
@@ -1184,23 +1157,23 @@ def make_dataclass(
name = item
tp = "typing.Any"
elif len(item) == 2:
-name, tp, = item
+(
+name,
+tp,
+) = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
else:
raise TypeError(f"Invalid field: {item!r}")
if not isinstance(name, str) or not name.isidentifier():
raise TypeError(f"Field names must be valid identifers: {name!r}")
if keyword.iskeyword(name):
raise TypeError(f"Field names must not be keywords: {name!r}")
if name in seen:
raise TypeError(f"Field name duplicated: {name!r}")
seen.add(name)
anns[name] = tp
namespace["__annotations__"] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
# of generic dataclasses.
@@ -1236,7 +1209,6 @@ def replace(obj, **changes):
if not _is_dataclass_instance(obj):
raise TypeError("replace() should be called on dataclass instances")
# It's an error to have init=False fields in 'changes'.
# If a field is not in 'changes', read its value from the provided obj.
@@ -1250,10 +1222,8 @@ def replace(obj, **changes):
"replace()"
)
continue
if f.name not in changes:
changes[f.name] = getattr(obj, f.name)
# Create the new object, which calls __init__() and
# __post_init__() (if defined), using all of the init fields we've
# added and/or left in 'changes'. If there are values supplied in
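The dataclasses hunks above only reflow code, so the behavior they cover is unchanged. A minimal sketch of the public API they touch: make_dataclass field specs, the mutable-default guard, and replace():

```python
from dataclasses import dataclass, field, make_dataclass, replace

# make_dataclass accepts a bare name, a (name, type) pair,
# or a (name, type, Field) triple, exactly as parsed above.
Point = make_dataclass(
    "Point",
    [
        "x",                                          # bare name -> typing.Any
        ("y", int),                                   # (name, type)
        ("tags", list, field(default_factory=list)),  # (name, type, Field)
    ],
)

p = Point(1, 2)
q = replace(p, y=5)  # re-runs __init__; unchanged fields are read from p
print(p, q)          # Point(x=1, y=2, tags=[]) Point(x=1, y=5, tags=[])

# The guard in _get_field rejects mutable list/dict/set defaults outright:
try:
    @dataclass
    class Basket:
        items: list = []
except ValueError as err:
    print(err)  # mutable default <class 'list'> for field items is not allowed...
```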

View File

@@ -66,7 +66,6 @@ def cursor_for(ltr):
async def execute(args=""):
if args:
args = "(" + args + ")"
return await schema.execute_async(
"""
{
@@ -164,14 +163,16 @@ async def test_respects_first_and_after_and_before_too_few():
@mark.asyncio
async def test_respects_first_and_after_and_before_too_many():
await check(
-f'first: 4, after: "{cursor_for("A")}", before: "{cursor_for("E")}"', "BCD",
+f'first: 4, after: "{cursor_for("A")}", before: "{cursor_for("E")}"',
+"BCD",
)
@mark.asyncio
async def test_respects_first_and_after_and_before_exactly_right():
await check(
-f'first: 3, after: "{cursor_for("A")}", before: "{cursor_for("E")}"', "BCD",
+f'first: 3, after: "{cursor_for("A")}", before: "{cursor_for("E")}"',
+"BCD",
)
@@ -187,14 +188,16 @@ async def test_respects_last_and_after_and_before_too_few():
@mark.asyncio
async def test_respects_last_and_after_and_before_too_many():
await check(
-f'last: 4, after: "{cursor_for("A")}", before: "{cursor_for("E")}"', "BCD",
+f'last: 4, after: "{cursor_for("A")}", before: "{cursor_for("E")}"',
+"BCD",
)
@mark.asyncio
async def test_respects_last_and_after_and_before_exactly_right():
await check(
-f'last: 3, after: "{cursor_for("A")}", before: "{cursor_for("E")}"', "BCD",
+f'last: 3, after: "{cursor_for("A")}", before: "{cursor_for("E")}"',
+"BCD",
)
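Every reflowed assertion above encodes the same relay slicing rule. A hypothetical model of that rule (check, cursor_for, and the five-letter dataset live in parts of this test file not shown here): after/before trim the window first, then first/last cap it.

```python
# Hypothetical stand-in for the connection under test: edges "A".."E",
# with after/before as exclusive cursors applied before first/last.
letters = list("ABCDE")

def slice_connection(first=None, last=None, after=None, before=None):
    lo = letters.index(after) + 1 if after else 0
    hi = letters.index(before) if before else len(letters)
    window = letters[lo:hi]
    if first is not None:
        window = window[:first]   # keep at most `first` from the front
    if last is not None:
        window = window[-last:]   # keep at most `last` from the back
    return "".join(window)

# Mirrors the "too many" cases above: only B, C, D fit between the cursors.
assert slice_connection(first=4, after="A", before="E") == "BCD"
assert slice_connection(last=4, after="A", before="E") == "BCD"
```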

View File

@@ -76,7 +76,6 @@ class Mutation(ObjectType):
):
if not _meta:
_meta = MutationOptions(cls)
output = output or getattr(cls, "Output", None)
fields = {}
@@ -85,14 +84,12 @@ class Mutation(ObjectType):
interface, Interface
), f'All interfaces of {cls.__name__} must be subclasses of Interface. Received "{interface}".'
fields.update(interface._meta.fields)
if not output:
# No explicit Output was defined, so gather the output fields from the class itself
fields = {}
for base in reversed(cls.__mro__):
fields.update(yank_fields_from_attrs(base.__dict__, _as=Field))
output = cls
if not arguments:
input_class = getattr(cls, "Arguments", None)
if not input_class:
@@ -106,22 +103,18 @@ class Mutation(ObjectType):
" https://github.com/graphql-python/graphene/blob/v2.0.0/UPGRADE-v2.0.md#mutation-input"
)
)
if input_class:
arguments = props(input_class)
else:
arguments = {}
if not resolver:
mutate = getattr(cls, "mutate", None)
assert mutate, "All mutations must define a mutate method"
resolver = get_unbound_function(mutate)
if _meta.fields:
_meta.fields.update(fields)
else:
_meta.fields = fields
_meta.interfaces = interfaces
_meta.output = output
_meta.resolver = resolver
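For reference, the branches above (an explicit Output, the Arguments inner class, and the required mutate method) correspond to the usual graphene mutation shape. A minimal sketch, not taken from this repo's tests:

```python
from graphene import Boolean, Mutation, ObjectType, String

class CreatePerson(Mutation):
    class Arguments:          # picked up by the input_class lookup above
        name = String(required=True)

    # No explicit Output, so these attributes become the output fields.
    ok = Boolean()
    name = String()

    def mutate(root, info, name):  # required: the assert above enforces it
        return CreatePerson(name=name, ok=True)

class MutationRoot(ObjectType):
    create_person = CreatePerson.Field()
```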

View File

@@ -7,7 +7,6 @@ try:
from dataclasses import make_dataclass, field
except ImportError:
from ..pyutils.dataclasses import make_dataclass, field # type: ignore
# For static type checking with Mypy
MYPY = False
if MYPY:
@@ -28,7 +27,11 @@ class ObjectTypeMeta(BaseTypeMeta):
pass
base_cls = super().__new__(
-cls, name_, (InterObjectType,) + bases, namespace, **options,
+cls,
+name_,
+(InterObjectType,) + bases,
+namespace,
+**options,
)
if base_cls._meta:
fields = [
@@ -133,7 +136,6 @@ class ObjectType(BaseType, metaclass=ObjectTypeMeta):
):
if not _meta:
_meta = ObjectTypeOptions(cls)
fields = {}
for interface in interfaces:
@@ -141,10 +143,8 @@ class ObjectType(BaseType, metaclass=ObjectTypeMeta):
interface, Interface
), f'All interfaces of {cls.__name__} must be subclasses of Interface. Received "{interface}".'
fields.update(interface._meta.fields)
for base in reversed(cls.__mro__):
fields.update(yank_fields_from_attrs(base.__dict__, _as=Field))
assert not (possible_types and cls.is_type_of), (
f"{cls.__name__}.Meta.possible_types will cause type collision with {cls.__name__}.is_type_of. "
"Please use one or other."
@@ -154,7 +154,6 @@ class ObjectType(BaseType, metaclass=ObjectTypeMeta):
_meta.fields.update(fields)
else:
_meta.fields = fields
if not _meta.interfaces:
_meta.interfaces = interfaces
_meta.possible_types = possible_types
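The field merging above, interface fields first and then attributes yanked from the MRO, is what a declaration like this minimal sketch relies on:

```python
from graphene import Interface, ObjectType, String

class Character(Interface):
    name = String()

class Human(ObjectType):
    class Meta:
        interfaces = (Character,)  # merged via interface._meta.fields above

    home_planet = String()         # collected by yank_fields_from_attrs

# Human ends up exposing both `name` (from Character) and `home_planet`.
```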

View File

@@ -72,7 +72,8 @@ def test_base64_query_invalid():
for input_ in bad_inputs:
result = schema.execute(
"""{ base64(input: $input) }""", variables={"input": input_},
"""{ base64(input: $input) }""",
variables={"input": input_},
)
assert isinstance(result.errors, list)
assert len(result.errors) == 1

View File

@@ -2,7 +2,4 @@ from .depth_limit import depth_limit_validator
from .disable_introspection import DisableIntrospection
-__all__ = [
-"DisableIntrospection",
-"depth_limit_validator"
-]
+__all__ = ["DisableIntrospection", "depth_limit_validator"]

View File

@@ -30,7 +30,6 @@ try:
except ImportError:
# backwards compatibility for Python 3.6
from typing import Pattern
from typing import Callable, Dict, List, Optional, Union
from graphql import GraphQLError
@@ -75,7 +74,6 @@ def depth_limit_validator(
operation_name=name,
ignore=ignore,
)
if callable(callback):
callback(query_depths)
super().__init__(validation_context)
@@ -90,7 +88,6 @@ def get_fragments(
for definition in definitions:
if isinstance(definition, FragmentDefinitionNode):
fragments[definition.name.value] = definition
return fragments
@@ -105,7 +102,6 @@ def get_queries_and_mutations(
if isinstance(definition, OperationDefinitionNode):
operation = definition.name.value if definition.name else "anonymous"
operations[operation] = definition
return operations
@@ -126,7 +122,6 @@ def determine_depth(
)
)
return depth_so_far
if isinstance(node, FieldNode):
should_ignore = is_introspection_key(node.name.value) or is_ignored(
node, ignore
@@ -134,7 +129,6 @@ def determine_depth(
if should_ignore or not node.selection_set:
return 0
return 1 + max(
map(
lambda selection: determine_depth(
@@ -177,13 +171,14 @@ def determine_depth(
)
)
else:
raise Exception(f"Depth crawler cannot handle: {node.kind}.") # pragma: no cover
raise Exception(
f"Depth crawler cannot handle: {node.kind}."
) # pragma: no cover
def is_ignored(node: FieldNode, ignore: Optional[List[IgnoreType]] = None) -> bool:
if ignore is None:
return False
for rule in ignore:
field_name = node.name.value
if isinstance(rule, str):
@@ -197,5 +192,4 @@ def is_ignored(node: FieldNode, ignore: Optional[List[IgnoreType]] = None) -> bool:
return True
else:
raise ValueError(f"Invalid ignore option: {rule}.")
return False
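A usage sketch for this validator. The toy schema is an assumption; the validate(...) call shape matches the tests in the next file:

```python
from graphql import parse, validate
from graphene import Field, ObjectType, Schema, String
from graphene.validation import depth_limit_validator

class Pet(ObjectType):
    name = String()
    friend = Field(lambda: Pet)  # self-reference allows arbitrarily deep queries

class Query(ObjectType):
    pet = Field(Pet)

schema = Schema(query=Query)
document = parse("{ pet { friend { friend { name } } } }")  # field depth 3
errors = validate(
    schema=schema.graphql_schema,
    document_ast=document,
    rules=(depth_limit_validator(max_depth=2),),  # 3 > 2: one error reported
)
assert errors
```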

View File

@@ -48,26 +48,11 @@ class HumanType(ObjectType):
class Query(ObjectType):
-user = Field(
-HumanType,
-required=True,
-name=String()
-)
-version = String(
-required=True
-)
-user1 = Field(
-HumanType,
-required=True
-)
-user2 = Field(
-HumanType,
-required=True
-)
-user3 = Field(
-HumanType,
-required=True
-)
+user = Field(HumanType, required=True, name=String())
+version = String(required=True)
+user1 = Field(HumanType, required=True)
+user2 = Field(HumanType, required=True)
+user3 = Field(HumanType, required=True)
@staticmethod
def resolve_user(root, info, name=None):
@@ -91,9 +76,7 @@ def run_query(query: str, max_depth: int, ignore=None):
document_ast=document,
rules=(
depth_limit_validator(
-max_depth=max_depth,
-ignore=ignore,
-callback=callback
+max_depth=max_depth, ignore=ignore, callback=callback
),
),
)

View File

@@ -5,9 +5,7 @@ from ..disable_introspection import DisableIntrospection
class Query(ObjectType):
-name = String(
-required=True
-)
+name = String(required=True)
@staticmethod
def resolve_name(root, info):
@@ -23,9 +21,7 @@ def run_query(query: str):
errors = validate(
schema=schema.graphql_schema,
document_ast=document,
-rules=(
-DisableIntrospection,
-),
+rules=(DisableIntrospection,),
)
return errors
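A hypothetical check built on the run_query helper above, assuming DisableIntrospection reports one error per introspection field and leaves ordinary fields alone:

```python
# Introspection query: the rule should reject the __schema field.
errors = run_query("{ __schema { queryType { name } } }")
assert len(errors) == 1

# Ordinary field: validates cleanly.
assert run_query("{ name }") == []
```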