Change line length to 99 (#91)
drhagen authored Jun 22, 2023
1 parent 6a2c376 commit 7508e59
Showing 21 changed files with 298 additions and 111 deletions.
19 changes: 17 additions & 2 deletions examples/json.py
@@ -15,7 +15,17 @@ class JsonStringParsers(ParserContext):
tab = lit(r"\t") > constant("\t")
uni = reg(r"\\u([0-9a-fA-F]{4})") > (lambda x: chr(int(x.group(1), 16)))

escaped = quote | reverse_solidus | solidus | backspace | form_feed | line_feed | carriage_return | tab | uni
escaped = (
quote
| reverse_solidus
| solidus
| backspace
| form_feed
| line_feed
| carriage_return
| tab
| uni
)
unescaped = reg(r"[\u0020-\u0021\u0023-\u005B\u005D-\U0010FFFF]+")

string = '"' >> rep(escaped | unescaped) << '"' > "".join
@@ -44,7 +54,12 @@ class JsonParsers(ParserContext, whitespace=r"[ \t\n\r]*"):
"-12.40e2",
"[false, true, null]",
'{"__class__" : "Point", "x" : 2.3, "y" : -1.6}',
'{"__class__" : "Rectangle", "location" : {"x":-1.3,"y":-4.5}, "height" : 2.0, "width" : 4.0}',
"""{
"__class__" : "Rectangle",
"location" : {"x":-1.3,"y":-4.5},
"height" : 2.0,
"width" : 4.0
}""",
'{"text" : ""}',
]

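A minimal sketch, not part of the commit, of how the string parser reformatted above is typically exercised. The parser names below are simplified stand-ins for the ones in examples/json.py; the call pattern is the standard parsita parse API.

from parsita import ParserContext, lit, reg, rep
from parsita.util import constant

class StringParsers(ParserContext):
    # Simplified stand-ins for the quote/unescaped/string parsers in examples/json.py
    quote = lit(r'\"') > constant('"')
    unescaped = reg(r'[^"\\]+')
    string = '"' >> rep(quote | unescaped) << '"' > "".join

result = StringParsers.string.parse(r'"say \"hi\""')
print(result)  # Expected to print Success('say "hi"') if this sketch matches the API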
22 changes: 15 additions & 7 deletions examples/url.py
@@ -5,7 +5,7 @@
from parsita.util import constant, splat

# This covers a typical URL schema, not the crazy one specified by https://tools.ietf.org/html/rfc3986
# In particular, this doesn't handle Unicode at the moment
# In particular, this doesn't handle Unicode

UserInfo = namedtuple("Userinfo", ["username", "password"])
DomainName = namedtuple("DomainName", ["domains"])
@@ -36,18 +36,26 @@ class TypicalUrlParsers(ParserContext):
)
host = ipv4_address | ipv6_address | domain_name

port = ":" >> reg(r"[0-9]+") > int
port = reg(r"[0-9]+") > int

path = rep("/" >> (reg(r"[-._~A-Za-z0-9]*") | encoded))

query_as_is = reg(r"[*-._A-Za-z0-9]+")
query_space = lit("+") > constant(" ")
query_string = rep1(query_as_is | query_space | encoded) > "".join
query = "?" >> repsep(query_string << "=" & query_string, "&") > OrderedDict

fragment = "#" >> reg(r"[-._~/?A-Za-z0-9]*")

url = scheme << "://" & opt(userinfo << "@") & host & opt(port) & path & opt(query) & opt(fragment) > splat(Url)
query = repsep(query_string << "=" & query_string, "&") > OrderedDict

fragment = reg(r"[-._~/?A-Za-z0-9]*")

url = (
scheme << "://"
& opt(userinfo << "@")
& host
& opt(":" >> port)
& path
& opt("?" >> query)
& opt("#" >> fragment)
) > splat(Url)


if __name__ == "__main__":
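A hedged usage sketch, not part of the commit, of the restructured url parser above. The exact fields of the Url namedtuple are defined earlier in url.py and are not shown in this diff, so the printed value is only indicated loosely.

# Hypothetical invocation of TypicalUrlParsers.url as assembled above
result = TypicalUrlParsers.url.parse("https://user:pw@example.com:8080/a/b?q=1#top")
print(result)  # A Success wrapping a Url namedtuple on well-formed input, a Failure otherwise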
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -67,12 +67,12 @@ source = [


[tool.black]
line-length = 120
line-length = 99


[tool.ruff]
src = ["src"]
line-length = 120
line-length = 99
extend-select = [
"I", # isort
"N", # pep8-naming
11 changes: 10 additions & 1 deletion src/parsita/__init__.py
@@ -18,4 +18,13 @@
success,
until,
)
from .state import Failure, ParseError, Reader, RecursionError, Result, SequenceReader, StringReader, Success
from .state import (
Failure,
ParseError,
Reader,
RecursionError,
Result,
SequenceReader,
StringReader,
Success,
)
21 changes: 15 additions & 6 deletions src/parsita/metaclasses.py
@@ -22,7 +22,8 @@ def __init__(self, old_options: dict):
def __missing__(self, key):
frame = inspect.currentframe() # Should be the frame of __missing__
while frame.f_code.co_name != "__missing__": # pragma: no cover
# But sometimes debuggers add frames on top of the stack; get back to `__missing__`'s frame
# But sometimes debuggers add frames on top of the stack;
# get back to `__missing__`'s frame
frame = frame.f_back

class_body_frame = frame.f_back.f_back # Frame of parser context is two frames back
@@ -44,8 +45,11 @@ def __missing__(self, key):

def __setitem__(self, key, value):
if isinstance(value, Parser):
value.protected = True # Protects against accidental concatenation of sequential parsers
value.name = key # Used for better error messages
# Protects against accidental concatenation of sequential parsers
value.protected = True

# Used for better error messages
value.name = key

super().__setitem__(key, value)

@@ -79,7 +83,11 @@ class ParserContextMeta(type):

@classmethod
def __prepare__(
mcs, name, bases, *, whitespace: Union[Parser[Input, Any], Pattern, str, None] = missing # noqa: N804
mcs, # noqa: N804
name,
bases,
*,
whitespace: Union[Parser[Input, Any], Pattern, str, None] = missing
):
if whitespace is missing:
whitespace = mcs.default_whitespace
@@ -119,8 +127,9 @@ def __new__(mcs, name, bases, dct, **_): # noqa: N804

def __call__(cls, *args, **kwargs):
raise TypeError(
"Parsers cannot be instantiated. They use class bodies purely as contexts for managing defaults and "
"allowing forward declarations. Access the individual parsers as static attributes."
"Parsers cannot be instantiated. They use class bodies purely as contexts for "
"managing defaults and allowing forward declarations. Access the individual parsers "
"as static attributes."
)


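A short sketch, assuming standard parsita behavior rather than anything introduced by this commit, of what the __missing__ and __setitem__ hooks above enable: forward references inside a class body, and parsers that carry their own names for error messages.

from parsita import ParserContext, reg, rep1sep

class ListParsers(ParserContext, whitespace=r"[ ]*"):
    # `number` is referenced before it is defined; __missing__ supplies a forward declaration
    values = "[" >> rep1sep(number, ",") << "]"
    # __setitem__ records the name "number", so failure messages can refer to the parser by name
    number = reg(r"[0-9]+") > int

print(ListParsers.values.parse("[1, 2, 3]"))  # Expected: Success([1, 2, 3])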
7 changes: 6 additions & 1 deletion src/parsita/parsers/__init__.py
@@ -9,7 +9,12 @@
from ._predicate import PredicateParser, pred
from ._regex import RegexParser, reg
from ._repeated import RepeatedOnceParser, RepeatedParser, rep, rep1
from ._repeated_seperated import RepeatedOnceSeparatedParser, RepeatedSeparatedParser, rep1sep, repsep
from ._repeated_seperated import (
RepeatedOnceSeparatedParser,
RepeatedSeparatedParser,
rep1sep,
repsep,
)
from ._sequential import DiscardLeftParser, DiscardRightParser, SequentialParser
from ._success import FailureParser, SuccessParser, failure, success
from ._until import UntilParser, until
19 changes: 14 additions & 5 deletions src/parsita/parsers/_alternative.py
@@ -29,7 +29,8 @@ def __repr__(self):


def first(
parser: Union[Parser[Input, Output], Sequence[Input]], *parsers: Union[Parser[Input, Output], Sequence[Input]]
parser: Union[Parser[Input, Output], Sequence[Input]],
*parsers: Union[Parser[Input, Output], Sequence[Input]],
) -> FirstAlternativeParser[Input, Output]:
"""Match the first of several alternative parsers.
@@ -45,7 +46,9 @@ def first(
Args:
*parsers: Non-empty list of ``Parser``s or literals to try
"""
cleaned_parsers = [lit(parser_i) if isinstance(parser_i, str) else parser_i for parser_i in [parser, *parsers]]
cleaned_parsers = [
lit(parser_i) if isinstance(parser_i, str) else parser_i for parser_i in [parser, *parsers]
]
return FirstAlternativeParser(*cleaned_parsers)


@@ -59,7 +62,10 @@ def consume(self, state: State[Input], reader: Reader[Input]):
for parser in self.parsers:
status = parser.cached_consume(state, reader)
if isinstance(status, Continue):
if longest_success is None or status.remainder.position > longest_success.remainder.position:
if (
longest_success is None
or status.remainder.position > longest_success.remainder.position
):
longest_success = status

return longest_success
@@ -73,7 +79,8 @@ def __repr__(self):


def longest(
parser: Union[Parser[Input, Output], Sequence[Input]], *parsers: Union[Parser[Input, Output], Sequence[Input]]
parser: Union[Parser[Input, Output], Sequence[Input]],
*parsers: Union[Parser[Input, Output], Sequence[Input]],
) -> LongestAlternativeParser[Input, Output]:
"""Match the longest of several alternative parsers.
@@ -90,5 +97,7 @@
Args:
*parsers: Non-empty list of ``Parser``s or literals to try
"""
cleaned_parsers = [lit(parser_i) if isinstance(parser_i, str) else parser_i for parser_i in [parser, *parsers]]
cleaned_parsers = [
lit(parser_i) if isinstance(parser_i, str) else parser_i for parser_i in [parser, *parsers]
]
return LongestAlternativeParser(*cleaned_parsers)
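A hedged sketch of the difference between the two combinators reformatted above: first commits to the first alternative that succeeds, while longest keeps whichever alternative consumed the most input.

from parsita import ParserContext, first, lit, longest

class AltParsers(ParserContext):
    keyword = first(lit("for"), lit("forever"))   # tries alternatives in order, keeps the first success
    word = longest(lit("for"), lit("forever"))    # keeps the success with the furthest remainder

print(AltParsers.word.parse("forever"))     # Expected: Success('forever')
print(AltParsers.keyword.parse("forever"))  # Expected: a Failure, because "for" matches but leaves "ever" unconsumed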
4 changes: 3 additions & 1 deletion src/parsita/parsers/_any.py
@@ -18,7 +18,9 @@ class AnyParser(Generic[Input], Parser[Input, Input]):
def __init__(self):
super().__init__()

def consume(self, state: State[Input], reader: Reader[Input]) -> Optional[Continue[Input, Input]]:
def consume(
self, state: State[Input], reader: Reader[Input]
) -> Optional[Continue[Input, Input]]:
if reader.finished:
state.register_failure("anything", reader)
return None
8 changes: 6 additions & 2 deletions src/parsita/parsers/_base.py
@@ -57,7 +57,9 @@ class Parser(Generic[Input, Output]):
name.
"""

def cached_consume(self, state: State[Input], reader: Reader[Input]) -> Optional[Continue[Input, Output]]:
def cached_consume(
self, state: State[Input], reader: Reader[Input]
) -> Optional[Continue[Input, Output]]:
"""Match this parser at the given location.
This is a concrete wrapper around ``consume``. This method implements
Expand Down Expand Up @@ -96,7 +98,9 @@ def cached_consume(self, state: State[Input], reader: Reader[Input]) -> Optional

return result

def consume(self, state: State[Input], reader: Reader[Input]) -> Optional[Continue[Input, Output]]:
def consume(
self, state: State[Input], reader: Reader[Input]
) -> Optional[Continue[Input, Output]]:
"""Abstract method for matching this parser at the given location.
This is the central method of every parser combinator.
14 changes: 11 additions & 3 deletions src/parsita/parsers/_conversion.py
@@ -14,7 +14,9 @@ def __init__(self, parser: Parser[Input, Output], converter: Callable[[Output],
self.parser = parser
self.converter = converter

def consume(self, state: State[Input], reader: Reader[Input]) -> Optional[Continue[Input, Convert]]:
def consume(
self, state: State[Input], reader: Reader[Input]
) -> Optional[Continue[Input, Convert]]:
status = self.parser.cached_consume(state, reader)

if isinstance(status, Continue):
@@ -27,12 +29,18 @@ def __repr__(self):


class TransformationParser(Generic[Input, Output, Convert], Parser[Input, Convert]):
def __init__(self, parser: Parser[Input, Output], transformer: Callable[[Output], Parser[Input, Convert]]):
def __init__(
self,
parser: Parser[Input, Output],
transformer: Callable[[Output], Parser[Input, Convert]],
):
super().__init__()
self.parser = parser
self.transformer = transformer

def consume(self, state: State[Input], reader: Reader[Input]) -> Optional[Continue[Input, Convert]]:
def consume(
self, state: State[Input], reader: Reader[Input]
) -> Optional[Continue[Input, Convert]]:
status = self.parser.cached_consume(state, reader)

if isinstance(status, Continue):
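A brief sketch of conversion in practice, assuming the > operator builds the ConversionParser reformatted above, as the examples touched by this commit use it.

from parsita import ParserContext, reg

class NumberParsers(ParserContext):
    integer = reg(r"[+-]?[0-9]+") > int       # convert the matched text with int()
    pair = integer << "," & integer > tuple   # convert the two-element result list to a tuple

print(NumberParsers.pair.parse("4,-2"))  # Expected: Success((4, -2))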
4 changes: 3 additions & 1 deletion src/parsita/parsers/_end_of_source.py
@@ -10,7 +10,9 @@ class EndOfSourceParser(Generic[Input], Parser[Input, None]):
def __init__(self):
super().__init__()

def consume(self, state: State[Input], reader: Reader[Input]) -> Optional[Continue[Input, None]]:
def consume(
self, state: State[Input], reader: Reader[Input]
) -> Optional[Continue[Input, None]]:
if reader.finished:
return Continue(reader, None)
else:
4 changes: 3 additions & 1 deletion src/parsita/parsers/_predicate.py
@@ -7,7 +7,9 @@


class PredicateParser(Generic[Input, Output], Parser[Input, Input]):
def __init__(self, parser: Parser[Input, Output], predicate: Callable[[Output], bool], description: str):
def __init__(
self, parser: Parser[Input, Output], predicate: Callable[[Output], bool], description: str
):
super().__init__()
self.parser = parser
self.predicate = predicate
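A hedged sketch of the pred combinator whose constructor is reformatted above, assuming pred(parser, predicate, description) succeeds only when the predicate holds and uses the description in failure messages.

from parsita import ParserContext, pred, reg

class PortParsers(ParserContext):
    port = pred(reg(r"[0-9]+") > int, lambda n: 0 < n < 65536, "a port number")

print(PortParsers.port.parse("8080"))   # Expected: Success(8080)
print(PortParsers.port.parse("70000"))  # Expected: a Failure mentioning "a port number"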
7 changes: 5 additions & 2 deletions src/parsita/parsers/_repeated.py
@@ -35,7 +35,9 @@ def __repr__(self):
return self.name_or_nothing() + f"rep1({self.parser.name_or_repr()})"


def rep1(parser: Union[Parser[Input, Output], Sequence[Input]]) -> RepeatedOnceParser[Input, Output]:
def rep1(
parser: Union[Parser[Input, Output], Sequence[Input]]
) -> RepeatedOnceParser[Input, Output]:
"""Match a parser one or more times repeatedly.
This matches ``parser`` multiple times in a row. If it matches at least
@@ -81,7 +83,8 @@ def consume(self, state: State[Input], reader: Reader[Input]):
def __repr__(self):
min_string = f", min={self.min}" if self.min > 0 else ""
max_string = f", max={self.max}" if self.max is not None else ""
return self.name_or_nothing() + f"rep({self.parser.name_or_repr()}{min_string}{max_string})"
string = f"rep({self.parser.name_or_repr()}{min_string}{max_string})"
return self.name_or_nothing() + string


def rep(
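A hedged sketch of rep with the min and max bounds that the reformatted __repr__ above reports, assuming rep accepts matching min and max keyword arguments.

from parsita import ParserContext, reg, rep

class DigitParsers(ParserContext):
    digits = rep(reg(r"[0-9]"), min=2, max=4) > "".join

print(DigitParsers.digits.parse("123"))  # Expected: Success('123')
print(DigitParsers.digits.parse("1"))    # Expected: a Failure, since fewer than min=2 repetitions match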
30 changes: 18 additions & 12 deletions src/parsita/parsers/_repeated_seperated.py
@@ -32,9 +32,11 @@ def consume(self, state: State[Input], reader: Reader[Input]):
output = [status.value]
remainder = status.remainder
while self.max is None or len(output) < self.max:
# If the separator matches, but the parser does not, the remainder from the last successful parser step
# must be used, not the remainder from any separator. That is why the parser starts from the remainder
# on the status, but remainder is not updated until after the parser succeeds.
# If the separator matches, but the parser does not, the
# remainder from the last successful parser step must be used,
# not the remainder from any separator. That is why the parser
# starts from the remainder on the status, but remainder is not
# updated until after the parser succeeds.
status = self.separator.cached_consume(state, remainder)
if isinstance(status, Continue):
status = self.parser.cached_consume(state, status.remainder)
@@ -55,12 +57,12 @@ def consume(self, state: State[Input], reader: Reader[Input]):
return None

def __repr__(self):
rep_string = self.parser.name_or_repr()
sep_string = self.separator.name_or_repr()
min_string = f", min={self.min}" if self.min > 0 else ""
max_string = f", max={self.max}" if self.max is not None else ""
return (
self.name_or_nothing()
+ f"repsep({self.parser.name_or_repr()}, {self.separator.name_or_repr()}{min_string}{max_string})"
)
string = f"repsep({rep_string}, {sep_string}{min_string}{max_string})"
return self.name_or_nothing() + string


def repsep(
@@ -107,9 +109,11 @@ def consume(self, state: State[Input], reader: Reader[Input]):
output = [status.value]
remainder = status.remainder
while True:
# If the separator matches, but the parser does not, the remainder from the last successful parser step
# must be used, not the remainder from any separator. That is why the parser starts from the remainder
# on the status, but remainder is not updated until after the parser succeeds.
# If the separator matches, but the parser does not, the
# remainder from the last successful parser step must be used,
# not the remainder from any separator. That is why the parser
# starts from the remainder on the status, but remainder is not
# updated until after the parser succeeds.
status = self.separator.cached_consume(state, remainder)
if isinstance(status, Continue):
status = self.parser.cached_consume(state, status.remainder)
Expand All @@ -125,11 +129,13 @@ def consume(self, state: State[Input], reader: Reader[Input]):
return Continue(remainder, output)

def __repr__(self):
return self.name_or_nothing() + f"rep1sep({self.parser.name_or_repr()}, {self.separator.name_or_repr()})"
string = f"rep1sep({self.parser.name_or_repr()}, {self.separator.name_or_repr()})"
return self.name_or_nothing() + string


def rep1sep(
parser: Union[Parser[Input, Output], Sequence[Input]], separator: Union[Parser[Input, Any], Sequence[Input]]
parser: Union[Parser[Input, Output], Sequence[Input]],
separator: Union[Parser[Input, Any], Sequence[Input]],
) -> RepeatedOnceSeparatedParser[Input, Output]:
"""Match a parser one or more times separated by another parser.
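A hedged sketch of repsep, whose consume loop is re-commented above, assuming separators are matched but discarded and zero items are allowed.

from parsita import ParserContext, reg, repsep

class CsvParsers(ParserContext, whitespace=r"[ ]*"):
    row = repsep(reg(r"[0-9]+") > int, ",")

print(CsvParsers.row.parse("1, 2, 3"))  # Expected: Success([1, 2, 3])
print(CsvParsers.row.parse(""))         # Expected: Success([]), since zero repetitions are allowed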
(Diffs for the remaining 7 changed files were not loaded.)
