From 3c92d379d9a32b22da8b71c4dc574518eea1b02b Mon Sep 17 00:00:00 2001 From: Sachaa-Thanasius Date: Sun, 14 Apr 2024 15:53:31 -0400 Subject: [PATCH 1/6] Initial work. - Modify logic from Sachaa-Thanasius/experimental. - Copy modified tests. - Substitute import_expression.parse with inline_import.parse. - Remove IMPORTER. --- jishaku/inline_import.py | 233 ++++++++++++++++++++++++++++++++++++ jishaku/repl/compilation.py | 13 +- jishaku/repl/disassembly.py | 14 +-- requirements/_.txt | 1 - tests/test_inline_import.py | 206 +++++++++++++++++++++++++++++++ 5 files changed, 452 insertions(+), 15 deletions(-) create mode 100644 jishaku/inline_import.py create mode 100644 tests/test_inline_import.py diff --git a/jishaku/inline_import.py b/jishaku/inline_import.py new file mode 100644 index 00000000..b5214260 --- /dev/null +++ b/jishaku/inline_import.py @@ -0,0 +1,233 @@ +# -*- coding: utf-8 -*- + +""" +jishaku.inline_import +~~~~~~~~~~~~ + +Logic for parsing Python with inline import syntax. + +:copyright: (c) 2021 Devon (Gorialis) R +:license: MIT, see LICENSE for more details. + +""" + +import ast +import functools +import tokenize +from io import BytesIO +from typing import TYPE_CHECKING, Callable, Iterable, List, Optional, Tuple, TypeVar, Union + + +if TYPE_CHECKING: + from typing_extensions import ParamSpec, Buffer as ReadableBuffer + P = ParamSpec("P") +else: + ReadableBuffer = bytes + P = [TypeVar("P")] + +T = TypeVar("T") + + +__all__ = ("parse",) + + +# ======== Token modification. + + +def offset_token_horizontal(tok: tokenize.TokenInfo, offset: int) -> tokenize.TokenInfo: + """Takes a token and returns a new token with the columns for start and end offset by a given amount.""" + + start_row, start_col = tok.start + end_row, end_col = tok.end + return tok._replace(start=(start_row, start_col + offset), end=(end_row, end_col + offset)) + + +def offset_line_horizontal( + tokens: List[tokenize.TokenInfo], + start_index: int = 0, + *, + line: int, + offset: int, +) -> None: + """Takes a list of tokens and changes the offset of some of the tokens in place.""" + + for i, tok in enumerate(tokens[start_index:], start=start_index): + if line == tok.start[0]: + tokens[i] = offset_token_horizontal(tok, offset) + + +def transform_tokens(tokens: Iterable[tokenize.TokenInfo]) -> List[tokenize.TokenInfo]: + """Find the inline import expressions in a list of tokens and replace the relevant tokens to wrap the imported + modules with '_IMPORTLIB_MARKER(...)'. + + Later, the AST transformer step will replace those with valid import expressions. + """ + + orig_tokens = list(tokens) + new_tokens: list[tokenize.TokenInfo] = [] + + for orig_i, tok in enumerate(orig_tokens): + # "!" is only an OP in >=3.12. + if tok.type in {tokenize.OP, tokenize.ERRORTOKEN} and tok.string == "!": + has_invalid_syntax = False + + # Collect all name and attribute access-related tokens directly connected to the "!". + last_place = len(new_tokens) + looking_for_name = True + + for old_tok in reversed(new_tokens): + if old_tok.exact_type != (tokenize.NAME if looking_for_name else tokenize.DOT): + # The "!" was placed somewhere in a class definition, e.g. "class Fo!o: pass". + has_invalid_syntax = (old_tok.exact_type == tokenize.NAME and old_tok.string == "class") + + # There's a name immediately following "!". Might be a f-string conversion flag + # like "f'{thing!r}'" or just something invalid like "def fo!o(): pass". 
+ try: + peek = orig_tokens[orig_i + 1] + except IndexError: + pass + else: + has_invalid_syntax = (has_invalid_syntax or peek.type == tokenize.NAME) + + break + + last_place -= 1 + looking_for_name = not looking_for_name + + # The "!" is just by itself or in a bad spot. Let it error later if it's wrong. + # Also allows other token transformers to work with it without erroring early. + if has_invalid_syntax or last_place == len(new_tokens): + new_tokens.append(tok) + continue + + # Insert "_IMPORTLIB_MARKER(" just before the inline import expression. + old_first = new_tokens[last_place] + old_f_row, old_f_col = old_first.start + + new_tokens[last_place:last_place] = [ + old_first._replace(type=tokenize.NAME, string="_IMPORTLIB_MARKER", end=(old_f_row, old_f_col + 17)), + tokenize.TokenInfo( + tokenize.OP, + "(", + (old_f_row, old_f_col + 17), + (old_f_row, old_f_col + 18), + old_first.line, + ), + ] + + # Adjust the positions of the following tokens within the inline import expression. + new_tokens[last_place + 2:] = (offset_token_horizontal(tok, 18) for tok in new_tokens[last_place + 2:]) + + # Add a closing parenthesis. + (end_row, end_col) = new_tokens[-1].end + line = new_tokens[-1].line + end_paren_token = tokenize.TokenInfo(tokenize.OP, ")", (end_row, end_col), (end_row, end_col + 1), line) + new_tokens.append(end_paren_token) + + # Fix the positions of the rest of the tokens on the same line. + fixed_line_tokens: list[tokenize.TokenInfo] = [] + offset_line_horizontal(orig_tokens, orig_i, line=new_tokens[-1].start[0], offset=18) + + # Check the rest of the line for inline import expressions. + new_tokens.extend(transform_tokens(fixed_line_tokens)) + + else: + new_tokens.append(tok) + + return new_tokens + + +def transform_source(source: Union[str, ReadableBuffer]) -> str: + """Replace and wrap inline import expressions in source code so that it has syntax, with explicit markers for + where to perform the imports. + """ + + if isinstance(source, str): + source = source.encode("utf-8") + stream = BytesIO(source) + encoding, _ = tokenize.detect_encoding(stream.readline) + stream.seek(0) + tokens_list = transform_tokens(tokenize.tokenize(stream.readline)) + return tokenize.untokenize(tokens_list).decode(encoding) + + +# ======== AST modification. + + +class InlineImportTransformer(ast.NodeTransformer): + """An AST transformer that replaces '_IMPORTLIB_MARKER(...)' with '__import__("importlib").import_module(...)'.""" + + @classmethod + def _collapse_attributes(cls, node: Union[ast.Attribute, ast.Name]) -> str: + if isinstance(node, ast.Name): + return node.id + + if not ( + isinstance(node, ast.Attribute) # pyright: ignore[reportUnnecessaryIsInstance] + and isinstance(node.value, (ast.Attribute, ast.Name)) + ): + msg = "Only names and attribute access (dot operator) can be within the inline import expression." 
+ raise SyntaxError(msg) # noqa: TRY004 + + return cls._collapse_attributes(node.value) + f".{node.attr}" + + def visit_Call(self, node: ast.Call) -> ast.AST: + """Replace the _IMPORTLIB_MARKER calls with a valid inline import expression.""" + + if ( + isinstance(node.func, ast.Name) + and node.func.id == "_IMPORTLIB_MARKER" + and len(node.args) == 1 + and isinstance(node.args[0], (ast.Attribute, ast.Name)) + ): + node.func = ast.Attribute( + value=ast.Call( + func=ast.Name(id="__import__", ctx=ast.Load()), + args=[ast.Constant(value="importlib")], + keywords=[], + ), + attr="import_module", + ctx=ast.Load(), + ) + node.args[0] = ast.Constant(value=self._collapse_attributes(node.args[0])) + + return self.generic_visit(node) + + +def transform_ast(tree: ast.AST) -> ast.Module: + """Walk through an AST and fix it to turn the _IMPORTLIB_MARKER(...) expressions into valid import statements.""" + + return ast.fix_missing_locations(InlineImportTransformer().visit(tree)) + + +def copy_annotations(original_func: Callable[P, T]) -> Callable[[Callable[P, T]], Callable[P, T]]: + """Overrides annotations, thus lying, but it works for the final annotations that the *user* sees on the decorated func.""" + + @functools.wraps(original_func) + def inner(new_func: Callable[P, T]) -> Callable[P, T]: + return new_func + + return inner + + +# Some of the parameter annotations are too narrow or wide, but they should be "overriden" by this decorator. +@copy_annotations(ast.parse) # type: ignore +def parse( + source: Union[str, ReadableBuffer], + filename: str = "", + mode: str = "exec", + *, + type_comments: bool = False, + feature_version: Optional[Tuple[int, int]] = None, +) -> ast.Module: + """Convert source code with inline import expressions to an AST. Has the same signature as ast.parse.""" + + return transform_ast( + ast.parse( + transform_source(source), + filename, + mode, + type_comments=type_comments, + feature_version=feature_version, + ) + ) diff --git a/jishaku/repl/compilation.py b/jishaku/repl/compilation.py index 32204941..d14fc6ce 100644 --- a/jishaku/repl/compilation.py +++ b/jishaku/repl/compilation.py @@ -17,16 +17,15 @@ import linecache import typing -import import_expression # type: ignore - +from jishaku import inline_import from jishaku.functools import AsyncSender from jishaku.repl.scope import Scope from jishaku.repl.walkers import KeywordTransformer -CORO_CODE = f""" -async def _repl_coroutine({{0}}): + +CORO_CODE = """ +async def _repl_coroutine({0}): import asyncio - from importlib import import_module as {import_expression.constants.IMPORTER} import aiohttp import discord @@ -51,8 +50,8 @@ def wrap_code(code: str, args: str = '', auto_return: bool = True) -> ast.Module Also adds inline import expression support. 
""" - user_code: ast.Module = import_expression.parse(code, mode='exec') # type: ignore - mod: ast.Module = import_expression.parse(CORO_CODE.format(args), mode='exec') # type: ignore + user_code: ast.Module = inline_import.parse(code, mode='exec') # type: ignore + mod: ast.Module = inline_import.parse(CORO_CODE.format(args), mode='exec') # type: ignore for node in ast.walk(mod): node.lineno = -100_000 diff --git a/jishaku/repl/disassembly.py b/jishaku/repl/disassembly.py index d4da76dc..0d6556f0 100644 --- a/jishaku/repl/disassembly.py +++ b/jishaku/repl/disassembly.py @@ -17,21 +17,21 @@ import types import typing -import import_expression # type: ignore import opcode +from jishaku import inline_import from jishaku.repl.scope import Scope -CORO_CODE = f""" + +CORO_CODE = """ import asyncio import discord from discord.ext import commands -from importlib import import_module as {import_expression.constants.IMPORTER} import jishaku -async def _repl_coroutine({{0}}): +async def _repl_coroutine({0}): pass """ @@ -45,8 +45,8 @@ def wrap_code(code: str, args: str = '') -> ast.Module: it's implemented separately here. """ - user_code: ast.Module = import_expression.parse(code, mode='exec') # type: ignore - mod: ast.Module = import_expression.parse(CORO_CODE.format(args), mode='exec') # type: ignore + user_code: ast.Module = inline_import.parse(code, mode='exec') # type: ignore + mod: ast.Module = inline_import.parse(CORO_CODE.format(args), mode='exec') # type: ignore definition = mod.body[-1] # async def ...: assert isinstance(definition, ast.AsyncFunctionDef) @@ -201,7 +201,7 @@ def create_tree(code: str, use_ansi: bool = True) -> str: Compiles code into an AST tree and then formats it """ - user_code = import_expression.parse(code, mode='exec') # type: ignore + user_code = inline_import.parse(code, mode='exec') # type: ignore return '\n'.join(format_ast_node(user_code, use_ansi=use_ansi)) diff --git a/requirements/_.txt b/requirements/_.txt index a9eb92e0..6e0d287a 100644 --- a/requirements/_.txt +++ b/requirements/_.txt @@ -1,7 +1,6 @@ braceexpand >= 0.1.7 click >= 8.1.7 discord.py >= 2.3.2 -import_expression >= 1.1.4, < 2.0.0 tabulate >= 0.9.0 typing-extensions >= 4.3, < 5 importlib_metadata >= 3.7.0; python_version < "3.10" diff --git a/tests/test_inline_import.py b/tests/test_inline_import.py new file mode 100644 index 00000000..72da4def --- /dev/null +++ b/tests/test_inline_import.py @@ -0,0 +1,206 @@ +import ipaddress +import urllib.parse +from typing import TYPE_CHECKING, Any, Dict + +import pytest +from jishaku import inline_import + + +@pytest.mark.parametrize( + "test_source, expected_result", + [ + ( + "collections!.Counter(urllib.parse!.quote('foo'))", + "_IMPORTLIB_MARKER(collections).Counter(_IMPORTLIB_MARKER(urllib.parse).quote('foo'))", + ), + ("ipaddress!.IPV6LENGTH", "_IMPORTLIB_MARKER(ipaddress).IPV6LENGTH"), + ("urllib.parse!.quote('?')", "_IMPORTLIB_MARKER(urllib.parse).quote('?')"), + ], +) +def test_transform_source(test_source: str, expected_result: str) -> None: + retokenized_source = inline_import.transform_source(test_source) + assert retokenized_source == expected_result + + +@pytest.mark.parametrize( + "test_source, expected_result", + [ + ("collections!.Counter(urllib.parse!.quote('foo'))", {"f": 1, "o": 2}), + ("ipaddress!.IPV6LENGTH", ipaddress.IPV6LENGTH), + ("urllib.parse!.quote('?')", urllib.parse.quote("?")), + ], +) +def test_parse(test_source: str, expected_result: Any) -> None: + tree = inline_import.parse(test_source, mode="eval") + code = compile(tree, 
"", "eval") + result = eval(code) + + assert result == expected_result + + +@pytest.mark.parametrize( + "test_fstring, expected_result", + [ + ("f'{value!r}'", "'Here I am'"), + ("f'{value!r:20}'", "'Here I am' "), + ("f'{value=!r}'", "value='Here I am'"), + ("f'{value = !r}'", "value = 'Here I am'"), + ("f'{value=!r:20}'", "value='Here I am' "), + ("f'{value = !r:20}'", "value = 'Here I am' "), + ], +) +def test_regular_fstring(test_fstring: str, expected_result: Any) -> None: + globals_ = {"value": "Here I am"} + + tree = inline_import.parse(test_fstring, mode="eval") + code = compile(tree, "", "eval") + result = eval(code, globals_) + + assert result == expected_result + + +@pytest.mark.parametrize( + "invalid_expr", + [ + "!a", + "a.!b", + "!a.b", + "a!.b!", + "a.b!.c!", + "a!.b!.c", + "a.b.!c", + "a.!b.c", + "a.!b.!c" "!a.b.c", + "!a.b.!c", + "!a.!b.c", + "!a.!b.!c" "a!b", + "ab.bc.d!e", + "ab.b!c", + ], +) +def test_invalid_attribute_syntax(invalid_expr: str) -> None: + with pytest.raises(SyntaxError): + _ = inline_import.parse(invalid_expr) + + +def test_import_op_as_attr_name() -> None: + with pytest.raises(SyntaxError): + _ = inline_import.parse("a.!.b") + + +@pytest.mark.parametrize("test_source", ["del a!.b", "a!.b = 1", "del a.b.c!.d", "a.b.c!.d = 1"]) +def test_del_store_import(test_source: str) -> None: + tree = inline_import.parse(test_source) + _ = compile(tree, "", "exec") + + +@pytest.mark.parametrize("test_source", ["del a!", "a! = 1", "del a.b!", "a.b! = 1"]) +def test_invalid_del_store_import(test_source: str) -> None: + # TODO: Change test so it doesn't hide why test_del_store_import might fail. + + with pytest.raises( + ( + ValueError, # raised by builtins.compile + SyntaxError, # raised by import_expression.parse + ) + ): + _ = inline_import.parse(test_source) + + +def test_lone_import_op() -> None: + with pytest.raises(SyntaxError): + _ = inline_import.parse("!") + + +@pytest.mark.parametrize( + "invalid_source", + [ + "def foo(x!): pass", + "def foo(*x!): pass", + "def foo(**y!): pass", + "def foo(*, z!): pass", + # note space around equals sign: + # class Y(Z!=1) is valid if Z.__ne__ returns a class + "class Y(Z! = 1): pass", + ], +) +def test_invalid_argument_syntax(invalid_source: str) -> None: + with pytest.raises(SyntaxError): + _ = inline_import.parse(invalid_source) + + +@pytest.mark.parametrize( + "invalid_source", + [ + "def !foo(y): pass", + "def fo!o(y): pass", + "def foo!(y): pass", + "class X!: pass", + "class Fo!o: pass", + "class !Foo: pass", + # note space around equals sign: + # class Y(Z!=1) is valid if Z.__ne__ returns a class + "class Y(Z! 
= 1): pass", + ], +) +def test_invalid_def_syntax(invalid_source: str) -> None: + with pytest.raises(SyntaxError): + _ = inline_import.parse(invalid_source, "") + + +def test_kwargs() -> None: + import collections + + tree = inline_import.parse("dict(x=collections!)", mode="eval") + code = compile(tree, "", "eval") + x = eval(code)["x"] + + assert x is collections + + +@pytest.mark.parametrize( + "test_source, annotation_var", + [ + ("def test_func() -> typing!.Any: pass", "return"), + ("def test_func(x: typing!.Any): pass", "x"), + ("def test_func(x: typing!.Any = 1): pass", "x"), + ], +) +def test_typehint_conversion(test_source: str, annotation_var: str) -> None: + globals_: Dict[str, Any] = {} + + tree = inline_import.parse(test_source) + code = compile(tree, "", "exec") + exec(code, globals_) + + test_func = globals_["test_func"] + + assert test_func.__annotations__[annotation_var] is Any + + +@pytest.mark.parametrize( + "invalid_source", + [ + "import x!", + "import x.y!", + "import x!.y!", + "from x!.y import z", + "from x.y import z!", + "from w.x import y as z!", + "from w.x import y as z, a as b!", + ], +) +def test_import_statement(invalid_source: str) -> None: + with pytest.raises(SyntaxError): + _ = inline_import.parse(invalid_source, mode="exec") + + +def test_importer_name_not_mangled() -> None: + # If import_expression.constants.IMPORTER.startswith('__'), this will fail. + _ = inline_import.parse("class Foo: x = io!") + + +def test_bytes_input(): + tree = inline_import.parse(b"typing!.TYPE_CHECKING", mode="eval") + code = compile(tree, "", "eval") + assert eval(code) == TYPE_CHECKING From d28520100b2bcab834dde7213e0d3c353c02b5f4 Mon Sep 17 00:00:00 2001 From: Sachaa-Thanasius Date: Sun, 14 Apr 2024 18:02:47 -0400 Subject: [PATCH 2/6] Fix a 3.8 bug and adjust style. - Fix test break, since `tokenize.tokenize` has buggy behavior that wasn't backported. - See https://github.com/python/cpython/issues/79288 and https://github.com/python/cpython/issues/88833. - Adjust typing using so everything from typing is prepended with `typing.`. --- jishaku/inline_import.py | 59 +++++++++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/jishaku/inline_import.py b/jishaku/inline_import.py index b5214260..8138343b 100644 --- a/jishaku/inline_import.py +++ b/jishaku/inline_import.py @@ -2,7 +2,7 @@ """ jishaku.inline_import -~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~ Logic for parsing Python with inline import syntax. 
@@ -13,19 +13,20 @@ import ast import functools +import io +import sys import tokenize -from io import BytesIO -from typing import TYPE_CHECKING, Callable, Iterable, List, Optional, Tuple, TypeVar, Union +import typing -if TYPE_CHECKING: +if typing.TYPE_CHECKING: from typing_extensions import ParamSpec, Buffer as ReadableBuffer P = ParamSpec("P") else: ReadableBuffer = bytes - P = [TypeVar("P")] + P = [typing.TypeVar("P")] -T = TypeVar("T") +T = typing.TypeVar("T") __all__ = ("parse",) @@ -43,7 +44,7 @@ def offset_token_horizontal(tok: tokenize.TokenInfo, offset: int) -> tokenize.To def offset_line_horizontal( - tokens: List[tokenize.TokenInfo], + tokens: typing.List[tokenize.TokenInfo], start_index: int = 0, *, line: int, @@ -52,11 +53,12 @@ def offset_line_horizontal( """Takes a list of tokens and changes the offset of some of the tokens in place.""" for i, tok in enumerate(tokens[start_index:], start=start_index): - if line == tok.start[0]: - tokens[i] = offset_token_horizontal(tok, offset) + if tok.start[0] != line: + break + tokens[i] = offset_token_horizontal(tok, offset) -def transform_tokens(tokens: Iterable[tokenize.TokenInfo]) -> List[tokenize.TokenInfo]: +def transform_tokens(tokens: typing.Iterable[tokenize.TokenInfo]) -> typing.List[tokenize.TokenInfo]: """Find the inline import expressions in a list of tokens and replace the relevant tokens to wrap the imported modules with '_IMPORTLIB_MARKER(...)'. @@ -64,7 +66,7 @@ def transform_tokens(tokens: Iterable[tokenize.TokenInfo]) -> List[tokenize.Toke """ orig_tokens = list(tokens) - new_tokens: list[tokenize.TokenInfo] = [] + new_tokens: typing.List[tokenize.TokenInfo] = [] for orig_i, tok in enumerate(orig_tokens): # "!" is only an OP in >=3.12. @@ -125,7 +127,7 @@ def transform_tokens(tokens: Iterable[tokenize.TokenInfo]) -> List[tokenize.Toke new_tokens.append(end_paren_token) # Fix the positions of the rest of the tokens on the same line. - fixed_line_tokens: list[tokenize.TokenInfo] = [] + fixed_line_tokens: typing.List[tokenize.TokenInfo] = [] offset_line_horizontal(orig_tokens, orig_i, line=new_tokens[-1].start[0], offset=18) # Check the rest of the line for inline import expressions. @@ -134,20 +136,39 @@ def transform_tokens(tokens: Iterable[tokenize.TokenInfo]) -> List[tokenize.Toke else: new_tokens.append(tok) + # Hack to get around a bug where code that ends in a comment, but no newline, has an extra + # NEWLINE token added in randomly. This patch wasn't backported to 3.8. + # https://github.com/python/cpython/issues/79288 + # https://github.com/python/cpython/issues/88833 + if sys.version_info < (3, 9): + if len(new_tokens) >= 4 and ( + new_tokens[-4].type == tokenize.COMMENT + and new_tokens[-3].type == tokenize.NL + and new_tokens[-2].type == tokenize.NEWLINE + and new_tokens[-1].type == tokenize.ENDMARKER + ): + del new_tokens[-2] + return new_tokens -def transform_source(source: Union[str, ReadableBuffer]) -> str: +def transform_source(source: typing.Union[str, ReadableBuffer]) -> str: """Replace and wrap inline import expressions in source code so that it has syntax, with explicit markers for where to perform the imports. 
""" if isinstance(source, str): source = source.encode("utf-8") - stream = BytesIO(source) + stream = io.BytesIO(source) encoding, _ = tokenize.detect_encoding(stream.readline) stream.seek(0) tokens_list = transform_tokens(tokenize.tokenize(stream.readline)) + try: + if tokens_list[1].type == tokenize.COMMENT: + import pprint + pprint.pprint(tokens_list) + except IndexError: + pass return tokenize.untokenize(tokens_list).decode(encoding) @@ -158,7 +179,7 @@ class InlineImportTransformer(ast.NodeTransformer): """An AST transformer that replaces '_IMPORTLIB_MARKER(...)' with '__import__("importlib").import_module(...)'.""" @classmethod - def _collapse_attributes(cls, node: Union[ast.Attribute, ast.Name]) -> str: + def _collapse_attributes(cls, node: typing.Union[ast.Attribute, ast.Name]) -> str: if isinstance(node, ast.Name): return node.id @@ -200,11 +221,11 @@ def transform_ast(tree: ast.AST) -> ast.Module: return ast.fix_missing_locations(InlineImportTransformer().visit(tree)) -def copy_annotations(original_func: Callable[P, T]) -> Callable[[Callable[P, T]], Callable[P, T]]: +def copy_annotations(original_func: typing.Callable[P, T]) -> typing.Callable[[typing.Callable[P, T]], typing.Callable[P, T]]: """Overrides annotations, thus lying, but it works for the final annotations that the *user* sees on the decorated func.""" @functools.wraps(original_func) - def inner(new_func: Callable[P, T]) -> Callable[P, T]: + def inner(new_func: typing.Callable[P, T]) -> typing.Callable[P, T]: return new_func return inner @@ -213,12 +234,12 @@ def inner(new_func: Callable[P, T]) -> Callable[P, T]: # Some of the parameter annotations are too narrow or wide, but they should be "overriden" by this decorator. @copy_annotations(ast.parse) # type: ignore def parse( - source: Union[str, ReadableBuffer], + source: typing.Union[str, ReadableBuffer], filename: str = "", mode: str = "exec", *, type_comments: bool = False, - feature_version: Optional[Tuple[int, int]] = None, + feature_version: typing.Optional[typing.Tuple[int, int]] = None, ) -> ast.Module: """Convert source code with inline import expressions to an AST. Has the same signature as ast.parse.""" From fc395b0159d94165ecbdd56cbababc6c5b8a80bd Mon Sep 17 00:00:00 2001 From: Sachaa-Thanasius Date: Sun, 14 Apr 2024 18:12:51 -0400 Subject: [PATCH 3/6] Add a test for the 3.8 newline token issue. --- tests/test_inline_import.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/test_inline_import.py b/tests/test_inline_import.py index 72da4def..61bd7074 100644 --- a/tests/test_inline_import.py +++ b/tests/test_inline_import.py @@ -200,7 +200,14 @@ def test_importer_name_not_mangled() -> None: _ = inline_import.parse("class Foo: x = io!") -def test_bytes_input(): +def test_bytes_input() -> None: tree = inline_import.parse(b"typing!.TYPE_CHECKING", mode="eval") code = compile(tree, "", "eval") assert eval(code) == TYPE_CHECKING + +@pytest.mark.parametrize("test_input", ["# comment here", "print('hello')\n# comment at end"]) +def test_comments_input(test_input: str) -> None: + # Check that python3.8's adding of a bad NEWLINE token is accounted for when code ends with a comment and no newline. 
+ tree = inline_import.parse(test_input, "", mode="exec") + code = compile(tree, "", "exec") + eval(code, None, None) \ No newline at end of file From 347936ef1da687aa768d735c66a947fc37a55f87 Mon Sep 17 00:00:00 2001 From: Sachaa-Thanasius Date: Sun, 14 Apr 2024 18:14:26 -0400 Subject: [PATCH 4/6] Run isort --- jishaku/inline_import.py | 4 ++-- jishaku/repl/compilation.py | 1 - jishaku/repl/disassembly.py | 1 - tests/test_inline_import.py | 1 + 4 files changed, 3 insertions(+), 4 deletions(-) diff --git a/jishaku/inline_import.py b/jishaku/inline_import.py index 8138343b..0670cea7 100644 --- a/jishaku/inline_import.py +++ b/jishaku/inline_import.py @@ -18,9 +18,9 @@ import tokenize import typing - if typing.TYPE_CHECKING: - from typing_extensions import ParamSpec, Buffer as ReadableBuffer + from typing_extensions import Buffer as ReadableBuffer + from typing_extensions import ParamSpec P = ParamSpec("P") else: ReadableBuffer = bytes diff --git a/jishaku/repl/compilation.py b/jishaku/repl/compilation.py index d14fc6ce..4f8ccaa8 100644 --- a/jishaku/repl/compilation.py +++ b/jishaku/repl/compilation.py @@ -22,7 +22,6 @@ from jishaku.repl.scope import Scope from jishaku.repl.walkers import KeywordTransformer - CORO_CODE = """ async def _repl_coroutine({0}): import asyncio diff --git a/jishaku/repl/disassembly.py b/jishaku/repl/disassembly.py index 0d6556f0..06ff4863 100644 --- a/jishaku/repl/disassembly.py +++ b/jishaku/repl/disassembly.py @@ -22,7 +22,6 @@ from jishaku import inline_import from jishaku.repl.scope import Scope - CORO_CODE = """ import asyncio diff --git a/tests/test_inline_import.py b/tests/test_inline_import.py index 61bd7074..3a0de4df 100644 --- a/tests/test_inline_import.py +++ b/tests/test_inline_import.py @@ -3,6 +3,7 @@ from typing import TYPE_CHECKING, Any, Dict import pytest + from jishaku import inline_import From cd86fab9e2599f6d06e9b00fbfefcf8b3a8015c9 Mon Sep 17 00:00:00 2001 From: Sachaa-Thanasius Date: Sun, 14 Apr 2024 18:55:32 -0400 Subject: [PATCH 5/6] Fixed missing newline and removed debug code. --- jishaku/inline_import.py | 6 ------ tests/test_inline_import.py | 3 ++- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/jishaku/inline_import.py b/jishaku/inline_import.py index 0670cea7..481f2d09 100644 --- a/jishaku/inline_import.py +++ b/jishaku/inline_import.py @@ -163,12 +163,6 @@ def transform_source(source: typing.Union[str, ReadableBuffer]) -> str: encoding, _ = tokenize.detect_encoding(stream.readline) stream.seek(0) tokens_list = transform_tokens(tokenize.tokenize(stream.readline)) - try: - if tokens_list[1].type == tokenize.COMMENT: - import pprint - pprint.pprint(tokens_list) - except IndexError: - pass return tokenize.untokenize(tokens_list).decode(encoding) diff --git a/tests/test_inline_import.py b/tests/test_inline_import.py index 3a0de4df..4702f64d 100644 --- a/tests/test_inline_import.py +++ b/tests/test_inline_import.py @@ -206,9 +206,10 @@ def test_bytes_input() -> None: code = compile(tree, "", "eval") assert eval(code) == TYPE_CHECKING + @pytest.mark.parametrize("test_input", ["# comment here", "print('hello')\n# comment at end"]) def test_comments_input(test_input: str) -> None: # Check that python3.8's adding of a bad NEWLINE token is accounted for when code ends with a comment and no newline. 
tree = inline_import.parse(test_input, "", mode="exec") code = compile(tree, "", "exec") - eval(code, None, None) \ No newline at end of file + eval(code, None, None) From 675b30755bd94d7e779ade410a7b89a6b95935e2 Mon Sep 17 00:00:00 2001 From: Thanos <111999343+Sachaa-Thanasius@users.noreply.github.com> Date: Tue, 16 Apr 2024 09:31:51 -0400 Subject: [PATCH 6/6] Adjust `copy_annotations()` helper and `typing_extensions` imports. - `typing_extensions` is a direct dependency, so using it directly is fine. - `copy_annotations()` was using functools.wraps with the wrong function as the wrapper. --- jishaku/inline_import.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/jishaku/inline_import.py b/jishaku/inline_import.py index 481f2d09..a666ff78 100644 --- a/jishaku/inline_import.py +++ b/jishaku/inline_import.py @@ -18,14 +18,10 @@ import tokenize import typing -if typing.TYPE_CHECKING: - from typing_extensions import Buffer as ReadableBuffer - from typing_extensions import ParamSpec - P = ParamSpec("P") -else: - ReadableBuffer = bytes - P = [typing.TypeVar("P")] +from typing_extensions import Buffer as ReadableBuffer +from typing_extensions import ParamSpec +P = ParamSpec("P") T = typing.TypeVar("T") @@ -215,18 +211,22 @@ def transform_ast(tree: ast.AST) -> ast.Module: return ast.fix_missing_locations(InlineImportTransformer().visit(tree)) -def copy_annotations(original_func: typing.Callable[P, T]) -> typing.Callable[[typing.Callable[P, T]], typing.Callable[P, T]]: - """Overrides annotations, thus lying, but it works for the final annotations that the *user* sees on the decorated func.""" +def copy_annotations( + original_func: typing.Callable[P, T], +) -> typing.Callable[[typing.Callable[..., typing.Any]], typing.Callable[P, T]]: + """A decorator that applies the annotations from one function onto another. - @functools.wraps(original_func) - def inner(new_func: typing.Callable[P, T]) -> typing.Callable[P, T]: - return new_func + It can be a lie, but it aids the type checker and any IDE intellisense. + """ + + def inner(new_func: typing.Callable[..., typing.Any]) -> typing.Callable[P, T]: + return functools.update_wrapper(new_func, original_func, ("__doc__", "__annotations__")) # type: ignore return inner # Some of the parameter annotations are too narrow or wide, but they should be "overriden" by this decorator. -@copy_annotations(ast.parse) # type: ignore +@copy_annotations(ast.parse) def parse( source: typing.Union[str, ReadableBuffer], filename: str = "",
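
---

For context, a minimal usage sketch of the new `jishaku.inline_import.parse` entry point introduced by this series, mirroring the assertions in `tests/test_inline_import.py` from patch 1. It is not part of the patches themselves: it assumes a checkout with these patches applied, and the `"<string>"` filename passed to `compile` is just the conventional placeholder, not something the module requires.

```python
# Hypothetical usage sketch -- not part of the patch series above.
import collections

from jishaku import inline_import

# The tokenizer pass wraps "name!" in _IMPORTLIB_MARKER(...), and the AST pass
# rewrites that marker into __import__("importlib").import_module("name").
print(inline_import.transform_source("ipaddress!.IPV6LENGTH"))
# -> _IMPORTLIB_MARKER(ipaddress).IPV6LENGTH  (exact output asserted in test_transform_source)

# parse() keeps the same signature as ast.parse, so the result compiles and
# evaluates like any other AST.
tree = inline_import.parse("collections!.Counter('foo')", mode="eval")
code = compile(tree, "<string>", "eval")
assert eval(code) == collections.Counter("foo")  # Counter({'f': 1, 'o': 2})
```

This is also why the series can drop the `import_expression` dependency and its `IMPORTER` constant: the rewritten expression relies only on `__import__("importlib").import_module(...)`, so no helper name has to be injected into the REPL scope.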