Tipragot
628be439b8
Cela permet de ne pas avoir de problèmes de compatibilité car python est dans le git.
1280 lines
47 KiB
Python
1280 lines
47 KiB
Python
from __future__ import annotations
|
|
|
|
import os.path
|
|
import sys
|
|
import traceback
|
|
from collections import defaultdict
|
|
from typing import Callable, Final, Iterable, NoReturn, Optional, TextIO, Tuple, TypeVar
|
|
from typing_extensions import Literal, TypeAlias as _TypeAlias
|
|
|
|
from mypy import errorcodes as codes
|
|
from mypy.errorcodes import IMPORT, IMPORT_NOT_FOUND, IMPORT_UNTYPED, ErrorCode
|
|
from mypy.message_registry import ErrorMessage
|
|
from mypy.options import Options
|
|
from mypy.scope import Scope
|
|
from mypy.util import DEFAULT_SOURCE_OFFSET, is_typeshed_file
|
|
from mypy.version import __version__ as mypy_version
|
|
|
|
T = TypeVar("T")

# Show error codes for some note-level messages (these usually appear alone
# and not as a comment for a previous error-level message).
SHOW_NOTE_CODES: Final = {codes.ANNOTATION_UNCHECKED}

# Do not add notes with links to error code docs to errors with these codes.
# We can tweak this set as we get more experience about what is helpful and what is not.
HIDE_LINK_CODES: Final = {
    # This is a generic error code, so it has no useful docs
    codes.MISC,
    # These are trivial and have some custom notes (e.g. for list being invariant)
    codes.ASSIGNMENT,
    codes.ARG_TYPE,
    codes.RETURN_VALUE,
    # Undefined name/attribute errors are self-explanatory
    codes.ATTR_DEFINED,
    codes.NAME_DEFINED,
    # Overrides have a custom link to docs
    codes.OVERRIDE,
}

# Message fragments that are allowed to repeat in consecutive output lines.
# NOTE(review): not referenced in the visible portion of this file; presumably
# consulted by remove_duplicates() further down -- verify before changing.
allowed_duplicates: Final = ["@overload", "Got:", "Expected:"]

# Base URL for the "See ... for more info" notes linking an error code to its docs.
BASE_RTD_URL: Final = "https://mypy.rtfd.io/en/stable/_refs.html#code"

# Keep track of the original error code when the error code of a message is changed.
# This is used to give notes about out-of-date "type: ignore" comments.
original_error_codes: Final = {codes.LITERAL_REQ: codes.MISC, codes.TYPE_ABSTRACT: codes.MISC}
|
|
|
|
|
|
class ErrorInfo:
    """Representation of a single error message."""

    # Description of a sequence of imports that refer to the source file
    # related to this error. Each item is a (path, line number) tuple.
    import_ctx: list[tuple[str, int]]

    # The path to source file that was the source of this error.
    file: str = ""

    # The fully-qualified id of the source module for this error.
    module: str | None = None

    # The name of the type in which this error is located at.
    type: str | None = ""  # Unqualified, may be None

    # The name of the function or member in which this error is located at.
    function_or_member: str | None = ""  # Unqualified, may be None

    # The line number related to this error within file.
    line: int = 0  # -1 if unknown

    # The column number related to this error with file.
    column: int = 0  # -1 if unknown

    # The end line number related to this error within file.
    end_line: int = 0  # -1 if unknown

    # The end column number related to this error with file.
    end_column: int = 0  # -1 if unknown

    # Either 'error' or 'note'
    severity: str = ""

    # The error message.
    message: str = ""

    # The error code.
    code: ErrorCode | None = None

    # If True, we should halt build after the file that generated this error.
    blocker: bool = False

    # Only report this particular messages once per program.
    only_once: bool = False

    # Do not remove duplicate copies of this message (ignored if only_once is True).
    allow_dups: bool = False

    # Actual origin of the error message as tuple (path, line number, end line number)
    # If end line number is unknown, use line number.
    origin: tuple[str, Iterable[int]]

    # Fine-grained incremental target where this was reported
    target: str | None = None

    # If True, don't show this message in output, but still record the error (needed
    # by mypy daemon)
    hidden: bool = False

    def __init__(
        self,
        import_ctx: list[tuple[str, int]],
        *,
        file: str,
        module: str | None,
        typ: str | None,
        function_or_member: str | None,
        line: int,
        column: int,
        end_line: int,
        end_column: int,
        severity: str,
        message: str,
        code: ErrorCode | None,
        blocker: bool,
        only_once: bool,
        allow_dups: bool,
        origin: tuple[str, Iterable[int]] | None = None,
        target: str | None = None,
        priority: int = 0,
    ) -> None:
        """Store all error attributes; all except import_ctx are keyword-only.

        origin defaults to the error's own (file, [line]) location when omitted.
        """
        self.import_ctx = import_ctx
        self.file = file
        self.module = module
        self.type = typ
        self.function_or_member = function_or_member
        self.line = line
        self.column = column
        self.end_line = end_line
        self.end_column = end_column
        self.severity = severity
        self.message = message
        self.code = code
        self.blocker = blocker
        self.only_once = only_once
        self.allow_dups = allow_dups
        self.origin = origin or (file, [line])
        self.target = target
        self.priority = priority
|
|
|
|
|
|
# Type used internally to represent errors:
#   (path, line, column, end_line, end_column, severity, message, allow_dups, code)
# The path may be None for context notes that are not attached to a file.
ErrorTuple: _TypeAlias = Tuple[
    Optional[str], int, int, int, int, str, str, bool, Optional[ErrorCode]
]
|
|
|
|
|
|
class ErrorWatcher:
    """Context manager that can be used to keep track of new errors recorded
    around a given operation.

    Errors maintain a stack of such watchers. The handler is called starting
    at the top of the stack, and is propagated down the stack unless filtered
    out by one of the ErrorWatcher instances.
    """

    def __init__(
        self,
        errors: Errors,
        *,
        filter_errors: bool | Callable[[str, ErrorInfo], bool] = False,
        save_filtered_errors: bool = False,
    ):
        self.errors = errors
        self._filter = filter_errors
        self._has_new_errors = False
        # Only retain the filtered-out errors when explicitly requested.
        self._filtered: list[ErrorInfo] | None = [] if save_filtered_errors else None

    def __enter__(self) -> ErrorWatcher:
        # Push ourselves onto the shared watcher stack.
        self.errors._watchers.append(self)
        return self

    def __exit__(self, exc_type: object, exc_val: object, exc_tb: object) -> Literal[False]:
        popped = self.errors._watchers.pop()
        # Watchers must be exited in strict LIFO order.
        assert popped == self
        return False

    def on_error(self, file: str, info: ErrorInfo) -> bool:
        """Handler called when a new error is recorded.

        The default implementation just sets the has_new_errors flag

        Return True to filter out the error, preventing it from being seen by other
        ErrorWatcher further down the stack and from being recorded by Errors
        """
        self._has_new_errors = True
        if callable(self._filter):
            # Predicate decides per-error whether to filter it out.
            should_filter = self._filter(file, info)
        elif isinstance(self._filter, bool):
            should_filter = self._filter
        else:
            raise AssertionError(f"invalid error filter: {type(self._filter)}")
        if should_filter and self._filtered is not None:
            self._filtered.append(info)

        return should_filter

    def has_new_errors(self) -> bool:
        """Whether any error was recorded while this watcher was active."""
        return self._has_new_errors

    def filtered_errors(self) -> list[ErrorInfo]:
        """Return the filtered-out errors (requires save_filtered_errors=True)."""
        assert self._filtered is not None
        return self._filtered
|
|
|
|
|
|
class Errors:
    """Container for compile errors.

    This class generates and keeps tracks of compile errors and the
    current error context (nested imports).
    """

    # Map from files to generated error messages. Is an OrderedDict so
    # that it can be used to order messages based on the order the
    # files were processed.
    error_info_map: dict[str, list[ErrorInfo]]

    # optimization for legacy codebases with many files with errors
    has_blockers: set[str]

    # Files that we have reported the errors for
    flushed_files: set[str]

    # Current error context: nested import context/stack, as a list of (path, line) pairs.
    import_ctx: list[tuple[str, int]]

    # Path name prefix that is removed from all paths, if set.
    ignore_prefix: str | None = None

    # Path to current file.
    file: str = ""

    # Ignore some errors on these lines of each file
    # (path -> line -> error-codes)
    ignored_lines: dict[str, dict[int, list[str]]]

    # Lines that were skipped during semantic analysis e.g. due to ALWAYS_FALSE, MYPY_FALSE,
    # or platform/version checks. Those lines would not be type-checked.
    skipped_lines: dict[str, set[int]]

    # Lines on which an error was actually ignored.
    used_ignored_lines: dict[str, dict[int, list[str]]]

    # Files where all errors should be ignored.
    ignored_files: set[str]

    # Collection of reported only_once messages.
    only_once_messages: set[str]

    # Set to True to show "In function "foo":" messages.
    show_error_context: bool = False

    # Set to True to show column numbers in error messages.
    show_column_numbers: bool = False

    # Set to True to show end line and end column in error messages.
    # This implies `show_column_numbers`.
    show_error_end: bool = False

    # Set to True to show absolute file paths in error messages.
    show_absolute_path: bool = False

    # State for keeping track of the current fine-grained incremental mode target.
    # (See mypy.server.update for more about targets.)
    # Current module id.
    target_module: str | None = None
    scope: Scope | None = None

    # Have we seen an import-related error so far? If yes, we filter out other messages
    # in some cases to avoid reporting huge numbers of errors.
    seen_import_error: bool = False

    # Stack of active ErrorWatcher context managers.
    # NOTE(review): class-level mutable default -- this list is shared by every
    # Errors instance rather than per-instance. Confirm the sharing is intentional;
    # if not, it should be created in initialize() instead.
    _watchers: list[ErrorWatcher] = []
|
|
|
|
def __init__(
    self,
    options: Options,
    *,
    read_source: Callable[[str], list[str] | None] | None = None,
    hide_error_codes: bool | None = None,
) -> None:
    """Initialize the error container.

    Args:
        options: global mypy options (may later be replaced per-file via set_file)
        read_source: callback returning the source lines of a path, used for
            pretty output snippets
        hide_error_codes: per-instance override; falls back to
            options.hide_error_codes when None
    """
    self.options = options
    self.hide_error_codes = (
        hide_error_codes if hide_error_codes is not None else options.hide_error_codes
    )
    # We use fscache to read source code when showing snippets.
    self.read_source = read_source
    self.initialize()
|
|
|
|
def initialize(self) -> None:
    """(Re-)initialize all mutable per-build state to a clean slate."""
    self.error_info_map = {}
    self.flushed_files = set()
    self.import_ctx = []
    self.function_or_member = [None]
    self.ignored_lines = {}
    self.skipped_lines = {}
    # path -> line -> error codes actually silenced there; filled lazily, so
    # missing keys default to empty containers.
    self.used_ignored_lines = defaultdict(lambda: defaultdict(list))
    self.ignored_files = set()
    self.only_once_messages = set()
    self.has_blockers = set()
    self.scope = None
    self.target_module = None
    self.seen_import_error = False
|
|
|
|
def reset(self) -> None:
    """Discard all accumulated errors and error context."""
    self.initialize()
|
|
|
|
def set_ignore_prefix(self, prefix: str) -> None:
    """Set path prefix that will be removed from all paths."""
    normalized = os.path.normpath(prefix)
    # Ensure a trailing separator so the prefix matches whole path components.
    if os.path.basename(normalized):
        normalized += os.sep
    self.ignore_prefix = normalized
|
|
|
|
def simplify_path(self, file: str) -> str:
    """Return a user-facing form of *file* (absolute, or prefix-trimmed)."""
    if self.options.show_absolute_path:
        return os.path.abspath(file)
    return remove_path_prefix(os.path.normpath(file), self.ignore_prefix)
|
|
|
|
def set_file(
    self, file: str, module: str | None, options: Options, scope: Scope | None = None
) -> None:
    """Set the path and module id of the current file."""
    # The path will be simplified later, in render_messages. That way
    #  * 'file' is always a key that uniquely identifies a source file
    #    that mypy read (simplified paths might not be unique); and
    #  * we only have to simplify in one place, while still supporting
    #    reporting errors for files other than the one currently being
    #    processed.
    self.file = file
    self.target_module = module
    self.scope = scope
    # Options may differ per file (e.g. per-module config sections).
    self.options = options
|
|
|
|
def set_file_ignored_lines(
    self, file: str, ignored_lines: dict[int, list[str]], ignore_all: bool = False
) -> None:
    """Record the "type: ignore" lines (line -> codes) for a file.

    If ignore_all is True, every error in the file is suppressed.
    """
    self.ignored_lines[file] = ignored_lines
    if ignore_all:
        self.ignored_files.add(file)
|
|
|
|
def set_skipped_lines(self, file: str, skipped_lines: set[int]) -> None:
    """Record lines skipped during semantic analysis (never type-checked)."""
    self.skipped_lines[file] = skipped_lines
|
|
|
|
def current_target(self) -> str | None:
    """Retrieve the current target from the associated scope.

    If there is no associated scope, use the target module.
    """
    scope = self.scope
    return self.target_module if scope is None else scope.current_target()
|
|
|
|
def current_module(self) -> str | None:
    """Return the id of the module currently being processed, if any."""
    return self.target_module
|
|
|
|
def import_context(self) -> list[tuple[str, int]]:
    """Return a copy of the import context."""
    # Shallow copy so callers cannot mutate our stack.
    return list(self.import_ctx)
|
|
|
|
def set_import_context(self, ctx: list[tuple[str, int]]) -> None:
    """Replace the entire import context with a copy of *ctx*."""
    self.import_ctx = list(ctx)
|
|
|
|
def report(
    self,
    line: int,
    column: int | None,
    message: str,
    code: ErrorCode | None = None,
    *,
    blocker: bool = False,
    severity: str = "error",
    file: str | None = None,
    only_once: bool = False,
    allow_dups: bool = False,
    origin_span: Iterable[int] | None = None,
    offset: int = 0,
    end_line: int | None = None,
    end_column: int | None = None,
) -> None:
    """Report message at the given line using the current error context.

    Args:
        line: line number of error
        column: column number of error
        message: message to report
        code: error code (defaults to 'misc'; not shown for notes)
        blocker: if True, don't continue analysis after this error
        severity: 'error' or 'note'
        file: if non-None, override current file as context
        only_once: if True, only report this exact message once per build
        allow_dups: if True, allow duplicate copies of this message (ignored if only_once)
        origin_span: if non-None, override current context as origin
            (type: ignores have effect here)
        offset: indent the message by this many spaces (for nested notes)
        end_line: if non-None, override current context as end
        end_column: if non-None, end column; defaults to column + 1 when known
    """
    # Derive the enclosing class/function names from the semantic scope, if any.
    if self.scope:
        type = self.scope.current_type_name()
        if self.scope.ignored > 0:
            type = None  # Omit type context if nested function
        function = self.scope.current_function_name()
    else:
        type = None
        function = None

    # Normalize unknown positions to -1 sentinels.
    if column is None:
        column = -1
    if end_column is None:
        if column == -1:
            end_column = -1
        else:
            end_column = column + 1

    if file is None:
        file = self.file
    if offset:
        # Indentation marks a note as a continuation of the previous message.
        message = " " * offset + message

    if origin_span is None:
        origin_span = [line]

    if end_line is None:
        end_line = line

    # Blockers keep code=None; everything else defaults to 'misc'.
    code = code or (codes.MISC if not blocker else None)

    info = ErrorInfo(
        import_ctx=self.import_context(),
        file=file,
        module=self.current_module(),
        typ=type,
        function_or_member=function,
        line=line,
        column=column,
        end_line=end_line,
        end_column=end_column,
        severity=severity,
        message=message,
        code=code,
        blocker=blocker,
        only_once=only_once,
        allow_dups=allow_dups,
        origin=(self.file, origin_span),
        target=self.current_target(),
    )
    self.add_error_info(info)
|
|
|
|
def _add_error_info(self, file: str, info: ErrorInfo) -> None:
    """Record *info* for *file* unconditionally (no ignore-comment handling).

    Callers are expected to have applied "type: ignore" logic already;
    add_error_info is the public entry point that does so.
    """
    assert file not in self.flushed_files
    # process the stack of ErrorWatchers before modifying any internal state
    # in case we need to filter out the error entirely
    if self._filter_error(file, info):
        return
    if file not in self.error_info_map:
        self.error_info_map[file] = []
    self.error_info_map[file].append(info)
    if info.blocker:
        self.has_blockers.add(file)
    if info.code in (IMPORT, IMPORT_UNTYPED, IMPORT_NOT_FOUND):
        # Later non-import errors may be suppressed (see add_error_info).
        self.seen_import_error = True
|
|
|
|
def _filter_error(self, file: str, info: ErrorInfo) -> bool:
    """
    process ErrorWatcher stack from top to bottom,
    stopping early if error needs to be filtered out
    """
    for watcher in reversed(self._watchers):
        if watcher.on_error(file, info):
            return True
    return False
|
|
|
|
def add_error_info(self, info: ErrorInfo) -> None:
    """Record an error, applying "type: ignore" and de-duplication logic.

    May also emit follow-up notes: a hint when an existing ignore comment
    does not cover this error's code, and a documentation link for the code.
    """
    file, lines = info.origin
    # process the stack of ErrorWatchers before modifying any internal state
    # in case we need to filter out the error entirely
    # NB: we need to do this both here and in _add_error_info, otherwise we
    # might incorrectly update the sets of ignored or only_once messages
    if self._filter_error(file, info):
        return
    if not info.blocker:  # Blockers cannot be ignored
        if file in self.ignored_lines:
            # Check each line in this context for "type: ignore" comments.
            # line == end_line for most nodes, so we only loop once.
            for scope_line in lines:
                if self.is_ignored_error(scope_line, info, self.ignored_lines[file]):
                    # Annotation requests us to ignore all errors on this line.
                    self.used_ignored_lines[file][scope_line].append(
                        (info.code or codes.MISC).code
                    )
                    return
        if file in self.ignored_files:
            return
    if info.only_once:
        if info.message in self.only_once_messages:
            return
        self.only_once_messages.add(info.message)
    if (
        self.seen_import_error
        and info.code not in (IMPORT, IMPORT_UNTYPED, IMPORT_NOT_FOUND)
        and self.has_many_errors()
    ):
        # Missing stubs can easily cause thousands of errors about
        # Any types, especially when upgrading to mypy 0.900,
        # which no longer bundles third-party library stubs. Avoid
        # showing too many errors to make it easier to see
        # import-related errors.
        info.hidden = True
        self.report_hidden_errors(info)
    self._add_error_info(file, info)
    ignored_codes = self.ignored_lines.get(file, {}).get(info.line, [])
    if ignored_codes and info.code:
        # Something is ignored on the line, but not this error, so maybe the error
        # code is incorrect.
        msg = f'Error code "{info.code.code}" not covered by "type: ignore" comment'
        if info.code in original_error_codes:
            # If there seems to be a "type: ignore" with a stale error
            # code, report a more specific note.
            old_code = original_error_codes[info.code].code
            if old_code in ignored_codes:
                msg = (
                    f'Error code changed to {info.code.code}; "type: ignore" comment '
                    + "may be out of date"
                )
        note = ErrorInfo(
            import_ctx=info.import_ctx,
            file=info.file,
            module=info.module,
            typ=info.type,
            function_or_member=info.function_or_member,
            line=info.line,
            column=info.column,
            end_line=info.end_line,
            end_column=info.end_column,
            severity="note",
            message=msg,
            code=None,
            blocker=False,
            only_once=False,
            allow_dups=False,
        )
        self._add_error_info(file, note)
    if (
        self.options.show_error_code_links
        and not self.options.hide_error_codes
        and info.code is not None
        and info.code not in HIDE_LINK_CODES
    ):
        message = f"See {BASE_RTD_URL}-{info.code.code} for more info"
        if message in self.only_once_messages:
            return
        self.only_once_messages.add(message)
        # Emit the docs link as a high-priority, once-per-build note.
        info = ErrorInfo(
            import_ctx=info.import_ctx,
            file=info.file,
            module=info.module,
            typ=info.type,
            function_or_member=info.function_or_member,
            line=info.line,
            column=info.column,
            end_line=info.end_line,
            end_column=info.end_column,
            severity="note",
            message=message,
            code=info.code,
            blocker=False,
            only_once=True,
            allow_dups=False,
            priority=20,
        )
        self._add_error_info(file, info)
|
|
|
|
def has_many_errors(self) -> bool:
    """Whether the error count crossed the configured reporting threshold.

    A negative threshold disables the check entirely. Both the number of
    files with errors and the total number of errors are compared against
    the same threshold.
    """
    threshold = self.options.many_errors_threshold
    if threshold < 0:
        return False
    if len(self.error_info_map) >= threshold:
        return True
    total = sum(len(errors) for errors in self.error_info_map.values())
    return total >= threshold
|
|
|
|
def report_hidden_errors(self, info: ErrorInfo) -> None:
    """Emit a once-per-build note explaining that most errors are suppressed.

    Called when import errors were seen and the error count is high; the
    note is anchored at the location of the error that triggered it.
    """
    message = (
        "(Skipping most remaining errors due to unresolved imports or missing stubs; "
        + "fix these first)"
    )
    if message in self.only_once_messages:
        return
    self.only_once_messages.add(message)
    new_info = ErrorInfo(
        import_ctx=info.import_ctx,
        file=info.file,
        module=info.module,
        typ=None,
        function_or_member=None,
        line=info.line,
        column=info.column,
        end_line=info.end_line,
        end_column=info.end_column,
        severity="note",
        message=message,
        code=None,
        blocker=False,
        only_once=True,
        allow_dups=False,
        origin=info.origin,
        target=info.target,
    )
    self._add_error_info(info.origin[0], new_info)
|
|
|
|
def is_ignored_error(self, line: int, info: ErrorInfo, ignores: dict[int, list[str]]) -> bool:
    """Whether *info* is silenced by the ignore comments in *ignores*.

    Disabled error codes are always treated as ignored; a bare ignore
    (empty code list) silences everything on its line; an ignore with
    codes matches the error's code or its parent code.
    """
    if info.blocker:
        # Blocking errors can never be ignored
        return False
    if info.code and not self.is_error_code_enabled(info.code):
        return True
    codes_on_line = ignores.get(line)
    if codes_on_line is None:
        return False
    if not codes_on_line:
        # Empty list means that we ignore all errors
        return True
    if info.code and self.is_error_code_enabled(info.code):
        if info.code.code in codes_on_line:
            return True
        parent = info.code.sub_code_of
        return parent is not None and parent.code in codes_on_line
    return False
|
|
|
|
def is_error_code_enabled(self, error_code: ErrorCode) -> bool:
    """Whether *error_code* is active under the current per-module options.

    An explicit disable wins over an explicit enable; disabling a parent
    code also disables its sub-codes unless they are explicitly enabled.
    """
    if self.options:
        disabled = self.options.disabled_error_codes
        enabled = self.options.enabled_error_codes
    else:
        disabled = set()
        enabled = set()

    if error_code in disabled:
        return False
    if error_code in enabled:
        return True
    if error_code.sub_code_of is not None and error_code.sub_code_of in disabled:
        return False
    return error_code.default_enabled
|
|
|
|
def clear_errors_in_targets(self, path: str, targets: set[str]) -> None:
    """Remove errors in specific fine-grained targets within a file."""
    if path in self.error_info_map:
        new_errors = []
        has_blocker = False
        for info in self.error_info_map[path]:
            if info.target not in targets:
                # Keep errors belonging to other targets.
                new_errors.append(info)
                has_blocker |= info.blocker
            elif info.only_once:
                # Allow this message to be reported again later.
                self.only_once_messages.remove(info.message)
        self.error_info_map[path] = new_errors
        if not has_blocker and path in self.has_blockers:
            self.has_blockers.remove(path)
|
|
|
|
def generate_unused_ignore_errors(self, file: str) -> None:
    """Report an error for every unused "type: ignore" comment in *file*."""
    if (
        is_typeshed_file(self.options.abs_custom_typeshed_dir if self.options else None, file)
        or file in self.ignored_files
    ):
        # Never complain about typeshed files or fully-ignored files.
        return
    ignored_lines = self.ignored_lines[file]
    used_ignored_lines = self.used_ignored_lines[file]
    for line, ignored_codes in ignored_lines.items():
        if line in self.skipped_lines[file]:
            # The line was never type-checked, so the ignore may well be needed.
            continue
        if codes.UNUSED_IGNORE.code in ignored_codes:
            # "unused-ignore" in the ignore list opts out of this very check.
            continue
        used_ignored_codes = used_ignored_lines[line]
        unused_ignored_codes = set(ignored_codes) - set(used_ignored_codes)
        # `ignore` is used
        if not ignored_codes and used_ignored_codes:
            continue
        # All codes appearing in `ignore[...]` are used
        if ignored_codes and not unused_ignored_codes:
            continue
        # Display detail only when `ignore[...]` specifies more than one error code
        unused_codes_message = ""
        if len(ignored_codes) > 1 and unused_ignored_codes:
            unused_codes_message = f"[{', '.join(sorted(unused_ignored_codes))}]"
        message = f'Unused "type: ignore{unused_codes_message}" comment'
        # Suggest a narrower sub-code when one was actually used on the line.
        for unused in unused_ignored_codes:
            narrower = set(used_ignored_codes) & codes.sub_code_map[unused]
            if narrower:
                message += f", use narrower [{', '.join(narrower)}] instead of [{unused}] code"
        # Don't use report since add_error_info will ignore the error!
        info = ErrorInfo(
            import_ctx=self.import_context(),
            file=file,
            module=self.current_module(),
            typ=None,
            function_or_member=None,
            line=line,
            column=-1,
            end_line=line,
            end_column=-1,
            severity="error",
            message=message,
            code=codes.UNUSED_IGNORE,
            blocker=False,
            only_once=False,
            allow_dups=False,
        )
        self._add_error_info(file, info)
|
|
|
|
def generate_ignore_without_code_errors(
    self, file: str, is_warning_unused_ignores: bool
) -> None:
    """Report an error for each bare "type: ignore" (no error code) in *file*."""
    if (
        is_typeshed_file(self.options.abs_custom_typeshed_dir if self.options else None, file)
        or file in self.ignored_files
    ):
        # Never complain about typeshed files or fully-ignored files.
        return

    used_ignored_lines = self.used_ignored_lines[file]

    # If the whole file is ignored, ignore it.
    if used_ignored_lines:
        _, used_codes = min(used_ignored_lines.items())
        if codes.FILE.code in used_codes:
            return

    for line, ignored_codes in self.ignored_lines[file].items():
        if ignored_codes:
            # The comment already lists explicit codes.
            continue

        # If the ignore is itself unused and that would be warned about, let
        # that error stand alone
        if is_warning_unused_ignores and not used_ignored_lines[line]:
            continue

        # Suggest the codes that were actually silenced on this line, if any.
        codes_hint = ""
        ignored_codes = sorted(set(used_ignored_lines[line]))
        if ignored_codes:
            codes_hint = f' (consider "type: ignore[{", ".join(ignored_codes)}]" instead)'

        message = f'"type: ignore" comment without error code{codes_hint}'
        # Don't use report since add_error_info will ignore the error!
        info = ErrorInfo(
            import_ctx=self.import_context(),
            file=file,
            module=self.current_module(),
            typ=None,
            function_or_member=None,
            line=line,
            column=-1,
            end_line=line,
            end_column=-1,
            severity="error",
            message=message,
            code=codes.IGNORE_WITHOUT_CODE,
            blocker=False,
            only_once=False,
            allow_dups=False,
        )
        self._add_error_info(file, info)
|
|
|
|
def num_messages(self) -> int:
    """Return the number of generated messages."""
    per_file_counts = (len(infos) for infos in self.error_info_map.values())
    return sum(per_file_counts)
|
|
|
|
def is_errors(self) -> bool:
    """Are there any generated messages?"""
    return len(self.error_info_map) > 0
|
|
|
|
def is_blockers(self) -> bool:
    """Are there any errors that are blockers?"""
    return bool(self.has_blockers)
|
|
|
|
def blocker_module(self) -> str | None:
    """Return the module with a blocking error, or None if not possible."""
    return next(
        (
            err.module
            for path in self.has_blockers
            for err in self.error_info_map[path]
            if err.blocker
        ),
        None,
    )
|
|
|
|
def is_errors_for_file(self, file: str) -> bool:
    """Are there any errors for the given file?"""
    return file in self.error_info_map
|
|
|
|
def prefer_simple_messages(self) -> bool:
    """Should we generate simple/fast error messages?

    Return True if errors are not shown to user, i.e. errors are ignored
    or they are collected for internal use only.

    If True, we should prefer to generate a simple message quickly.
    All normal errors should still be reported.
    """
    if self.file in self.ignored_files:
        # Errors ignored, so no point generating fancy messages
        return True
    # True when some active watcher unconditionally filters every error and
    # does not even save the filtered errors for later inspection.
    return any(
        watcher._filter is True and watcher._filtered is None
        for watcher in self._watchers
    )
|
|
|
|
def raise_error(self, use_stdout: bool = True) -> NoReturn:
    """Raise a CompileError with the generated messages.

    Render the messages suitable for displaying.
    """
    # self.new_messages() will format all messages that haven't already
    # been returned from a file_messages() call.
    raise CompileError(
        self.new_messages(), use_stdout=use_stdout, module_with_blocker=self.blocker_module()
    )
|
|
|
|
def format_messages(
    self, error_info: list[ErrorInfo], source_lines: list[str] | None
) -> list[str]:
    """Return a string list that represents the error messages.

    Use a form suitable for displaying to the user. If self.pretty
    is True also append a relevant trimmed source code line (only for
    severity 'error').
    """
    a: list[str] = []
    # Hidden errors are recorded for the daemon but never displayed.
    error_info = [info for info in error_info if not info.hidden]
    errors = self.render_messages(self.sort_messages(error_info))
    errors = self.remove_duplicates(errors)
    for (
        file,
        line,
        column,
        end_line,
        end_column,
        severity,
        message,
        allow_dups,
        code,
    ) in errors:
        s = ""
        if file is not None:
            if self.options.show_column_numbers and line >= 0 and column >= 0:
                # Columns are 0-based internally but 1-based in output.
                srcloc = f"{file}:{line}:{1 + column}"
                if self.options.show_error_end and end_line >= 0 and end_column >= 0:
                    srcloc += f":{end_line}:{end_column}"
            elif line >= 0:
                srcloc = f"{file}:{line}"
            else:
                srcloc = file
            s = f"{srcloc}: {severity}: {message}"
        else:
            # Context notes (e.g. import chains) have no file location.
            s = message
        if (
            not self.hide_error_codes
            and code
            and (severity != "note" or code in SHOW_NOTE_CODES)
        ):
            # If note has an error code, it is related to a previous error. Avoid
            # displaying duplicate error codes.
            s = f"{s} [{code.code}]"
        a.append(s)
        if self.options.pretty:
            # Add source code fragment and a location marker.
            if severity == "error" and source_lines and line > 0:
                source_line = source_lines[line - 1]
                source_line_expanded = source_line.expandtabs()
                if column < 0:
                    # Something went wrong, take first non-empty column.
                    column = len(source_line) - len(source_line.lstrip())

                # Shifts column after tab expansion
                column = len(source_line[:column].expandtabs())
                end_column = len(source_line[:end_column].expandtabs())

                # Note, currently coloring uses the offset to detect source snippets,
                # so these offsets should not be arbitrary.
                a.append(" " * DEFAULT_SOURCE_OFFSET + source_line_expanded)
                marker = "^"
                if end_line == line and end_column > column:
                    # Underline the whole span on single-line errors.
                    marker = f'^{"~" * (end_column - column - 1)}'
                a.append(" " * (DEFAULT_SOURCE_OFFSET + column) + marker)
    return a
|
|
|
|
def file_messages(self, path: str) -> list[str]:
    """Return a string list of new error messages from a given file.

    Use a form suitable for displaying to the user.
    Marks the file as flushed so later calls won't re-report it.
    """
    if path not in self.error_info_map:
        return []
    self.flushed_files.add(path)
    source_lines = None
    if self.options.pretty and self.read_source:
        # Fetch source only when needed for pretty code snippets.
        source_lines = self.read_source(path)
    return self.format_messages(self.error_info_map[path], source_lines)
|
|
|
|
def new_messages(self) -> list[str]:
    """Return a string list of new error messages.

    Use a form suitable for displaying to the user.
    Errors from different files are ordered based on the order in which
    they first generated an error.
    """
    # file_messages() skips (and marks) already-flushed files.
    return [
        msg
        for path in self.error_info_map
        if path not in self.flushed_files
        for msg in self.file_messages(path)
    ]
|
|
|
|
def targets(self) -> set[str]:
    """Return a set of all targets that contain errors."""
    # TODO: Make sure that either target is always defined or that not being defined
    #       is okay for fine-grained incremental checking.
    return {
        info.target for errs in self.error_info_map.values() for info in errs if info.target
    }
|
|
|
|
def render_messages(self, errors: list[ErrorInfo]) -> list[ErrorTuple]:
|
|
"""Translate the messages into a sequence of tuples.
|
|
|
|
Each tuple is of form (path, line, col, severity, message, allow_dups, code).
|
|
The rendered sequence includes information about error contexts.
|
|
The path item may be None. If the line item is negative, the
|
|
line number is not defined for the tuple.
|
|
"""
|
|
result: list[ErrorTuple] = []
|
|
prev_import_context: list[tuple[str, int]] = []
|
|
prev_function_or_member: str | None = None
|
|
prev_type: str | None = None
|
|
|
|
for e in errors:
|
|
# Report module import context, if different from previous message.
|
|
if not self.options.show_error_context:
|
|
pass
|
|
elif e.import_ctx != prev_import_context:
|
|
last = len(e.import_ctx) - 1
|
|
i = last
|
|
while i >= 0:
|
|
path, line = e.import_ctx[i]
|
|
fmt = "{}:{}: note: In module imported here"
|
|
if i < last:
|
|
fmt = "{}:{}: note: ... from here"
|
|
if i > 0:
|
|
fmt += ","
|
|
else:
|
|
fmt += ":"
|
|
# Remove prefix to ignore from path (if present) to
|
|
# simplify path.
|
|
path = remove_path_prefix(path, self.ignore_prefix)
|
|
result.append(
|
|
(None, -1, -1, -1, -1, "note", fmt.format(path, line), e.allow_dups, None)
|
|
)
|
|
i -= 1
|
|
|
|
file = self.simplify_path(e.file)
|
|
|
|
# Report context within a source file.
|
|
if not self.options.show_error_context:
|
|
pass
|
|
elif e.function_or_member != prev_function_or_member or e.type != prev_type:
|
|
if e.function_or_member is None:
|
|
if e.type is None:
|
|
result.append(
|
|
(file, -1, -1, -1, -1, "note", "At top level:", e.allow_dups, None)
|
|
)
|
|
else:
|
|
result.append(
|
|
(
|
|
file,
|
|
-1,
|
|
-1,
|
|
-1,
|
|
-1,
|
|
"note",
|
|
f'In class "{e.type}":',
|
|
e.allow_dups,
|
|
None,
|
|
)
|
|
)
|
|
else:
|
|
if e.type is None:
|
|
result.append(
|
|
(
|
|
file,
|
|
-1,
|
|
-1,
|
|
-1,
|
|
-1,
|
|
"note",
|
|
f'In function "{e.function_or_member}":',
|
|
e.allow_dups,
|
|
None,
|
|
)
|
|
)
|
|
else:
|
|
result.append(
|
|
(
|
|
file,
|
|
-1,
|
|
-1,
|
|
-1,
|
|
-1,
|
|
"note",
|
|
'In member "{}" of class "{}":'.format(
|
|
e.function_or_member, e.type
|
|
),
|
|
e.allow_dups,
|
|
None,
|
|
)
|
|
)
|
|
elif e.type != prev_type:
|
|
if e.type is None:
|
|
result.append(
|
|
(file, -1, -1, -1, -1, "note", "At top level:", e.allow_dups, None)
|
|
)
|
|
else:
|
|
result.append(
|
|
(file, -1, -1, -1, -1, "note", f'In class "{e.type}":', e.allow_dups, None)
|
|
)
|
|
|
|
if isinstance(e.message, ErrorMessage):
|
|
result.append(
|
|
(
|
|
file,
|
|
e.line,
|
|
e.column,
|
|
e.end_line,
|
|
e.end_column,
|
|
e.severity,
|
|
e.message.value,
|
|
e.allow_dups,
|
|
e.code,
|
|
)
|
|
)
|
|
else:
|
|
result.append(
|
|
(
|
|
file,
|
|
e.line,
|
|
e.column,
|
|
e.end_line,
|
|
e.end_column,
|
|
e.severity,
|
|
e.message,
|
|
e.allow_dups,
|
|
e.code,
|
|
)
|
|
)
|
|
|
|
prev_import_context = e.import_ctx
|
|
prev_function_or_member = e.function_or_member
|
|
prev_type = e.type
|
|
|
|
return result
|
|
|
|
def sort_messages(self, errors: list[ErrorInfo]) -> list[ErrorInfo]:
|
|
"""Sort an array of error messages locally by line number.
|
|
|
|
I.e., sort a run of consecutive messages with the same
|
|
context by line number, but otherwise retain the general
|
|
ordering of the messages.
|
|
"""
|
|
result: list[ErrorInfo] = []
|
|
i = 0
|
|
while i < len(errors):
|
|
i0 = i
|
|
# Find neighbouring errors with the same context and file.
|
|
while (
|
|
i + 1 < len(errors)
|
|
and errors[i + 1].import_ctx == errors[i].import_ctx
|
|
and errors[i + 1].file == errors[i].file
|
|
):
|
|
i += 1
|
|
i += 1
|
|
|
|
# Sort the errors specific to a file according to line number and column.
|
|
a = sorted(errors[i0:i], key=lambda x: (x.line, x.column))
|
|
a = self.sort_within_context(a)
|
|
result.extend(a)
|
|
return result
|
|
|
|
def sort_within_context(self, errors: list[ErrorInfo]) -> list[ErrorInfo]:
|
|
"""For the same location decide which messages to show first/last.
|
|
|
|
Currently, we only compare within the same error code, to decide the
|
|
order of various additional notes.
|
|
"""
|
|
result = []
|
|
i = 0
|
|
while i < len(errors):
|
|
i0 = i
|
|
# Find neighbouring errors with the same position and error code.
|
|
while (
|
|
i + 1 < len(errors)
|
|
and errors[i + 1].line == errors[i].line
|
|
and errors[i + 1].column == errors[i].column
|
|
and errors[i + 1].end_line == errors[i].end_line
|
|
and errors[i + 1].end_column == errors[i].end_column
|
|
and errors[i + 1].code == errors[i].code
|
|
):
|
|
i += 1
|
|
i += 1
|
|
|
|
# Sort the messages specific to a given error by priority.
|
|
a = sorted(errors[i0:i], key=lambda x: x.priority)
|
|
result.extend(a)
|
|
return result
|
|
|
|
def remove_duplicates(self, errors: list[ErrorTuple]) -> list[ErrorTuple]:
|
|
"""Remove duplicates from a sorted error list."""
|
|
res: list[ErrorTuple] = []
|
|
i = 0
|
|
while i < len(errors):
|
|
dup = False
|
|
# Use slightly special formatting for member conflicts reporting.
|
|
conflicts_notes = False
|
|
j = i - 1
|
|
# Find duplicates, unless duplicates are allowed.
|
|
if not errors[i][7]:
|
|
while j >= 0 and errors[j][0] == errors[i][0]:
|
|
if errors[j][6].strip() == "Got:":
|
|
conflicts_notes = True
|
|
j -= 1
|
|
j = i - 1
|
|
while j >= 0 and errors[j][0] == errors[i][0] and errors[j][1] == errors[i][1]:
|
|
if (
|
|
errors[j][5] == errors[i][5]
|
|
and
|
|
# Allow duplicate notes in overload conflicts reporting.
|
|
not (
|
|
(errors[i][5] == "note" and errors[i][6].strip() in allowed_duplicates)
|
|
or (errors[i][6].strip().startswith("def ") and conflicts_notes)
|
|
)
|
|
and errors[j][6] == errors[i][6]
|
|
): # ignore column
|
|
dup = True
|
|
break
|
|
j -= 1
|
|
if not dup:
|
|
res.append(errors[i])
|
|
i += 1
|
|
return res
|
|
|
|
|
|
class CompileError(Exception):
|
|
"""Exception raised when there is a compile error.
|
|
|
|
It can be a parse, semantic analysis, type check or other
|
|
compilation-related error.
|
|
|
|
CompileErrors raised from an errors object carry all of the
|
|
messages that have not been reported out by error streaming.
|
|
This is patched up by build.build to contain either all error
|
|
messages (if errors were streamed) or none (if they were not).
|
|
|
|
"""
|
|
|
|
messages: list[str]
|
|
use_stdout = False
|
|
# Can be set in case there was a module with a blocking error
|
|
module_with_blocker: str | None = None
|
|
|
|
def __init__(
|
|
self, messages: list[str], use_stdout: bool = False, module_with_blocker: str | None = None
|
|
) -> None:
|
|
super().__init__("\n".join(messages))
|
|
self.messages = messages
|
|
self.use_stdout = use_stdout
|
|
self.module_with_blocker = module_with_blocker
|
|
|
|
|
|
def remove_path_prefix(path: str, prefix: str | None) -> str:
|
|
"""If path starts with prefix, return copy of path with the prefix removed.
|
|
Otherwise, return path. If path is None, return None.
|
|
"""
|
|
if prefix is not None and path.startswith(prefix):
|
|
return path[len(prefix) :]
|
|
else:
|
|
return path
|
|
|
|
|
|
def report_internal_error(
    err: Exception,
    file: str | None,
    line: int,
    errors: Errors,
    options: Options,
    stdout: TextIO | None = None,
    stderr: TextIO | None = None,
) -> NoReturn:
    """Report internal error and exit.

    This optionally starts pdb or shows a traceback.
    """
    stdout = stdout or sys.stdout
    stderr = stderr or sys.stderr

    # Dump out errors so far, they often provide a clue.
    # But catch unexpected errors rendering them.
    try:
        for msg in errors.new_messages():
            print(msg)
    except Exception as e:
        print("Failed to dump errors:", repr(e), file=stderr)

    # Compute file:line prefix for official-looking error messages.
    if not file:
        prefix = ""
    elif line:
        prefix = f"{file}:{line}: "
    else:
        prefix = f"{file}: "

    # Print "INTERNAL ERROR" message.
    print(
        f"{prefix}error: INTERNAL ERROR --",
        "Please try using mypy master on GitHub:\n"
        "https://mypy.readthedocs.io/en/stable/common_issues.html"
        "#using-a-development-mypy-build",
        file=stderr,
    )
    if options.show_traceback:
        print("Please report a bug at https://github.com/python/mypy/issues", file=stderr)
    else:
        print(
            "If this issue continues with mypy master, "
            "please report a bug at https://github.com/python/mypy/issues",
            file=stderr,
        )
    print(f"version: {mypy_version}", file=stderr)

    # If requested, drop into pdb. This overrides show_tb.
    if options.pdb:
        print("Dropping into pdb", file=stderr)
        import pdb

        pdb.post_mortem(sys.exc_info()[2])

    # If requested, print traceback, else print note explaining how to get one.
    if options.raise_exceptions:
        raise err
    if not options.show_traceback:
        if not options.pdb:
            print(
                f"{prefix}: note: please use --show-traceback to print a traceback "
                "when reporting a bug",
                file=stderr,
            )
    else:
        # Combine the frames that led here with the frames of the exception
        # itself (the last two local frames are this reporting machinery).
        tb = traceback.extract_stack()[:-2]
        tb2 = traceback.extract_tb(sys.exc_info()[2])
        print("Traceback (most recent call last):")
        for s in traceback.format_list(tb + tb2):
            print(s.rstrip("\n"))
        print(f"{type(err).__name__}: {err}", file=stdout)
        print(f"{prefix}: note: use --pdb to drop into pdb", file=stderr)

    # Exit. The caller has nothing more to say.
    # We use exit code 2 to signal that this is no ordinary error.
    raise SystemExit(2)
|