commit f40c7887d3cc9bb0b56576ed9edbe505ff8058c0
Author: Florian Weimer <fweimer@redhat.com>
Date:   Thu Sep 22 12:10:41 2022 +0200

    scripts: Extract glibcpp.py from check-obsolete-constructs.py
    
    The C tokenizer is useful separately.
    
    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>

diff --git a/scripts/check-obsolete-constructs.py b/scripts/check-obsolete-constructs.py
index 89d21dea6e788783..7c7a092e440a3258 100755
--- a/scripts/check-obsolete-constructs.py
+++ b/scripts/check-obsolete-constructs.py
@@ -24,193 +24,14 @@
 """
 
 import argparse
-import collections
+import os
 import re
 import sys
 
-# Simplified lexical analyzer for C preprocessing tokens.
-# Does not implement trigraphs.
-# Does not implement backslash-newline in the middle of any lexical
-#   item other than a string literal.
-# Does not implement universal-character-names in identifiers.
-# Treats prefixed strings (e.g. L"...") as two tokens (L and "...")
-# Accepts non-ASCII characters only within comments and strings.
-
-# Caution: The order of the outermost alternation matters.
-# STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
-# BLOCK_COMMENT before BAD_BLOCK_COM before PUNCTUATOR, and OTHER must
-# be last.
-# Caution: There should be no capturing groups other than the named
-# captures in the outermost alternation.
-
-# For reference, these are all of the C punctuators as of C11:
-#   [ ] ( ) { } , ; ? ~
-#   ! != * *= / /= ^ ^= = ==
-#   # ##
-#   % %= %> %: %:%:
-#   & &= &&
-#   | |= ||
-#   + += ++
-#   - -= -- ->
-#   . ...
-#   : :>
-#   < <% <: << <<= <=
-#   > >= >> >>=
-
-# The BAD_* tokens are not part of the official definition of pp-tokens;
-# they match unclosed strings, character constants, and block comments,
-# so that the regex engine doesn't have to backtrack all the way to the
-# beginning of a broken construct and then emit dozens of junk tokens.
-
-PP_TOKEN_RE_ = re.compile(r"""
-    (?P<STRING>        \"(?:[^\"\\\r\n]|\\(?:[\r\n -~]|\r\n))*\")
-   |(?P<BAD_STRING>    \"(?:[^\"\\\r\n]|\\[ -~])*)
-   |(?P<CHARCONST>     \'(?:[^\'\\\r\n]|\\(?:[\r\n -~]|\r\n))*\')
-   |(?P<BAD_CHARCONST> \'(?:[^\'\\\r\n]|\\[ -~])*)
-   |(?P<BLOCK_COMMENT> /\*(?:\*(?!/)|[^*])*\*/)
-   |(?P<BAD_BLOCK_COM> /\*(?:\*(?!/)|[^*])*\*?)
-   |(?P<LINE_COMMENT>  //[^\r\n]*)
-   |(?P<IDENT>         [_a-zA-Z][_a-zA-Z0-9]*)
-   |(?P<PP_NUMBER>     \.?[0-9](?:[0-9a-df-oq-zA-DF-OQ-Z_.]|[eEpP][+-]?)*)
-   |(?P<PUNCTUATOR>
-       [,;?~(){}\[\]]
-     | [!*/^=]=?
-     | \#\#?
-     | %(?:[=>]|:(?:%:)?)?
-     | &[=&]?
-     |\|[=|]?
-     |\+[=+]?
-     | -[=->]?
-     |\.(?:\.\.)?
-     | :>?
-     | <(?:[%:]|<(?:=|<=?)?)?
-     | >(?:=|>=?)?)
-   |(?P<ESCNL>         \\(?:\r|\n|\r\n))
-   |(?P<WHITESPACE>    [ \t\n\r\v\f]+)
-   |(?P<OTHER>         .)
-""", re.DOTALL | re.VERBOSE)
-
-HEADER_NAME_RE_ = re.compile(r"""
-    < [^>\r\n]+ >
-  | " [^"\r\n]+ "
-""", re.DOTALL | re.VERBOSE)
-
-ENDLINE_RE_ = re.compile(r"""\r|\n|\r\n""")
-
-# based on the sample code in the Python re documentation
-Token_ = collections.namedtuple("Token", (
-    "kind", "text", "line", "column", "context"))
-Token_.__doc__ = """
-   One C preprocessing token, comment, or chunk of whitespace.
-   'kind' identifies the token type, which will be one of:
-       STRING, CHARCONST, BLOCK_COMMENT, LINE_COMMENT, IDENT,
-       PP_NUMBER, PUNCTUATOR, ESCNL, WHITESPACE, HEADER_NAME,
-       or OTHER.  The BAD_* alternatives in PP_TOKEN_RE_ are
-       handled within tokenize_c, below.
-
-   'text' is the sequence of source characters making up the token;
-       no decoding whatsoever is performed.
-
-   'line' and 'column' give the position of the first character of the
-      token within the source file.  They are both 1-based.
-
-   'context' indicates whether or not this token occurred within a
-      preprocessing directive; it will be None for running text,
-      '<null>' for the leading '#' of a directive line (because '#'
-      all by itself on a line is a "null directive"), or the name of
-      the directive for tokens within a directive line, starting with
-      the IDENT for the name itself.
-"""
-
-def tokenize_c(file_contents, reporter):
-    """Yield a series of Token objects, one for each preprocessing
-       token, comment, or chunk of whitespace within FILE_CONTENTS.
-       The REPORTER object is expected to have one method,
-       reporter.error(token, message), which will be called to
-       indicate a lexical error at the position of TOKEN.
-       If MESSAGE contains the four-character sequence '{!r}', that
-       is expected to be replaced by repr(token.text).
-    """
+# Make available glibc Python modules.
+sys.path.append(os.path.dirname(os.path.realpath(__file__)))
 
-    Token = Token_
-    PP_TOKEN_RE = PP_TOKEN_RE_
-    ENDLINE_RE = ENDLINE_RE_
-    HEADER_NAME_RE = HEADER_NAME_RE_
-
-    line_num = 1
-    line_start = 0
-    pos = 0
-    limit = len(file_contents)
-    directive = None
-    at_bol = True
-    while pos < limit:
-        if directive == "include":
-            mo = HEADER_NAME_RE.match(file_contents, pos)
-            if mo:
-                kind = "HEADER_NAME"
-                directive = "after_include"
-            else:
-                mo = PP_TOKEN_RE.match(file_contents, pos)
-                kind = mo.lastgroup
-                if kind != "WHITESPACE":
-                    directive = "after_include"
-        else:
-            mo = PP_TOKEN_RE.match(file_contents, pos)
-            kind = mo.lastgroup
-
-        text = mo.group()
-        line = line_num
-        column = mo.start() - line_start
-        adj_line_start = 0
-        # only these kinds can contain a newline
-        if kind in ("WHITESPACE", "BLOCK_COMMENT", "LINE_COMMENT",
-                    "STRING", "CHARCONST", "BAD_BLOCK_COM", "ESCNL"):
-            for tmo in ENDLINE_RE.finditer(text):
-                line_num += 1
-                adj_line_start = tmo.end()
-            if adj_line_start:
-                line_start = mo.start() + adj_line_start
-
-        # Track whether or not we are scanning a preprocessing directive.
-        if kind == "LINE_COMMENT" or (kind == "WHITESPACE" and adj_line_start):
-            at_bol = True
-            directive = None
-        else:
-            if kind == "PUNCTUATOR" and text == "#" and at_bol:
-                directive = "<null>"
-            elif kind == "IDENT" and directive == "<null>":
-                directive = text
-            at_bol = False
-
-        # Report ill-formed tokens and rewrite them as their well-formed
-        # equivalents, so downstream processing doesn't have to know about them.
-        # (Rewriting instead of discarding provides better error recovery.)
-        if kind == "BAD_BLOCK_COM":
-            reporter.error(Token("BAD_BLOCK_COM", "", line, column+1, ""),
-                           "unclosed block comment")
-            text += "*/"
-            kind = "BLOCK_COMMENT"
-        elif kind == "BAD_STRING":
-            reporter.error(Token("BAD_STRING", "", line, column+1, ""),
-                           "unclosed string")
-            text += "\""
-            kind = "STRING"
-        elif kind == "BAD_CHARCONST":
-            reporter.error(Token("BAD_CHARCONST", "", line, column+1, ""),
-                           "unclosed char constant")
-            text += "'"
-            kind = "CHARCONST"
-
-        tok = Token(kind, text, line, column+1,
-                    "include" if directive == "after_include" else directive)
-        # Do not complain about OTHER tokens inside macro definitions.
-        # $ and @ appear in macros defined by headers intended to be
-        # included from assembly language, e.g. sysdeps/mips/sys/asm.h.
-        if kind == "OTHER" and directive != "define":
-            self.error(tok, "stray {!r} in program")
-
-        yield tok
-        pos = mo.end()
+import glibcpp
 
 #
 # Base and generic classes for individual checks.
@@ -446,7 +267,7 @@ class HeaderChecker:
 
         typedef_checker = ObsoleteTypedefChecker(self, self.fname)
 
-        for tok in tokenize_c(contents, self):
+        for tok in glibcpp.tokenize_c(contents, self):
             typedef_checker.examine(tok)
 
 def main():
diff --git a/scripts/glibcpp.py b/scripts/glibcpp.py
new file mode 100644
index 0000000000000000..b44c6a4392dde8ce
--- /dev/null
+++ b/scripts/glibcpp.py
@@ -0,0 +1,212 @@
+#! /usr/bin/python3
+# Approximation to C preprocessing.
+# Copyright (C) 2019-2022 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <https://www.gnu.org/licenses/>.
+
+"""
+Simplified lexical analyzer for C preprocessing tokens.
+
+Does not implement trigraphs.
+
+Does not implement backslash-newline in the middle of any lexical
+item other than a string literal.
+
+Does not implement universal-character-names in identifiers.
+
+Treats prefixed strings (e.g. L"...") as two tokens (L and "...").
+
+Accepts non-ASCII characters only within comments and strings.
+"""
+
+import collections
+import re
+
+# Caution: The order of the outermost alternation matters.
+# STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
+# BLOCK_COMMENT before BAD_BLOCK_COM before PUNCTUATOR, and OTHER must
+# be last.
+# Caution: There should be no capturing groups other than the named
+# captures in the outermost alternation.
+
+# For reference, these are all of the C punctuators as of C11:
+#   [ ] ( ) { } , ; ? ~
+#   ! != * *= / /= ^ ^= = ==
+#   # ##
+#   % %= %> %: %:%:
+#   & &= &&
+#   | |= ||
+#   + += ++
+#   - -= -- ->
+#   . ...
+#   : :>
+#   < <% <: << <<= <=
+#   > >= >> >>=
+
+# The BAD_* tokens are not part of the official definition of pp-tokens;
+# they match unclosed strings, character constants, and block comments,
+# so that the regex engine doesn't have to backtrack all the way to the
+# beginning of a broken construct and then emit dozens of junk tokens.
+
+PP_TOKEN_RE_ = re.compile(r"""
+    (?P<STRING>        \"(?:[^\"\\\r\n]|\\(?:[\r\n -~]|\r\n))*\")
+   |(?P<BAD_STRING>    \"(?:[^\"\\\r\n]|\\[ -~])*)
+   |(?P<CHARCONST>     \'(?:[^\'\\\r\n]|\\(?:[\r\n -~]|\r\n))*\')
+   |(?P<BAD_CHARCONST> \'(?:[^\'\\\r\n]|\\[ -~])*)
+   |(?P<BLOCK_COMMENT> /\*(?:\*(?!/)|[^*])*\*/)
+   |(?P<BAD_BLOCK_COM> /\*(?:\*(?!/)|[^*])*\*?)
+   |(?P<LINE_COMMENT>  //[^\r\n]*)
+   |(?P<IDENT>         [_a-zA-Z][_a-zA-Z0-9]*)
+   |(?P<PP_NUMBER>     \.?[0-9](?:[0-9a-df-oq-zA-DF-OQ-Z_.]|[eEpP][+-]?)*)
+   |(?P<PUNCTUATOR>
+       [,;?~(){}\[\]]
+     | [!*/^=]=?
+     | \#\#?
+     | %(?:[=>]|:(?:%:)?)?
+     | &[=&]?
+     |\|[=|]?
+     |\+[=+]?
+     | -[=->]?
+     |\.(?:\.\.)?
+     | :>?
+     | <(?:[%:]|<(?:=|<=?)?)?
+     | >(?:=|>=?)?)
+   |(?P<ESCNL>         \\(?:\r|\n|\r\n))
+   |(?P<WHITESPACE>    [ \t\n\r\v\f]+)
+   |(?P<OTHER>         .)
+""", re.DOTALL | re.VERBOSE)
+
+HEADER_NAME_RE_ = re.compile(r"""
+    < [^>\r\n]+ >
+  | " [^"\r\n]+ "
+""", re.DOTALL | re.VERBOSE)
+
+ENDLINE_RE_ = re.compile(r"""\r|\n|\r\n""")
+
+# based on the sample code in the Python re documentation
+Token_ = collections.namedtuple("Token", (
+    "kind", "text", "line", "column", "context"))
+Token_.__doc__ = """
+   One C preprocessing token, comment, or chunk of whitespace.
+   'kind' identifies the token type, which will be one of:
+       STRING, CHARCONST, BLOCK_COMMENT, LINE_COMMENT, IDENT,
+       PP_NUMBER, PUNCTUATOR, ESCNL, WHITESPACE, HEADER_NAME,
+       or OTHER.  The BAD_* alternatives in PP_TOKEN_RE_ are
+       handled within tokenize_c, below.
+
+   'text' is the sequence of source characters making up the token;
+       no decoding whatsoever is performed.
+
+   'line' and 'column' give the position of the first character of the
+      token within the source file.  They are both 1-based.
+
+   'context' indicates whether or not this token occurred within a
+      preprocessing directive; it will be None for running text,
+      '<null>' for the leading '#' of a directive line (because '#'
+      all by itself on a line is a "null directive"), or the name of
+      the directive for tokens within a directive line, starting with
+      the IDENT for the name itself.
+"""
+
+def tokenize_c(file_contents, reporter):
+    """Yield a series of Token objects, one for each preprocessing
+       token, comment, or chunk of whitespace within FILE_CONTENTS.
+       The REPORTER object is expected to have one method,
+       reporter.error(token, message), which will be called to
+       indicate a lexical error at the position of TOKEN.
+       If MESSAGE contains the four-character sequence '{!r}', that
+       is expected to be replaced by repr(token.text).
+    """
+
+    Token = Token_
+    PP_TOKEN_RE = PP_TOKEN_RE_
+    ENDLINE_RE = ENDLINE_RE_
+    HEADER_NAME_RE = HEADER_NAME_RE_
+
+    line_num = 1
+    line_start = 0
+    pos = 0
+    limit = len(file_contents)
+    directive = None
+    at_bol = True
+    while pos < limit:
+        if directive == "include":
+            mo = HEADER_NAME_RE.match(file_contents, pos)
+            if mo:
+                kind = "HEADER_NAME"
+                directive = "after_include"
+            else:
+                mo = PP_TOKEN_RE.match(file_contents, pos)
+                kind = mo.lastgroup
+                if kind != "WHITESPACE":
+                    directive = "after_include"
+        else:
+            mo = PP_TOKEN_RE.match(file_contents, pos)
+            kind = mo.lastgroup
+
+        text = mo.group()
+        line = line_num
+        column = mo.start() - line_start
+        adj_line_start = 0
+        # only these kinds can contain a newline
+        if kind in ("WHITESPACE", "BLOCK_COMMENT", "LINE_COMMENT",
+                    "STRING", "CHARCONST", "BAD_BLOCK_COM", "ESCNL"):
+            for tmo in ENDLINE_RE.finditer(text):
+                line_num += 1
+                adj_line_start = tmo.end()
+            if adj_line_start:
+                line_start = mo.start() + adj_line_start
+
+        # Track whether or not we are scanning a preprocessing directive.
+        if kind == "LINE_COMMENT" or (kind == "WHITESPACE" and adj_line_start):
+            at_bol = True
+            directive = None
+        else:
+            if kind == "PUNCTUATOR" and text == "#" and at_bol:
+                directive = "<null>"
+            elif kind == "IDENT" and directive == "<null>":
+                directive = text
+            at_bol = False
+
+        # Report ill-formed tokens and rewrite them as their well-formed
+        # equivalents, so downstream processing doesn't have to know about them.
+        # (Rewriting instead of discarding provides better error recovery.)
+        if kind == "BAD_BLOCK_COM":
+            reporter.error(Token("BAD_BLOCK_COM", "", line, column+1, ""),
+                           "unclosed block comment")
+            text += "*/"
+            kind = "BLOCK_COMMENT"
+        elif kind == "BAD_STRING":
+            reporter.error(Token("BAD_STRING", "", line, column+1, ""),
+                           "unclosed string")
+            text += "\""
+            kind = "STRING"
+        elif kind == "BAD_CHARCONST":
+            reporter.error(Token("BAD_CHARCONST", "", line, column+1, ""),
+                           "unclosed char constant")
+            text += "'"
+            kind = "CHARCONST"
+
+        tok = Token(kind, text, line, column+1,
+                    "include" if directive == "after_include" else directive)
+        # Do not complain about OTHER tokens inside macro definitions.
+        # $ and @ appear in macros defined by headers intended to be
+        # included from assembly language, e.g. sysdeps/mips/sys/asm.h.
+        if kind == "OTHER" and directive != "define":
+            self.error(tok, "stray {!r} in program")
+
+        yield tok
+        pos = mo.end()