Diffstat (limited to 'scripts')

 -rw-r--r--  scripts/CheckMentioned.py      |  11
 -rw-r--r--  scripts/Dependencies.py        |   1
 -rw-r--r--  scripts/FileGenerator.py       |   2
 -rw-r--r--  scripts/GenerateCaseConvert.py |   2
 -rwxr-xr-x  scripts/HFacer.py              |   5
 -rw-r--r--  scripts/HeaderCheck.py         |   1
 -rw-r--r--  scripts/LexGen.py              |  22
 -rw-r--r--  scripts/ScintillaData.py       | 188

 8 files changed, 11 insertions, 221 deletions
diff --git a/scripts/CheckMentioned.py b/scripts/CheckMentioned.py
index cd518ce28..bf26a0ba3 100644
--- a/scripts/CheckMentioned.py
+++ b/scripts/CheckMentioned.py
@@ -68,15 +68,6 @@ def convertIFaceTypeToC(t):
 def makeParm(t, n, v):
     return (convertIFaceTypeToC(t) + n).rstrip()
 
-def makeRet(params):
-    retType = params["ReturnType"]
-    if retType in ["void", "string", "stringresult"]:
-        retType = ""
-    if retType:
-        retType = " → " + retType
-
-    return retType
-
 def makeSig(params):
     p1 = makeParm(params["Param1Type"], params["Param1Name"], params["Param1Value"])
     p2 = makeParm(params["Param2Type"], params["Param2Name"], params["Param2Value"])
@@ -92,7 +83,6 @@ def makeSig(params):
     if p1 == "" and p2 == "":
         return retType
 
-    ret = ""
     if p1 == "":
         p1 = "<unused>"
         joiner = ""
@@ -140,7 +130,6 @@ def checkDocumentation():
     # Examine header sections which point to definitions
     #<a class="message" href="#SCI_SETLAYOUTCACHE">SCI_SETLAYOUTCACHE(int cacheMode)</a><br />
     dirPattern = re.compile(r'<a class="message" href="#([A-Z0-9_]+)">([A-Z][A-Za-z0-9_() *&;,\n]+)</a>')
-    firstWord = re.compile(r'[A-Z0-9_]+')
     for api, sig in re.findall(dirPattern, docs):
         sigApi = re.split('\W+', sig)[0]
         sigFlat = flattenSpaces(sig)
diff --git a/scripts/Dependencies.py b/scripts/Dependencies.py
index c9f85db02..135c57ac8 100644
--- a/scripts/Dependencies.py
+++ b/scripts/Dependencies.py
@@ -111,7 +111,6 @@ def ExtractDependencies(input):
     for line in input:
         headersLine = line.startswith(" ") or line.startswith("\t")
         line = line.strip()
-        isContinued = line.endswith("\\")
         line = line.rstrip("\\ ")
         fileNames = line.strip().split(" ")
         if not headersLine:
diff --git a/scripts/FileGenerator.py b/scripts/FileGenerator.py
index eb13e0fd5..b4fe02653 100644
--- a/scripts/FileGenerator.py
+++ b/scripts/FileGenerator.py
@@ -150,7 +150,7 @@ def UpdateLineInPlistFile(path, key, value):
         elif ls.startswith("<string>"):
             if keyCurrent == key:
                 start, tag, rest = l.partition("<string>")
-                val, etag, end = rest.partition("</string>")
+                _val, etag, end = rest.partition("</string>")
                 l = start + tag + value + etag + end
         lines.append(l)
     contents = "".join(lines)
diff --git a/scripts/GenerateCaseConvert.py b/scripts/GenerateCaseConvert.py
index 7a0f473f7..3c0193ecb 100644
--- a/scripts/GenerateCaseConvert.py
+++ b/scripts/GenerateCaseConvert.py
@@ -99,7 +99,7 @@ def groupRanges(symmetrics):
 
     rangeCoverage = list(flatten([range(r[0], r[0]+r[2]*r[3], r[3]) for r in rangeGroups]))
 
-    nonRanges = [(l, u) for l, u, d in symmetrics if l not in rangeCoverage]
+    nonRanges = [(l, u) for l, u, _d in symmetrics if l not in rangeCoverage]
 
     return rangeGroups, nonRanges
 
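Note: the FileGenerator.py hunk above only renames an unused str.partition result to
_val. For reference, the sketch below shows the same partition-based value replacement
as a standalone function; the helper name replace_plist_string, the sample plist lines
and the version numbers are illustrative only, not part of the Scintilla sources.

    # Minimal sketch of the replacement logic in UpdateLineInPlistFile:
    # split each <string> line around its markers, keep the markers and
    # surrounding whitespace, and drop only the old value (hence _val).
    def replace_plist_string(lines, key, value):
        out = []
        keyCurrent = ""
        for l in lines:
            ls = l.strip()
            if ls.startswith("<key>"):
                keyCurrent = ls.replace("<key>", "").replace("</key>", "")
            elif ls.startswith("<string>") and keyCurrent == key:
                start, tag, rest = l.partition("<string>")
                _val, etag, end = rest.partition("</string>")
                l = start + tag + value + etag + end
            out.append(l)
        return out

    # Example data (hypothetical key and version numbers):
    lines = ["\t<key>CFBundleVersion</key>\n", "\t<string>4.4.6</string>\n"]
    print("".join(replace_plist_string(lines, "CFBundleVersion", "5.0.0")))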
diff --git a/scripts/HFacer.py b/scripts/HFacer.py
index 0955cdaf1..2bdfe8107 100755
--- a/scripts/HFacer.py
+++ b/scripts/HFacer.py
@@ -6,8 +6,7 @@
 import pathlib
 
 import Face
-
-from FileGenerator import UpdateFile, Generate, Regenerate, UpdateLineInFile, lineEnd
+import FileGenerator
 
 def printHFile(f):
     out = []
@@ -35,7+34,7 @@ def printHFile(f):
 def RegenerateAll(root, showMaxID):
     f = Face.Face()
     f.ReadFromFile(root / "include/Scintilla.iface")
-    Regenerate(root / "include/Scintilla.h", "/* ", printHFile(f))
+    FileGenerator.Regenerate(root / "include/Scintilla.h", "/* ", printHFile(f))
     if showMaxID:
         valueSet = set(int(x) for x in f.values if int(x) < 3000)
         maximumID = max(valueSet)
diff --git a/scripts/HeaderCheck.py b/scripts/HeaderCheck.py
index afa996384..e24cfa1a9 100644
--- a/scripts/HeaderCheck.py
+++ b/scripts/HeaderCheck.py
@@ -53,7 +53,6 @@ def CheckFiles(headerOrderTxt):
     for f in orderedPaths:
         #~ print(" File ", f.relative_to(root))
         incs = ExtractHeaders(f)
-        news = set(incs) - set(headerOrder)
         allIncs = allIncs.union(set(incs))
 
         m = 0
diff --git a/scripts/LexGen.py b/scripts/LexGen.py
index f40efce04..416f69c2d 100644
--- a/scripts/LexGen.py
+++ b/scripts/LexGen.py
@@ -2,21 +2,16 @@
 # LexGen.py - implemented 2002 by Neil Hodgson neilh@scintilla.org
 # Released to the public domain.
 
-# Regenerate the Scintilla source files that list all the lexers.
-# Should be run whenever a new lexer is added or removed.
+# Update Scintilla files.
+# Update version numbers and modification dates in documentation and header files.
+# Update make dependencies.
 # Requires Python 3.6 or later
-# Files are regenerated in place with templates stored in comments.
-# The format of generation comments is documented in FileGenerator.py.
-# Also updates version numbers and modification dates.
 
-from FileGenerator import Regenerate, UpdateLineInFile, \
-    ReplaceREInFile, UpdateLineInPlistFile, ReadFileAsList, UpdateFileFromLines, \
-    FindSectionInList
+from FileGenerator import UpdateLineInFile, ReplaceREInFile, UpdateLineInPlistFile
 import ScintillaData
 import HFacer
 import os
 import pathlib
-import uuid
 import sys
 
 baseDirectory = os.path.dirname(os.path.dirname(ScintillaData.__file__))
@@ -61,13 +56,6 @@ def UpdateVersionNumbers(sci, root):
         "CURRENT_PROJECT_VERSION = [0-9.]+;",
         f'CURRENT_PROJECT_VERSION = {sci.versionDotted};')
 
-# Last 24 digits of UUID, used for item IDs in Xcode
-def uid24():
-    return str(uuid.uuid4()).replace("-", "").upper()[-24:]
-
-def ciLexerKey(a):
-    return a.split()[2].lower()
-
 def RegenerateAll(rootDirectory):
     root = pathlib.Path(rootDirectory)
 
@@ -76,8 +64,6 @@ def RegenerateAll(rootDirectory):
 
     sci = ScintillaData.ScintillaData(scintillaBase)
 
-    Regenerate(scintillaBase / "win32/scintilla.mak", "#", sci.lexFiles)
-
     startDir = os.getcwd()
     os.chdir(os.path.join(scintillaBase, "win32"))
     win32.DepGen.Generate()
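Note: the LexGen.py hunks keep ReplaceREInFile for rewriting CURRENT_PROJECT_VERSION
in the Xcode project. ReplaceREInFile's body is not part of this diff, so the sketch
below only shows an assumed re.sub-based equivalent working on in-memory text; the
sample project line and version numbers are made up.

    import re

    # Assumed equivalent of the ReplaceREInFile call retained in
    # UpdateVersionNumbers: substitute a regex match with new text.
    def replace_re_in_text(text, pattern, replacement):
        return re.sub(pattern, replacement, text)

    versionDotted = "5.0.0"  # stands in for sci.versionDotted
    project = "CURRENT_PROJECT_VERSION = 4.4.6;\n"
    print(replace_re_in_text(project,
                             "CURRENT_PROJECT_VERSION = [0-9.]+;",
                             f"CURRENT_PROJECT_VERSION = {versionDotted};"))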
diff --git a/scripts/ScintillaData.py b/scripts/ScintillaData.py
index a8cbb9872..355b5d471 100644
--- a/scripts/ScintillaData.py
+++ b/scripts/ScintillaData.py
@@ -15,143 +15,11 @@
 #   mdyModified
 #   dmyModified
 #   myModified
-#
-# Information about lexers and properties defined in lexers
-#   lexFiles
-#     sorted list of lexer files
-#   lexerModules
-#     sorted list of module names
-#   lexerProperties
-#     sorted list of lexer properties
-#   propertyDocuments
-#     dictionary of property documentation { name: document string }
-#   sclexFromName
-#     dictionary of SCLEX_* IDs { name: SCLEX_ID }
-#   fileFromSclex
-#     dictionary of file names { SCLEX_ID: file name }
 
 # This file can be run to see the data it provides.
 # Requires Python 3.6 or later
 
-import datetime, pathlib, sys, textwrap
-
-import FileGenerator
-
-def FindModules(lexFile):
-    modules = []
-    partLine = ""
-    with lexFile.open() as f:
-        for l in f.readlines():
-            l = l.rstrip()
-            if partLine or l.startswith("LexerModule"):
-                if ")" in l:
-                    l = partLine + l
-                    l = l.replace("(", " ")
-                    l = l.replace(")", " ")
-                    l = l.replace(",", " ")
-                    parts = l.split()
-                    modules.append([parts[1], parts[2], parts[4][1:-1]])
-                    partLine = ""
-                else:
-                    partLine = partLine + l
-    return modules
-
-def FindLexersInXcode(xCodeProject):
-    lines = FileGenerator.ReadFileAsList(xCodeProject)
-
-    uidsOfBuild = {}
-    markersPBXBuildFile = ["Begin PBXBuildFile section", "", "End PBXBuildFile section"]
-    for buildLine in lines[FileGenerator.FindSectionInList(lines, markersPBXBuildFile)]:
-        # Occurs for each file in the build. Find the UIDs used for the file.
-        #\t\t[0-9A-F]+ /* [a-zA-Z]+.cxx in sources */ = {isa = PBXBuildFile; fileRef = [0-9A-F]+ /* [a-zA-Z]+ */; };
-        pieces = buildLine.split()
-        uid1 = pieces[0]
-        filename = pieces[2].split(".")[0]
-        uid2 = pieces[12]
-        uidsOfBuild[filename] = [uid1, uid2]
-
-    lexers = {}
-    markersLexers = ["/* Lexers */ =", "children", ");"]
-    for lexerLine in lines[FileGenerator.FindSectionInList(lines, markersLexers)]:
-        #\t\t\t\t[0-9A-F]+ /* [a-zA-Z]+.cxx */,
-        uid, _, rest = lexerLine.partition("/* ")
-        uid = uid.strip()
-        lexer, _, _ = rest.partition(".")
-        lexers[lexer] = uidsOfBuild[lexer]
-
-    return lexers
-
-# Properties that start with lexer. or fold. are automatically found but there are some
-# older properties that don't follow this pattern so must be explicitly listed.
-knownIrregularProperties = [
-    "fold",
-    "styling.within.preprocessor",
-    "tab.timmy.whinge.level",
-    "asp.default.language",
-    "html.tags.case.sensitive",
-    "ps.level",
-    "ps.tokenize",
-    "sql.backslash.escapes",
-    "nsis.uservars",
-    "nsis.ignorecase"
-]
-
-def FindProperties(lexFile):
-    properties = {}
-    with open(lexFile) as f:
-        for l in f.readlines():
-            if ("GetProperty" in l or "DefineProperty" in l) and "\"" in l:
-                l = l.strip()
-                if not l.startswith("//"):    # Drop comments
-                    propertyName = l.split("\"")[1]
-                    if propertyName.lower() == propertyName:
-                        # Only allow lower case property names
-                        if propertyName in knownIrregularProperties or \
-                            propertyName.startswith("fold.") or \
-                            propertyName.startswith("lexer."):
-                            properties[propertyName] = 1
-    return properties
-
-def FindPropertyDocumentation(lexFile):
-    documents = {}
-    with lexFile.open() as f:
-        name = ""
-        for l in f.readlines():
-            l = l.strip()
-            if "// property " in l:
-                propertyName = l.split()[2]
-                if propertyName.lower() == propertyName:
-                    # Only allow lower case property names
-                    name = propertyName
-                    documents[name] = ""
-            elif "DefineProperty" in l and "\"" in l:
-                propertyName = l.split("\"")[1]
-                if propertyName.lower() == propertyName:
-                    # Only allow lower case property names
-                    name = propertyName
-                    documents[name] = ""
-            elif name:
-                if l.startswith("//"):
-                    if documents[name]:
-                        documents[name] += " "
-                    documents[name] += l[2:].strip()
-                elif l.startswith("\""):
-                    l = l[1:].strip()
-                    if l.endswith(";"):
-                        l = l[:-1].strip()
-                    if l.endswith(")"):
-                        l = l[:-1].strip()
-                    if l.endswith("\""):
-                        l = l[:-1]
-                    # Fix escaped double quotes
-                    l = l.replace("\\\"", "\"")
-                    documents[name] += l
-                else:
-                    name = ""
-    for name in list(documents.keys()):
-        if documents[name] == "":
-            del documents[name]
-    return documents
+import datetime, pathlib, sys
 
 def FindCredits(historyFile, removeLinks=True):
     credits = []
@@ -166,8 +34,8 @@
         if stage == 1 and l.startswith("<td>"):
             credit = l[4:-5]
             if removeLinks and "<a" in l:
-                title, a, rest = credit.partition("<a href=")
-                urlplus, bracket, end = rest.partition(">")
+                title, _a, rest = credit.partition("<a href=")
+                urlplus, _bracket, end = rest.partition(">")
                 name = end.split("<")[0]
                 url = urlplus[1:-1]
                 credit = title.strip()
@@ -177,12 +45,6 @@
             credits.append(credit)
     return credits
 
-def ciKey(a):
-    return str(a).lower()
-
-def SortListInsensitive(l):
-    l.sort(key=ciKey)
-
 class ScintillaData:
     def __init__(self, scintillaRoot):
         # Discover version information
@@ -208,57 +70,13 @@
         # ScintillaHistory.html -- only first should change
         self.myModified = monthModified + " " + self.yearModified
 
-        # Find all the lexer source code files
-        lexFilePaths = list((scintillaRoot / "lexers").glob("Lex*.cxx"))
-        SortListInsensitive(lexFilePaths)
-        self.lexFiles = [f.stem for f in lexFilePaths]
-        self.lexerModules = []
-        lexerProperties = set()
-        self.propertyDocuments = {}
-        self.sclexFromName = {}
-        self.fileFromSclex = {}
-        for lexFile in lexFilePaths:
-            modules = FindModules(lexFile)
-            for module in modules:
-                self.sclexFromName[module[2]] = module[1]
-                self.fileFromSclex[module[1]] = lexFile
-                self.lexerModules.append(module[0])
-            for k in FindProperties(lexFile).keys():
-                lexerProperties.add(k)
-            documents = FindPropertyDocumentation(lexFile)
-            for k in documents.keys():
-                if k not in self.propertyDocuments:
-                    self.propertyDocuments[k] = documents[k]
-        SortListInsensitive(self.lexerModules)
-        self.lexerProperties = list(lexerProperties)
-        SortListInsensitive(self.lexerProperties)
-
         self.credits = FindCredits(scintillaRoot / "doc" / "ScintillaHistory.html")
 
-def printWrapped(text):
-    print(textwrap.fill(text, subsequent_indent=" "))
-
 if __name__=="__main__":
     sci = ScintillaData(pathlib.Path(__file__).resolve().parent.parent)
     print("Version %s %s %s" % (sci.version, sci.versionDotted, sci.versionCommad))
     print("Date last modified %s %s %s %s %s" % (
         sci.dateModified, sci.yearModified, sci.mdyModified, sci.dmyModified, sci.myModified))
-    printWrapped(str(len(sci.lexFiles)) + " lexer files: " + ", ".join(sci.lexFiles))
-    printWrapped(str(len(sci.lexerModules)) + " lexer modules: " + ", ".join(sci.lexerModules))
-    print("Lexer name to ID:")
-    lexNames = sorted(sci.sclexFromName.keys())
-    for lexName in lexNames:
-        sclex = sci.sclexFromName[lexName]
-        fileName = sci.fileFromSclex[sclex].name
-        print(" " + lexName + " -> " + sclex + " in " + fileName)
-    printWrapped("Lexer properties: " + ", ".join(sci.lexerProperties))
-    print("Lexer property documentation:")
-    documentProperties = list(sci.propertyDocuments.keys())
-    SortListInsensitive(documentProperties)
-    for k in documentProperties:
-        print(" " + k)
-        print(textwrap.fill(sci.propertyDocuments[k], initial_indent=" ",
-            subsequent_indent=" "))
     print("Credits:")
     for c in sci.credits:
         sys.stdout.buffer.write(b" " + c.encode("utf-8") + b"\n")
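Note: most of the ScintillaData.py deletion is the lexer discovery code (FindModules,
FindLexersInXcode, FindProperties, FindPropertyDocumentation and the per-lexer fields
of ScintillaData). For reference, the sketch below condenses how the removed
FindModules parsed LexerModule declarations; it runs on an in-memory sample line
rather than a real lexers/Lex*.cxx file, and the sample declaration is illustrative
only.

    # Condensed sketch of the removed FindModules: turn each
    # LexerModule lmX(SCLEX_X, factory, "name", ...); declaration into
    # whitespace-separated tokens and keep [module, SCLEX ID, name].
    def find_modules(lines):
        modules = []
        partLine = ""
        for l in lines:
            l = l.rstrip()
            if partLine or l.startswith("LexerModule"):
                if ")" in l:
                    l = partLine + l
                    for ch in "(),":
                        l = l.replace(ch, " ")
                    parts = l.split()
                    modules.append([parts[1], parts[2], parts[4][1:-1]])
                    partLine = ""
                else:
                    partLine = partLine + l
        return modules

    # Illustrative declaration of the kind found in lexers/Lex*.cxx:
    sample = ['LexerModule lmCPP(SCLEX_CPP, LexerCPP::LexerFactoryCPP, "cpp", cppWordLists);']
    print(find_modules(sample))  # [['lmCPP', 'SCLEX_CPP', 'cpp']]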