From 92bd3a82d73dd2ea22df882b0b62e7f5878e5420 Mon Sep 17 00:00:00 2001 From: Neil Date: Mon, 1 Jul 2013 16:37:06 +1000 Subject: Move non-platform-specific scripts into the scripts directory. Use FileGenerator module for file generation instead of code in each script. --- include/Face.py | 114 ------------ include/HFacer.py | 81 -------- qt/ScintillaEdit/WidgetGen.py | 119 +++++------- qt/ScintillaEditPy/sepbuild.py | 19 +- scripts/Face.py | 114 ++++++++++++ scripts/HFacer.py | 48 +++++ scripts/LexGen.py | 256 ++++++++++++++++++++++++++ src/LexGen.py | 408 ----------------------------------------- 8 files changed, 475 insertions(+), 684 deletions(-) delete mode 100644 include/Face.py delete mode 100755 include/HFacer.py create mode 100644 scripts/Face.py create mode 100755 scripts/HFacer.py create mode 100755 scripts/LexGen.py delete mode 100755 src/LexGen.py diff --git a/include/Face.py b/include/Face.py deleted file mode 100644 index 855d6321a..000000000 --- a/include/Face.py +++ /dev/null @@ -1,114 +0,0 @@ -# Module for reading and parsing Scintilla.iface file - -def sanitiseLine(line): - if line[-1:] == '\n': line = line[:-1] - if line.find("##") != -1: - line = line[:line.find("##")] - line = line.strip() - return line - -def decodeFunction(featureVal): - retType, rest = featureVal.split(" ", 1) - nameIdent, params = rest.split("(") - name, value = nameIdent.split("=") - params, rest = params.split(")") - param1, param2 = params.split(",") - return retType, name, value, param1, param2 - -def decodeEvent(featureVal): - retType, rest = featureVal.split(" ", 1) - nameIdent, params = rest.split("(") - name, value = nameIdent.split("=") - return retType, name, value - -def decodeParam(p): - param = p.strip() - type = "" - name = "" - value = "" - if " " in param: - type, nv = param.split(" ") - if "=" in nv: - name, value = nv.split("=") - else: - name = nv - return type, name, value - -class Face: - - def __init__(self): - self.order = [] - self.features = {} - self.values = {} - self.events = {} - - def ReadFromFile(self, name): - currentCategory = "" - currentComment = [] - currentCommentFinished = 0 - file = open(name) - for line in file.readlines(): - line = sanitiseLine(line) - if line: - if line[0] == "#": - if line[1] == " ": - if currentCommentFinished: - currentComment = [] - currentCommentFinished = 0 - currentComment.append(line[2:]) - else: - currentCommentFinished = 1 - featureType, featureVal = line.split(" ", 1) - if featureType in ["fun", "get", "set"]: - try: - retType, name, value, param1, param2 = decodeFunction(featureVal) - except ValueError: - print("Failed to decode %s" % line) - raise - p1 = decodeParam(param1) - p2 = decodeParam(param2) - self.features[name] = { - "FeatureType": featureType, - "ReturnType": retType, - "Value": value, - "Param1Type": p1[0], "Param1Name": p1[1], "Param1Value": p1[2], - "Param2Type": p2[0], "Param2Name": p2[1], "Param2Value": p2[2], - "Category": currentCategory, "Comment": currentComment - } - if value in self.values: - raise Exception("Duplicate value " + value + " " + name) - self.values[value] = 1 - self.order.append(name) - elif featureType == "evt": - retType, name, value = decodeEvent(featureVal) - self.features[name] = { - "FeatureType": featureType, - "ReturnType": retType, - "Value": value, - "Category": currentCategory, "Comment": currentComment - } - if value in self.events: - raise Exception("Duplicate event " + value + " " + name) - self.events[value] = 1 - self.order.append(name) - elif featureType == "cat": - 
currentCategory = featureVal - elif featureType == "val": - try: - name, value = featureVal.split("=", 1) - except ValueError: - print("Failure %s" % featureVal) - raise Exception() - self.features[name] = { - "FeatureType": featureType, - "Category": currentCategory, - "Value": value } - self.order.append(name) - elif featureType == "enu" or featureType == "lex": - name, value = featureVal.split("=", 1) - self.features[name] = { - "FeatureType": featureType, - "Category": currentCategory, - "Value": value } - self.order.append(name) - diff --git a/include/HFacer.py b/include/HFacer.py deleted file mode 100755 index aa6b0f0a2..000000000 --- a/include/HFacer.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python -# HFacer.py - regenerate the Scintilla.h and SciLexer.h files from the Scintilla.iface interface -# definition file. -# The header files are copied to a temporary file apart from the section between a /* ++Autogenerated*/ -# comment and a /* --Autogenerated*/ comment which is generated by the printHFile and printLexHFile -# functions. After the temporary file is created, it is copied back to the original file name. - -import sys -import os -import Face - -def Contains(s,sub): - return s.find(sub) != -1 - -def printLexHFile(f,out): - for name in f.order: - v = f.features[name] - if v["FeatureType"] in ["val"]: - if Contains(name, "SCE_") or Contains(name, "SCLEX_"): - out.write("#define " + name + " " + v["Value"] + "\n") - -def printHFile(f,out): - previousCategory = "" - for name in f.order: - v = f.features[name] - if v["Category"] != "Deprecated": - if v["Category"] == "Provisional" and previousCategory != "Provisional": - out.write("#ifndef SCI_DISABLE_PROVISIONAL\n") - previousCategory = v["Category"] - if v["FeatureType"] in ["fun", "get", "set"]: - featureDefineName = "SCI_" + name.upper() - out.write("#define " + featureDefineName + " " + v["Value"] + "\n") - elif v["FeatureType"] in ["evt"]: - featureDefineName = "SCN_" + name.upper() - out.write("#define " + featureDefineName + " " + v["Value"] + "\n") - elif v["FeatureType"] in ["val"]: - if not (Contains(name, "SCE_") or Contains(name, "SCLEX_")): - out.write("#define " + name + " " + v["Value"] + "\n") - out.write("#endif\n") - -def CopyWithInsertion(input, output, genfn, definition): - copying = 1 - for line in input.readlines(): - if copying: - output.write(line) - if Contains(line, "/* ++Autogenerated"): - copying = 0 - genfn(definition, output) - if Contains(line, "/* --Autogenerated"): - copying = 1 - output.write(line) - -def contents(filename): - f = open(filename) - t = f.read() - f.close() - return t - -def Regenerate(filename, genfn, definition): - inText = contents(filename) - tempname = "HFacer.tmp" - out = open(tempname,"w") - hfile = open(filename) - CopyWithInsertion(hfile, out, genfn, definition) - out.close() - hfile.close() - outText = contents(tempname) - if inText == outText: - os.unlink(tempname) - else: - os.unlink(filename) - os.rename(tempname, filename) - -f = Face.Face() -try: - f.ReadFromFile("Scintilla.iface") - Regenerate("Scintilla.h", printHFile, f) - Regenerate("SciLexer.h", printLexHFile, f) - print("Maximum ID is %s" % max([x for x in f.values if int(x) < 3000])) -except: - raise diff --git a/qt/ScintillaEdit/WidgetGen.py b/qt/ScintillaEdit/WidgetGen.py index 322c7dc7d..b53fe988c 100644 --- a/qt/ScintillaEdit/WidgetGen.py +++ b/qt/ScintillaEdit/WidgetGen.py @@ -7,12 +7,10 @@ import os import getopt scintillaDirectory = "../.." 
-scintillaIncludeDirectory = os.path.join(scintillaDirectory, "include") -sys.path.append(scintillaIncludeDirectory) +scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts") +sys.path.append(scintillaScriptsDirectory) import Face - -def Contains(s,sub): - return s.find(sub) != -1 +from FileGenerator import GenerateFile def underscoreName(s): # Name conversion fixes to match gtkscintilla2 @@ -82,17 +80,20 @@ def arguments(v, stringResult, options): ret = ret + p2Type + " " + normalisedName(v["Param2Name"], options) return ret -def printPyFile(f,out, options): +def printPyFile(f, options): + out = [] for name in f.order: v = f.features[name] if v["Category"] != "Deprecated": feat = v["FeatureType"] if feat in ["val"]: - out.write(name + "=" + v["Value"] + "\n") + out.append(name + "=" + v["Value"]) if feat in ["evt"]: - out.write("SCN_" + name.upper() + "=" + v["Value"] + "\n") + out.append("SCN_" + name.upper() + "=" + v["Value"]) + return out -def printHFile(f,out, options): +def printHFile(f, options): + out = [] for name in f.order: v = f.features[name] if v["Category"] != "Deprecated": @@ -104,9 +105,10 @@ def printHFile(f,out, options): stringResult = v["Param2Type"] == "stringresult" if stringResult: returnType = "QByteArray" - out.write("\t" + returnType + " " + normalisedName(name, options, feat) + "(") - out.write(arguments(v, stringResult, options)) - out.write(")" + constDeclarator + ";\n") + out.append("\t" + returnType + " " + normalisedName(name, options, feat) + "(" + + arguments(v, stringResult, options)+ + ")" + constDeclarator + ";") + return out def methodNames(f, options): for name in f.order: @@ -117,7 +119,8 @@ def methodNames(f, options): if checkTypes(name, v): yield normalisedName(name, options) -def printCPPFile(f,out, options): +def printCPPFile(f, options): + out = [] for name in f.order: v = f.features[name] if v["Category"] != "Deprecated": @@ -133,75 +136,39 @@ def printCPPFile(f,out, options): returnStatement = "" if returnType != "void": returnStatement = "return " - out.write(returnType + " ScintillaEdit::" + normalisedName(name, options, feat) + "(") - out.write(arguments(v, stringResult, options)) - out.write(")" + constDeclarator + " {\n") + out.append(returnType + " ScintillaEdit::" + normalisedName(name, options, feat) + "(" + + arguments(v, stringResult, options) + + ")" + constDeclarator + " {") + returns = "" if stringResult: - out.write(" " + returnStatement + "TextReturner(" + featureDefineName + ", ") + returns += " " + returnStatement + "TextReturner(" + featureDefineName + ", " if "*" in cppAlias(v["Param1Type"]): - out.write("(uptr_t)") + returns += "(uptr_t)" if v["Param1Name"]: - out.write(normalisedName(v["Param1Name"], options)) + returns += normalisedName(v["Param1Name"], options) else: - out.write("0") - out.write(");\n") + returns += "0" + returns += ");" else: - out.write(" " + returnStatement + "send(" + featureDefineName + ", ") + returns += " " + returnStatement + "send(" + featureDefineName + ", " if "*" in cppAlias(v["Param1Type"]): - out.write("(uptr_t)") + returns += "(uptr_t)" if v["Param1Name"]: - out.write(normalisedName(v["Param1Name"], options)) + returns += normalisedName(v["Param1Name"], options) else: - out.write("0") - out.write(", ") + returns += "0" + returns += ", " if "*" in cppAlias(v["Param2Type"]): - out.write("(sptr_t)") + returns += "(sptr_t)" if v["Param2Name"]: - out.write(normalisedName(v["Param2Name"], options)) + returns += normalisedName(v["Param2Name"], options) else: - out.write("0") - 
out.write(");\n") - out.write("}\n") - out.write("\n") - -def CopyWithInsertion(input, output, genfn, definition, options): - copying = 1 - for line in input.readlines(): - if copying: - output.write(line) - if "/* ++Autogenerated" in line or "# ++Autogenerated" in line or "" in definition): - definition = definition.replace(" -->", "") - listid = 0 - if definition[0] in string.digits: - listid = int(definition[:1]) - definition = definition[2:] - # Hide double slashes as a control character - definition = definition.replace("\\\\", "\001") - # Do some normal C style transforms - definition = definition.replace("\\n", "\n") - definition = definition.replace("\\t", "\t") - # Get the doubled backslashes back as single backslashes - definition = definition.replace("\001", "\\") - startRepeat = definition.find("\\(") - endRepeat = definition.find("\\)") - intro = definition[:startRepeat] - out = "" - if intro.endswith("\n"): - pos = 0 - else: - pos = len(intro) - out += intro - middle = definition[startRepeat+2:endRepeat] - for i in lists[listid]: - item = middle.replace("\\*", i) - if pos and (pos + len(item) >= 80): - out += "\\\n" - pos = 0 - out += item - pos += len(item) - if item.endswith("\n"): - pos = 0 - outro = definition[endRepeat+2:] - out += outro - out = out.replace("\n", eolType) # correct EOLs in generated content - output.append(out) - elif line.startswith(commentPrefix + "--Autogenerated"): - copying = 1 - if retainDefs: - output.append(line) - output = [line.rstrip(" \t") for line in output] # trim trailing whitespace - return eolType.join(output) + eolType - -def UpdateFile(filename, updated): - """ If the file is different to updated then copy updated - into the file else leave alone so CVS and make don't treat - it as modified. """ - try: - infile = open(filename, "rb") - except IOError: # File is not there yet - out = open(filename, "wb") - out.write(updated.encode('utf-8')) - out.close() - print("New %s" % filename) - return - original = infile.read() - infile.close() - original = original.decode('utf-8') - if updated != original: - os.unlink(filename) - out = open(filename, "wb") - out.write(updated.encode('utf-8')) - out.close() - print("Changed %s " % filename) - #~ else: - #~ print "Unchanged", filename - -def Generate(inpath, outpath, commentPrefix, eolType, *lists): - """Generate 'outpath' from 'inpath'. - - "eolType" indicates the type of EOLs to use in the generated - file. It should be one of following constants: LF, CRLF, - CR, or NATIVE. - """ - #print "generate '%s' -> '%s' (comment prefix: %r, eols: %r)"\ - # % (inpath, outpath, commentPrefix, eolType) - try: - infile = open(inpath, "rb") - except IOError: - print("Can not open %s" % inpath) - return - original = infile.read() - infile.close() - original = original.decode('utf-8') - updated = CopyWithInsertion(original, commentPrefix, - inpath == outpath, eolType, *lists) - UpdateFile(outpath, updated) - -def Regenerate(filename, commentPrefix, eolType, *lists): - """Regenerate the given file. - - "eolType" indicates the type of EOLs to use in the generated - file. It should be one of following constants: LF, CRLF, - CR, or NATIVE. - """ - Generate(filename, filename, commentPrefix, eolType, *lists) - -def FindModules(lexFile): - modules = [] - f = open(lexFile) - for l in f.readlines(): - if l.startswith("LexerModule"): - l = l.replace("(", " ") - modules.append(l.split()[1]) - return modules - -# Properties that start with lexer. or fold. 
are automatically found but there are some -# older properties that don't follow this pattern so must be explicitly listed. -knownIrregularProperties = [ - "fold", - "styling.within.preprocessor", - "tab.timmy.whinge.level", - "asp.default.language", - "html.tags.case.sensitive", - "ps.level", - "ps.tokenize", - "sql.backslash.escapes", - "nsis.uservars", - "nsis.ignorecase" -] - -def FindProperties(lexFile): - properties = {} - f = open(lexFile) - for l in f.readlines(): - if ("GetProperty" in l or "DefineProperty" in l) and "\"" in l: - l = l.strip() - if not l.startswith("//"): # Drop comments - propertyName = l.split("\"")[1] - if propertyName.lower() == propertyName: - # Only allow lower case property names - if propertyName in knownIrregularProperties or \ - propertyName.startswith("fold.") or \ - propertyName.startswith("lexer."): - properties[propertyName] = 1 - return properties - -def FindPropertyDocumentation(lexFile): - documents = {} - f = open(lexFile) - name = "" - for l in f.readlines(): - l = l.strip() - if "// property " in l: - propertyName = l.split()[2] - if propertyName.lower() == propertyName: - # Only allow lower case property names - name = propertyName - documents[name] = "" - elif "DefineProperty" in l and "\"" in l: - propertyName = l.split("\"")[1] - if propertyName.lower() == propertyName: - # Only allow lower case property names - name = propertyName - documents[name] = "" - elif name: - if l.startswith("//"): - if documents[name]: - documents[name] += " " - documents[name] += l[2:].strip() - elif l.startswith("\""): - l = l[1:].strip() - if l.endswith(";"): - l = l[:-1].strip() - if l.endswith(")"): - l = l[:-1].strip() - if l.endswith("\""): - l = l[:-1] - # Fix escaped double quotes - l = l.replace("\\\"", "\"") - documents[name] += l - else: - name = "" - for name in list(documents.keys()): - if documents[name] == "": - del documents[name] - return documents - -def ciCompare(a,b): - return cmp(a.lower(), b.lower()) - -def ciKey(a): - return a.lower() - -def sortListInsensitive(l): - try: # Try key function - l.sort(key=ciKey) - except TypeError: # Earlier version of Python, so use comparison function - l.sort(ciCompare) - -def UpdateLineInFile(path, linePrefix, lineReplace): - lines = [] - updated = False - with codecs.open(path, "r", "utf-8") as f: - for l in f.readlines(): - l = l.rstrip() - if not updated and l.startswith(linePrefix): - lines.append(lineReplace) - updated = True - else: - lines.append(l) - contents = NATIVE.join(lines) + NATIVE - UpdateFile(path, contents) - -host = "prdownloads.sourceforge.net/" -def UpdateDownloadLinks(path, version): - lines = [] - with open(path, "r") as f: - for l in f.readlines(): - l = l.rstrip() - if host in l: - start, prd, rest = l.partition(host) - pth, dot, ending = rest.partition(".") - pthNew = pth[:-3] + version.rstrip() - lineWithNewVersion = start + prd +pthNew + dot + ending - lines.append(lineWithNewVersion) - else: - lines.append(l) - contents = NATIVE.join(lines) + NATIVE - UpdateFile(path, contents) - -def UpdateVersionNumbers(root): - with open(root + "scintilla/version.txt") as f: - version = f.read() - versionDotted = version[0] + '.' + version[1] + '.' 
+ version[2]
- versionCommad = version[0] + ', ' + version[1] + ', ' + version[2] + ', 0'
- with open(root + "scintilla/doc/index.html") as f:
- dateModified = [l for l in f.readlines() if "Date.Modified" in l][0].split('\"')[3]
- # 20130602
- # index.html, SciTE.html
- dtModified = datetime.datetime.strptime(dateModified, "%Y%m%d")
- yearModified = dateModified[0:4]
- monthModified = dtModified.strftime("%B")
- dayModified = "%d" % dtModified.day
- mdyModified = monthModified + " " + dayModified + " " + yearModified
- # May 22 2013
- # index.html, SciTE.html
- dmyModified = dayModified + " " + monthModified + " " + yearModified
- # 22 May 2013
- # ScintillaHistory.html -- only first should change
- myModified = monthModified + " " + yearModified
- # scite/src/SciTE.h
- #define COPYRIGHT_DATES "December 1998-May 2013"
- #define COPYRIGHT_YEARS "1998-2013"
- dateLine = f.readlines()
-
- UpdateLineInFile(root + "scintilla/win32/ScintRes.rc", "#define VERSION_SCINTILLA",
- "#define VERSION_SCINTILLA \"" + versionDotted + "\"")
- UpdateLineInFile(root + "scintilla/win32/ScintRes.rc", "#define VERSION_WORDS",
- "#define VERSION_WORDS " + versionCommad)
- UpdateLineInFile(root + "scintilla/qt/ScintillaEditBase/ScintillaEditBase.pro",
- "VERSION =",
- "VERSION = " + versionDotted)
- UpdateLineInFile(root + "scintilla/qt/ScintillaEdit/ScintillaEdit.pro",
- "VERSION =",
- "VERSION = " + versionDotted)
- UpdateLineInFile(root + "scintilla/doc/ScintillaDownload.html", " Release",
- " Release " + versionDotted)
- UpdateDownloadLinks(root + "scintilla/doc/ScintillaDownload.html", version)
- UpdateLineInFile(root + "scintilla/doc/index.html",
- ' Release version',
- ' Release version ' + versionDotted + '<br />')
- UpdateLineInFile(root + "scintilla/doc/index.html",
- ' Site last modified',
- ' Site last modified ' + mdyModified + '<br />')
- UpdateLineInFile(root + "scintilla/doc/ScintillaHistory.html",
- ' Released ',
- ' Released ' + dmyModified + '.')
-
- if os.path.exists(root + "scite"):
- UpdateLineInFile(root + "scite/src/SciTE.h", "#define VERSION_SCITE",
- "#define VERSION_SCITE \"" + versionDotted + "\"")
- UpdateLineInFile(root + "scite/src/SciTE.h", "#define VERSION_WORDS",
- "#define VERSION_WORDS " + versionCommad)
- UpdateLineInFile(root + "scite/src/SciTE.h", "#define COPYRIGHT_DATES",
- '#define COPYRIGHT_DATES "December 1998-' + myModified + '"')
- UpdateLineInFile(root + "scite/src/SciTE.h", "#define COPYRIGHT_YEARS",
- '#define COPYRIGHT_YEARS "1998-' + yearModified + '"')
- UpdateLineInFile(root + "scite/doc/SciTEDownload.html", " Release",
- " Release " + versionDotted)
- UpdateDownloadLinks(root + "scite/doc/SciTEDownload.html", version)
- UpdateLineInFile(root + "scite/doc/SciTE.html",
- ' Release version',
- ' Release version ' + versionDotted + '<br />')
- UpdateLineInFile(root + "scite/doc/SciTE.html",
- ' Site last modified',
- ' Site last modified ' + mdyModified + '<br />')
- UpdateLineInFile(root + "scite/doc/SciTE.html",
- ' <meta name="Date.Modified"',
- ' <meta name="Date.Modified" content="' + dateModified + '" />')
-
-def RegenerateAll():
- root="../../"
-
- # Find all the lexer source code files
- lexFilePaths = glob.glob(root + "scintilla/lexers/Lex*.cxx")
- sortListInsensitive(lexFilePaths)
- lexFiles = [os.path.basename(f)[:-4] for f in lexFilePaths]
- print(lexFiles)
- lexerModules = []
- lexerProperties = {}
- propertyDocuments = {}
- for lexFile in lexFilePaths:
- lexerModules.extend(FindModules(lexFile))
- for k in FindProperties(lexFile).keys():
- lexerProperties[k] = 1
- documents = FindPropertyDocumentation(lexFile)
- for k in documents.keys():
- if k not in propertyDocuments:
- propertyDocuments[k] = documents[k]
- sortListInsensitive(lexerModules)
- lexerProperties = list(lexerProperties.keys())
- sortListInsensitive(lexerProperties)
-
- # Generate HTML to document each property
- # This is done because tags can not be safely put inside comments in HTML
- documentProperties = list(propertyDocuments.keys())
- sortListInsensitive(documentProperties)
- propertiesHTML = []
- for k in documentProperties:
- propertiesHTML.append("\t<tr id='property.%s'>\n\t<td>%s</td>\n\t<td>%s</td>\n\t</tr>" %
- (k, k, propertyDocuments[k]))
-
- # Find all the SciTE properties files
- otherProps = ["abbrev.properties", "Embedded.properties", "SciTEGlobal.properties", "SciTE.properties"]
- if os.path.exists(root + "scite"):
- propFilePaths = glob.glob(root + "scite/src/*.properties")
- sortListInsensitive(propFilePaths)
- propFiles = [os.path.basename(f) for f in propFilePaths if os.path.basename(f) not in otherProps]
- sortListInsensitive(propFiles)
- print(propFiles)
-
- Regenerate(root + "scintilla/src/Catalogue.cxx", "//", NATIVE, lexerModules)
- Regenerate(root + "scintilla/win32/scintilla.mak", "#", NATIVE, lexFiles)
- if os.path.exists(root + "scite"):
- Regenerate(root + "scite/win32/makefile", "#", NATIVE, propFiles)
- Regenerate(root + "scite/win32/scite.mak", "#", NATIVE, propFiles)
- Regenerate(root + "scite/src/SciTEProps.cxx", "//", NATIVE, lexerProperties)
- Regenerate(root + "scite/doc/SciTEDoc.html", "
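
Note: scripts/FileGenerator.py itself is not included in this patch; the only visible trace of its interface is the "from FileGenerator import GenerateFile" import added to WidgetGen.py. The sketch below is an illustrative reconstruction of the pattern the commit message describes: generator functions return lists of lines, and a shared helper splices them between the ++Autogenerated and --Autogenerated markers. CopyWithInsertion mirrors the deleted helpers of the same name in HFacer.py and src/LexGen.py; RegenerateSketch and printLexHFileLines are hypothetical names used for illustration, not the real FileGenerator API.

# regen_sketch.py -- illustrative only; helper names and signatures are
# assumptions, not the actual scripts/FileGenerator.py interface.

def CopyWithInsertion(lines, commentPrefix, generated):
    # Copy the input, replacing whatever sits between the ++Autogenerated
    # and --Autogenerated markers with the freshly generated lines.
    output = []
    copying = True
    for line in lines:
        if copying:
            output.append(line)
            if (commentPrefix + "++Autogenerated") in line:
                copying = False
                output.extend(generated)
        elif (commentPrefix + "--Autogenerated") in line:
            copying = True
            output.append(line)
    return output

def RegenerateSketch(filename, commentPrefix, generated):
    # Rewrite the file only when the regenerated text differs, so make and
    # version control do not see spurious changes (same idea as UpdateFile
    # in the deleted src/LexGen.py).
    with open(filename) as f:
        original = f.read()
    updated = "\n".join(CopyWithInsertion(original.splitlines(),
                                          commentPrefix, generated)) + "\n"
    if updated != original:
        with open(filename, "w") as f:
            f.write(updated)

def printLexHFileLines(face):
    # New-style generator: returns a list of lines instead of writing to a
    # file object, like the reworked printHFile/printPyFile in WidgetGen.py.
    return ["#define " + name + " " + face.features[name]["Value"]
            for name in face.order
            if face.features[name]["FeatureType"] == "val"
            and ("SCE_" in name or "SCLEX_" in name)]

With a Face object parsed from Scintilla.iface, something like RegenerateSketch("SciLexer.h", "/* ", printLexHFileLines(f)) would refresh only the marked block of the header, which is roughly the work the relocated HFacer.py now delegates to the shared FileGenerator module instead of carrying its own copy of the logic.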