From d5ac8981a694e8bba957c4b65831e3690e71a662 Mon Sep 17 00:00:00 2001
From: nyamatongwe
Date: Sat, 26 May 2012 13:26:11 +1000
Subject: For case-insensitive UTF-8 searching, use UTF8Classify for finding
 valid character width so compatible with other similar code. Optimize
 treatment of single byte ASCII characters and also optimize loop conditions.
 These mostly make up for the performance decrease from calling UTF8Classify.
 Add support definitions UTF8MaxBytes and UTF8IsAscii in UniConversion.
 Remove ExtractChar as no longer needed.

---
 src/Document.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'src/Document.h')

diff --git a/src/Document.h b/src/Document.h
index ec41603eb..18bf00a3d 100644
--- a/src/Document.h
+++ b/src/Document.h
@@ -352,7 +352,6 @@ public:
 	int NextWordEnd(int pos, int delta);
 	int SCI_METHOD Length() const { return cb.Length(); }
 	void Allocate(int newSize) { cb.Allocate(newSize); }
-	size_t ExtractChar(int pos, char *bytes);
 	bool MatchesWordOptions(bool word, bool wordStart, int pos, int length);
 	long FindText(int minPos, int maxPos, const char *search, bool caseSensitive, bool word,
 	              bool wordStart, bool regExp, int flags, int *length, CaseFolder *pcf);
--
cgit v1.2.3
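
The change described in the commit message centres on walking UTF-8 text one character at a time by its byte width, with a fast path for single-byte ASCII characters. The sketch below is a minimal, hypothetical illustration of that idea, not the Scintilla implementation: the helpers UTF8CharWidth, FoldCharacter, and FoldUTF8 and the simplified folding logic are assumptions made for this example; only the names UTF8MaxBytes and UTF8IsAscii echo definitions mentioned in the commit message, and the real UTF8Classify also validates continuation bytes, which this stand-in does not.

    // Hypothetical sketch (not the Scintilla sources): advance through UTF-8
    // text by character width, treating single-byte ASCII as a fast path.
    #include <cctype>
    #include <cstddef>
    #include <string>

    const int UTF8MaxBytes = 4;              // longest valid UTF-8 sequence

    inline bool UTF8IsAscii(unsigned char ch) {
        return ch < 0x80;                    // single-byte characters need no classification
    }

    // Stand-in for UTF8Classify: return the width in bytes of the character
    // starting at p, or 1 for an invalid or truncated lead byte so the scan
    // always advances.
    inline int UTF8CharWidth(const unsigned char *p, size_t lenAvailable) {
        if (UTF8IsAscii(p[0]))
            return 1;
        int width = 1;
        if ((p[0] & 0xE0) == 0xC0) width = 2;
        else if ((p[0] & 0xF0) == 0xE0) width = 3;
        else if ((p[0] & 0xF8) == 0xF0) width = 4;
        if (static_cast<size_t>(width) > lenAvailable)
            return 1;                        // truncated sequence: treat as one byte
        return width;
    }

    // Fold one character to lower case; multi-byte characters are copied
    // unchanged here, where a real case folder would consult a folding table.
    size_t FoldCharacter(const unsigned char *p, int width, char *out) {
        if (width == 1) {
            out[0] = static_cast<char>(std::tolower(p[0]));
            return 1;
        }
        for (int i = 0; i < width; i++)
            out[i] = static_cast<char>(p[i]);
        return static_cast<size_t>(width);
    }

    // Produce a folded copy of the text that a case-insensitive matcher could
    // compare against a folded search string.
    std::string FoldUTF8(const std::string &text) {
        std::string folded;
        size_t pos = 0;
        while (pos < text.length()) {
            const unsigned char *p =
                reinterpret_cast<const unsigned char *>(text.c_str()) + pos;
            const int width = UTF8CharWidth(p, text.length() - pos);
            char buffer[UTF8MaxBytes];
            const size_t lenFolded = FoldCharacter(p, width, buffer);
            folded.append(buffer, lenFolded);
            pos += width;
        }
        return folded;
    }

The ASCII fast path matters because most bytes in typical documents are below 0x80, so classifying them with a single comparison offsets most of the cost of calling a full classification routine on the remaining multi-byte characters, which is the trade-off the commit message describes.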