feat: add missing KF6 framework recipes

This commit is contained in:
2026-05-07 07:53:26 +01:00
parent d8d498f831
commit a69f479b52
2374 changed files with 2610246 additions and 0 deletions
@@ -0,0 +1,3 @@
default:
extension: .theme
output: themes
@@ -0,0 +1,174 @@
{
"metadata": {
"copyright": [
"SPDX-FileCopyrightText: {{scheme-author}}"
],
"license": "SPDX-License-Identifier: MIT",
"name": "{{scheme-name}}",
"revision": 1
},
"editor-colors": {
"BackgroundColor": "#{{base00-hex}}",
"BracketMatching": "#{{base02-hex}}",
"CodeFolding": "#{{base02-hex}}",
"CurrentLine": "#{{base03-hex}}",
"CurrentLineNumber": "#{{base04-hex}}",
"IconBorder": "#{{base00-hex}}",
"IndentationLine": "#{{base02-hex}}",
"LineNumbers": "#{{base03-hex}}",
"MarkBookmark": "#{{base0A-hex}}",
"MarkBreakpointActive": "#{{base0D-hex}}",
"MarkBreakpointDisabled": "#{{base03-hex}}",
"MarkBreakpointReached": "#{{base05-hex}}",
"MarkError": "#{{base08-hex}}",
"MarkExecution": "#{{base08-hex}}",
"MarkWarning": "#{{base0B-hex}}",
"ModifiedLines": "#{{base08-hex}}",
"ReplaceHighlight": "#{{base04-hex}}",
"SavedLines": "#{{base0A-hex}}",
"SearchHighlight": "#{{base01-hex}}",
"Separator": "#{{base09-hex}}",
"SpellChecking": "#{{base0A-hex}}",
"TabMarker": "#{{base02-hex}}",
"TemplateBackground": "#{{base03-hex}}",
"TemplateFocusedPlaceholder": "#{{base04-hex}}",
"TemplatePlaceholder": "#{{base0B-hex}}",
"TemplateReadOnlyPlaceholder": "#{{base03-hex}}",
"TextSelection": "#{{base02-hex}}",
"WordWrapMarker": "#{{base02-hex}}"
},
"text-styles": {
"Alert": {
"background-color": "#{{base0E-hex}}",
"bold": true,
"selected-text-color": "#{{base08-hex}}",
"text-color": "#{{base08-hex}}"
},
"Annotation": {
"selected-text-color": "#{{base05-hex}}",
"text-color": "#{{base05-hex}}"
},
"Attribute": {
"selected-text-color": "#{{base0A-hex}}",
"text-color": "#{{base0A-hex}}"
},
"BaseN": {
"selected-text-color": "#{{base09-hex}}",
"text-color": "#{{base09-hex}}"
},
"BuiltIn": {
"selected-text-color": "#{{base08-hex}}",
"text-color": "#{{base08-hex}}"
},
"Char": {
"selected-text-color": "#{{base08-hex}}",
"text-color": "#{{base08-hex}}"
},
"Comment": {
"italic": true,
"selected-text-color": "#{{base03-hex}}",
"text-color": "#{{base03-hex}}"
},
"CommentVar": {
"selected-text-color": "#{{base03-hex}}",
"text-color": "#{{base03-hex}}"
},
"Constant": {
"selected-text-color": "#{{base03-hex}}",
"text-color": "#{{base03-hex}}"
},
"ControlFlow": {
"bold": true,
"selected-text-color": "#{{base0E-hex}}",
"text-color": "#{{base0E-hex}}"
},
"DataType": {
"selected-text-color": "#{{base0E-hex}}",
"text-color": "#{{base0E-hex}}"
},
"DecVal": {
"selected-text-color": "#{{base09-hex}}",
"text-color": "#{{base09-hex}}"
},
"Documentation": {
"selected-text-color": "#{{base03-hex}}",
"text-color": "#{{base03-hex}}"
},
"Error": {
"selected-text-color": "#{{base08-hex}}",
"text-color": "#{{base08-hex}}",
"underline": true
},
"Extension": {
"bold": true,
"selected-text-color": "#{{base0A-hex}}",
"text-color": "#{{base0A-hex}}"
},
"Float": {
"selected-text-color": "#{{base09-hex}}",
"text-color": "#{{base09-hex}}"
},
"Function": {
"selected-text-color": "#{{base0D-hex}}",
"text-color": "#{{base0D-hex}}"
},
"Import": {
"selected-text-color": "#{{base0B-hex}}",
"text-color": "#{{base0B-hex}}"
},
"Information": {
"selected-text-color": "#{{base09-hex}}",
"text-color": "#{{base09-hex}}"
},
"Keyword": {
"bold": true,
"selected-text-color": "#{{base0E-hex}}",
"text-color": "#{{base0E-hex}}"
},
"Normal": {
"selected-text-color": "#{{base05-hex}}",
"text-color": "#{{base05-hex}}"
},
"Operator": {
"selected-text-color": "#{{base05-hex}}",
"text-color": "#{{base05-hex}}"
},
"Others": {
"selected-text-color": "#{{base05-hex}}",
"text-color": "#{{base05-hex}}"
},
"Preprocessor": {
"selected-text-color": "#{{base0D-hex}}",
"text-color": "#{{base0D-hex}}"
},
"RegionMarker": {
"background-color": "#{{base03-hex}}",
"selected-text-color": "#{{base0A-hex}}",
"text-color": "#{{base0A-hex}}"
},
"SpecialChar": {
"selected-text-color": "#{{base08-hex}}",
"text-color": "#{{base08-hex}}"
},
"SpecialString": {
"selected-text-color": "#{{base08-hex}}",
"text-color": "#{{base08-hex}}"
},
"String": {
"selected-text-color": "#{{base0B-hex}}",
"text-color": "#{{base0B-hex}}"
},
"Variable": {
"selected-text-color": "#{{base05-hex}}",
"text-color": "#{{base05-hex}}"
},
"VerbatimString": {
"selected-text-color": "#{{base0B-hex}}",
"text-color": "#{{base0B-hex}}"
},
"Warning": {
"selected-text-color": "#{{base0D-hex}}",
"text-color": "#{{base0D-hex}}"
}
}
}
@@ -0,0 +1,78 @@
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2023 Jonathan Poelen <jonathan.poelen@gmail.com>
# SPDX-License-Identifier: MIT
from typing import Iterable, Mapping
from xml.etree.ElementTree import ElementTree
import sys
def print_usage_and_exit():
    """Write the command usage to stderr, then terminate with exit status 1."""
    usage = '[-p] syntax.xml...\n -p show duplicate content'
    print(sys.argv[0], usage, file=sys.stderr)
    exit(1)
def normalize_bool_or_remove_if_false(d: dict[str, str], key: str) -> None:
    """Canonicalize a boolean-ish attribute: truthy spellings become '1', anything else is removed.

    Accepted truthy spellings are '1' and any capitalization of 'true'.
    Missing keys are left untouched.
    """
    if key not in d:
        return
    if d[key] == '1' or d[key].lower() == 'true':
        d[key] = '1'
    else:
        del d[key]
def remove_if_stay(d: dict[str, str], key: str) -> None:
    """Drop `key` from `d` when its value means "no transition": empty or '#stay'."""
    if d.get(key) in ('', '#stay'):
        del d[key]
# --- Command line handling ------------------------------------------------
if len(sys.argv) < 2 or sys.argv[1] in {'-h', '--help'}:
    print_usage_and_exit()
iarg = 1
show_content = sys.argv[1] == '-p'
if show_content:
    iarg += 1
    if len(sys.argv) < 3:
        # '-p' alone: at least one syntax file is still required.
        print_usage_and_exit()

# For each file, build a signature for every <context> element and group
# context names by signature; any group with more than one name is a set of
# duplicated (mergeable) contexts.
for filename in sys.argv[iarg:]:
    tree = ElementTree()
    tree.parse(filename)
    identical_contexts = {}
    for context in tree.getroot()[0].find("contexts"):
        if len(context) == 0 or (len(context) == 1 and context[0].tag == 'IncludeRules'):
            # Empty context (or a single IncludeRules child): the signature
            # is the normalized context attributes, merged with the
            # IncludeRules attributes when present.
            attrib = context.attrib
            name = attrib.pop('name')
            remove_if_stay(attrib, 'lineEndContext')
            remove_if_stay(attrib, 'lineEmptyContext')
            remove_if_stay(attrib, 'fallthroughContext')
            if len(context):
                attrib.update(context[0].attrib)
                normalize_bool_or_remove_if_false(attrib, 'includeAttrib')
            s = '\x01'.join(sorted(f'{k}={v}' for k, v in attrib.items()))
            identical_contexts.setdefault(s, []).append(name)
        else:
            # Regular context: the signature is the (order-insensitive) set
            # of its normalized rules.
            rules = set()
            for rule in context:
                attrib = rule.attrib
                remove_if_stay(attrib, 'context')
                normalize_bool_or_remove_if_false(attrib, 'dynamic')
                normalize_bool_or_remove_if_false(attrib, 'minimal')
                normalize_bool_or_remove_if_false(attrib, 'includeAttrib')
                normalize_bool_or_remove_if_false(attrib, 'firstNonSpace')
                normalize_bool_or_remove_if_false(attrib, 'lookAhead')
                s = '\x01'.join(f'{k}={v}' for k, v in sorted(attrib.items()))
                rules.add(f'{rule.tag}\x01{s}')
            identical_contexts.setdefault('\n'.join(sorted(rules)), []).append(context.attrib['name'])
    # Report every group of contexts that share a signature.
    for content, names in identical_contexts.items():
        if len(names) > 1:
            # BUG FIX: prefix the report with the file it came from so that
            # multi-file runs are readable; the original printed a literal
            # placeholder instead of the scanned file's name.
            print(f'{filename}: {names}')
            if show_content:
                print(' ', content.replace('\x01', ' ').replace('\n', '\n '))
@@ -0,0 +1,413 @@
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2023 Jonathan Poelen <jonathan.poelen@gmail.com>
# SPDX-License-Identifier: MIT
from argparse import ArgumentParser
from pathlib import Path
from typing import Iterable, Mapping
from textwrap import wrap
from xml.parsers import expat
import re

# --- Command-line interface ----------------------------------------------
parser = ArgumentParser(
    prog='generate-dot-file.py',
    description=f'''Dot file generator for xml syntax
Example:
generate-dot-file.py data/syntax/lua.xml | dot -T svg -o image.svg && xdg-open image.svg''')
parser.add_argument('-c', '--context-only', action='store_true',
                    help='Generates contexts without rules')
parser.add_argument('-r', '--resolve-entities', action='store_true',
                    help='Evaluate xml entities')
parser.add_argument('-i', '--include', action='append', default=[],
                    help='Include only contexts that respect a pattern')
parser.add_argument('-e', '--exclude', action='append', default=[],
                    help='Exclude contexts that respect a pattern')
parser.add_argument('syntax', help='XML Syntax Definition Files')
args = parser.parse_args()

# Compiled context-name filters built from the -e / -i patterns.
excludes = [re.compile(patt) for patt in args.exclude]
includes = [re.compile(patt) for patt in args.include]
context_only = args.context_only
# Context-only mode never displays rule strings, so entities can always be
# left resolved there.
resolve_entities = args.resolve_entities or context_only

# Canonical replacement text for predefined / numeric XML entities; used by
# XMLParser.entity_decl to normalize an entity declaration's value before
# recording it for reverse substitution.
global_entities = {
    '&#9;': '\\t',
    '&#37;': '%',
    '&#38;': '&',
    '&amp;': '&',
    '&#39;': "'",
    # NOTE(review): '&aquot;' is not a standard XML entity name — looks like
    # a typo for '&apos;'; confirm against the syntax files before changing.
    '&aquot;': "'",
    '&#34;': '"',
    '&quot;': '"',
    '&#60;': '<',
    '&lt;': '<',
    '&#62;': '>',
    '&gt;': '>',
}
# One regex matching any key of global_entities.
entities_finder = re.compile('|'.join(global_entities))

# Parser depth states (see XMLParser.depth).
Outside = 0
Context = 1
Rule = 2
class XMLParser:
    """Expat-driven walker over a syntax definition's <context> elements.

    Dispatches to the start_ctx / end_ctx / rule_process callbacks supplied
    by the selected output mode (context-only or full graph).
    """

    # Current nesting state: Outside, Context or Rule.
    depth = Outside
    # True while the current context passes the include/exclude filters.
    matched = False
    # Running count of contexts seen (used for subgraph cluster names).
    ictx = 0
    ctx_name = ''
    ctx_attrs: dict[str, str] = {}
    escaped_ctx_name = ''
    ctx_color = ''
    # Running index of rules within the current context (1-based).
    irule = 0
    # When False, rule String attributes are mapped back to '&entity;' form.
    resolve_entities = True
    # Resolved entity replacement text -> '&name;' notation.
    reversed_entities: dict[str, str] = {}
    resolved_entity_searcher: re.Pattern

    def __init__(self, start_ctx, end_ctx, rule_process):
        self.start_ctx = start_ctx
        self.end_ctx = end_ctx
        self.rule_process = rule_process

    def start_element(self, tag: str, attrs: dict[str, str]):
        # An element opened while inside a <context> is a rule.
        if self.depth == Context:
            self.depth = Rule
            if self.matched:
                self.irule += 1
                if not self.resolve_entities:
                    string = attrs.get('String')
                    if string:
                        attrs['String'] = self.unresolve_entities(string)
                self.rule_process(self, self.irule, tag, attrs)
        elif tag == 'context':
            name = attrs['name']
            self.depth = Context
            # Keep the context when it is not excluded and either no include
            # filter exists or at least one include pattern matches.
            self.matched = (not match_patterns(name, excludes)
                            and (not includes or match_patterns(name, includes)))
            if self.matched:
                self.irule = 0
                self.ctx_name = name
                self.ctx_attrs = attrs
                self.escaped_ctx_name = escape(name)
                self.ctx_color = compute_color(name)
                self.start_ctx(self)

    def end_element(self, name: str):
        if self.depth == Context:
            if self.matched:
                self.end_ctx(self)
            self.ictx += 1
        # NOTE(review): depth is decremented for *every* closing tag, so it
        # can drop below Outside for elements outside <contexts>; only the
        # equality checks above are ever relied upon. Rules that contain
        # child rules would desynchronize this state machine — assumes the
        # input never nests rules; confirm for exotic syntax files.
        self.depth -= 1

    def unresolve_entities(self, s: str) -> str:
        """
        expat module converts all entities. This function tries to do the
        opposite by replacing pieces of text with entities.
        The result may differ from the original text, but will be equivalent.
        """
        b = True

        def replace(m):
            nonlocal b
            b = True
            return self.reversed_entities[m[0]]

        # Repeat until a full pass performs no replacement.
        while b:
            b = False
            s = self.resolved_entity_searcher.sub(replace, s)
        return s

    def entity_decl(self, name, is_parameter_entity, value, base, system_id, public_id, notation_name):
        # Normalize the declaration's replacement text, then remember it so
        # resolved text can later be mapped back to '&name;'.
        value = entities_finder.sub(lambda m: global_entities[m[0]], value)
        self.reversed_entities[value] = f'&{name};'

    def end_doctype(self):
        # All entities are declared by now: build one regex that matches any
        # of their replacement texts.
        patt = '|'.join(re.escape(value) for value in self.reversed_entities)
        self.resolved_entity_searcher = re.compile(patt)
# Palette cycled through when assigning context colours (graphviz scheme names).
color_map = [
    '"/rdgy4/3"',
    '"/set312/1"',
    '"lightgoldenrod1"',
    '"/set312/3"',
    '"/set312/4"',
    '"/set312/5"',
    '"/set312/6"',
    '"/set312/7"',
    '"/rdpu3/2"',
    '"/purd6/3"',
    '"/ylgn4/2"',
    '"/set26/6"',
]

# Cache: key derived from a name's first two characters -> colour handed out.
picked_colors: dict[int, str] = {}


def compute_color(name: str) -> str:
    """Return a colour for `name`, stable across calls.

    The colour is keyed on the first two characters of the name, so names
    sharing that prefix share a colour; new keys cycle through color_map.
    """
    key = ord(name[0])
    if len(name) > 1:
        key += ord(name[1]) * 1024
    if key not in picked_colors:
        picked_colors[key] = color_map[len(picked_colors) % len(color_map)]
    return picked_colors[key]
def match_patterns(name: str, patterns: list[re.Pattern]) -> bool:
    """Return True when at least one pattern matches somewhere in `name`."""
    for pattern in patterns:
        if pattern.search(name):
            return True
    return False
# Matches the leading run of '#pop' directives in a context-switch name.
_pop_counter_re = re.compile('^(?:#pop)+')


def labelize(name: str) -> str:
    """Collapse a leading '#pop#pop...' run into '#pop(N)' for shorter labels.

    A single '#pop' (or no run at all) is returned unchanged.
    """
    match = _pop_counter_re.match(name)
    if match is None:
        return name
    count = len(match[0]) // 4  # each '#pop' is 4 characters
    return f'#pop({count}){name[count * 4:]}' if count > 1 else name
def stringify_attrs(attr_names: Iterable[str], attrs: Mapping[str, str]) -> str:
    """Format the requested attributes as ' name:value' label pieces.

    Values are wrapped at 40 columns so graphviz labels stay readable.
    Attributes missing from `attrs` (or with an empty value) are skipped.
    """
    s = ''
    for name in attr_names:
        attr = attrs.get(name)
        if attr:
            part = '\n'.join(wrap(attr, 40))
            # BUG FIX: the original interpolated an undefined name `v` here,
            # raising NameError whenever an attribute was present.
            s += f' {name}:{part}'
    return s
def escape(s: str) -> str:
    """Backslash-escape the characters that are special inside dot double quotes."""
    escaped = s.replace('\\', '\\\\')
    return escaped.replace('"', '\\"')
def jumpctx(s: str) -> str:
    """Return the target context after the first '!' (as in '#pop!Target'), or ''."""
    _, sep, target = s.partition('!')
    return target if sep else ''
def xml_bool(s: str | None) -> bool:
return s == '1' or s == 'true'
def push_context_attr(output: list[str],
                      escaped_origin: str, escaped_ctx_name: str, escaped_name_attr: str,
                      style: str, color: str) -> None:
    """Emit the edge for a context-level switch attribute (lineEndContext & co.).

    '#stay' draws an edge back to the context itself; '#pop...' switches get
    a dedicated intermediate node carrying the (shortened) label.  Plain
    context names are handled elsewhere and produce no output here.
    """
    if escaped_name_attr == '#stay':
        output.append(f' "{escaped_origin}" -> "{escaped_ctx_name}" [style={style},color={color}];\n')
        return
    if not escaped_name_attr.startswith('#'):
        return
    ref = f'{escaped_ctx_name}!!{escaped_name_attr}'
    output.append(
        f' "{escaped_origin}" -> "{ref}" [style={style},color={color}];\n'
        f' "{ref}" [label="{labelize(escaped_name_attr)}",color={color}];\n'
    )
def push_last_transition(output: list[str],
                         escaped_name: str, escaped_ctx_name: str, escaped_name_attr: str,
                         color: str) -> None:
    """Emit the dashed edge leaving the cluster for a context-level switch.

    '#stay' produces nothing.  A '#pop...!Target' switch connects its shared
    intermediate node to the target; a plain context name connects the last
    rule node directly to that context.
    """
    if escaped_name_attr == '#stay':
        return
    if not escaped_name_attr.startswith('#'):
        output.append(f' "{escaped_name}" -> "{escaped_name_attr}" [style=dashed,color={color}];\n')
        return
    escaped_last_ctx = jumpctx(escaped_name_attr)
    if escaped_last_ctx:
        output.append(f' "{escaped_ctx_name}!!{escaped_name_attr}" -> "{escaped_last_ctx}" [style=dashed,color={color}];\n')
# Accumulated dot-file lines; closed with '}' and printed at script end.
output = [
    'digraph G {\n',
    ' compound=true;ratio=auto\n'
]

if context_only:
    # avoid multi arrow for ctx1 -> ctx2
    krule_contexts: dict[str, int] = {}
    # shares #pop... nodes
    kpoped_contexts: dict[tuple[str, str], str] = {}

    def start_ctx(p: XMLParser):
        # Reset the per-context table of rule targets.
        krule_contexts.clear()

    def rule_process(p: XMLParser, irule: int, name: str, attrs: dict[str, str]):
        # Keep only the last rule index per target context (one edge each).
        krule_contexts[attrs.get('context') or '#stay'] = irule

    def end_ctx(p: XMLParser):
        color = p.ctx_color
        ctx_name = p.escaped_ctx_name
        output.append(f' "{ctx_name}" [style=filled,color={color}]\n')
        # Context-level switches get negative pseudo-indices so they sort
        # first and receive their distinct edge styles below.
        krule_contexts.setdefault(p.ctx_attrs.get('fallthroughContext') or '#stay', -1)
        krule_contexts.setdefault(p.ctx_attrs.get('lineEndContext') or '#stay', -2)
        krule_contexts.setdefault(p.ctx_attrs.get('lineEmptyContext') or '#stay', -3)
        # '#stay' self-loops are not drawn.
        # NOTE(review): pop() without a default assumes some rule or context
        # attribute always targets '#stay'; a context where every target is
        # explicit would raise KeyError — confirm against real syntax files.
        krule_contexts.pop('#stay')
        for rule_context, i in sorted(krule_contexts.items(), key=lambda t: t[1]):
            if i >= 0:
                style = f'color={color}'
            elif i == -1:
                style = f'style=dashed,color={color}'  # fallthroughContext
            elif i == -2:
                style = 'style=dotted,color=blue'  # lineEndContext
            else:  # if i == -3:
                style = 'style=dotted,color=purple'  # lineEmptyContext
            escaped_rule_context = escape(rule_context)
            labelized_context = labelize(escaped_rule_context)
            if rule_context.startswith('#'):
                next_context = jumpctx(escaped_rule_context)
                if next_context:
                    # '#pop...!Target': reuse the shared intermediate node
                    # when the same (label, target) pair was already emitted.
                    k = (labelized_context, next_context)
                    poped_context = kpoped_contexts.get(k)
                    if poped_context:
                        output.append(f' "{ctx_name}" -> "{poped_context}" [{style}];\n')
                    else:
                        poped_context = f'{ctx_name}!!{i}'
                        kpoped_contexts[k] = poped_context
                        output.append(f' "{ctx_name}" -> "{poped_context}" [{style}];\n'
                                      f' "{poped_context}" [label="{labelized_context}"];\n'
                                      f' "{poped_context}" -> "{next_context}"\n')
                else:
                    # Plain '#pop...' without an explicit target.
                    poped_context = f'{ctx_name}!!{i}'
                    output.append(f' "{ctx_name}" -> "{poped_context}" [{style}];\n'
                                  f' "{poped_context}" [label="{labelized_context}"];\n')
            else:
                # Direct switch to a named context.
                output.append(f' "{ctx_name}" -> "{labelized_context}" [{style}]\n')
else:
    # Full mode: one node per rule, chained in document order.
    first_line_attributes = ('attribute', 'String', 'char')  # char1 is transformed into String
    second_line_attributes = ('beginRegion', 'endRegion', 'lookAhead', 'firstNonSpace', 'column', 'additionalDeliminator', 'weakDeliminator')
    # Deferred edges to named contexts, emitted after the cluster closes.
    kdot: dict[str, tuple[str, int]] = {}
    # Tail node of the rule chain currently being emitted.
    escaped_name = ''

    def start_ctx(p: XMLParser):
        global escaped_name
        escaped_name = p.escaped_ctx_name
        kdot.clear()
        output.append(
            f' subgraph cluster{p.ictx} {{\n'
            f' "{escaped_name}" [shape=box,style=filled,color={p.ctx_color}];\n'
        )

    def rule_process(p: XMLParser, irule: int, name: str, attrs: dict[str, str]):
        global escaped_name
        color = p.ctx_color
        escaped_ctx_name = p.escaped_ctx_name
        next_name = f'{p.ctx_name}!!{irule}!!{name}'
        escaped_next_name = escape(next_name)
        rule_context = attrs.get('context', '#stay')
        # Chain this rule after the previous node (dashed = "try next rule").
        output.append(f' "{escaped_name}" -> "{escaped_next_name}" [style=dashed,color={color}];\n')
        escaped_name = escaped_next_name
        if name == 'IncludeRules':
            label = f' {rule_context}'
        else:
            if 'attribute' not in attrs:
                # Rules inherit the context's attribute when unset.
                attrs['attribute'] = p.ctx_attrs['attribute']
            if 'char1' in attrs:
                # Detect2Chars: merge char/char1 into one String for display.
                attrs['String'] = attrs.pop('char') + attrs.pop('char1')
            label = stringify_attrs(first_line_attributes, attrs)
            label2 = stringify_attrs(second_line_attributes, attrs)
            if label2:
                label = f'{label}\n{label2}'
        output.append(f' "{escaped_name}" [label="{name}{escape(label)}"];\n')
        if xml_bool(attrs.get('lookAhead')):
            # Look-ahead rules consume nothing: draw them dashed.
            output.append(f' "{escaped_name}" [style=dashed];\n')
        if rule_context == '#stay':
            output.append(f' "{escaped_name}" -> "{escaped_ctx_name}" [color=dodgerblue3];\n')
        elif rule_context:
            escaped_rule_context = escape(rule_context)
            if rule_context.startswith('#'):
                escaped_bind_ctx_name = jumpctx(escaped_rule_context)
                ref = f'{escaped_ctx_name}!!{escaped_rule_context}'
                output.append(
                    f' "{escaped_name}" -> "{ref}" [color={color}];\n'
                    f' "{ref}" [label="{labelize(escaped_rule_context)}"];\n'
                )
                if escaped_bind_ctx_name:
                    # '#pop!Target': edge to the target context, deferred so
                    # it is drawn outside the cluster.
                    kdot[f'{ref}!!{escaped_bind_ctx_name}'] = (
                        f' "{ref}" -> "{escaped_bind_ctx_name}" [color={color}];\n'
                        f' "{ref}" [color=red];\n',
                        irule,
                    )
            else:
                # Direct switch to a named context, also deferred.
                kdot[f'{irule}'] = (
                    f' "{escaped_name}" -> "{escaped_rule_context}" [color={color}];\n',
                    irule,
                )

    def end_ctx(p: XMLParser):
        color = p.ctx_color
        escaped_ctx_name = p.escaped_ctx_name
        # Context-level switches drawn inside the cluster.
        fallthrough_ctx = p.ctx_attrs.get('fallthroughContext', '#stay')
        escaped_fallthrough_ctx = escape(fallthrough_ctx)
        push_context_attr(output, escaped_name, escaped_ctx_name,
                          escaped_fallthrough_ctx, 'dashed', color)
        # Local name shadows this function's own name — intentional, it is
        # only used within this scope.
        end_ctx = p.ctx_attrs.get('lineEndContext', '#stay')
        escaped_end_ctx = escape(end_ctx)
        push_context_attr(output, escaped_ctx_name, escaped_ctx_name,
                          escaped_end_ctx, 'dotted', 'blue')
        empty_ctx = p.ctx_attrs.get('lineEmptyContext', '#stay')
        escaped_empty_ctx = escape(empty_ctx)
        push_context_attr(output, escaped_ctx_name, escaped_ctx_name,
                          escaped_empty_ctx, 'dotted', 'purple')
        output.append(' }\n')
        # Edges that leave the cluster go after its closing brace.
        push_last_transition(output, escaped_name, escaped_ctx_name,
                             escaped_fallthrough_ctx, color)
        push_last_transition(output, escaped_name, escaped_ctx_name,
                             escaped_end_ctx, color)
        push_last_transition(output, escaped_name, escaped_ctx_name,
                             escaped_empty_ctx, color)
        output.extend(expr for expr, _ in sorted(kdot.values(), key=lambda t: t[1]))
# Wire the selected mode's callbacks into the expat parser and run it.
xml_parser = XMLParser(start_ctx, end_ctx, rule_process)
p = expat.ParserCreate()
p.StartElementHandler = xml_parser.start_element
p.EndElementHandler = xml_parser.end_element
if not resolve_entities:
    # Track entity declarations so rule strings can be shown unexpanded.
    xml_parser.resolve_entities = False
    p.EntityDeclHandler = xml_parser.entity_decl
    p.EndDoctypeDeclHandler = xml_parser.end_doctype
# # remove BOM
# if content.startswith('\xef\xbb\xbf'):
# content = content[3:]
p.Parse(Path(args.syntax).read_text())
output.append('}\n')
print(''.join(output))
@@ -0,0 +1,148 @@
/* This test file tests Kate's ANTLR highlighting
compilable bt ANTLR although not directly:
grammar can be alone file for both parser and lexer
or two files
This file is merged TestParser.g4 and TestLexer.g4
this lines also tests regions of multiline comment
*/
//file TestParser.g4
parser grammar TestParser;
options { tokenVocab = TestLexer; }
// The main entry point for parsing a grammar.
startRule
: (expression | STRING)+ EOF
;
expression
: expression PLUS mulExpr
| expression MINUS mulExpr
| mulExpr
;
mulExpr
: mulExpr MUL unaryExpr
| mulExpr DIV unaryExpr
| unaryExpr
;
unaryExpr
: atom
| LPAR expression RPAR
;
atom
: IDENT
| number
;
number
: INT
| FLOAT
;
//================================
//file TestLexer.g4
lexer grammar TestLexer;
/*'channels' and '{' must be in one line
to correct highlighting, highlighter can't
recognize regular expression "(options|tokens|channels)(?=([\s]*{))"
where apart from \s whitrspaces are end of lines
*/
channels { OFF_CHANNEL , COMMENT }
PLUS
: '+'
;
MINUS
: '-'
;
MUL
: '*'
;
DIV
: '/'
;
LPAR
: '('
;
RPAR
: ')'
;
IDENT
: Nondigit
( Nondigit
| Digit
)*
;
fragment
Digit
: [0-9]
;
fragment
NonzeroDigit
: [1-9]
;
fragment
Nondigit
: [a-zA-Z_]
;
Sign
: '+' | '-'
;
INT
: Sign? (NonzeroDigit Digit* | '0')
;
fragment
DigitSequence
: Digit+
;
fragment
ExponentPart
: [eE] Sign? DigitSequence
;
fragment
FractionalConstant
: DigitSequence? '.' DigitSequence
| DigitSequence '.'
;
FLOAT
: (FractionalConstant ExponentPart? | DigitSequence ExponentPart)
;
fragment
EscapeSequence
: '\\' ['"?abfnrtvhe\\]
;
//between [] is charset , test escape \
fragment
SChar
: ~["\\\r\n]
| EscapeSequence
;
STRING
: '"' SChar* '"'
;
@@ -0,0 +1,263 @@
#!/usr/bin/env python3
"""Convert a Kate .kateschema colour scheme into a KSyntaxHighlighting JSON theme.

LICENSE:
SPDX-FileCopyrightText: 2020 Juraj Oravec <jurajoravec@mailo.com>
SPDX-License-Identifier: MIT

Configuration:
- IncludeCustoms: try to export all available settings
- prefferStandAloneData: prefer data set specifically for the current data type

Usage:
script.py ThemeName themefile.kateschema
"""
from configparser import ConfigParser
import json
import sys

# Behaviour switches (names are spelled as used throughout the script).
settings = {
    "prefferStandAloneData": True,
    "IncludeCustoms": True
}

# Output document, filled in by main() and the __main__ block below.
jsonConfig = {
    "_comments": "Created by theme_converter script"
}
# kateschema "Editor Colors" option names -> theme "editor-colors" keys.
editorColors = {
    "Color Background": "BackgroundColor",
    "Color Code Folding": "CodeFolding",
    "Color Current Line Number": "CurrentLineNumber",
    "Color Highlighted Bracket": "BracketMatching",
    "Color Highlighted Line": "CurrentLine",
    "Color Icon Bar": "IconBorder",
    "Color Indentation Line": "IndentationLine",
    "Color Line Number": "LineNumbers",
    "Color MarkType 1": "MarkBookmark",
    "Color MarkType 2": "MarkBreakpointActive",
    "Color MarkType 3": "MarkBreakpointReached",
    "Color MarkType 4": "MarkBreakpointDisabled",
    "Color MarkType 5": "MarkExecution",
    "Color MarkType 6": "MarkWarning",
    "Color MarkType 7": "MarkError",
    "Color Modified Lines": "ModifiedLines",
    "Color Replace Highlight": "ReplaceHighlight",
    "Color Saved Lines": "SavedLines",
    "Color Search Highlight": "SearchHighlight",
    "Color Selection": "TextSelection",
    "Color Separator": "Separator",
    "Color Spelling Mistake Line": "SpellChecking",
    "Color Tab Marker": "TabMarker",
    "Color Template Background": "TemplateBackground",
    "Color Template Editable Placeholder": "TemplatePlaceholder",
    "Color Template Focused Editable Placeholder": "TemplateFocusedPlaceholder",
    "Color Template Not Editable Placeholder": "TemplateReadOnlyPlaceholder",
    "Color Word Wrap Marker": "WordWrapMarker"
}

# kateschema "Default Item Styles" names -> theme "text-styles" keys.
textStyles = {
    "Alert": "Alert",
    "Annotation": "Annotation",
    "Attribute": "Attribute",
    "Base-N Integer": "BaseN",
    "Built-in": "BuiltIn",
    "Character": "Char",
    "Comment": "Comment",
    "Comment Variable": "CommentVar",
    "Constant": "Constant",
    "Control Flow": "ControlFlow",
    "Data Type": "DataType",
    "Decimal/Value": "DecVal",
    "Documentation": "Documentation",
    "Error": "Error",
    "Extension": "Extension",
    "Floating Point": "Float",
    "Function": "Function",
    "Import": "Import",
    "Information": "Information",
    "Keyword": "Keyword",
    "Normal": "Normal",
    "Operator": "Operator",
    "Others": "Others",
    "Preprocessor": "Preprocessor",
    "Region Marker": "RegionMarker",
    "Special Character": "SpecialChar",
    "Special String": "SpecialString",
    "String": "String",
    "Variable": "Variable",
    "Verbatim String": "VerbatimString",
    "Warning": "Warning"
}

# Default-style index (the leading field of a custom style line) -> style name.
indexToStyle = {
    "0": "Normal",
    "1": "Keyword",
    "2": "Function",
    "3": "Variable",
    "4": "ControlFlow",
    "5": "Operator",
    "6": "BuiltIn",
    "7": "Extension",
    "8": "Preprocessor",
    "9": "Attribute",
    "10": "Char",
    "11": "SpecialChar",
    "12": "String",
    "13": "VerbatimString",
    "14": "SpecialString",
    "15": "Import",
    "16": "DataType",
    "17": "DecVal",
    "18": "BaseN",
    "19": "Float",
    "20": "Constant",
    "21": "Comment",
    "22": "Documentation",
    "23": "Annotation",
    "24": "CommentVar",
    "25": "RegionMarker",
    "26": "Information",
    "27": "Warning",
    "28": "Alert",
    "29": "Others",
    "30": "Error"
}
NormalizedSections = dict[str, str]
Style = dict[str, str | bool]
CustomStyles = dict[str, Style]
def normalizeSections(sections: list[str]) -> NormalizedSections:
return {value.partition(" - ")[0]: value for value in sections}
def reEcodeColors(text: str) -> str:
    """Convert a kateschema 8-digit AARRGGBB hex value to '#rrggbb' (alpha dropped)."""
    return "#" + text[2:]


def reEncodeBool(text: str) -> bool:
    """kateschema encodes booleans as '1' / '0'."""
    return text == "1"


def rgb_to_hex(rgb: tuple) -> str:
    """(r, g, b) integer triple -> '#rrggbb'."""
    return '#%02x%02x%02x' % rgb


def decodeTextStyle(text: str) -> dict:
    """Decode one kateschema item-style line into a theme style dict.

    The line is a comma-separated record:
      [index,] color, selected color, bold, italic, strike-through,
      underline, background, selected background, font family, ---
    An 11-field record starts from the referenced default style so unset
    fields inherit from it; empty fields leave the style untouched.
    Returns an empty dict for malformed records or records with no data.
    """
    style = {}
    field = text.split(",")
    if len(field) == 11:
        # Custom styles carry a leading default-style index.
        styleIndex = field.pop(0)
        style = jsonConfig["text-styles"].get(indexToStyle[styleIndex], dict()).copy()
    if len(field) != 10 or not any(field[0:8]):
        return dict()
    if len(field[0]) == 8:
        style["text-color"] = reEcodeColors(field[0])
    if len(field[1]) == 8:
        style["selected-text-color"] = reEcodeColors(field[1])
    if len(field[2]) == 1:
        style["bold"] = reEncodeBool(field[2])
    if len(field[3]) == 1:
        style["italic"] = reEncodeBool(field[3])
    if len(field[4]) == 1:
        style["strike-through"] = reEncodeBool(field[4])
    if len(field[5]) == 1:
        style["underline"] = reEncodeBool(field[5])
    if len(field[6]) == 8:
        style["background-color"] = reEcodeColors(field[6])
    if len(field[7]) == 8:
        # BUG FIX: field 7 is the *selected background* colour; the original
        # wrote it to "selected-text-color", clobbering field 1's value.
        style["selected-background-color"] = reEcodeColors(field[7])
    # 8: font family > ignored
    # 9: --- > ignored
    return style
def decodeColorSettings(text: str) -> str | None:
fieldds = tuple(map(int, text.split(",")))
if len(fieldds) != 3:
return
return rgb_to_hex(fieldds)
def extractEditorColors(section: dict[str, str]) -> dict[str, str | None]:
    """Translate a kateschema 'Editor Colors' section into theme 'editor-colors' entries."""
    colors = {}
    for key, value in section.items():
        colors[editorColors[key]] = decodeColorSettings(value)
    return colors
def extractTextStyles(section: dict[str, str]) -> dict[str, Style]:
    """Translate a kateschema 'Default Item Styles' section into theme 'text-styles' entries."""
    styles = {}
    for key, value in section.items():
        styles[textStyles[key]] = decodeTextStyle(value)
    return styles
def extractCustomStyle(custom_styles: CustomStyles, style: Style, realKey: str):
    """Merge one 'Highlighting ...' section's custom styles into `custom_styles`.

    Section entries are keyed '<language>:<item name>'; entries with no
    language part are skipped.  With prefferStandAloneData enabled, a
    section whose own language equals `realKey` overrides inherited data.
    """
    for raw_key, raw_value in style.items():
        decoded = decodeTextStyle(raw_value)
        # Some items have ':' in their name, so limit the split to one.
        parts = raw_key.split(":", 1)
        if len(parts) == 1:
            # Invalid or language-less entry.
            continue
        primary_key, secondary_key = parts
        if decoded:
            language_styles = custom_styles.setdefault(primary_key, dict())
            language_styles.setdefault(secondary_key, decoded)
            if settings["prefferStandAloneData"] and realKey == primary_key:
                language_styles[secondary_key] = decoded
def extractCustomStyles(config: ConfigParser, normalizedSections: NormalizedSections) -> CustomStyles:
    """Collect per-language custom styles from every 'Highlighting ...' section."""
    custom_styles: CustomStyles = {}
    for base_name, full_name in normalizedSections.items():
        if base_name.startswith("Highlighting"):
            realKey = base_name[len("Highlighting "):]
            extractCustomStyle(custom_styles, config[full_name], realKey)
    return custom_styles
def main(inputFile: str):
    """Convert the kateschema file at `inputFile` and print the theme JSON on stdout."""
    config = ConfigParser(delimiters="=")
    # Keep option names case-sensitive (ConfigParser lowercases by default).
    config.optionxform = str
    config.read(inputFile)
    sections = normalizeSections(config.sections())
    if "Editor Colors" in sections:
        jsonConfig["editor-colors"] = extractEditorColors(config[sections["Editor Colors"]])
    if "Default Item Styles" in sections:
        jsonConfig["text-styles"] = extractTextStyles(config[sections["Default Item Styles"]])
    if settings["IncludeCustoms"]:
        jsonConfig["custom-styles"] = extractCustomStyles(config, sections)
    print(json.dumps(jsonConfig, indent=4, sort_keys=True))
if __name__ == "__main__":
    # Expect exactly two arguments: theme name and input file.
    if len(sys.argv) != 3:
        print("Usage: " + sys.argv[0] + " ThemeName Filepath.kateschema")
        exit()
    # Theme metadata required by the KSyntaxHighlighting theme format.
    jsonConfig["metadata"] = {
        "name": str(sys.argv[1]),
        "revision": 1
    }
    main(sys.argv[2])
@@ -0,0 +1,37 @@
# Minted lexer matcher
The purpose of this tool is to perform matching between the lexers available in `minted` and the syntax schemas provided by `syntax-highlighter`.
### Usage
Call script with xml files as arguments. The `latex.xml` in the input will be ignored.
```bash
match_languages.py ../../data/syntax/*.xml
```
The script will generate an output file `lexers_found.xml` which should be manually merged into `../../data/syntax/latex.xml`, e.g.
```bash
meld lexers_found.xml ../../data/syntax/latex.xml
```
Additional options to the script:
* `-v` - more verbose output
* `-o file_name` - output file name
### Configuration
`config.yml` provides extra configuration for the tool.
* `fuzz_level` - used for fuzzy matching, not used anymore
* `minted_blacklisted` - list of languages in minted which should be ignored (e.g. due to duplication or LaTeX-incompatible names; of 'c++' and 'cpp' in minted, only 'cpp' will work with `mycode`)
* `minted_mapping` - list of `key: val` pairs, where `key` is the minted name of a language and `val` is the corresponding lexer in this repository, for all pairs which do not have an exact match
* `lstlisting_mapping` - same as for minted but for lstlistings; currently not supported.
Whenever a `minted` language name is referenced, it in fact refers to `pygments`, which provides the syntax highlighting for `minted`.
## LICENSE
The script is distributed under MIT license.
@@ -0,0 +1,130 @@
fuzz_level: 60
minted_blacklisted:
- coffeescript
- coffee-script
- restructuredtext
- c++
minted_mapping:
Objective Caml: OCaml
PHP/PHP: PHP
ActionScript 2.0: ActionScript
Apache Configuration: ApacheConf
Pascal: Delphi
Email: E-mail
ANS-Forth94: Forth
Fortran (Fixed Format): FortranFixed
Fortran (Free Format): Fortran
Godot: GDScript
Inform: Inform 7
INI Files: INI
SQL (MySQL): MySQL
Intel x86 (NASM): NASM
Nim: Nimrod
POV-Ray: POVRay
Protobuf: Protocol Buffer
RelaxNG-Compact: Relax-NG Compact
RPM Spec: RPMSpec
Ruby/Rails/RHTML: RHTML
SML: Standard ML
Tcl/Tk: Tcl
LaTeX: Tex
Todo.txt: Todotxt
x.org Configuration: Xorg
lstlisting_mapping:
# :ABAP (R/2 4.3, R/2 5.0, R/3 3.1, R/3 4.6C, R/3 6.10)
# :ACM
# :ACMscript
# :ACSL
# :Ada (2005, 83, 95)
# :Algol (60, 68)
# :Ant
# :Assembler (Motorola68k, x86masm)
# :Awk (gnu, POSIX)
# :bash
# :Basic (Visual)
# :C (ANSI, Handel, Objective, Sharp)
# :C++ (11, ANSI, GNU, ISO, Visual)
# :Caml (light, Objective)
# :CIL
# :Clean
# :Cobol (1974, 1985, ibm)
# :Comal 80
# :command.com (WinXP)
# :Comsol
# :csh
# :Delphi
# :Eiffel
# :Elan
# :elisp
# :erlang
# :Euphoria
# :Fortran (03, 08, 77, 90, 95)
# :GAP
# :GCL
# :Gnuplot
# :Go
# :hansl
# :Haskell
# :HTML
# :IDL (empty, CORBA)
# :inform
# :Java (empty, AspectJ)
# :JVMIS
# :ksh
# :Lingo
# :Lisp (empty, Auto)
# :LLVM
# :Logo
# :Lua (5.0, 5.1, 5.2, 5.3)
# :make (empty, gnu)
# :Mathematica (1.0, 11.0, 3.0, 5.2)
# :Matlab
# :Mercury
# :MetaPost
# :Miranda
# :Mizar
# :ML
# :Modula-2
# :MuPAD
# :NASTRAN
# :Oberon-2
# :OCL (decorative, OMG)
# :Octave
# :OORexx
# :Oz
# :Pascal (Borland6, Standard, XSC)
# :Perl
# :PHP
# :PL/I
# :Plasm
# :PostScript
# :POV
# :Prolog
# :Promela
# :PSTricks
# :Python
# :R
# :Reduce
# :Rexx (empty, VM/XA)
# :RSL
# :Ruby
# :S (empty, PLUS)
# :SAS
# :Scala
# :Scilab
# :sh
# :SHELXL
# :Simula (67, CII, DEC, IBM)
# :SPARQL
# :SQL
# :Swift
# :TLA+
# :tcl (empty, tk)
# :TeX (AlLaTeX, common, LaTeX, plain, primitive)
# :VBScript
# :Verilog
# :VHDL (empty, AMS)
# :VRML (97)
# :XML
# :XSLT
@@ -0,0 +1,262 @@
#!/usr/bin/env python3
"""
Copyright (c) 2022 Rafał Lalik <rafallalik@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import xml.etree.ElementTree as ET
import pygments.lexers as pyglex
import re
import yaml
from itertools import combinations
from colorama import Fore, Back, Style
#from fuzzywuzzy import fuzz
def main():
    """Match KDE syntax-definition languages against pygments lexers.

    Syntax XML files are taken from the (unparsed) command-line arguments;
    `config.yml` in the CWD supplies 'minted_mapping' and 'minted_blacklisted'
    (and 'fuzz_level' for the disabled fuzzy fallback).  The result is written
    through generate_output().
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output', help='output file', type=str, default="lexers_found.xml")
    parser.add_argument('-v', '--verbose', help='verbose mode', action='store_true', default=False)
    # Anything argparse does not recognise is treated as a syntax XML file.
    opts, args = parser.parse_known_args()
    if opts.verbose:
        print("Options: ", opts, args)
    with open('config.yml', 'r') as file:
        config = yaml.safe_load(file)
    print(config)
    pygment_lexers = list(pyglex.get_all_lexers())
    kde_langs = []
    for xmlfile in args:
        if xmlfile == 'latex.xml':
            # NOTE(review): latex.xml appears to be skipped because it is the
            # syntax being generated for — confirm.
            continue
        #print(f"Read {xmlfile}")
        rc = parseXML(xmlfile)
        if rc is not None:
            print(f"Found '{rc}' language syntax file")
            kde_langs.append(rc)
    if opts.verbose:
        print("***\nKDE languages: ", kde_langs)
        print("***\nPygments lexers: ", pygment_lexers, f"total = {len(pygment_lexers)}")
    print("\n***\nSearch for KDE lexer")
    matched_lexers = []          # entries: [kde_lang, pygments lexer name, selected aliases]
    minted_used_lexers = set()   # aliases already claimed by an earlier language
    for kde_lang in kde_langs:
        lower_kl = kde_lang.lower()  # NOTE(review): unused since the fuzzy match below was disabled
        # An explicit mapping in config.yml takes precedence over name matching.
        if kde_lang in config['minted_mapping']:
            val_of_minted_mapping = config['minted_mapping'][kde_lang]
            if opts.verbose:
                print(Fore.YELLOW + f"+ Mapped KDE lang '{kde_lang}' to minted lang '{val_of_minted_mapping}'" + Style.RESET_ALL)
            lexers = list(filter(lambda x: x[0] == val_of_minted_mapping, pygment_lexers))
            if len(lexers):
                #print(f"Lexers are {lexers[0][1]} from {pygment_lexers} and {kde_lang}")
                selected_lexers = filter_minted_lexers(lexers[0], config['minted_blacklisted'], minted_used_lexers)
                matched_lexers.append([kde_lang, val_of_minted_mapping, selected_lexers])
            else:
                if opts.verbose:
                    print(Fore.RED + f"No lexers from {pygment_lexers} and {kde_lang}" + Style.RESET_ALL)
            continue
        # Fallback: case-insensitive match on the pygments lexer name itself.
        key_of_minted_matching = list(filter(lambda x: x[0].casefold() == kde_lang.casefold(), pygment_lexers))
        if len(key_of_minted_matching):
            if opts.verbose:
                print(Fore.GREEN + f"+ Matched KDE lang '{kde_lang}' to minted lang '{key_of_minted_matching[0][0]}'" + Style.RESET_ALL)
            selected_lexers = filter_minted_lexers(key_of_minted_matching[0], config['minted_blacklisted'], minted_used_lexers)
            matched_lexers.append([kde_lang, key_of_minted_matching[0][0], selected_lexers])
            continue
        # Disabled fuzzy-matching fallback, kept for reference:
        #klm = max(kde_langs, key=lambda x: fuzz.ratio(ll[0].lower(), x.lower()))
        #klf = fuzz.ratio(ll[0].lower(), klm.lower())
        ##print(f" Fuzz found '{klm}'={klf} for searched '{ll[1]}'")
        #if klf > int(config['fuzz_level']):
        #print(Fore.YELLOW + f"+ Fuzzed lexer '{ll[0]}' ({klf}) lang '{ll[1]}' with KDE lang '{klm}'" + Style.RESET_ALL)
        ##matched_lexers.append([ll[0], klm, ll[1]])
        #else:
        if opts.verbose:
            print(Fore.RED + f"- Lexer for KDE lang '{kde_lang}' not found" + Style.RESET_ALL)
    generate_output(matched_lexers, opts.output)
def generate_output(lexers, filename):
    """Write the generated kate highlighting snippets for minted to `filename`.

    `lexers` is a list of [kde_lang, pygments_lexer_name, aliases] entries as
    produced by main().  Emits, in order: the <list> of recognised
    ``<alias>code``/``<alias>code*`` names, the MintedCodeEnv dispatcher, the
    HighlightingSelector context, the shared HighlightingCommon context, and
    one family of Highlighting* contexts per KDE language.
    """
    print("\n***\nGenerate outputs")
    with open(filename, "w") as f:
        f.write(' <list name="MintedCodeLang">\n')
        last_lang = None
        for code, lang, ll in lexers:
            # Emit the group comment once per pygments lexer.
            # Fixed: the original compared strings with `is not` (identity)
            # and never updated `last_lang`, so the comment was written for
            # every single entry.
            if last_lang is None or last_lang != lang:
                f.write(f" <!-- {code} lexers -->\n")
                last_lang = lang
            for l in ll:
                f.write(f" <item>{l}code*</item>\n")
                f.write(f" <item>{l}code</item>\n")
        f.write(' </list>\n')
        mintenv_list = []
        f.write('\n\n\n\n\n')
        f.write(' <!-- environment type 5: minted environment created with newminted -->\n')
        f.write(' <context name="MintedCodeEnv" attribute="Environment" lineEndContext="#stay" fallthroughContext="#pop#pop#pop#pop">\n')
        for code, lang, ll in lexers:
            for l in ll:
                f.write(f' <WordDetect String="{l}code*" attribute="Environment" context="Highlighting{code}CodeEnvS"/>\n')
                f.write(f' <WordDetect String="{l}code" attribute="Environment" context="Highlighting{code}CodeEnv"/>\n')
                mintenv_list.append(f' <WordDetect String="{l}" insensitive="true" context="Highlighting{code}CodeEnv"/>\n')
        # Raw strings below: the XML payload contains regex escapes (\*, \s,
        # \{ ...) that previously triggered invalid-escape warnings.  Output
        # bytes are unchanged.
        f.write(r''' <RegExpr String=".+code\*" attribute="Environment" context="UnknownHighlightingCodeEnvS"/>
 <RegExpr String=".+code" attribute="Environment" context="UnknownHighlightingCodeEnv"/>
 </context>''' + '\n\n')
        f.write(' <context name="HighlightingSelector" attribute="Normal Text" lineEndContext="#stay">\n')
        f.write(''.join(mintenv_list))
        f.write(''' <AnyChar String="}]" context="#pop!UnknownHighlightingBegin"/>
 <RegExpr String="[^]}]*" context="#stay"/>
 </context>
''')
        f.write('\n')
        f.write(r''' <context name="HighlightingCommon" attribute="Normal Text" lineEndContext="#stay">
 <RegExpr String="\\end\s*\{(?:lstlisting|minted|[a-zA-Z]+code)\*?\}" attribute="Structure" lookAhead="true" context="#pop#pop#pop#pop#pop#pop"/>
 </context>''' + '\n')
        for code, lang, ll in lexers:
            # {0} = KDE language name, {1} = literal '{', {2} = literal '}'.
            f.write(r'''
 <context name="Highlighting{0}CodeEnvS" attribute="Error" lineEndContext="#stay">
 <DetectSpaces/>
 <DetectChar char="{2}" attribute="Normal Text" context="#pop!Highlighting{0}CodeEnvSParam"/>
 </context>
 <context name="Highlighting{0}CodeEnvSParam" attribute="Error" lineEndContext="#stay">
 <DetectSpaces attribute="Normal Text"/>
 <DetectChar char="{1}" attribute="Normal Text" context="Highlighting{0}CodeEnvSParamInside"/>
 <IncludeRules context="FindComments"/>
 </context>
 <context name="Highlighting{0}CodeEnvSParamInside" attribute="Normal Text" lineEndContext="#stay">
 <DetectSpaces/>
 <DetectIdentifier/>
 <DetectChar char="{2}" attribute="Normal Text" context="#pop!HighlightingBegin{0}"/>
 <IncludeRules context="FindComments"/>
 <RegExpr String="\&envname;" attribute="Macro" context="#stay"/>
 </context>
 <context name="Highlighting{0}CodeEnv" attribute="Normal Text" lineEndContext="#stay">
 <DetectChar char="{2}" context="HighlightingBegin{0}"/>
 <RegExpr String="[^{2}]*" attribute="Normal Text" context="#stay"/>
 </context>
 <context name="HighlightingBegin{0}" attribute="Normal Text" lineEndContext="#pop!Highlighting{0}">
 <DetectSpaces/>
 <RegExpr String=".+" attribute="Error" context="#stay"/>
 </context>
 <context name="Highlighting{0}" attribute="Normal Text" lineEndContext="#stay">
 <IncludeRules context="HighlightingCommon"/>
 <IncludeRules context="##{0}" includeAttrib="true"/>
 </context>
'''.format(code, '{', '}'))
        f.write(' <!-- end of mintedcode environment -->\n')
def filter_minted_lexers(lexers_list, blacklist, used_lexers):
    """Filter one pygments lexer entry down to usable, unique alias names.

    `lexers_list` is a pygments lexer tuple whose second element is the alias
    list.  Blacklisted aliases are dropped, the rest are simplified to pure
    lower-case letters ('++' -> 'pp', '#' -> 'sharp', everything else
    stripped), and aliases already claimed by another language (tracked in
    `used_lexers`, updated in place) are skipped.  Returns the selection
    sorted.
    """
    selected = set()
    # An alias that still begins/ends with a non-letter after simplification
    # cannot be used as a kate context name component — it gets removed.
    name_filter = r"^[^a-z]+|[^a-z]+$"
    for ll in lexers_list[1]:
        if ll in blacklist:
            # Fixed: the original printed `ll[1]` — the alias' second
            # character — instead of the alias itself.
            print(f" Ignore blacklisted '{ll}' lexer")
            continue
        new_ll = re.sub(r"\+\+", "pp", ll)
        new_ll = re.sub(r"\#", "sharp", new_ll)
        new_ll = re.sub(r"[^a-z]", "", new_ll)
        if new_ll != ll:
            print(f" {ll} => {new_ll}" + Fore.YELLOW + " - Replaced" + Style.RESET_ALL)
        rc = re.search(name_filter, new_ll)
        if rc is not None:
            print(f" {ll} => {new_ll}" + Fore.RED + " - Removed" + Style.RESET_ALL)
            continue
        if new_ll in used_lexers:
            print(f" {ll} => {new_ll}" + Fore.RED + " - Ignored" + Style.RESET_ALL)
            continue
        print(f" {ll} => {new_ll}" + Fore.GREEN + " - Added" + Style.RESET_ALL)
        selected.add(new_ll)
    used_lexers.update(selected)  # update set of all lexers
    return sorted(selected)
def search_lexer(langname, pygment_lexers):
    """Return the pygments lexer name whose alias list contains `langname`
    (case-insensitive), or None when no lexer matches.
    """
    wanted = langname.lower()
    for name, aliases, *_rest in pygment_lexers:
        if any(alias.lower() == wanted for alias in aliases):
            return name
    return None
def split_lexers(lexers):
    """Get pygments language with lexers and create all lexers list.

    Flattens [(name, aliases, ...), ...] into [[alias, name, False], ...].
    """
    return [
        [alias, name, False]
        for name, aliases, *_rest in lexers
        for alias in aliases
    ]
def parseXML(xmlfile):
    """Return the language name declared by a KDE syntax XML file.

    Returns None when the root element is not <language>, or when the
    definition has no <highlighting> section (nothing to match against).
    """
    tree = ET.parse(xmlfile)
    root = tree.getroot()
    if root.tag != 'language':
        return None
    langname = root.attrib['name']
    if root.find('highlighting') is None:
        # Fixed: this branch used to reference the undefined global `opts`
        # (it only exists inside main()), raising NameError for any syntax
        # file without a <highlighting> section.
        print(f"Language {langname} has no highlighting")
        return None
    return langname
# Run the matcher only when executed as a script, not on import.
if __name__ == '__main__':
    main()
@@ -0,0 +1,12 @@
__pycache__/
.venv/
dist/
build/
tmp/
*.egg-info/
.pytest_cache/
.coverage
.env
MANIFEST
Pipfile.lock
@@ -0,0 +1,25 @@
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true
[scripts]
dist = "python -c 'import setuptools; setuptools.setup()' -- bdist_wheel"
dead-check = "vulture kateschema2theme"
lint-check = "pylint kateschema2theme"
style-check = "pycodestyle kateschema2theme"
type-check = "mypy kateschema2theme"
[dev-packages]
kateschema2theme = {editable = true, path = ".", use_pep517 = false}
mypy = "*"
pycodestyle = "*"
pylint = "*"
vulture = "*"
[packages]
click = "*"
columnize = "*"
[requires]
python_version = "3.8"
@@ -0,0 +1,125 @@
What is This
============
The new Kate theme format is incompatible with the previously used ``*.kateschema``
and ``*.katehlcolor`` files. This tool helps convert the old color files to
the new theme format.
Usage
=====
- Make a virtualenv with all dependencies::
$ pipenv install --dev
- Enter the virtualenv::
$ pipenv shell
- Check the help screen::
Usage: kateschema2theme [OPTIONS] [INPUT_FILE]
Options:
-h, --help Show this message and exit.
--version Show the version and exit.
-d, --skip-included Do not write custom colors included from another
syntax files.
-s, --syntax-dir TEXT Specify the directory to search for syntax files. If
given, extra validation going to happen.
- Example *katecolor* file::
$ cat .desktop.katehlcolor
[Highlighting .desktop - Schema Zaufi::dark]
.desktop:Comment=8,ff787775,ff787775,0,1,0,0,,,,---
.desktop:Key=2,ff77aa77,ff77aa77,0,0,0,0,,,,---
.desktop:Language=3,ff4086c0,ff4086c0,1,0,0,0,,,,---
.desktop:Normal Text=0,ffcacaca,ffcacaca,0,0,0,0,,,,---
.desktop:Section=1,ffdaaa66,ffdaaa66,1,0,0,0,,,,---
[KateHLColors]
full schema=false
highlight=.desktop
schema=Zaufi::dark
- Perform the conversion::
$ kateschema2theme .desktop.katehlcolor
{
"custom-styles": {
".desktop": {
"Comment": {
"bold": false,
"italic": true,
"selected-text-color": "#787775",
"strike-through": false,
"text-color": "#787775",
"underline": false
},
"Key": {
"bold": false,
"italic": false,
"selected-text-color": "#77aa77",
"strike-through": false,
"text-color": "#77aa77",
"underline": false
},
"Language": {
"bold": true,
"italic": false,
"selected-text-color": "#4086c0",
"strike-through": false,
"text-color": "#4086c0",
"underline": false
},
"Normal Text": {
"bold": false,
"italic": false,
"selected-text-color": "#cacaca",
"strike-through": false,
"text-color": "#cacaca",
"underline": false
},
"Section": {
"bold": true,
"italic": false,
"selected-text-color": "#daaa66",
"strike-through": false,
"text-color": "#daaa66",
"underline": false
}
}
},
"metadata": {
"name": "Zaufi::dark",
"revision": 1
}
}
- Syntax files may change over time: some syntax items disappear and new ones
  get added. Old theme files may therefore contain unused entries as well as
  missing definitions for newly added items. To address these possible
  issues, pass the ``-s`` option with a path to the syntax XMLs directory.
  The conversion tool will then validate the items defined in the old theme
  against those actually declared by the corresponding syntax.
  Unused items will be discarded from the converted theme::
$ kateschema2theme -s /usr/share/org.kde.syntax-highlighting/syntax very-old.kateschema >/dev/null
* The following styles are not used by `CMake` syntax anymore:
Commands │ CMake Variable │ Environment Variable
Builtin CMake Variable │ Properties
Macros │ Third-Party Commands
* The following styles are not defined in the converted `CMake` syntax:
User Function/Macro │ False Special Arg │ Command
Named Args │ Cache Variable Substitution
Internal Name │ CMake Internal Variable
Variable Substitution │ Property
True Special Arg │ Environment Variable Substitution
Builtin Variable │ Standard Environment Variable
Aliased Targets │ @Variable Substitution
@@ -0,0 +1,490 @@
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: 2020 Alex Turbov <i.zaufi@gmail.com>
# SPDX-FileContributor: Juraj Oravec <jurajoravec@mailo.com>
# SPDX-License-Identifier: MIT
#
'''
CLI utility to convert old `kate`s schema/hlcolors files to the
new JSON theme format.
'''
from __future__ import annotations
import configparser
import enum
import functools
import itertools
import json
import pathlib
import re
import textwrap
from xml.etree import ElementTree
from typing import \
Dict \
, Final \
, Iterable \
, Generator \
, List \
, Literal \
, Pattern \
, Set \
, TextIO \
, Tuple \
, TypedDict \
, TypeVar
import click
import columnize # type: ignore
# BEGIN Type declarations
T = TypeVar('T')  # pylint: disable=invalid-name

# All property names a single style entry may carry in the new theme JSON.
# NOTE(review): 'text-color' is listed twice (harmless inside Literal) and
# 'seleted-background-color' is misspelled — but the same spelling is used
# consistently in `_OFFSET2NAME` below and becomes a key in the emitted
# JSON, so confirm downstream consumers before renaming.
PropName = Literal[
    'background-color'
  , 'bold'
  , 'italic'
  , 'selected-text-color'
  , 'seleted-background-color'
  , 'strike-through'
  , 'text-color'
  , 'text-color'
  , 'underline'
]
# One style entry: any subset (total=False) of the properties above.
StylePropsDict = TypedDict(
    'StylePropsDict'
  , {
        'background-color': str
      , 'bold': bool
      , 'italic': bool
      , 'selected-text-color': str
      , 'seleted-background-color': str
      , 'strike-through': bool
      , 'text-color': str
      , 'underline': bool
    }
  , total=False
)
# Syntax item name -> its style properties.
CustomStyleDict = Dict[str, StylePropsDict]
# Syntax (language) name -> its custom styles.
CustomStylesDict = Dict[str, CustomStyleDict]
# New editor color name -> '#rrggbb' value.
EditorColorsDict = Dict[str, str]
# Standard style name -> its style properties.
TextStylesDict = Dict[str, StylePropsDict]
# The `metadata` object of a theme file.
MetadataDict = TypedDict(
    'MetadataDict'
  , {
        'name': str
      , 'revision': int
    }
)
# The whole theme document; every top-level key is optional.
ThemeDict = TypedDict(
    'ThemeDict'
  , {
        'custom-styles': CustomStylesDict
      , 'editor-colors': EditorColorsDict
      , 'metadata': MetadataDict
      , 'text-styles': TextStylesDict
    }
  , total=False
)
# Syntax name -> set of itemData names it declares.
SyntaxesDict = Dict[str, Set[str]]
# END Type declarations
class QtColorItemOffset(enum.IntEnum):
    '''
    Enumeration class with offsets in the CSV record
    of the old style definition.
    '''
    # Field 0 of the CSV record is not converted (its meaning is not used
    # here); the member order below mirrors the field order, so do not
    # reorder members.
    _UNKNOWN = 0
    TEXT = enum.auto()
    SELECTED_TEXT = enum.auto()
    BOLD = enum.auto()
    ITALIC = enum.auto()
    STRIKE_THROUGH = enum.auto()
    UNDERLINE = enum.auto()
    BACKGROUND = enum.auto()
    SELECTED_BACKGROUND = enum.auto()
    # These two trailing fields are ignored by the conversion.
    _IGNORED_FONT_FAMILY = enum.auto()
    _TRAILING_DASHES = enum.auto()
    # Special item to validate the components count
    CUSTOM_COLOR_EXPECTED_SIZE = enum.auto()
    # Standard-style records lack the trailing `---` field, hence one less.
    STANDARD_COLOR_EXPECTED_SIZE = 10
# Old color values come as 8 hex digits (`aarrggbb`) ...
_EXPECTED_OLD_COLOR_LEN: Final[int] = 8
# ... of which the two leading alpha digits are stripped on conversion.
_OLD_COLOR_LEADING_STRIP_SIZE: Final[int] = 2
# Prefix of the per-syntax sections in the old config files.
_HIGHLIGHTING_PFX: Final[str] = 'Highlighting '
_COLUMIZED_LIST_INDENT_PFX: Final[str] = ' '
# Old editor-color key -> new `editor-colors` key.
_EDITOR_COLORS: Final[Dict[str, str]] = {
    "Color Background": "BackgroundColor"
  , "Color Code Folding": "CodeFolding"
  , "Color Current Line Number": "CurrentLineNumber"
  , "Color Highlighted Bracket": "BracketMatching"
  , "Color Highlighted Line": "CurrentLine"
  , "Color Icon Bar": "IconBorder"
  , "Color Indentation Line": "IndentationLine"
  , "Color Line Number": "LineNumbers"
  , "Color MarkType 1": "MarkBookmark"
  , "Color MarkType 2": "MarkBreakpointActive"
  , "Color MarkType 3": "MarkBreakpointReached"
  , "Color MarkType 4": "MarkBreakpointDisabled"
  , "Color MarkType 5": "MarkExecution"
  , "Color MarkType 6": "MarkWarning"
  , "Color MarkType 7": "MarkError"
  , "Color Modified Lines": "ModifiedLines"
  , "Color Replace Highlight": "ReplaceHighlight"
  , "Color Saved Lines": "SavedLines"
  , "Color Search Highlight": "SearchHighlight"
  , "Color Selection": "TextSelection"
  , "Color Separator": "Separator"
  , "Color Spelling Mistake Line": "SpellChecking"
  , "Color Tab Marker": "TabMarker"
  , "Color Template Background": "TemplateBackground"
  , "Color Template Editable Placeholder": "TemplatePlaceholder"
  , "Color Template Focused Editable Placeholder": "TemplateFocusedPlaceholder"
  , "Color Template Not Editable Placeholder": "TemplateReadOnlyPlaceholder"
  , "Color Word Wrap Marker": "WordWrapMarker"
}
# Old standard style name -> new `text-styles` key.
_TEXT_STYLES: Final[Dict[str, str]] = {
    "Alert": "Alert"
  , "Annotation": "Annotation"
  , "Attribute": "Attribute"
  , "Base-N Integer": "BaseN"
  , "Built-in": "BuiltIn"
  , "Character": "Char"
  , "Comment": "Comment"
  , "Comment Variable": "CommentVar"
  , "Constant": "Constant"
  , "Control Flow": "ControlFlow"
  , "Data Type": "DataType"
  , "Decimal/Value": "DecVal"
  , "Documentation": "Documentation"
  , "Error": "Error"
  , "Extension": "Extension"
  , "Floating Point": "Float"
  , "Function": "Function"
  , "Import": "Import"
  , "Information": "Information"
  , "Keyword": "Keyword"
  , "Normal": "Normal"
  , "Operator": "Operator"
  , "Others": "Others"
  , "Preprocessor": "Preprocessor"
  , "Region Marker": "RegionMarker"
  , "Special Character": "SpecialChar"
  , "Special String": "SpecialString"
  , "String": "String"
  , "Variable": "Variable"
  , "Verbatim String": "VerbatimString"
  , "Warning": "Warning"
}
# CSV field offset -> new per-style property name.
_OFFSET2NAME: Final[Dict[QtColorItemOffset, PropName]] = {
    QtColorItemOffset.TEXT: 'text-color'
  , QtColorItemOffset.SELECTED_TEXT: 'selected-text-color'
  , QtColorItemOffset.BOLD: 'bold'
  , QtColorItemOffset.ITALIC: 'italic'
  , QtColorItemOffset.STRIKE_THROUGH: 'strike-through'
  , QtColorItemOffset.UNDERLINE: 'underline'
  , QtColorItemOffset.BACKGROUND: 'background-color'
  , QtColorItemOffset.SELECTED_BACKGROUND: 'seleted-background-color'
}
# Sections that may carry the theme name (`schema` key); checked in order.
_META_SECTIONS: Final[List[str]] = ['KateSchema', 'KateHLColors']
# Section header matcher that drops the trailing ` - Schema <name>` suffix.
_SECTION_MATCH: Final[Pattern] = re.compile(r'\[(?P<header>[^]]+?)( - Schema .*)?\]')
@click.command()
@click.help_option(
    '--help'
  , '-h'
)
@click.version_option()
@click.option(
    '--skip-included'
  , '-d'
    # NOTE(review): with `is_flag=True` and `default=True` the value is True
    # whether or not `-d` is passed — confirm the intended default.
  , default=True
  , is_flag=True
  , help='Do not write custom colors included from another syntax files.'
)
@click.option(
    '-s'
  , '--syntax-dirs'
  , multiple=True
  , metavar='DIRECTORY...'
  , type=click.Path(exists=True, file_okay=False, dir_okay=True)
  , help='Specify the directory to search for syntax files. '
    'If given, extra validation going to happen. Multiple '
    'options allowed.'
)
@click.argument(
    'input-file'
  , type=click.File('r')
  , default='-'
)
def kateschema2theme(skip_included: bool, syntax_dirs: List[click.Path], input_file: TextIO) -> int:
    ''' Kate colors/schema to theme converter.

    Reads an old `*.kateschema` / `*.katehlcolor` INI file from INPUT_FILE
    (stdin by default) and prints the converted JSON theme to stdout.
    Returns a non-zero status on parse errors.
    '''
    config = configparser.ConfigParser(
        delimiters=['=']
      , interpolation=None
    )
    # Keep option names case-sensitive and strip the ` - Schema ...` suffix
    # from section headers while parsing.
    setattr(config, 'optionxform', str)
    setattr(config, 'SECTCRE', _SECTION_MATCH)
    try:
        config.read_file(input_file)
    except configparser.DuplicateOptionError as ex:
        eerror(f'{ex!s}')
        return 1
    result: ThemeDict = {}
    sections: List[str] = config.sections()
    # Global editor colors -> `editor-colors`.
    if 'Editor Colors' in sections:
        result['editor-colors'] = functools.reduce(
            convert_editor_color
          , config.items('Editor Colors')
          , {}
        )
    # Standard styles -> `text-styles`.
    if 'Default Item Styles' in sections:
        result['text-styles'] = functools.reduce(
            collect_standard_colors
          , config.items('Default Item Styles')
          , {}
        )
    # Per-syntax overrides -> `custom-styles`, optionally validated against
    # the actual syntax definitions when `-s` directories were given.
    custom_styles: CustomStylesDict = functools.reduce(
        collect_custom_colors
      , hl_colors(config, skip_included)
      , {}
    )
    if bool(custom_styles):
        known_syntaxes: SyntaxesDict = get_syntaxes_available(syntax_dirs) \
            if bool(syntax_dirs) else {}
        if bool(known_syntaxes):
            custom_styles = verify_converted_styles(custom_styles, known_syntaxes)
        result['custom-styles'] = custom_styles
    # The theme name comes from the `schema` key of the first meta section
    # present (`KateSchema`, then `KateHLColors`).
    meta_section_name = first_true(lambda name: name in sections, _META_SECTIONS)
    if meta_section_name is not None:
        result['metadata'] = {
            'name': config[meta_section_name]['schema']
          , 'revision': 1
        }
    print(json.dumps(result, sort_keys=True, indent=4))
    return 0
def convert_editor_color(state: Dict[str, str], color_line: Tuple[str, str]) -> Dict[str, str]:
    '''Reducer: rename one old editor color entry and store its web-hex value.'''
    old_name, csv_rgb = color_line
    assert old_name in _EDITOR_COLORS
    new_name = _EDITOR_COLORS[old_name]
    state[new_name] = decode_rgb_set(csv_rgb)
    return state
def decode_rgb_set(color_settings: str) -> str:
    '''Turn an `R,G,B` CSV string into a `#rrggbb` web-hex string.'''
    channels = [int(component) for component in color_settings.split(',')]
    return rgb2hex(*channels)
def rgb2hex(red: int, green: int, blue: int) -> str:
    '''Convert R,G,B integers to web-hex string'''
    return '#' + ''.join(format(channel, '02x') for channel in (red, green, blue))
def collect_standard_colors(state, item):
    '''Reducer: convert one standard text style to its new name and props.'''
    old_name, raw_value = item
    new_name = _TEXT_STYLES[old_name]
    state[new_name] = parse_qcolor_value(raw_value)
    return state
def collect_custom_colors(state: CustomStylesDict, item: Tuple[str, str, str]) -> CustomStylesDict:
    '''Reducer: store one converted custom style under its syntax name.

    Entries whose converted property set is empty are dropped.
    '''
    syntax, syntax_item, value = item
    props = parse_qcolor_value(value)
    if props:
        state.setdefault(syntax, {})[syntax_item] = props
    return state
def hl_colors(config: configparser.ConfigParser, skip_included: bool) \
        -> Generator[Tuple[str, str, str], None, None]:
    '''Yield (syntax, item-name, raw-value) triples from `Highlighting …` sections.

    With `skip_included`, entries whose syntax differs from the section's own
    syntax (i.e. colors included from another syntax file) are dropped.
    '''
    for section in config.sections():
        if not section.startswith(_HIGHLIGHTING_PFX):
            continue
        own_syntax = section[len(_HIGHLIGHTING_PFX):]
        for name, value in config.items(section):
            syntax, sep, item_name = name.partition(':')
            if not sep:
                ewarn(f'Unexpected color name: `{name}` in section `{section}`')
            elif not skip_included or own_syntax == syntax:
                yield syntax, item_name, value
def parse_qcolor_value(value: str) -> StylePropsDict:
    '''Split an old QColor CSV record and convert it to new-style properties.'''
    components = value.split(',')
    # Custom records carry one extra trailing `---` field — drop it.
    if len(components) == QtColorItemOffset.CUSTOM_COLOR_EXPECTED_SIZE:
        del components[-1]
    assert len(components) == QtColorItemOffset.STANDARD_COLOR_EXPECTED_SIZE
    return transform_qcolor_to_dict(components)
def transform_qcolor_to_dict(components: List[str]) -> StylePropsDict:
    '''Fold the CSV fields of an old color record into a new-style dict.'''
    result: StylePropsDict = {}
    for indexed_component in enumerate(components):
        result = convert_color_property(result, indexed_component)
    return result
def convert_color_property(state: StylePropsDict, prop: Tuple[int, str]) -> StylePropsDict:
    '''A reducer functor to convert one item of the former color record (CSV)
    into a new property name and a value.
    '''
    offset = QtColorItemOffset(prop[0])
    value = prop[1]
    # Only the "real" fields get here; the validation fields never appear.
    assert offset < QtColorItemOffset.CUSTOM_COLOR_EXPECTED_SIZE
    # Empty fields and unmapped offsets are silently skipped.
    if bool(value) and offset in _OFFSET2NAME:
        custom_prop_name = _OFFSET2NAME[offset]
        if custom_prop_name.endswith('-color'):
            # Colors are kept only when given as `aarrggbb`; the two leading
            # alpha digits are stripped to make a `#rrggbb` value.
            if len(value) == _EXPECTED_OLD_COLOR_LEN:
                state[custom_prop_name] = '#' + value[_OLD_COLOR_LEADING_STRIP_SIZE:]
        else:
            # Boolean properties arrive as '0'/'1' strings.
            state[custom_prop_name] = bool(value == '1')
    return state
def first_true(pred, iterable: Iterable[T], default=None) -> T:
    '''A helper function to return first item for which predicate is true.'''
    return next((item for item in iterable if pred(item)), default)
def get_syntaxes_available(dirs: List[click.Path]) -> SyntaxesDict:
    '''Collect the syntax definitions found in the given directories.

    Returns a dict mapping syntax names to the set of item names declared.
    '''
    result: SyntaxesDict = {}
    for directory in dirs:
        for entry in pathlib.Path(str(directory)).iterdir():
            if entry.suffix == '.xml':
                result = load_syntax_data(result, entry)
    return result
def load_syntax_data(state: SyntaxesDict, syntax_file: pathlib.Path) -> SyntaxesDict:
    '''Reducer: record the itemData names declared by one syntax XML file.'''
    root = ElementTree.parse(syntax_file).getroot()
    syntax_name = root.get('name')
    assert syntax_name is not None
    if syntax_name in state:
        # A later file with the same syntax name wins below — warn about it.
        ewarn(
            f'Use `{syntax_name}` found '
            f'in `{click.format_filename(str(syntax_file))}`'
        )
    item_names: Set[str] = set()
    for item_node in root.iterfind('highlighting/itemDatas/itemData'):
        item_names = collect_syntax_item_data(item_names, item_node)
    state[syntax_name] = item_names
    return state
def verify_converted_styles(custom_styles: CustomStylesDict, known_syntaxes: SyntaxesDict) \
        -> CustomStylesDict:
    '''Validate the given `custom_styles` according to actual syntax items
    described in the known syntax files.
    Returns a dict of syntaxes without unused syntax items.
    '''
    for syntax, styles in custom_styles.items():
        if syntax not in known_syntaxes:
            ewarn(f'The `{syntax}` is not known. Ignoring validation.')
            continue
        found_custom_items = set(styles.keys())
        # Items present in the old theme but no longer declared by the
        # syntax: warn and strip them from the result.
        if unused_items := [*found_custom_items.difference(known_syntaxes[syntax])]:
            ewarn(
                f'The following styles are not used by `{syntax}` syntax anymore:'
                + '\n'
                + format_columns(unused_items)
            )
            custom_styles[syntax] = functools.reduce(
                remove_unused_syntax_item
              , unused_items
              , styles
            )
        # Items the syntax declares but the old theme never customized:
        # report only; there is nothing to remove.
        if undefined_items := [*known_syntaxes[syntax].difference(found_custom_items)]:
            ewarn(
                f'The following styles are not defined in the converted `{syntax}` syntax:'
                + '\n'
                + format_columns(undefined_items)
            )
    return custom_styles
def remove_unused_syntax_item(state: CustomStyleDict, item: str) -> CustomStyleDict:
    '''Drop `item` from `state` (which must contain it) and return `state`.'''
    assert item in state
    state.pop(item)
    return state
def format_columns(iterable: Iterable[str]) -> str:
    '''A helper functor to output the list in columns.'''
    # NOTE(review): click.get_terminal_size() was deprecated and removed in
    # click 8.1 (shutil.get_terminal_size() is the replacement) — confirm the
    # pinned click version before upgrading.
    term_width = click.get_terminal_size()[0] - len(_COLUMIZED_LIST_INDENT_PFX)
    return textwrap.indent(
        columnize.columnize(iterable, displaywidth=term_width, colsep='')
      , prefix=_COLUMIZED_LIST_INDENT_PFX
    )
def collect_syntax_item_data(items: Set[str], node: ElementTree.Element) -> Set[str]:
    '''Reducer: add the `name` attribute of one itemData node to `items`.'''
    item_name = node.get('name')
    assert item_name is not None
    items.add(item_name)
    return items
def eerror(msg: str):
    '''A helper function to display an error message.'''
    bullet = click.style('*', fg='red', bold=True)
    click.echo(f' {bullet} {msg}', err=True)
def ewarn(msg: str):
    '''A helper function to display a warning message.'''
    bullet = click.style('*', fg='yellow')
    click.echo(f' {bullet} {msg}', err=True)
@@ -0,0 +1,6 @@
[build-system]
requires = ["setuptools >= 47.1.0", "wheel"]
build-backend = "setuptools.build_meta"
[tool.vulture]
min_confidence = 61
@@ -0,0 +1,47 @@
[metadata]
# https://setuptools.readthedocs.io/en/latest/setuptools.html#metadata
name = kateschema2theme
version = 1.0.0
description = Convert kate4 schema or hlcolors file to the JSON theme
long_description = file: README.rst
long_description_content_type = text/x-rst
author = Alex Turbov
author_email = i.zaufi@gmail.com
maintainer = Alex Turbov
maintainer_email = i.zaufi@gmail.com
keywords =
kate color theme
license = MIT
license_file = ../../LICENSES/MIT.txt
platform = any
classifiers =
Development Status :: 4 - Beta
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python :: 3.8
Topic :: Software Development :: Utilities
[options]
# https://setuptools.readthedocs.io/en/latest/setuptools.html#options
packages =
kateschema2theme
install_requires =
click
columnize
python_requires = >=3.8
zip_safe = True
[options.entry_points]
console_scripts = kateschema2theme = kateschema2theme.cli:kateschema2theme
[bdist_wheel]
universal = 1
[pycodestyle]
# https://pep8.readthedocs.io/en/latest/intro.html#configuration
ignore = E121,E123,E126,E131,E203,E265,W503,W504
max-line-length = 160
statistics = True
@@ -0,0 +1,5 @@
# -*- coding: utf-8 -*-
# Packaging shim: all metadata and options live in `setup.cfg`.
import setuptools
setuptools.setup()
@@ -0,0 +1,793 @@
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2023 Jonathan Poelen <jonathan.poelen@gmail.com>
# SPDX-License-Identifier: MIT
import argparse
import json
import sys
def parse_scores(s: str) -> list[int]:
    '''Parse a comma-separated list of integers, e.g. "15,30,45".'''
    return list(map(int, s.split(',')))
# CLI definition.  Note: the parse happens at import time (script-style
# module).  Fixed two user-visible typos in the help text:
# "contract result" -> "contrast result" and "constrast" -> "contrast".
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                 description='''Contrast checker for themes
Allows you to view all the colors and backgrounds applied to a theme and rate the contrast based on the APCA (Accessible Perceptual Contrast Algorithm) used in WCAG 3. A very low score is a sign of poor contrast, and can make reading difficult or impossible.
However, color perception depends on the individual, hardware or software configurations (night/blue light filter), lighting or simply surrounding colors. For example, low contrast may remain legible in the editor when not surrounded by bright color.
There are 3 options for modifying the contrast result:
-a / --add-luminance and -p / --add-percent-luminance to directly modify the output value. For example, -p -14 -a 15 increases the contrast a little when it's low and very little when it's high.
-C / --color-space Okl selects a color space with different properties, mainly on the color red.
''')
parser.add_argument('-f', '--bg', metavar='BACKGROUND', action='append',
                    help='show only the specified background color styles')
parser.add_argument('-l', '--language', metavar='LANGUAGE', action='append',
                    help='show only the specified language')
parser.add_argument('-c', '--no-custom-styles', action='store_true',
                    help='do not display custom languages')
parser.add_argument('-s', '--no-standard-styles', action='store_true',
                    help='do not display standard colors')
parser.add_argument('-b', '--no-borders', action='store_true',
                    help='do not display border colors')
parser.add_argument('-H', '--no-legend', action='store_true',
                    help='do not display legend')
parser.add_argument('-M', '--min-luminance', metavar='LUMINANCE', type=float, default=0,
                    help='only displays colors with a higher luminance')
parser.add_argument('-L', '--max-luminance', metavar='LUMINANCE', type=float, default=110.0,
                    help='only displays colors with a lower luminance')
parser.add_argument('-a', '--add-luminance', metavar='LUMINANCE', type=float, default=0,
                    help='add fixed value for luminance')
parser.add_argument('-p', '--add-percent-luminance', metavar='LUMINANCE', type=float, default=0,
                    help='add percent luminance. Apply before --add-luminance')
parser.add_argument('-S', '--scores', metavar='SCORES', type=parse_scores,
                    help='modify ratings values. Expects a comma-separated list of numbers. The order of the list is the same as in the legend.')
# sRGB is W3 in APCA
parser.add_argument('-C', '--color-space', default='sRGB',
                    choices=['sRGB', 'DisplayP3', 'AdobeRGB', 'Rec2020', 'Okl'],
                    help='select a color space ; Okl is a color space that increases the contrast of red with black or blue background and decreases it with white or green background')
parser.add_argument('-d', '--compute-diff', action='store_true',
                    help='compute luminance between 2 colors or more ; the first color represents the background, the others the foreground')
parser.add_argument('-F', '--output-format', default='ansi', choices=['ansi', 'html'])
parser.add_argument('-T', '--html-title', help='title of html page when --output-format=html')
parser.add_argument('themes_or_colors', metavar='THEME_OR_COLOR', nargs='+',
                    help='a .theme file or a color (#rgb, #rrggbb, #argb, #aarrggbb) when -d / --compute-diff is used')
# Intermixed parsing lets positional colors appear between options.
args = parser.parse_intermixed_args()
RGBColor = tuple[int, int, int]

def parse_rgb_color(color: str, bg: RGBColor) -> RGBColor:
    '''Parse `#rgb`, `#rrggbb`, `#argb` or `#aarrggbb` into an (r, g, b) tuple.

    When an alpha component is present the color is alpha-blended over `bg`.
    Raises Exception for anything that is not a valid color string.
    '''
    n = len(color)
    if not color.startswith('#') or n not in (7, 9, 4, 5):
        raise Exception(f'Invalid argb format: {color}')
    try:
        argb = int(color[1:], 16)
    except ValueError:
        raise Exception(f'Invalid argb format: {color}')
    if n in (7, 9):
        # One byte per channel: #rrggbb or #aarrggbb.
        rgb = ((argb >> 16) & 0xff, (argb >> 8) & 0xff, argb & 0xff)
        if n == 7:
            return rgb
        alpha = argb >> 24
    else:
        # One nibble per channel: #rgb or #argb — duplicate each nibble.
        red, green, blue = (argb >> 8) & 0xf, (argb >> 4) & 0xf, argb & 0xf
        rgb = (red << 4 | red, green << 4 | green, blue << 4 | blue)
        if n == 4:
            return rgb
        alpha = argb >> 12
        alpha |= alpha << 4
    # Integer alpha blend over the supplied background color.
    remainder = 255 - alpha
    return (
        (alpha * rgb[0] + remainder * bg[0]) // 255,
        (alpha * rgb[1] + remainder * bg[1]) // 255,
        (alpha * rgb[2] + remainder * bg[2]) // 255,
    )
# based on https://drafts.csswg.org/css-color/#color-conversion-code (CSS 4)
# 17. Sample code for Color Conversions
def lin_sRGB(c: int) -> float:
    '''sRGB transfer function: channel byte in [0, 255] -> linear [0, 1].'''
    # [0, 255] to [0, 1]
    v = c / 255.0
    return v / 12.92 if v <= 0.04045 else ((v + 0.055) / 1.055) ** 2.4
# Middle (Y) rows of the linear-RGB -> XYZ matrices; relative luminance is
# the dot product of a row with the linearized channels.
sRGB_to_Y_mat = (
    0.21263900587151036,
    0.71516867876775592,
    0.072192315360733714,
)
DisplayP3_to_Y_mat = (
    0.22897456406974884,
    0.69173852183650619,
    0.079286914093744998,
)
# not in CSS
Okl_to_Y_mat = (
    # These values are the formula which calculates `l` in the XYZ to Oklab transformation.
    # (https://bottosson.github.io/posts/oklab/#converting-from-linear-srgb-to-oklab)
    # Specifically taking these values doesn't really make sense, but compared to sRGB
    # - the luminance between red and green will be very greatly decreased
    # - the luminance between red and white will be decreased
    # - the luminance between red and blue / black color will be greatly increased
    # - the luminance between blue and green will be decreased
    0.4122214708,
    0.5363325363,
    0.0514459929,
)
def lin_AdobeRGB(c: int) -> float:
    '''AdobeRGB transfer function: channel byte in [0, 255] -> linear [0, 1].'''
    # [0, 255] to [0, 1], pure power curve (gamma 563/256).
    normalized = c / 255.0
    return normalized ** 2.19921875
# Y row of the linear AdobeRGB -> XYZ matrix.
AdobeRGB_to_Y_mat = (
    0.29734497525053616,
    0.62736356625546597,
    0.07529145849399789,
)
def lin_Rec2020(c: int) -> float:
    '''Rec.2020 transfer function: channel byte in [0, 255] -> linear [0, 1].'''
    # [0, 255] to [0, 1]
    channel = c / 255.0
    if channel < 0.08124285829863151:
        return channel / 4.5
    return ((channel + 0.09929682680944) / 1.09929682680944) ** (1 / 0.45)
# Y row of the linear Rec.2020 -> XYZ matrix.
Rec2020_to_Y_mat = (
    0.26270021201126703,
    0.67799807151887104,
    0.059301716469861945,
)
def make_to_Y(lin, mat):
    '''Build a relative-luminance function from a channel-linearizer `lin`
    and a 3-element Y matrix row `mat`.
    '''
    def to_Y(rgb: RGBColor) -> float:
        r, g, b = rgb
        return mat[0] * lin(r) + mat[1] * lin(g) + mat[2] * lin(b)
    return to_Y
# Select the luminance function matching the requested working color space.
if args.color_space == 'sRGB':
    rgb_to_Y = make_to_Y(lin_sRGB, sRGB_to_Y_mat)
elif args.color_space == 'Okl':
    # Oklab-flavored: sRGB linearization combined with the Oklab `l` weights.
    rgb_to_Y = make_to_Y(lin_sRGB, Okl_to_Y_mat)
elif args.color_space == 'DisplayP3':
    # Display P3 shares the sRGB transfer curve, only the matrix differs.
    rgb_to_Y = make_to_Y(lin_sRGB, DisplayP3_to_Y_mat)
elif args.color_space == 'AdobeRGB':
    rgb_to_Y = make_to_Y(lin_AdobeRGB, AdobeRGB_to_Y_mat)
else: # Rec2020
    rgb_to_Y = make_to_Y(lin_Rec2020, Rec2020_to_Y_mat)
# https://github.com/Myndex/apca-w3
# G-4g constants for use with 2.4 exponent
normBG = 0.56    # background exponent, normal polarity (dark text on light)
normTXT = 0.57   # text exponent, normal polarity
revTXT = 0.62    # text exponent, reverse polarity (light text on dark)
revBG = 0.65     # background exponent, reverse polarity
# G-4g Clamps and Scalers
blkThrs = 0.022    # luminance below which the black soft clamp kicks in
blkClmp = 1.414    # exponent of the black soft clamp
scaleBoW = 1.14    # output scale, black-on-white polarity
scaleWoB = 1.14    # output scale, white-on-black polarity
loBoWoffset = 0.027    # low-contrast offset, black-on-white
loWoBoffset = 0.027    # low-contrast offset, white-on-black
deltaYmin = 0.0005    # below this luminance delta the contrast is reported as 0
loClip = 0.0 # originally 0.1, but this limits the contrast to 7.5
def APCA_contrast(txtY: float, bgY: float) -> float:
    """APCA/SAPC (G-4g) lightness contrast between text and background luminance.

    Positive for dark text on a light background, negative for light-on-dark;
    scaled to roughly [-108, 106].
    """
    # Soft-clamp near-black luminances so the exponent math stays stable.
    if txtY <= blkThrs:
        txtY += (blkThrs - txtY) ** blkClmp
    if bgY <= blkThrs:
        bgY += (blkThrs - bgY) ** blkClmp

    # Extremely small luminance difference: no meaningful contrast.
    if abs(bgY - txtY) < deltaYmin:
        return 0.0

    if bgY > txtY:
        # Normal polarity: black text on white (BoW).
        raw = (bgY ** normBG - txtY ** normTXT) * scaleBoW
        # Low clip prevents polarity reversal on very low contrasts.
        clipped = 0.0 if raw < loClip else raw - loBoWoffset
    else:
        # Reverse polarity: white text on black (WoB), always negative.
        raw = (bgY ** revBG - txtY ** revTXT) * scaleWoB
        clipped = 0.0 if raw > -loClip else raw + loWoBoffset
    return clipped * 100.0
class ColorInfo:
    """A color as written in the theme plus its resolved RGB triple and luminance."""
    # Class-level defaults: black.
    text: str = '#000'
    color: RGBColor = (0, 0, 0)
    Y: float = 0
    def __init__(self, rgb: str, bg: RGBColor):
        # Keep the original spelling for display purposes.
        self.text = rgb
        # Alpha components (if any) are blended over `bg` by parse_rgb_color.
        self.color = parse_rgb_color(rgb, bg)
        # Relative luminance in the color space selected by args.color_space.
        self.Y = rgb_to_Y(self.color)
    def __str__(self) -> str:
        return self.text
# Rating thresholds (AAAA, AAA, AA, A, FAIL) per text category.
NORMAL_LUMINANCE = (90, 75, 60, 45, 35)
BOLD_LUMINANCE = (80, 65, 50, 38, 30)
SPELL_LUMINANCE = (70, 55, 40, 30, 25)
DECORATION_LUMINANCE = (60, 45, 30, 15, 10)
if args.scores:
    # --scores overrides up to 20 thresholds, in the flattened order above;
    # unspecified trailing values keep their defaults.
    scores_update_len = min(20, len(args.scores))
    scores = [*NORMAL_LUMINANCE, *BOLD_LUMINANCE, *SPELL_LUMINANCE, *DECORATION_LUMINANCE]
    scores[:scores_update_len] = args.scores[:scores_update_len]
    NORMAL_LUMINANCE = scores[0:5]
    BOLD_LUMINANCE = scores[5:10]
    SPELL_LUMINANCE = scores[10:15]
    DECORATION_LUMINANCE = scores[15:20]
# (thresholds, is_bold, sample text) triples forwarded to flum().
BOLD_TEXT = (BOLD_LUMINANCE, True, 'Text. ▐')
NORMAL_TEXT = (NORMAL_LUMINANCE, False, 'Text. ▐')
DECORATION_TEXT = (DECORATION_LUMINANCE, False, 'Text. ▐')
# Table header; the luminance column widens when an adjustment is shown.
HEADER = (
    '\x1b[35m'
    ' Background |'
    ' Foreground |'
    f' {(args.add_luminance or args.add_percent_luminance) and " Luminance " or " Lum "} |'
    ' Score\x1b[m'
)
# ANSI color per rating rank (best to worst); the top three share green.
RANK1 = '\x1b[32m'
RANK2 = '\x1b[32m'
RANK3 = '\x1b[32m'
RANK4 = '\x1b[33m'
RANK5 = '\x1b[31m'
RANK6 = '\x1b[31;1m'
def ffloat(x: float) -> str:
    """Right-align x as 'nnn.d', gray out the decimal point, reset color at the end."""
    formatted = format(x, '>5.1f') + '\x1b[m'
    return formatted.replace('.', '\x1b[37m.')
def flum(luminance: float,
         add_luminance: float,
         add_percent_luminance: float,
         bg: ColorInfo,
         fg: ColorInfo,
         luminance_values: tuple[int, int, int, int, int],
         is_bold: bool,
         sample_text: str
         ) -> str:
    """
    Format one measurement: luminance, optional adjusted luminance, rating
    (AAAA..FAIL) and a sample rendered with fg on bg.
    """
    luminance = abs(luminance)
    adjusted = luminance
    adjusted_part = ''
    if add_luminance or add_percent_luminance:
        # Apply the requested adjustment, clamped to the APCA output range.
        adjusted += adjusted * add_percent_luminance + add_luminance
        adjusted = max(0, min(108, adjusted))
        adjusted_part = f' \x1b[35m->\x1b[m {ffloat(adjusted)}'

    # First threshold (inclusive) reached by the adjusted value decides the rating.
    rating_table = zip(luminance_values,
                       (RANK1, RANK2, RANK3, RANK4, RANK5),
                       ('AAAA', 'AAA ', 'AA  ', 'A   ', 'FAIL'))
    for minimum, rank, label in rating_table:
        if adjusted >= minimum:
            score = f'{rank}{label}'
            break
    else:
        score = f'{RANK6}FAIL'

    fg_r, fg_g, fg_b = fg.color
    bg_r, bg_g, bg_b = bg.color
    bold = ';1' if is_bold else ''
    sample = f'\x1b[38;2;{fg_r};{fg_g};{fg_b};48;2;{bg_r};{bg_g};{bg_b}{bold}m {sample_text} \x1b[m'
    return f'{ffloat(luminance)}{adjusted_part} | {score}\x1b[m {sample}'
def color2ansi(rgb: RGBColor) -> str:
    """Render an RGB triple as the 'r;g;b' fragment of an SGR truecolor sequence."""
    red, green, blue = rgb
    return f'{red};{green};{blue}'
spaces = ' '
def fcol_impl(name: str, rgb: str, n: int) -> str:
    """Format 'name (rgb)' with the value dimmed, space-padded to column n.

    The padding is computed arithmetically instead of slicing the module-level
    `spaces` pool: the slice produced a large bogus pad when name/rgb were
    wider than n (negative slice end) and silently truncated when the needed
    width exceeded the pool length.
    """
    pad = ' ' * max(0, n - (len(name) + len(rgb) + 3))
    return f'{name} \x1b[37m({rgb})\x1b[m{pad}'
def fcol1(name: str, rgb: str) -> str:
    """Format a first-column (background) cell, padded to 38 columns."""
    return fcol_impl(name, rgb, 38)
def fcol2(name: str, rgb: str) -> str:
    """Format a second-column (foreground) cell, padded to 43 columns."""
    return fcol_impl(name, rgb, 43)
def create_tab_from_text_styles(min_luminance: float,
                                max_luminance: float,
                                add_luminance: float,
                                add_percent_luminance: float,
                                col1: str,
                                kstyle: str,
                                bg: ColorInfo,
                                text_styles: dict[str, dict[str, str | bool]]
                                ) -> str:
    """Build one table row per style whose `kstyle` color contrasts with `bg`
    inside [min_luminance, max_luminance]; rows are joined with newlines."""
    rows = []
    for style_name, style_def in sorted(text_styles.items()):
        color_text = style_def.get(kstyle)
        # Styles without this color key (or with an empty value) are skipped.
        if not color_text:
            continue
        fg = ColorInfo(color_text, bg.color)
        contrast = APCA_contrast(fg.Y, bg.Y)
        if not (min_luminance <= abs(contrast) <= max_luminance):
            continue
        # Bold styles are rated against the (stricter) bold thresholds.
        text_data = BOLD_TEXT if style_def.get('bold', False) else NORMAL_TEXT
        formatted = flum(contrast, add_luminance, add_percent_luminance,
                         bg, fg, *text_data)
        rows.append(f' {col1} | {fcol2(style_name, fg.text)} | {formatted}')
    return '\n'.join(rows)
# Accumulates every fragment of the report; joined into one string at the end.
output = []
def run_borders(
    min_luminance: float,
    max_luminance: float,
    add_luminance: float,
    add_percent_luminance: float,
    editor_colors: dict[str, str],
    bg_editor: ColorInfo
) -> None:
    """Append the 'Icon Border' section: a mock-up of Kate's icon/line-number
    border followed by contrast tables for the border foreground colors.

    Results are appended to the module-level `output` list.
    """
    output.append('\n\x1b[34mIcon Border\x1b[m:\n')
    # Foreground colors are alpha-blended over the icon-border background;
    # the current line is blended over the editor / border background.
    bg_icon = ColorInfo(editor_colors['IconBorder'], (0, 0, 0))
    bg_current_line = ColorInfo(editor_colors['CurrentLine'], bg_editor.color)
    bg_current_line_border = ColorInfo(editor_colors['CurrentLine'], bg_icon.color)
    fg_line = ColorInfo(editor_colors['LineNumbers'], bg_icon.color)
    fg_current = ColorInfo(editor_colors['CurrentLineNumber'], bg_icon.color)
    fg_separator = ColorInfo(editor_colors['Separator'], bg_icon.color)
    fg_modified = ColorInfo(editor_colors['ModifiedLines'], bg_icon.color)
    fg_saved = ColorInfo(editor_colors['SavedLines'], bg_icon.color)
    # Escape-sequence building blocks for the normal (x*) mock-up rows.
    xbg = color2ansi(bg_icon.color)
    xborder = f'\x1b[38;2;{color2ansi(fg_line.color)};48;2;{xbg}m'
    xsaved = f'\x1b[48;2;{color2ansi(fg_saved.color)};38;2;{xbg}m▋{xborder}'
    xmodified = f'\x1b[48;2;{color2ansi(fg_modified.color)};38;2;{xbg}m▋{xborder}'
    xeditor = f'\x1b[38;2;{color2ansi(fg_separator.color)}m▕\x1b[m'\
              f'\x1b[48;2;{color2ansi(bg_editor.color)}m \x1b[m'
    # Same building blocks for the current-line (c*) mock-up rows.
    cbg = color2ansi(bg_current_line_border.color)
    cborder = f'\x1b[38;2;{color2ansi(fg_current.color)};48;2;{cbg}m'
    csaved = f'\x1b[48;2;{color2ansi(fg_saved.color)};38;2;{cbg}m▋{cborder}'
    cmodified = f'\x1b[48;2;{color2ansi(fg_modified.color)};38;2;{cbg}m▋{cborder}'
    ceditor = f'\x1b[38;2;{color2ansi(fg_separator.color)}m▕\x1b[m'\
              f'\x1b[48;2;{color2ansi(bg_current_line.color)}m \x1b[m'
    # imitates the border of Kate editor
    output.append(f' LineNumbers: {xborder} 42 {xeditor} bg: IconBorder | BackgroundColor\n'
                  f' CurrentLineNumber: {cborder} 43 {ceditor} bg: CurrentLine | CurrentLine\n'
                  f' LineNumbers: {xborder} 44{xmodified}{xeditor} (ModifiedLines)\n'
                  f' CurrentLineNumber: {cborder} 45{cmodified}{ceditor}\n'
                  f' LineNumbers: {xborder} 46{xsaved}{xeditor} (SavedLines)\n'
                  f' CurrentLineNumber: {cborder} 47{csaved}{ceditor}\n'
                  f' Separator\n\n{HEADER}\n')
    # Contrast tables: each background paired with the foregrounds drawn on it.
    color_line_number = ('LineNumbers', fg_line, NORMAL_TEXT)
    colors = (
        ('CurrentLineNumber', fg_current, NORMAL_TEXT),
        ('ModifiedLines', fg_modified, DECORATION_TEXT),
        ('SavedLines', fg_saved, DECORATION_TEXT),
    )
    # Note: the loop target deliberately shadows `colors`; the iterated tuple
    # is built before the loop starts, so the rebinding is harmless.
    for name, bg, colors in (
        ('IconBorder', bg_icon, (color_line_number, *colors)),
        ('CurrentLine', bg_current_line_border, colors),
    ):
        col = fcol1(name, bg.text)
        lines = []
        for k, fg, text_data in colors:
            lum = APCA_contrast(fg.Y, bg.Y)
            if min_luminance <= abs(lum) <= max_luminance:
                result = flum(lum, add_luminance, add_percent_luminance,
                              bg, fg, *text_data)
                lines.append(f' {col} | {fcol2(k, fg.text)} | {result}')
        if lines:
            output.append('\n'.join(lines))
            output.append('\n\n')
    # table for Separator color
    for name, bg in (
        ('IconBorder', bg_icon),
        ('CurrentLine', bg_current_line_border),
        ('BackgroundColor', bg_editor),
    ):
        lum = APCA_contrast(fg_separator.Y, bg.Y)
        if min_luminance <= abs(lum) <= max_luminance:
            col = fcol1(name, bg.text)
            result = flum(lum, add_luminance, add_percent_luminance,
                          bg, fg_separator, DECORATION_LUMINANCE, False, NORMAL_TEXT[2])
            output.append(f' {col} | {fcol2("Separator", fg_separator.text)} | {result}\n')
def run(d: dict[str, str | dict[str, bool | str | dict[str, bool | str]]],
        min_luminance: float,
        max_luminance: float,
        add_luminance: float,
        add_percent_luminance: float,
        show_borders: bool,
        show_custom_styles: bool,
        show_standard_styles: bool,
        accepted_backgrounds: set[str] | None,
        accepted_languages: set[str] | None
        ) -> None:
    """Append the full contrast report for one parsed theme dictionary `d`
    (JSON theme format) to the module-level `output` list."""
    editor_colors = d['editor-colors']
    bg_editor = ColorInfo(editor_colors['BackgroundColor'], (0, 0, 0))
    output.append(f'\x1b[34;1mTheme\x1b[m: {d["metadata"]["name"]}\n')
    if show_borders:
        run_borders(min_luminance, max_luminance,
                    add_luminance, add_percent_luminance,
                    editor_colors, bg_editor)
    #
    # Editor
    #
    output.append('\n\x1b[34mText Area\x1b[m:\n')
    # Backgrounds to evaluate, each paired with the text-style key that
    # provides the foreground tested against it.
    editor_bg_colors = {
        k: (ColorInfo(editor_colors[k], bg_editor.color), 'text-color')
        for k in (
            'TemplateReadOnlyPlaceholder',
            'TemplatePlaceholder',
            'TemplateFocusedPlaceholder',
            'TemplateBackground',
            'MarkBookmark',
            'CodeFolding',
            'ReplaceHighlight',
            'SearchHighlight',
            'BracketMatching',
        )
        if not accepted_backgrounds or k in accepted_backgrounds
    }
    if not accepted_backgrounds or 'TextSelection' in accepted_backgrounds:
        # The selection uses each style's selected-text-color as foreground.
        editor_bg_colors['TextSelection'] = (
            ColorInfo(editor_colors['TextSelection'], bg_editor.color),
            'selected-text-color'
        )
    if not accepted_backgrounds or 'BackgroundColor' in accepted_backgrounds:
        editor_bg_colors['BackgroundColor'] = (bg_editor, 'text-color')
    text_styles = d['text-styles']
    custom_styles = d.get('custom-styles', {}) if show_custom_styles else {}
    for name, (bg, kstyle) in editor_bg_colors.items():
        col = fcol1(name, bg.text)
        if show_standard_styles:
            tab = create_tab_from_text_styles(
                min_luminance, max_luminance,
                add_luminance, add_percent_luminance,
                col, kstyle, bg, text_styles
            )
        #
        # Spell decoration
        #
        # NOTE(review): when show_standard_styles is False, `tab` is read below
        # without having been assigned (NameError on the first iteration) — confirm.
        name = 'SpellChecking'
        # NOTE(review): fg is alpha-blended over the editor background, yet the
        # contrast is computed against the current `bg` — verify this is intended.
        fg = ColorInfo(editor_colors[name], bg_editor.color)
        lum = APCA_contrast(fg.Y, bg.Y)
        if min_luminance <= abs(lum) <= max_luminance:
            result = flum(lum, add_luminance, add_percent_luminance,
                          bg, fg, SPELL_LUMINANCE, False, '~~~~~~~')
            spell_line = f' {col} | {fcol2(name, fg.text)} | {result}'
            tab = f'{tab}\n{spell_line}' if tab else spell_line
        if tab:
            output.append(f'\n{HEADER}\n{tab}\n')
        # table by language for custom styles
        for language, defs in sorted(custom_styles.items()):
            if accepted_languages and language not in accepted_languages:
                continue
            if tab := create_tab_from_text_styles(
                min_luminance, max_luminance,
                add_luminance, add_percent_luminance,
                col, kstyle, bg, defs
            ):
                output.append(f'\n\x1b[36mLanguage: "{language}"\x1b[m:\n{tab}\n')
# ignored:
# - WordWrapMarker
# - TabMarker
# - IndentationLine
# - MarkBreakpointActive
# - MarkBreakpointReached
# - MarkBreakpointDisabled
# - MarkExecution
# - MarkWarning
# - MarkError
def result_legend(AAAA: float, AAA: float, AA: float, BAD: float, VERY_BAD: float) -> str:
    """One-line legend mapping each colored rating label to its threshold."""
    entries = (
        f'{RANK1}AAAA\x1b[m (>={AAAA})',
        f'{RANK2}AAA\x1b[m (>={AAA})',
        f'{RANK3}AA\x1b[m (>={AA})',
        f'{RANK4}A\x1b[m (>={BAD})',
        f'{RANK5}FAIL\x1b[m (>={VERY_BAD})',
        f'{RANK6}FAIL\x1b[m (<{VERY_BAD})',
    )
    return ' ; '.join(entries)
# Emit the legend explaining the rating thresholds unless suppressed.
# (Fixes the user-visible typo "adjustement" -> "adjustment".)
if not args.no_legend:
    output.append(f'''Luminance legend:
- Range for light theme: [0; 106]
- Range for dark theme: [0; 108]
- Result for normal text: {result_legend(*NORMAL_LUMINANCE)}
- Result for bold text: {result_legend(*BOLD_LUMINANCE)}
- Result for spelling error: {result_legend(*SPELL_LUMINANCE)}
- Result for decoration: {result_legend(*DECORATION_LUMINANCE)}
Luminance adjustment: {args.add_percent_luminance:+}% {args.add_luminance:+} (see -p and -a)
Color space: {args.color_space}
''')
# Luminance adjustment requested on the command line (-a absolute, -p percent).
add_luminance = args.add_luminance
add_percent_luminance = args.add_percent_luminance / 100
if args.compute_diff:
    # Diff mode: the first positional argument is the background color and the
    # remaining ones are foreground colors compared against it.
    bg = ColorInfo(args.themes_or_colors[0], (0,0,0))
    output.append('Background | Foreground\n')
    # compares the background with all foreground colors in normal and bold
    for color in args.themes_or_colors[1:]:
        fg = ColorInfo(color, bg.color)
        lum = APCA_contrast(fg.Y, bg.Y)
        col = f'{bg.text:^10} | {fg.text:^10} | '
        output.append(col)
        output.append(flum(lum, add_luminance, add_percent_luminance, bg, fg, *NORMAL_TEXT))
        output.append('\n')
        output.append(col)
        output.append(flum(lum, add_luminance, add_percent_luminance, bg, fg, *BOLD_TEXT))
        output.append('\n')
else:
    add_new_line = False
    for theme in args.themes_or_colors:
        # separate consecutive theme reports with a blank line
        if add_new_line:
            output.append('\n\n')
        add_new_line = True
        # read json theme file ('-' reads the theme from stdin)
        try:
            if theme == '-':
                data = json.load(sys.stdin)
            else:
                with open(theme) as f:
                    data = json.load(f)
        except OSError as e:
            # unreadable file: report it and continue with the remaining themes
            print(f'\x1b[31m{e}\x1b[m', file=sys.stderr)
            continue
        run(data,
            args.min_luminance,
            args.max_luminance,
            add_luminance,
            add_percent_luminance,
            not args.no_borders,
            not args.no_custom_styles,
            not args.no_standard_styles,
            args.bg and set(args.bg),
            args.language and set(args.language),
            )
# HTML output is only supported for theme reports, not for diff mode.
is_html = args.output_format == 'html' and not args.compute_diff
output = ''.join(output)
if is_html:
    import re
    # SGR effect code -> CSS class used by the generated page's stylesheet.
    ansi_to_html = {
        '1': 'bold',
        '31': 'red',
        '32': 'green',
        '33': 'orange',
        '34': 'blue',
        '35': 'purple',
        '36': 'cyan',
        '37': 'gray',
    }
    # Truecolor fragments: 38;2;r;g;b (foreground) / 48;2;r;g;b (background).
    extract_color = re.compile(r'([34])8;2;(\d+);(\d+);(\d+)')
    extract_effect = re.compile(r'\d+')
    # Number of <span> elements currently left open.
    depth = 0
    def replace_styles(m) -> str:
        """Translate one SGR escape sequence into an opening <span>, or close
        every open span when the sequence is the empty reset (ESC[m)."""
        global depth
        effects = m[1]
        if not effects:
            # Empty parameter list is a reset: close all spans opened so far.
            ret = '</span>' * depth
            depth = 0
            return ret
        depth += 1
        colors = []
        def rgb(m) -> str:
            # Turn a truecolor fragment into an inline CSS declaration.
            prop = 'color' if m[1] == '3' else 'background'
            colors.append(f'{prop}:rgb({m[2]},{m[3]},{m[4]})')
            return ''
        effects = extract_color.sub(rgb, effects)
        if colors:
            styles = ';'.join(colors)
            styles = f' style="{styles}"'
        else:
            styles = ''
        # Remaining numeric codes map to CSS classes (see ansi_to_html).
        classes = ' '.join(map(lambda s: ansi_to_html[s], extract_effect.findall(effects)))
        if classes:
            classes = f' class="{classes}"'
        return f'<span{styles}{classes}>'
    output = re.sub(r'\x1b\[([^m]*)m', replace_styles, output)
try:
    if is_html:
        # `data` is the last theme parsed by the loop above; its editor
        # background decides the default page mode.
        bg = data['editor-colors']['BackgroundColor']
        rgb = parse_rgb_color(bg, (0,0,0))
        # Majority vote over the three channels: a mostly-dark editor
        # background selects the dark page chrome. (The original compared
        # rgb[0] three times, ignoring the green and blue channels.)
        if (rgb[0] < 127) + (rgb[1] < 127) + (rgb[2] < 127) >= 2:
            tmode1 = '#light:target'
            tmode2 = ''
            mode1 = 'dark'
            mode2 = 'light'
        else:
            tmode1 = ''
            tmode2 = '#dark:target'
            mode1 = 'light'
            mode2 = 'dark'
        title = args.html_title or data["metadata"]["name"]
        sys.stdout.write(f'''<!DOCTYPE html>
<html><head><title>{title}</title><style>
html, body, #mode {{
padding: 0;
margin: 0;
}}
body {{
padding: .5em;
}}
pre {{
font-family: "JetBrains Mono", "Liberation Mono", Firacode, "DejaVu Sans Mono", Inconsolata, monospace;
}}
.bold {{ font-weight: bold }}
/* light theme */
body{tmode1} {{
background: #ddd;
color: #000;
}}
{tmode1} .red {{ color: #A02222 }}
{tmode1} .green {{ color: #229022 }}
{tmode1} .orange {{ color: #909022 }}
{tmode1} .blue {{ color: #2222A0 }}
{tmode1} .purple {{ color: #A022A0 }}
{tmode1} .cyan {{ color: #22A0A0 }}
{tmode1} .gray {{ color: Gray }}
/* dark theme */
body{tmode2} {{
background: #222;
color: #eee;
}}
{tmode2} .red {{ color: #D95555 }}
{tmode2} .green {{ color: #55D055 }}
{tmode2} .orange {{ color: #D0D055 }}
{tmode2} .blue {{ color: #68A0E8 }}
{tmode2} .purple {{ color: #D077D0 }}
{tmode2} .cyan {{ color: Turquoise }}
{tmode2} .gray {{ color: Gray }}
div {{
position: absolute;
top: -1px;
left: 0;
}}
#mode a {{
padding: .5rem 1rem;
}}
#light-mode {{
color: #2222A0;
background: #ddd;
}}
#light-mode:hover, #light-mode:focus {{ background: #ccc; }}
#dark-mode {{
color: #68a0E8;
background: #222;
}}
#dark-mode:hover, #dark-mode:focus {{ background: #333; }}
#{mode1}-mode {{ display: none }}
#{mode2}-mode {{ display: inline-block }}
#{mode2}:target #{mode2}-mode {{ display: none }}
#{mode2}:target #{mode1}-mode {{ display: inline-block }}
</style></head><body id="{mode2}">
<p id="mode"><a id="{mode2}-mode" href="#{mode2}">Switch to {mode2} mode</a><a id="{mode1}-mode" href="#{mode1}">Switch to {mode1} mode</a></p>
<pre>''')
        sys.stdout.write(output)
        sys.stdout.write('</pre></body></html>')
    else:
        sys.stdout.write(output)
    # flush output here to force SIGPIPE to be triggered
    sys.stdout.flush()
# open in `less` then closing can cause this error
except BrokenPipeError:
    # Python flushes standard streams on exit; redirect remaining output
    # to devnull to avoid another BrokenPipeError at shutdown
    import os
    devnull = os.open(os.devnull, os.O_WRONLY)
    os.dup2(devnull, sys.stdout.fileno())
@@ -0,0 +1,321 @@
#!/usr/bin/perl -w
# update script for kate-editor.org/syntax
# SPDX-FileCopyrightText: 2020 Christoph Cullmann <cullmann@kde.org>
# SPDX-License-Identifier: MIT
# needed things
use FindBin;
use Cwd;
use File::Basename;
use File::Copy;
use File::Path qw(make_path remove_tree);
use XML::Parser;
# be strict & warn
use strict;
use warnings;
# we need as parameter both the source and the build directory
my $sourceDir = shift;
my $buildDir = shift;
if (!defined($sourceDir) || !defined($buildDir)) {
    die "Not all arguments provided, valid call: update-kate-editor-org.pl <source directory> <build directory>\n";
}
# output settings
print "Using source directory: $sourceDir\n";
print "Using build directory: $buildDir\n";
# switch to build directory, we do all our work there
# (all later relative paths are resolved against the build directory)
chdir($buildDir) || die "Failed to switch to build directory '$buildDir'!\n";
# get kate-editor.org clone from invent.kde.org, update if already around
if (-d "kate-editor-org") {
    print "Updating kate-editor.org clone...\n";
    system("git", "-C", "kate-editor-org", "pull") == 0 || die "Failed to pull kate-editor-org.git!\n";
} else {
    print "Creating kate-editor.org clone...\n";
    # SSH clone: requires a KDE developer account with push rights
    system("git", "clone", "git\@invent.kde.org:websites/kate-editor-org.git") == 0 || die "Failed to clone kate-editor-org.git!\n";
}
#
# update of syntax definitions
# beside the pure update site generation, we will create some web site with examples for all highlightings
#
# try to get current frameworks version from the CMake project file;
# use a checked three-argument open and read line by line instead of
# slurping the whole file (the dot in "6." is now escaped, too)
my $currentVersion;
open (my $list, '<', "$sourceDir/CMakeLists.txt") or die "Failed to open '$sourceDir/CMakeLists.txt': $!\n";
while (<$list>) {
    if ((my $version) = /^set\(KF_VERSION "6\.([0-9]+)\.[0-9]+"\)/) {
        $currentVersion = $version;
        last;
    }
}
close $list;
if (!defined($currentVersion)) {
    die "Failed to determine current version of syntax-highlighting framework!\n"
}
# current maximal version
print "Current version of syntax-highlighting: 6.$currentVersion\n";
# purge old data in kate-editor.org clone
my $staticSyntaxPath = "kate-editor-org/static/syntax";
my $staticSyntaxPathData = "$staticSyntaxPath/data/syntax";
remove_tree($staticSyntaxPath);
# remove_tree reports errors only via options; double check the tree is gone
if (-d $staticSyntaxPath) {
    die "Failed to delete '$staticSyntaxPath'!\n";
}
make_path($staticSyntaxPathData);
# same for make_path: verify the directory now exists
if (! -d $staticSyntaxPathData) {
    die "Failed to create '$staticSyntaxPathData'!\n";
}
# collect all known syntax files from the generated resource file and copy them over
# (checked three-argument open instead of the old unchecked two-argument form)
open (my $resourceFile, '<', "$buildDir/data/syntax-data-xml.qrc") or die "Failed to open '$buildDir/data/syntax-data-xml.qrc': $!\n";
while (<$resourceFile>) {
    if ((my $file) = /<file>(.*\.xml)<\/file>/) {
        copy($file, $staticSyntaxPathData) or die "Copy failed: $!";
    }
}
close $resourceFile;
# copy over all html references as examples
system("cp", "-rf", "$sourceDir/autotests/html", "$staticSyntaxPath/data/html") == 0 || die "Failed to copy HTML references!\n";
# switch to kate-editor.org syntax directory now for post-processing
chdir($staticSyntaxPath) || die "Failed to switch to '$staticSyntaxPath' directory!\n";
# add new data to kate-editor.org git
system("git", "add", "data") == 0 || die "Failed to add syntax files to git!\n";
# setup XML parser with handler for start element
my %languageAttributes = ();
sub start_tag
{
    my ($parser, $element, %attributes) = @_;
    # the <language> root element carries all metadata we need: once seen,
    # remember its attributes and abort parsing the rest of the file
    return if $element ne "language";
    %languageAttributes = %attributes;
    $parser->finish();
}
my $parser = XML::Parser->new( Handlers => { Start => \&start_tag });
# read all syntax files and remember for their version infos!
print "Parsing XML syntax/*.xml files...\n";
# file name => attributes of its <language> element
my %metaInfo;
# section name => language name => file name, for the overview YAML below
my %nameToFile;
# NOTE(review): $count is incremented in the loop below but appears unused
# in this part of the script — confirm it is still needed.
my $count = 0;
foreach my $xmlFile (<data/syntax/*.xml>) {
    # parse the file; the Start handler fills %languageAttributes and stops early
    %languageAttributes = ();
    $parser->parsefile( $xmlFile );
    # we need a name!
    my $name = $languageAttributes{'name'};
    if (!defined($name)) {
        print "Skipping $xmlFile as name attribute is missing.\n";
        next;
    }
    # if we have no versions set, we can't handle this file!
    my $version = $languageAttributes{'version'};
    if (!defined($version)) {
        print "Skipping $xmlFile as version attribute is missing.\n";
        next;
    }
    my $kateversion = $languageAttributes{'kateversion'};
    if (!defined($kateversion)) {
        print "Skipping $xmlFile as kateversion attribute is missing.\n";
        next;
    }
    # remember attributes
    # print "Remembering $xmlFile '$name' with version=$version & kateversion=$kateversion\n";
    foreach my $key (keys %languageAttributes) {
        $metaInfo{$xmlFile}{$key} = $languageAttributes{$key};
    }
    # remember section => name => file mapping
    $nameToFile{$languageAttributes{'section'}}{$name} = $xmlFile;
    ++$count;
}
# now: generate all needed update-*.xml files
print "Generating XML update-*.xml files...\n";
for (my $majorVersion = 5; $majorVersion <= 6; ++$majorVersion) {
    my $minorVersion = 0;
    # KF5 files go up to a fixed 5.256; KF6 only up to the current release
    while ($minorVersion <= (($majorVersion == 6) ? $currentVersion : 256)) {
        # generate one update file, with a checked three-argument open
        my $cVersion = "$majorVersion.$minorVersion";
        open (my $update, '>', "update-$cVersion.xml") or die "Failed to write 'update-$cVersion.xml': $!\n";
        print $update "<!DOCTYPE DEFINITIONS>\n";
        print $update "<DEFINITIONS>\n";
        foreach my $def (sort keys %metaInfo) {
            # skip definitions requiring a newer kateversion than this update file targets
            $_ = $metaInfo{$def}{kateversion};
            if ((my $major, my $minor) = /([0-9]+)\.([0-9]+)/) {
                next if (($major > $majorVersion) || ($major == $majorVersion && $minor > $minorVersion));
            } else {
                next;
            }
            print $update "<Definition name=\"$metaInfo{$def}{name}\" url=\"https://kate-editor.org/syntax/$def\" version=\"$metaInfo{$def}{version}\"/>\n";
        }
        print $update "</DEFINITIONS>\n";
        # buffered write errors surface at close, so check it
        close $update or die "Failed to close 'update-$cVersion.xml': $!\n";
        # add to git; the list form avoids going through the shell
        system("git", "add", "update-$cVersion.xml") == 0 || die "Failed to add update-$cVersion.xml to git!\n";
        # next one
        ++$minorVersion;
    }
}
# parse the html files to match them to the highlighting they belong to
# we just search for the dark variants and derive the names for the non-dark from that
print "Parsing HTML example syntax/data/html/*.html files...\n";
my %nameToHTML;
foreach my $htmlFile (<data/html/*.dark.html>, <data/html/.*.dark.html>) {
    my $name;
    # checked three-argument open instead of the old unchecked two-argument form
    open (my $F, '<', $htmlFile) or die "Failed to open '$htmlFile': $!\n";
    while (<$F>) {
        # the generator meta tag carries the definition name
        if (($name) = /name="generator" content="KF5::SyntaxHighlighting - Definition \((.*)\) - Theme/) {
            last;
        }
    }
    close $F;
    if (defined($name)) {
        # strip the suffix: the stored path is the base for both variants
        $htmlFile =~ s/\.dark\.html//;
        if (defined($nameToHTML{$name})) {
            die "Duplicated test output found for '$name' (".$nameToHTML{$name}." vs. ".$htmlFile.").\ntesthighlighter_test in the framework should not have allowed that!\n";
        }
        $nameToHTML{$name} = $htmlFile;
    } else {
        print "Skipping $htmlFile as proper generator meta information tag missing.\n";
    }
}
# for better l10n, only generate a YAML file containing syntax data
# a Hugo template will be combined with the data to generate the overview page /syntax
print "Generating syntax data in data/syntax.yaml...\n";
make_path("../../data");
my $syntax_path = "../../data/syntax.yaml";
# checked three-argument open for the generated YAML file
open (my $syntax_handle, '>', $syntax_path) or die "Failed to write '$syntax_path': $!\n";
print $syntax_handle "# This file is auto-generated by \"make update_kate_editor_org\" in syntax-highlighting.git\n";
foreach my $section (sort keys %nameToFile) {
    foreach my $name (sort keys %{$nameToFile{$section}}) {
        my $file = $nameToFile{$section}{$name};
        print $syntax_handle
            "- name: $name\n".
            "  section: $section\n".
            "  file: /syntax/$file\n";
        # link example output if existing
        if (defined($nameToHTML{$name})) {
            print $syntax_handle "  examples: [/syntax/".$nameToHTML{$name}.".html, /syntax/".$nameToHTML{$name}.".dark.html]\n";
        } else {
            print $syntax_handle "  examples: []\n";
        }
    }
}
# buffered write errors surface at close, so check it
close($syntax_handle) or die "Failed to close '$syntax_path': $!\n";
# add to git; the list form avoids going through the shell
system("git", "add", $syntax_path) == 0 || die "Failed to add $syntax_path to git!\n";
#
# update of themes web site
# this will generate an overview of all shipped themes with an example
#
# switch back to build directory, we do all our work there
chdir($buildDir) || die "Failed to switch to build directory '$buildDir'!\n";
# purge old data in kate-editor.org clone
my $staticThemePath = "kate-editor-org/static/themes";
remove_tree($staticThemePath);
# remove_tree reports errors only via options; double check the tree is gone
if (-d $staticThemePath) {
    die "Failed to delete '$staticThemePath'!\n";
}
make_path($staticThemePath);
if (! -d $staticThemePath) {
    die "Failed to create '$staticThemePath'!\n";
}
# copy over all html renderings as examples
print "Updating theme example HTML files...\n";
system("cp", "-rf", "autotests/theme.html.output", "$staticThemePath/html") == 0 || die "Failed to copy autotests/theme.html.output references!\n";
# switch over to git again
# (all following relative paths are inside the themes directory)
chdir($staticThemePath) || die "Failed to switch to '$staticThemePath' directory!\n";
# collect all themes with their test case
print "Parsing theme kate-editor-org/static/themes/html/*.html files...\n";
my %themeToHTML;
foreach my $htmlFile (<html/*.html>) {
    my $name;
    # checked three-argument open instead of the old unchecked two-argument form
    open (my $F, '<', $htmlFile) or die "Failed to open '$htmlFile': $!\n";
    while (<$F>) {
        # the generator meta tag carries the theme name
        if (($name) = /name="generator" content="KF5::SyntaxHighlighting - Definition \(.*\) - Theme \((.*)\)"/) {
            last;
        }
    }
    close $F;
    if (defined($name)) {
        $themeToHTML{$name} = $htmlFile;
    } else {
        print "Skipping $htmlFile as proper generator meta information tag missing.\n";
    }
}
# create HTML snippets one can embed into a page from the theme HTML pages
# we will hash the stuff from lower case name to real name + file for better output order below
my %themeToHTMLSnippet;
foreach my $name (sort keys %themeToHTML) {
    # slurp the fully rendered page, with a checked three-argument open
    open (my $F, '<', $themeToHTML{$name}) or die "Failed to open '$themeToHTML{$name}': $!\n";
    my $fullFile = do { local $/; <$F> };
    close $F;
    # kill <body ...><pre> start and replace it with simple <pre> with body attribute
    $fullFile =~ s@.*<body style="(.*)"><pre>@<pre style="$1">@s;
    # kill ending </pre>... and replace it with simple </pre>
    $fullFile =~ s@</pre></body></html>@</pre>@g;
    # write snippet to disk
    my $snippetName = $themeToHTML{$name};
    $snippetName =~ s/\.html/-snippet.html/;
    open (my $OF, '>', $snippetName) or die "Failed to write '$snippetName': $!\n";
    print $OF $fullFile;
    # buffered write errors surface at close, so check it
    close $OF or die "Failed to close '$snippetName': $!\n";
    $themeToHTMLSnippet{lc($name)}{"name"} = $name;
    $themeToHTMLSnippet{lc($name)}{"file"} = $snippetName;
}
# add html files
system("git", "add", "html") == 0 || die "Failed to add theme HTML files to git!\n";
# for better l10n, only generate a YAML file containing theme data
# a Hugo template will be combined with the data to generate the overview page /themes
# we output sorted by lower case names as otherwise ayu and co. end up at the end...
print "Generating theme data in data/themes.yaml...\n";
my $themes_path = "../../data/themes.yaml";
# checked three-argument open for the generated YAML file
open (my $themes_handle, '>', $themes_path) or die "Failed to write '$themes_path': $!\n";
print $themes_handle "# This file is auto-generated by \"make update_kate_editor_org\" in syntax-highlighting.git\n";
foreach my $lcName (sort keys %themeToHTMLSnippet) {
    my $name = $themeToHTMLSnippet{$lcName}{"name"};
    my $file = $themeToHTMLSnippet{$lcName}{"file"};
    print $themes_handle
        "- name: $name\n".
        "  file: /static/themes/$file\n";
}
# buffered write errors surface at close, so check it
close($themes_handle) or die "Failed to close '$themes_path': $!\n";
# add to git; the list form avoids going through the shell
system("git", "add", $themes_path) == 0 || die "Failed to add $themes_path to git!\n";