|  |  |  |
|---|---|---|
| author | orbitalquark <70453897+orbitalquark@users.noreply.github.com> | 2024-09-18 14:30:49 -0400 |
| committer | Randy Palamar <randy@rnpnr.xyz> | 2025-01-04 12:29:07 -0700 |
| commit | c1f4d3f68787fa2ae964c468d28a84df37319b28 (patch) | |
| tree | dcd62bd74f8f9fd786cba6a0c248fb388d5244a8 /lua/lexers/jq.lua | |
| parent | cc18cea14d1f836abcebb84a96f5029431474255 (diff) | |
| download | vis-c1f4d3f68787fa2ae964c468d28a84df37319b28.tar.gz vis-c1f4d3f68787fa2ae964c468d28a84df37319b28.tar.xz | |
lexers: switch to tabs for indentation
Diffstat (limited to 'lua/lexers/jq.lua')
|  |  |  |
|---|---|---|
| -rw-r--r-- | lua/lexers/jq.lua | 56 |

1 file changed, 28 insertions(+), 28 deletions(-)
```diff
diff --git a/lua/lexers/jq.lua b/lua/lexers/jq.lua
index cf2ea3e..04e3f71 100644
--- a/lua/lexers/jq.lua
+++ b/lua/lexers/jq.lua
@@ -13,35 +13,35 @@ lex:add_rule('whitespace', token(lexer.WHITESPACE, lexer.space^1))
 
 -- Keywords.
 lex:add_rule('keyword', token(lexer.KEYWORD, word_match{
-  -- keywords not listed by jq's "builtins", minus operators 'and' and 'or', plus the '?' shorthand
-  'as', 'break', 'catch', 'def', 'elif', 'else', 'end', 'foreach', 'if', 'import', 'include',
-  'label', 'module', 'reduce', 'then', 'try'
+	-- keywords not listed by jq's "builtins", minus operators 'and' and 'or', plus the '?' shorthand
+	'as', 'break', 'catch', 'def', 'elif', 'else', 'end', 'foreach', 'if', 'import', 'include',
+	'label', 'module', 'reduce', 'then', 'try'
 } + '?'))
 
 -- Functions.
 lex:add_rule('function', token(lexer.FUNCTION, word_match{
-  -- jq 1.6 built-in functions (SQL in upper caisse)
-  'acos', 'acosh', 'add', 'all', 'any', 'arrays', 'ascii_downcase', 'ascii_upcase', 'asin', 'asinh',
-  'atan', 'atan2', 'atanh', 'booleans', 'bsearch', 'builtins', 'capture', 'cbrt', 'ceil',
-  'combinations', 'contains', 'copysign', 'cos', 'cosh', 'debug', 'del', 'delpaths', 'drem',
-  'empty', 'endswith', 'env', 'erf', 'erfc', 'error', 'exp', 'exp10', 'exp2', 'explode', 'expm1',
-  'fabs', 'fdim', 'finites', 'first', 'flatten', 'floor', 'fma', 'fmax', 'fmin', 'fmod', 'format',
-  'frexp', 'from_entries', 'fromdate', 'fromdateiso8601', 'fromjson', 'fromstream', 'gamma',
-  'get_jq_origin', 'get_prog_origin', 'get_search_list', 'getpath', 'gmtime', 'group_by', 'gsub',
-  'halt', 'halt_error', 'has', 'hypot', 'implode', 'IN', 'in', 'INDEX', 'index', 'indices',
-  'infinite', 'input', 'input_filename', 'input_line_number', 'inputs', 'inside', 'isempty',
-  'isfinite', 'isinfinite', 'isnan', 'isnormal', 'iterables', 'j0', 'j1', 'jn', 'JOIN', 'join',
-  'keys', 'keys_unsorted', 'last', 'ldexp', 'leaf_paths', 'length', 'lgamma', 'lgamma_r', 'limit',
-  'localtime', 'log', 'log10', 'log1p', 'log2', 'logb', 'ltrimstr', 'map', 'map_values', 'match',
-  'max', 'max_by', 'min', 'min_by', 'mktime', 'modf', 'modulemeta', 'nan', 'nearbyint', 'nextafter',
-  'nexttoward', 'normals', 'not', 'now', 'nth', 'nulls', 'numbers', 'objects', 'path', 'paths',
-  'pow', 'pow10', 'range', 'recurse', 'recurse_down', 'remainder', 'repeat', 'reverse', 'rindex',
-  'rint', 'round', 'rtrimstr', 'scalars', 'scalars_or_empty', 'scalb', 'scalbln', 'scan', 'select',
-  'setpath', 'significand', 'sin', 'sinh', 'sort', 'sort_by', 'split', 'splits', 'sqrt',
-  'startswith', 'stderr', 'strflocaltime', 'strftime', 'strings', 'strptime', 'sub', 'tan', 'tanh',
-  'test', 'tgamma', 'to_entries', 'todate', 'todateiso8601', 'tojson', 'tonumber', 'tostream',
-  'tostring', 'transpose', 'trunc', 'truncate_stream', 'type', 'unique', 'unique_by', 'until',
-  'utf8bytelength', 'values', 'walk', 'while', 'with_entries', 'y0', 'y1', 'yn'
+	-- jq 1.6 built-in functions (SQL in upper caisse)
+	'acos', 'acosh', 'add', 'all', 'any', 'arrays', 'ascii_downcase', 'ascii_upcase', 'asin', 'asinh',
+	'atan', 'atan2', 'atanh', 'booleans', 'bsearch', 'builtins', 'capture', 'cbrt', 'ceil',
+	'combinations', 'contains', 'copysign', 'cos', 'cosh', 'debug', 'del', 'delpaths', 'drem',
+	'empty', 'endswith', 'env', 'erf', 'erfc', 'error', 'exp', 'exp10', 'exp2', 'explode', 'expm1',
+	'fabs', 'fdim', 'finites', 'first', 'flatten', 'floor', 'fma', 'fmax', 'fmin', 'fmod', 'format',
+	'frexp', 'from_entries', 'fromdate', 'fromdateiso8601', 'fromjson', 'fromstream', 'gamma',
+	'get_jq_origin', 'get_prog_origin', 'get_search_list', 'getpath', 'gmtime', 'group_by', 'gsub',
+	'halt', 'halt_error', 'has', 'hypot', 'implode', 'IN', 'in', 'INDEX', 'index', 'indices',
+	'infinite', 'input', 'input_filename', 'input_line_number', 'inputs', 'inside', 'isempty',
+	'isfinite', 'isinfinite', 'isnan', 'isnormal', 'iterables', 'j0', 'j1', 'jn', 'JOIN', 'join',
+	'keys', 'keys_unsorted', 'last', 'ldexp', 'leaf_paths', 'length', 'lgamma', 'lgamma_r', 'limit',
+	'localtime', 'log', 'log10', 'log1p', 'log2', 'logb', 'ltrimstr', 'map', 'map_values', 'match',
+	'max', 'max_by', 'min', 'min_by', 'mktime', 'modf', 'modulemeta', 'nan', 'nearbyint', 'nextafter',
+	'nexttoward', 'normals', 'not', 'now', 'nth', 'nulls', 'numbers', 'objects', 'path', 'paths',
+	'pow', 'pow10', 'range', 'recurse', 'recurse_down', 'remainder', 'repeat', 'reverse', 'rindex',
+	'rint', 'round', 'rtrimstr', 'scalars', 'scalars_or_empty', 'scalb', 'scalbln', 'scan', 'select',
+	'setpath', 'significand', 'sin', 'sinh', 'sort', 'sort_by', 'split', 'splits', 'sqrt',
+	'startswith', 'stderr', 'strflocaltime', 'strftime', 'strings', 'strptime', 'sub', 'tan', 'tanh',
+	'test', 'tgamma', 'to_entries', 'todate', 'todateiso8601', 'tojson', 'tonumber', 'tostream',
+	'tostring', 'transpose', 'trunc', 'truncate_stream', 'type', 'unique', 'unique_by', 'until',
+	'utf8bytelength', 'values', 'walk', 'while', 'with_entries', 'y0', 'y1', 'yn'
 }))
 
 -- Strings.
@@ -52,8 +52,8 @@ lex:add_rule('string', string + literal)
 -- Operators.
 -- 'not' isn't an operator but a function (filter)
 lex:add_rule('operator', token(lexer.OPERATOR,
-  P('.[]') + '?//' + '//=' + 'and' + '[]' + '//' + '==' + '!=' + '>=' + '<=' + '|=' + '+=' + '-=' +
-  '*=' + '/=' + '%=' + 'or' + S('=+-*/%<>()[]{}.,') + '|' + ';'))
+	P('.[]') + '?//' + '//=' + 'and' + '[]' + '//' + '==' + '!=' + '>=' + '<=' + '|=' + '+=' + '-=' +
+	'*=' + '/=' + '%=' + 'or' + S('=+-*/%<>()[]{}.,') + '|' + ';'))
 
 -- Identifiers.
 lex:add_rule('identifier', token(lexer.IDENTIFIER, lexer.word))
@@ -66,7 +66,7 @@ lex:add_rule('number', token(lexer.NUMBER, lexer.number))
 
 -- Formats.
 lex:add_rule('format',
-  token('format', '@' * word_match('text json html uri csv tsv sh base64 base64d')))
+	token('format', '@' * word_match('text json html uri csv tsv sh base64 base64d')))
 lex:add_style('format', lexer.styles.constant)
 
 -- Variables.
```
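The hunks above only re-indent existing code, but they also show the overall shape of the jq lexer: `lex:add_rule` entries for keywords, built-in functions, strings, operators, identifiers, numbers, and `@`-formats. As a rough sketch of how such a lexer can be exercised outside the editor, the snippet below uses Scintillua's standalone Lua API (`lexer.load` and `lex:lex`); the sample jq program and the assumption that `lexer.lua` plus the `lexers/` directory are on `package.path` are illustrative, and inside vis the editor performs this loading itself.

```lua
-- Hypothetical standalone check (not part of this commit): load the jq lexer
-- via Scintillua's Lua API and print the tokens it produces for a small jq
-- program. Assumes lexer.lua and the lexers/ directory are reachable through
-- package.path; inside vis the editor handles this itself.
local lexer = require('lexer')
local lex = lexer.load('jq')

-- One line of jq that touches the rules shown above: keywords ('if', 'then',
-- 'else', 'end'), built-in functions ('has', 'map', 'keys'), operators
-- ('.', '|', parentheses), string literals, and the '@json' format.
local source = 'if has("items") then .items | map(keys) | @json else "none" end'

-- lex() returns a flat list that alternates token names with the position
-- just past each token, so walk it two entries at a time.
local tokens = lex:lex(source)
for i = 1, #tokens, 2 do
	print(string.format('%-12s ends at %d', tokens[i], tokens[i + 1]))
end
```

Rule order matters in Scintillua lexers: rules added earlier are tried first, which is why the catch-all 'identifier' rule is registered after 'keyword' and 'function' in this file.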
