aboutsummaryrefslogtreecommitdiff
path: root/lexers/man.lua
diff options
context:
space:
mode:
authorMarc André Tanner <mat@brain-dump.org>2016-12-07 16:49:29 +0100
committerMarc André Tanner <mat@brain-dump.org>2016-12-07 20:11:32 +0100
commit3570869c9ae2c4df14b15423789919e514322916 (patch)
tree6b990c9ec59fbdc7abce89c1307d22e66d0fd88a /lexers/man.lua
parent098504f67aea8a862840d58c69e8f6360eef3073 (diff)
downloadvis-3570869c9ae2c4df14b15423789919e514322916.tar.gz
vis-3570869c9ae2c4df14b15423789919e514322916.tar.xz
Move all lua related files to lua/ subfolder
Also remove the lexers sub directory from the Lua search path. As a result we attempt to open fewer files during startup: $ strace -e open -o log ./vis +q config.h && wc -l log In order to avoid having to modify all lexers which `require('lexer')` we instead place a symlink in the top level directory. $ ./configure --disable-lua $ rm -rf lua Should result in a source tree with most lua specific functionality removed.
Diffstat (limited to 'lexers/man.lua')
-rw-r--r--lexers/man.lua37
1 files changed, 0 insertions, 37 deletions
diff --git a/lexers/man.lua b/lexers/man.lua
deleted file mode 100644
index 557e11d..0000000
--- a/lexers/man.lua
+++ /dev/null
@@ -1,37 +0,0 @@
--- Copyright 2015-2016 David B. Lamkins <david@lamkins.net>. See LICENSE.
--- man/roff LPeg lexer.
-
-local l = require('lexer')
-local token, word_match = l.token, l.word_match
-local P, R, S = lpeg.P, lpeg.R, lpeg.S
-
-local M = {_NAME = 'man'}
-
--- Whitespace.
-local ws = token(l.WHITESPACE, l.space^1)
-
--- Markup.
-local rule1 = token(l.STRING,
- P('.') * (P('B') * P('R')^-1 + P('I') * P('PR')^-1) *
- l.nonnewline^0)
-local rule2 = token(l.NUMBER, P('.') * S('ST') * P('H') * l.nonnewline^0)
-local rule3 = token(l.KEYWORD,
- P('.br') + P('.DS') + P('.RS') + P('.RE') + P('.PD'))
-local rule4 = token(l.LABEL, P('.') * (S('ST') * P('H') + P('.TP')))
-local rule5 = token(l.VARIABLE,
- P('.B') * P('R')^-1 + P('.I') * S('PR')^-1 + P('.PP'))
-local rule6 = token(l.TYPE, P('\\f') * S('BIPR'))
-local rule7 = token(l.PREPROCESSOR, l.starts_line('.') * l.alpha^1)
-
-M._rules = {
- {'whitespace', ws},
- {'rule1', rule1},
- {'rule2', rule2},
- {'rule3', rule3},
- {'rule4', rule4},
- {'rule5', rule5},
- {'rule6', rule6},
- {'rule7', rule7},
-}
-
-return M