# -*- coding: utf-8 -*-
"""
    pygments.lexer
    ~~~~~~~~~~~~~~

    Base lexer classes.

    :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import re

from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
     make_analysator


__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
           'LexerContext', 'include', 'bygroups', 'using', 'this']


_default_analyse = staticmethod(lambda x: 0.0)


class LexerMeta(type):
    """
    This metaclass automagically converts ``analyse_text`` methods into
    static methods which always return float values.
    """

    def __new__(cls, name, bases, d):
        if 'analyse_text' in d:
            d['analyse_text'] = make_analysator(d['analyse_text'])
        return type.__new__(cls, name, bases, d)


class Lexer(object):
    """
    Lexer for a specific language.

    Basic options recognized:
    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True).  This
        is required for some lexers that consume input linewise.
        *New in Pygments 1.3.*
    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'latin1'``).
        Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or
        ``'chardet'`` to use the chardet library, if it is installed.
    """

    #: Name of the lexer
    name = None

    #: Shortcuts for the lexer
    aliases = []

    #: File name globs
    filenames = []

    #: Secondary file name globs
    alias_filenames = []

    #: MIME types
    mimetypes = []

    __metaclass__ = LexerMeta

    def __init__(self, **options):
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        self.encoding = options.get('encoding', 'latin1')
        # self.encoding = options.get('inencoding', None) or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                                     self.options)
        else:
            return '<pygments.lexers.%s>' % self.__class__.__name__

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)

    def analyse_text(text):
        """
        Has to return a float between ``0`` and ``1`` that indicates
        if a lexer wants to highlight this text. Used by ``guess_lexer``.
        If this method returns ``0`` it won't highlight it in any case, if
        it returns ``1`` highlighting with this lexer is guaranteed.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`,
        it's the same as if the return value was ``0.0``.
        """

    def get_tokens(self, text, unfiltered=False):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`. If `unfiltered` is set to `True`, the filtering mechanism
        is bypassed even if filters are defined.

        Also preprocess the text: expand tabs, strip the input if
        requested, and apply registered filters.
        """
        if not isinstance(text, unicode):
            if self.encoding == 'guess':
                try:
                    text = text.decode('utf-8')
                    if text.startswith(u'\ufeff'):
                        text = text[len(u'\ufeff'):]
                except UnicodeDecodeError:
                    text = text.decode('latin1')
            elif self.encoding == 'chardet':
                try:
                    import chardet
                except ImportError:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/')
                enc = chardet.detect(text)
                text = text.decode(enc['encoding'])
            else:
                text = text.decode(self.encoding)
        # text now *is* a unicode string
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        def streamer():
            for i, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        Return an iterable of (index, tokentype, value) tuples, where
        ``index`` is the starting position of the token within the input.

        In subclasses, implement this method as a generator to
        maximize effectiveness.
        """
        raise NotImplementedError
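

# A minimal sketch (assumed example, not part of the original module) of a
# direct ``Lexer`` subclass, tying together ``get_tokens_unprocessed``,
# ``analyse_text`` and the basic options; the ``WhitespaceLexer`` name and
# its line-based tokenization are hypothetical.
class WhitespaceLexer(Lexer):
    """Hypothetical example lexer: emits every line as a single Text token."""
    name = 'Whitespace sketch'
    aliases = ['ws-sketch']

    def get_tokens_unprocessed(self, text):
        pos = 0
        for line in text.splitlines(True):
            yield pos, Text, line
            pos += len(line)

    def analyse_text(text):
        # LexerMeta wraps this into a static method returning a float.
        return 0.01

# Typical use goes through get_tokens(), which applies options and filters:
#   list(WhitespaceLexer(stripnl=False).get_tokens(u'two\nlines\n'))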


class DelegatingLexer(Lexer):
    """
    This lexer takes two lexers as arguments: a root lexer and
    a language lexer. First everything is scanned using the language
    lexer, afterwards all ``Other`` tokens are lexed using the root
    lexer.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        buffered = ''
        insertions = []
        lng_buffer = []
        for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
            if t is self.needle:
                if lng_buffer:
                    insertions.append((len(buffered), lng_buffer))
                    lng_buffer = []
                buffered += v
            else:
                lng_buffer.append((i, t, v))
        if lng_buffer:
            insertions.append((len(buffered), lng_buffer))
        return do_insertions(insertions,
                             self.root_lexer.get_tokens_unprocessed(buffered))
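

# A sketch (assumed example, not part of the original module) of the pattern
# the template lexers use with DelegatingLexer: the hypothetical
# ToyMarkerLexer yields ``Other`` for plain text and Text for '[[...]]'
# regions, and the DelegatingLexer subclass re-lexes the ``Other`` stretches
# with the WhitespaceLexer sketched above.
class ToyMarkerLexer(Lexer):
    """Hypothetical: '[[...]]' regions become Text, everything else Other."""
    name = 'Toy marker sketch'

    def get_tokens_unprocessed(self, text):
        pos = 0
        while pos < len(text):
            start = text.find('[[', pos)
            if start < 0:
                yield pos, Other, text[pos:]
                break
            if start > pos:
                yield pos, Other, text[pos:start]
            end = text.find(']]', start)
            end = len(text) if end < 0 else end + 2
            yield start, Text, text[start:end]
            pos = end


class ToyDelegatingLexer(DelegatingLexer):
    """Hypothetical: re-lex non-marker text with WhitespaceLexer."""
    def __init__(self, **options):
        DelegatingLexer.__init__(self, WhitespaceLexer, ToyMarkerLexer,
                                 **options)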


#-------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#


class include(str):
    """
    Indicates that a state should include rules from another state.
    """
    pass


class combined(tuple):
    """
    Indicates a state combined from multiple states.
    """

    def __new__(cls, *args):
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # tuple.__init__ doesn't do anything
        pass


class _PseudoMatch(object):
    """
    A pseudo match object constructed from a string.
    """

    def __init__(self, start, text):
        self._text = text
        self._start = start

    def start(self, arg=None):
        return self._start

    def end(self, arg=None):
        return self._start + len(self._text)

    def group(self, arg=None):
        if arg:
            raise IndexError('No such group')
        return self._text

    def groups(self):
        return (self._text,)

    def groupdict(self):
        return {}


def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                if ctx:
                    ctx.pos = match.start(i + 1)
                for item in action(lexer, _PseudoMatch(match.start(i + 1),
                                   match.group(i + 1)), ctx):
                    if item:
                        yield item
        if ctx:
            ctx.pos = match.end()
    return callback
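

# Minimal sketch (assumed example, not part of the original module) showing
# what a ``bygroups`` callback yields for a concrete match; the pattern and
# token types are arbitrary.  In a RegexLexer rule it would appear as
#   (r'(\w+)(\s*=\s*)', bygroups(Name.Attribute, Operator)),
def _bygroups_example():
    from pygments.token import Name, Operator
    callback = bygroups(Name.Attribute, Operator)
    m = re.match(r'(\w+)(\s*=\s*)', 'width = 10')
    # yields (0, Name.Attribute, 'width') and (5, Operator, ' = ')
    return list(callback(None, m))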


class _This(object):
    """
    Special singleton used for indicating the caller class.
    Used by ``using``.
    """

this = _This()


def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a single state
    name, which is pushed on top of the ``'root'`` state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            kwargs.update(lexer.options)
            lx = _other(**kwargs)

            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
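

# Minimal sketch (assumed example, not part of the original module):
# ``using(this)`` builds a callback that re-lexes the matched text with the
# *current* lexer, while ``using(SomeOtherLexer)`` instantiates another lexer
# class for it; a typical rule looks like
#   (r'<\?.*?\?>', using(SomeOtherLexer)),
# where ``SomeOtherLexer`` is a placeholder.  Below the callback is exercised
# directly with the WhitespaceLexer sketched near the top of this file.
def _using_example():
    callback = using(this)
    m = re.match(r'.*', 'nested text')
    # re-lexes 'nested text' with the WhitespaceLexer instance itself:
    # yields (0, Text, 'nested text')
    return list(callback(WhitespaceLexer(), m))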


class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_state(cls, unprocessed, processed, state):
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokens.extend(cls._process_state(unprocessed, processed, str(tdef)))
                continue

            assert type(tdef) is tuple, "wrong rule def %r" % tdef

            try:
                rex = re.compile(tdef[0], rflags).match
            except Exception, err:
                raise ValueError("uncompilable regex %r in state %r of %r: %s" %
                                 (tdef[0], state, cls, err))

            assert type(tdef[1]) is _TokenType or callable(tdef[1]), \
                   'token type must be simple type or callable, not %r' % (tdef[1],)

            if len(tdef) == 2:
                new_state = None
            else:
                tdef2 = tdef[2]
                if isinstance(tdef2, str):
                    # an existing state
                    if tdef2 == '#pop':
                        new_state = -1
                    elif tdef2 in unprocessed:
                        new_state = (tdef2,)
                    elif tdef2 == '#push':
                        new_state = tdef2
                    elif tdef2[:5] == '#pop:':
                        new_state = -int(tdef2[5:])
                    else:
                        assert False, 'unknown new state %r' % tdef2
                elif isinstance(tdef2, combined):
                    # combine a new state from existing ones
                    new_state = '_tmp_%d' % cls._tmpname
                    cls._tmpname += 1
                    itokens = []
                    for istate in tdef2:
                        assert istate != state, 'circular state ref %r' % istate
                        itokens.extend(cls._process_state(unprocessed,
                                                          processed, istate))
                    processed[new_state] = itokens
                    new_state = (new_state,)
                elif isinstance(tdef2, tuple):
                    # push more than one state
                    for state in tdef2:
                        assert (state in unprocessed or
                                state in ('#pop', '#push')), \
                               'unknown new state ' + state
                    new_state = tdef2
                else:
                    assert False, 'unknown new state def %r' % tdef2
            tokens.append((rex, tdef[1], new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        for state in tokendefs.keys():
            cls._process_state(tokendefs, processed, state)
        return processed

    def __call__(cls, *args, **kwds):
        if not hasattr(cls, '_tokens'):
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.tokens)

        return type.__call__(cls, *args, **kwds)


class RegexLexer(Lexer):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """
    __metaclass__ = RegexLexerMeta

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: The initial state is 'root'.
    #: ``new_state`` can be omitted to signify no state transition.
    #: If it is a string, the state is pushed on the stack and changed.
    #: If it is a tuple of strings, all states are pushed on the stack and
    #: the current state will be the topmost.
    #: It can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again.  (See the example lexer sketched after this class.)
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (index, tokentype, value) tuples.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if type(action) is _TokenType:
                        yield pos, action, m.group()
                    else:
                        for item in action(self, m):
                            yield item
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        pos += 1
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Text, u'\n'
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break
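

# A sketch (assumed example, not part of the original module) of a small
# RegexLexer subclass exercising the ``tokens`` features documented above:
# ``include``, ``bygroups``, a state push via a string, and ``'#pop'``.
# The mini-language (set statements, '#' comments, double-quoted strings
# with escapes) and the class name are made up for illustration.
from pygments.token import Comment, Keyword, Name, String

class ToyConfigLexer(RegexLexer):
    name = 'Toy config sketch'
    aliases = ['toy-config']

    tokens = {
        'whitespace': [
            (r'\s+', Text),
            (r'#.*?$', Comment.Single),
        ],
        'root': [
            include('whitespace'),
            (r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
            (r'"', String, 'string'),          # push the 'string' state
        ],
        'string': [
            (r'\\.', String.Escape),
            (r'"', String, '#pop'),            # pop back to 'root'
            (r'[^"\\]+', String),
        ],
    }

# e.g. list(ToyConfigLexer().get_tokens(u'set width "10"  # comment\n'))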


class LexerContext(object):
    """
    A helper object that holds lexer position data.
    """

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        self.end = end or len(text)  # end=0 not supported ;-)
        self.stack = stack or ['root']

    def __repr__(self):
        return 'LexerContext(%r, %r, %r)' % (
            self.text, self.pos, self.stack)


class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (index, tokentype, value) tuples.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if type(action) is _TokenType:
                        yield ctx.pos, action, m.group()
                        ctx.pos = m.end()
                    else:
                        for item in action(self, m, ctx):
                            yield item
                        if not new_state:
                            # altered the state stack?
                            statetokens = tokendefs[ctx.stack[-1]]
                        # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            ctx.stack.extend(new_state)
                        elif isinstance(new_state, int):
                            # pop
                            del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.pos += 1
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, u'\n'
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
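

# Sketch (assumed example, not part of the original module): with
# ExtendedRegexLexer, rule callbacks receive the LexerContext and must advance
# ``ctx.pos`` themselves (see the CAUTION note above).  The callback below is
# a made-up illustration that consumes the match as Text and pushes a
# hypothetical 'body' state onto the context stack.
def _toy_ctx_callback(lexer, match, ctx):
    yield match.start(), Text, match.group()
    ctx.pos = match.end()          # the callback is responsible for this
    ctx.stack.append('body')       # state changes go through ctx.stack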


def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = insertions.next()
    except StopIteration:
        # no insertions
        for item in tokens:
            yield item
        return

    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        oldi = 0
        while insleft and i + len(v) >= index:
            tmpval = v[oldi:index - i]
            yield realpos, t, tmpval
            realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = insertions.next()
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        yield realpos, t, v[oldi:]
        realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = insertions.next()
        except StopIteration:
            insleft = False
            break  # not strictly necessary
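

# Sketch (assumed example, not part of the original module): merging a prompt
# token into a lexed stream, roughly what console/session lexers do with this
# helper.  Token values and positions are made up.
def _do_insertions_example():
    from pygments.token import Generic
    base = [(0, Text, u'line one\nline two\n')]
    ins = [(9, [(9, Generic.Prompt, u'>>> ')])]
    # yields (0, Text, u'line one\n'), (9, Generic.Prompt, u'>>> '),
    #        (13, Text, u'line two\n')
    return list(do_insertions(ins, iter(base)))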