# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
#   endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------

__version__ = "3.5"
__tabversion__ = "3.5"       # Version of table file used

import re, sys, types, copy, os, inspect

# This tuple contains known string types
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)

# Extract the code attribute of a function.  Different implementations
# are for Python 2/3 compatibility.

if sys.version_info[0] < 3:
    def func_code(f):
        return f.func_code
else:
    def func_code(f):
        return f.__code__

# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')

# Exception thrown when an invalid token is encountered and no default error
# handler is defined.

class LexError(Exception):
    def __init__(self,message,s):
        self.args = (message,)
        self.text = s

# Token class.  This class is used to represent the tokens produced.
class LexToken(object):
    def __str__(self):
        return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
    def __repr__(self):
        return str(self)

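# A token carries four public attributes: type, value, lineno, and lexpos.
# As an illustrative sketch (the input and token names below are made up),
# a lexer tokenizing "x = 3" might produce:
#
#     LexToken(ID,'x',1,0)
#     LexToken(EQUALS,'=',1,2)
#     LexToken(NUMBER,'3',1,4)
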
# This object is a stand-in for a logging object created by the
# logging module.

class PlyLogger(object):
    def __init__(self,f):
        self.f = f
    def critical(self,msg,*args,**kwargs):
        self.f.write((msg % args) + "\n")

    def warning(self,msg,*args,**kwargs):
        self.f.write("WARNING: " + (msg % args) + "\n")

    def error(self,msg,*args,**kwargs):
        self.f.write("ERROR: " + (msg % args) + "\n")

    info = critical
    debug = critical

# Null logger is used when no output is generated.  Does nothing.
class NullLogger(object):
    def __getattribute__(self,name):
        return self
    def __call__(self,*args,**kwargs):
        return self

# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime.  There are only
# a few public methods and attributes:
#
#    input()  - Store a new string in the lexer
#    token()  - Get the next token
#    clone()  - Clone the lexer
#
#    lineno   - Current line number
#    lexpos   - Current position in the input string
# -----------------------------------------------------------------------------

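# A minimal usage sketch (illustrative only; it assumes lex() has been called
# in a module that defines a 'tokens' list and the matching t_* rules):
#
#     lexer = lex()               # build the lexer from the calling module
#     lexer.input("3 + 4 * 10")   # store an input string
#     for tok in lexer:           # the iterator protocol calls token()
#         print(tok.type, tok.value, tok.lineno, tok.lexpos)
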
class Lexer:
    def __init__(self):
        self.lexre = None             # Master regular expression.  This is a list of
                                      # tuples (re,findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexes
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = "INITIAL"     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ""           # Ignored characters
        self.lexliterals = ""         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = 0          # Optimized mode

    def clone(self,object=None):
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = { }
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object,f[0].__name__),f[1]))
                    newre.append((cre,newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = { }
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object,ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self,tabfile,outputdir=""):
        if isinstance(tabfile,types.ModuleType):
            return
        basetabfilename = tabfile.split(".")[-1]
        filename = os.path.join(outputdir,basetabfilename) + ".py"
        tf = open(filename,"w")
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
        tf.write("_tabversion = %s\n" % repr(__tabversion__))
        tf.write("_lextokens = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))

        tabre = { }
        # Collect all functions in the initial state
        initial = self.lexstatere["INITIAL"]
        initialfuncs = []
        for part in initial:
            for f in part[1]:
                if f and f[0]:
                    initialfuncs.append(f)

        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
            tabre[key] = titem

        tf.write("_lexstatere = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))

        taberr = { }
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
        tf.close()

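    # As an illustrative sketch (the attribute names are the ones written
    # above; the values are made up), a generated lextab.py looks roughly like:
    #
    #     _tabversion = '3.5'
    #     _lextokens = {'NUMBER': 1, 'PLUS': 1}
    #     _lexreflags = 0
    #     _lexliterals = ''
    #     _lexstateinfo = {'INITIAL': 'inclusive'}
    #     _lexstatere = {'INITIAL': [('(?P<t_NUMBER>\\d+)|(?P<t_PLUS>\\+)',
    #                                 [None, ('t_NUMBER', 'NUMBER'), (None, 'PLUS')])]}
    #     _lexstateignore = {'INITIAL': ' \t'}
    #     _lexstateerrorf = {'INITIAL': 't_error'}
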
    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self,tabfile,fdict):
        if isinstance(tabfile,types.ModuleType):
            lextab = tabfile
        else:
            if sys.version_info[0] < 3:
                exec("import %s as lextab" % tabfile)
            else:
                env = { }
                exec("import %s as lextab" % tabfile, env, env)
                lextab = env['lextab']

        if getattr(lextab,"_tabversion","0.0") != __tabversion__:
            raise ImportError("Inconsistent PLY version")

        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = { }
        self.lexstateretext = { }
        for key,lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for i in range(len(lre)):
                titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
                txtitem.append(lre[i][0])
            self.lexstatere[key] = titem
            self.lexstateretext[key] = txtitem
        self.lexstateerrorf = { }
        for key,ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[key] = fdict[ef]
        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self,s):
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c,StringTypes):
            raise ValueError("Expected a string")
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self,state):
        if not state in self.lexstatere:
            raise ValueError("Undefined state")
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state,"")
        self.lexerrorf = self.lexstateerrorf.get(state,None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self,state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

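    # A usage sketch for the state methods (illustrative only; it assumes a
    # 'ccode' state was declared in the module's 'states' specification):
    #
    #     lexer.push_state('ccode')      # enter the state, saving the old one
    #     lexer.current_state()          # -> 'ccode'
    #     lexer.pop_state()              # restore the previous state
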
    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self,n):
        self.lexpos += n

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre,lexindexfunc in self.lexre:
                m = lexre.match(lexdata,lexpos)
                if not m: continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func,tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it

                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token.  If it returns nothing,
                # we just move on to the next token.
                if not newtok:
                    lexpos = self.lexpos          # This is here in case user has updated lexpos.
                    lexignore = self.lexignore    # This is here in case there was a state change
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if not newtok.type in self.lextokens:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func_code(func).co_filename, func_code(func).co_firstlineno,
                            func.__name__, newtok.type),lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match.  Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = "error"
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all.  This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok: continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError("No input string given with input()")
        return None


    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next

# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------

def _get_regex(func):
    return getattr(func,"regex",func.__doc__)

# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack.  This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------

def get_caller_module_dict(levels):
    try:
        raise RuntimeError
    except RuntimeError:
        e,b,t = sys.exc_info()
        f = t.tb_frame
        while levels > 0:
            f = f.f_back
            levels -= 1
        ldict = f.f_globals.copy()
        if f.f_globals != f.f_locals:
            ldict.update(f.f_locals)

        return ldict

# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------

def _funcs_to_names(funclist,namelist):
    result = []
    for f,name in zip(funclist,namelist):
        if f and f[0]:
            result.append((name, f[1]))
        else:
            result.append(f)
    return result

# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------

def _names_to_funcs(namelist,fdict):
    result = []
    for n in namelist:
        if n and n[0]:
            result.append((fdict[n[0]],n[1]))
        else:
            result.append(n)
    return result

# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression.  Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------

def _form_master_re(relist,reflags,ldict,toknames):
    if not relist: return []
    regex = "|".join(relist)
    try:
        lexre = re.compile(regex,re.VERBOSE | reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
        lexindexnames = lexindexfunc[:]

        for f,i in lexre.groupindex.items():
            handle = ldict.get(f,None)
            if type(handle) in (types.FunctionType, types.MethodType):
                lexindexfunc[i] = (handle,toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                if f.find("ignore_") > 0:
                    lexindexfunc[i] = (None,None)
                else:
                    lexindexfunc[i] = (None, toknames[f])

        return [(lexre,lexindexfunc)],[regex],[lexindexnames]
    except Exception:
        m = int(len(relist)/2)
        if m == 0: m = 1
        llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
        rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
        return llist+rlist, lre+rre, lnames+rnames

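# As an illustrative sketch (the rule names are made up), the master regex
# built from rules t_NUMBER and t_PLUS is a single alternation of named groups:
#
#     (?P<t_NUMBER>\d+)|(?P<t_PLUS>\+)
#
# When it matches, m.lastindex identifies the winning group, which the
# lexindexfunc table maps back to the rule function (or None) and its token name.
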
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_[states_]tokenname" and a dictionary
# whose keys are state names, this function returns a tuple (states,tokenname)
# where states is a tuple of state names and tokenname is the name of the token.
# For example, calling this with s = "t_foo_bar_SPAM" might return
# (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------

def _statetoken(s,names):
    nonstate = 1
    parts = s.split("_")
    for i in range(1,len(parts)):
        if not parts[i] in names and parts[i] != 'ANY': break
    if i > 1:
        states = tuple(parts[1:i])
    else:
        states = ('INITIAL',)

    if 'ANY' in states:
        states = tuple(names)

    tokenname = "_".join(parts[i:])
    return (states,tokenname)


# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    def __init__(self,ldict,log=None,reflags=0):
        self.ldict = ldict
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = { 'INITIAL' : 'inclusive'}
        self.modules = {}
        self.error = 0

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return

        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return

        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'",n)
                self.error = 1
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get("literals","")
        if not self.literals:
            self.literals = ""

    # Validate literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c,StringTypes) or len(c) > 1:
                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
                    self.error = 1

        except TypeError:
            self.log.error("Invalid literals specification. literals must be a sequence of characters")
            self.error = 1

    def get_states(self):
        self.states = self.ldict.get("states",None)
        # Build statemap
        if self.states:
            if not isinstance(self.states,(tuple,list)):
                self.log.error("states must be defined as a tuple or list")
                self.error = 1
            else:
                for s in self.states:
                    if not isinstance(s,tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
                        self.error = 1
                        continue
                    name, statetype = s
                    if not isinstance(name,StringTypes):
                        self.log.error("State name %s must be a string", repr(name))
                        self.error = 1
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
                        self.error = 1
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined",name)
                        self.error = 1
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)

    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_' ]

        # Now build up a list of functions and a list of strings

        self.toknames = { }       # Mapping of symbols to token names
        self.funcsym = { }        # Symbols defined as functions
        self.strsym = { }         # Symbols defined as strings
        self.ignore = { }         # Ignore strings by state
        self.errorf = { }         # Error functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error("No rules of the form t_rulename are defined")
            self.error = 1
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f,self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t,"__call__"):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'ignore':
                    line = func_code(t).co_firstlineno
                    file = func_code(t).co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
                    self.error = 1
                else:
                    for s in states:
                        self.funcsym[s].append((f,t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if "\\" in t:
                        self.log.warning("%s contains a literal backslash '\\'",f)

                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = 1
                else:
                    for s in states:
                        self.strsym[s].append((f,t))
            else:
                self.log.error("%s not defined as a function or string", f)
                self.error = 1

        # Sort the functions by line number
        for f in self.funcsym.values():
            if sys.version_info[0] < 3:
                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
            else:
                # Python 3.0
                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)

        # Sort the strings by regular expression length
        for s in self.strsym.values():
            if sys.version_info[0] < 3:
                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
            else:
                # Python 3.0
                s.sort(key=lambda x: len(x[1]),reverse=True)

| 732 | |
| 733 | # Validate all of the t_rules collected |
| 734 | def validate_rules(self): |
| 735 | for state in self.stateinfo: |
| 736 | # Validate all rules defined by functions |
| 737 | |
| 738 | |
| 739 | |
| 740 | for fname, f in self.funcsym[state]: |
| 741 | line = func_code(f).co_firstlineno |
| 742 | file = func_code(f).co_filename |
| 743 | module = inspect.getmodule(f) |
| 744 | self.modules[module] = 1 |
| 745 | |
| 746 | tokname = self.toknames[fname] |
| 747 | if isinstance(f, types.MethodType): |
| 748 | reqargs = 2 |
| 749 | else: |
| 750 | reqargs = 1 |
| 751 | nargs = func_code(f).co_argcount |
| 752 | if nargs > reqargs: |
| 753 | self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__) |
| 754 | self.error = 1 |
| 755 | continue |
| 756 | |
| 757 | if nargs < reqargs: |
| 758 | self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__) |
| 759 | self.error = 1 |
| 760 | continue |
| 761 | |
| 762 | if not _get_regex(f): |
| 763 | self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__) |
| 764 | self.error = 1 |
| 765 | continue |
| 766 | |
| 767 | try: |
| 768 | c = re.compile("(?P<%s>%s)" % (fname, _get_regex(f)), re.VERBOSE | self.reflags) |
| 769 | if c.match(""): |
| 770 | self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__) |
| 771 | self.error = 1 |
| 772 | except re.error: |
| 773 | _etype, e, _etrace = sys.exc_info() |
| 774 | self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e) |
| 775 | if '#' in _get_regex(f): |
| 776 | self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__) |
| 777 | self.error = 1 |
| 778 | |
| 779 | # Validate all rules defined by strings |
| 780 | for name,r in self.strsym[state]: |
| 781 | tokname = self.toknames[name] |
| 782 | if tokname == 'error': |
| 783 | self.log.error("Rule '%s' must be defined as a function", name) |
| 784 | self.error = 1 |
| 785 | continue |
| 786 | |
| 787 | if not tokname in self.tokens and tokname.find("ignore_") < 0: |
| 788 | self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname) |
| 789 | self.error = 1 |
| 790 | continue |
| 791 | |
| 792 | try: |
| 793 | c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags) |
| 794 | if (c.match("")): |
| 795 | self.log.error("Regular expression for rule '%s' matches empty string",name) |
| 796 | self.error = 1 |
| 797 | except re.error: |
| 798 | _etype, e, _etrace = sys.exc_info() |
| 799 | self.log.error("Invalid regular expression for rule '%s'. %s",name,e) |
| 800 | if '#' in r: |
| 801 | self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name) |
| 802 | self.error = 1 |
| 803 | |
| 804 | if not self.funcsym[state] and not self.strsym[state]: |
| 805 | self.log.error("No rules defined for state '%s'",state) |
| 806 | self.error = 1 |
| 807 | |
| 808 | # Validate the error function |
| 809 | efunc = self.errorf.get(state,None) |
| 810 | if efunc: |
| 811 | f = efunc |
| 812 | line = func_code(f).co_firstlineno |
| 813 | file = func_code(f).co_filename |
| 814 | module = inspect.getmodule(f) |
| 815 | self.modules[module] = 1 |
| 816 | |
| 817 | if isinstance(f, types.MethodType): |
| 818 | reqargs = 2 |
| 819 | else: |
| 820 | reqargs = 1 |
| 821 | nargs = func_code(f).co_argcount |
| 822 | if nargs > reqargs: |
| 823 | self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__) |
| 824 | self.error = 1 |
| 825 | |
| 826 | if nargs < reqargs: |
| 827 | self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__) |
| 828 | self.error = 1 |
| 829 | |
| 830 | for module in self.modules: |
| 831 | self.validate_module(module) |
| 832 | |

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the lexer input file.  This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------

    def validate_module(self, module):
        lines, linen = inspect.getsourcelines(module)

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = { }
        linen += 1
        for l in lines:
            m = fre.match(l)
            if not m:
                m = sre.match(l)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
                    self.error = 1
            linen += 1


# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
    global lexer
    ldict = None
    stateinfo = { 'INITIAL' : 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token,input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the lexer
    if object: module = object

    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        ldict = dict(_items)
    else:
        ldict = get_caller_module_dict(2)

    # Collect lexer information from the dictionary
    linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    if optimize and lextab:
        try:
            lexobj.readtab(lextab,ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj

        except ImportError:
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info("lex: tokens   = %r", linfo.tokens)
        debuglog.info("lex: literals = %r", linfo.literals)
        debuglog.info("lex: states   = %r", linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = { }
    for n in linfo.tokens:
        lexobj.lextokens[n] = 1

    # Get literals specification
    if isinstance(linfo.literals,(list,tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = { }
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            line = func_code(f).co_firstlineno
            file = func_code(f).co_filename
            regex_list.append("(?P<%s>%s)" % (fname,_get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,_get_regex(f), state)

        # Now add all of the simple rules
        for name,r in linfo.strsym[state]:
            regex_list.append("(?P<%s>%s)" % (name,r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)

        regexs[state] = regex_list

    # Build the master regular expressions

    if debug:
        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i in range(len(re_text)):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state,stype in stateinfo.items():
        if state != "INITIAL" and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
    if not lexobj.lexerrorf:
        errorlog.warning("No t_error rule is defined")

    # Check state information for ignore and error rules
    for s,stype in stateinfo.items():
        if stype == 'exclusive':
            if not s in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if not s in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            if not s in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
            if not s in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get("INITIAL","")

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab,outputdir)

    return lexobj

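# -----------------------------------------------------------------------------
# A minimal end-to-end usage sketch (illustrative only; the token names and
# rules below are made up, not part of this module):
#
#     tokens = ('NUMBER','PLUS')
#
#     t_PLUS   = r'\+'
#     t_ignore = ' \t'
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):
#         print("Illegal character %r" % t.value[0])
#         t.lexer.skip(1)
#
#     lexer = lex()             # build from the enclosing module's symbols
#     lexer.input("1 + 2")
#     for tok in lexer:
#         print(tok)
# -----------------------------------------------------------------------------
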
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------

def runmain(lexer=None,data=None):
    if not data:
        try:
            filename = sys.argv[1]
            f = open(filename)
            data = f.read()
            f.close()
        except IndexError:
            sys.stdout.write("Reading from standard input (type EOF to end):\n")
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token

    while 1:
        tok = _token()
        if not tok: break
        sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))

# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator attaches a regular expression to a token function for cases
# where its docstring cannot conveniently hold the pattern.
# -----------------------------------------------------------------------------

def TOKEN(r):
    def set_regex(f):
        if hasattr(r,"__call__"):
            f.regex = _get_regex(r)
        else:
            f.regex = r
        return f
    return set_regex

# Alternative spelling of the TOKEN decorator
Token = TOKEN
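
# A usage sketch for the decorator (illustrative only; the pattern variable
# and rule name are made up):
#
#     digit = r'[0-9]+'
#
#     @TOKEN(digit)
#     def t_NUMBER(t):
#         t.value = int(t.value)
#         return t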