CORD-889: xproto code generator
Change-Id: If6fd37c412364c8bacbed4a529a8638e8ffe6da4
diff --git a/xos/genx/.gitignore b/xos/genx/.gitignore
new file mode 100644
index 0000000..db4561e
--- /dev/null
+++ b/xos/genx/.gitignore
@@ -0,0 +1,54 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.cache
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
diff --git a/xos/genx/generator/LICENSE b/xos/genx/generator/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/xos/genx/generator/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/xos/genx/generator/README.md b/xos/genx/generator/README.md
new file mode 100644
index 0000000..a26869b
--- /dev/null
+++ b/xos/genx/generator/README.md
@@ -0,0 +1,60 @@
+# PLY Protobuf
+
+[Protocol Buffers] [0] lexer & parser written in Python for [PLY] [1].
+
+With this library you can create and process parse trees of Protocol Buffers files in Python.
+For example usage, see `demo.py`.
+
+My use case: automated refactoring of `.proto` files.
+
+* The main benefit of this project is its support for easy refactoring of Protocol Buffers files. From the parse
+result one can determine the position of a particular lexical unit in the source text and replace it.
+* The visitor pattern is used for processing a parse tree.
+
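+As a rough sketch of the parse-and-visit flow (the import paths, `ProtobufAnalyzer`,
+`parse_string`, and the visitor hook below are illustrative assumptions rather than a
+documented API - see `demo.py` for the actual entry points):
+
+```python
+from plyproto.parser import ProtobufAnalyzer   # assumed entry point
+from plyproto.model import Visitor             # assumed visitor base class
+
+class FieldPrinter(Visitor):
+    def visit_FieldDefinition(self, node):     # assumed per-node hook
+        print(node.name)                       # nodes also carry source positions
+
+tree = ProtobufAnalyzer().parse_string(open("example.proto").read())
+tree.accept(FieldPrinter())
+```
+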
+## Dependency
+* This project has only one dependency, [PLY] [1].
+* The `ply/` subdirectory is present in this repo for demonstration purposes and completeness only. If you intend to use this project, prefer the original
+  [PLY] [1] repository, which is kept up to date.
+
+## Contributions
+* There may be bugs, although it works for me on quite complicated Protocol Buffers files.
+If you find a bug, please feel free to submit a pull request or file an issue.
+
+## Bugs
+* `Oneof` is not implemented yet.
+
+## Demo 1
+* `demo.py`
+* The first demo file shows simple parsing of an example protocol buffer message. It produces a parse tree of this simple
+example.
+
+## Demo 2 - Protocol Buffers file refactoring
+* `prefixize.py`
+* The main use case is rename refactoring of entities in Protocol Buffers files.
+* The [Protocol Buffers Objective-C compiler] [5] does not prefix entities, and this may conflict with some objects in the project.
+This script renames all entities in a protobuf file by prefixing them with a specified string; for example, with the prefix `PB`,
+`message Person` becomes `message PBPerson`. Identifiers with conflicting names (e.g., `hash`, `description`) can also be renamed
+if specified by parameter. The result of this refactoring can then be used with the Objective-C protoc without conflicts.
+
+## Acknowledgement
+This work was inspired by:
+* [plyj] [2], a Java lexer & parser for PLY.
+* [pyparsing] [3], a Protocol Buffers parsing example.
+* [PLYTalk] [4], a nice PLY tutorial that I used.
+
+## Disclaimer
+* This project was created because I needed it for myself and I couldn't find a Protocol Buffers parser for PLY.
+It is my first PLY / parser generator project, and the first version was created in a couple of hours, so it is not polished code.
+* A Protocol Buffers parsing variant already exists as a [pyparsing] [3] example, but my previous scripts used
+PLY for parsing Java, so I chose to stay with PLY and create a Protocol Buffers variant for it. I like the output I can get from PLY
+(e.g., line and character position in the input text), which lets me process input files automatically - e.g., for refactoring.
+* The API for this project is not guaranteed to remain stable; in particular, the model generated from `.proto` files
+may be suboptimal initially and change at some point. This project is intended to serve as an
+inspiration or a starting point. You will probably adapt it for your own needs.
+
+ [0]: https://developers.google.com/protocol-buffers/
+ [1]: https://github.com/dabeaz/ply
+ [2]: https://github.com/musiKk/plyj
+ [3]: http://pyparsing.wikispaces.com/
+ [4]: http://www.dabeaz.com/ply/PLYTalk.pdf
+ [5]: https://github.com/alexeyxo/protobuf-objc
diff --git a/xos/genx/generator/lextab.py b/xos/genx/generator/lextab.py
new file mode 100644
index 0000000..d99a9cc
--- /dev/null
+++ b/xos/genx/generator/lextab.py
@@ -0,0 +1,9 @@
+# lextab.py. This file automatically created by PLY (version 3.5). Don't edit!
+_tabversion = '3.5'
+_lextokens = {'LPAR': 1, 'OPTION': 1, 'EXTEND': 1, 'FIXED32': 1, 'RPAR': 1, 'REPEATED': 1, 'TRUE': 1, 'DOT': 1, 'STRING': 1, 'INT32': 1, 'SERVICE': 1, 'SEMI': 1, 'OPTIONAL': 1, 'REQUIRED': 1, 'TO': 1, 'RPC': 1, 'NUM': 1, 'EXTENSIONS': 1, 'FIXED64': 1, 'IMPORT': 1, 'UINT32': 1, 'SINT32': 1, 'BLOCK_COMMENT': 1, 'ENUM': 1, 'LINE_COMMENT': 1, 'RBRACE': 1, 'PACKAGE': 1, 'RBRACK': 1, 'BYTES': 1, 'RETURNS': 1, 'INT64': 1, 'MAX': 1, 'EQ': 1, 'STRING_LITERAL': 1, 'UINT64': 1, 'LBRACE': 1, 'FALSE': 1, 'NAME': 1, 'SINT64': 1, 'STARTTOKEN': 1, 'FLOAT': 1, 'LBRACK': 1, 'SFIXED64': 1, 'SFIXED32': 1, 'BOOL': 1, 'DOUBLE': 1, 'EXTENDS': 1, 'MESSAGE': 1}
+_lexreflags = 0
+_lexliterals = '()+-*/=?:,.^|&~!=[]{};<>@%'
+_lexstateinfo = {'INITIAL': 'inclusive'}
+_lexstatere = {'INITIAL': [('(?P<t_BLOCK_COMMENT>/\\*(.|\\n)*?\\*/)|(?P<t_NAME>[A-Za-z_$][A-Za-z0-9_$]*)|(?P<t_newline>\\n+)|(?P<t_newline2>(\\r\\n)+)|(?P<t_STRING_LITERAL>\\"([^\\\\\\n]|(\\\\.))*?\\")|(?P<t_NUM>[+-]?\\d+)|(?P<t_ignore_LINE_COMMENT>//.*)|(?P<t_RPAR>\\))|(?P<t_DOT>\\.)|(?P<t_LPAR>\\()|(?P<t_LBRACK>\\[)|(?P<t_STARTTOKEN>\\+)|(?P<t_RBRACK>\\])|(?P<t_RBRACE>})|(?P<t_EQ>=)|(?P<t_LBRACE>{)|(?P<t_SEMI>;)', [None, ('t_BLOCK_COMMENT', 'BLOCK_COMMENT'), None, ('t_NAME', 'NAME'), ('t_newline', 'newline'), ('t_newline2', 'newline2'), None, (None, 'STRING_LITERAL'), None, None, (None, 'NUM'), (None, None), (None, 'RPAR'), (None, 'DOT'), (None, 'LPAR'), (None, 'LBRACK'), (None, 'STARTTOKEN'), (None, 'RBRACK'), (None, 'RBRACE'), (None, 'EQ'), (None, 'LBRACE'), (None, 'SEMI')])]}
+_lexstateignore = {'INITIAL': ' \t\x0c'}
+_lexstateerrorf = {'INITIAL': 't_error'}
diff --git a/xos/genx/generator/lib.py b/xos/genx/generator/lib.py
new file mode 100644
index 0000000..ebd685b
--- /dev/null
+++ b/xos/genx/generator/lib.py
@@ -0,0 +1,83 @@
+def django_content_type_string(xptags):
+    # Assumes 'content_type' is present; the caller (django_string_type)
+    # checks for the key before calling. Returns None for unknown values.
+ content_type = xptags['content_type']
+ if (content_type=='url'):
+ return 'URLField'
+ elif (content_type=='ip'):
+ return 'GenericIPAddressField'
+
+def django_string_type(xptags):
+ if ('content_type' in xptags):
+ return django_content_type_string(xptags)
+ elif ('stripped' in xptags):
+ return 'StrippedCharField'
+ elif ('indexed' not in xptags):
+ return 'TextField'
+ else:
+ return 'CharField'
+
+
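+# Map a scalar xproto type, together with its tags, to the name of a Django
+# model field class.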
+def xproto_django_type(xptype, xptags):
+ if (xptype=='string'):
+ return django_string_type(xptags)
+ elif (xptype=='bool'):
+ return 'BooleanField'
+ elif (xptype=='int32'):
+ return 'IntegerField'
+ elif (xptype=='int64'):
+ return 'BigIntegerField'
+
+
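+# Map an xproto link field's link_type onto a Django relation field class.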
+def xproto_django_link_type(f):
+ if (f['link_type']=='manytoone'):
+ return 'ForeignKey'
+ elif (f['link_type']=='manytomany'):
+ if (f['dst_port']):
+ return 'ManyToManyRelation'
+ else:
+ return 'GenericRelation'
+
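+# Render a dict of field options as a 'key = value, ...' argument string
+# for the generated Django model.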
+def format_options_string(d):
+ if (not d):
+ return ''
+ else:
+ lst = []
+ for k,v in d.items():
+ if (type(v)==str and v.startswith('"')):
+ tup = eval(v[1:-1])
+ if (type(tup)==tuple):
+ lst.append('%s = %r'%(k,tup))
+ else:
+ lst.append('%s = %s'%(k,v))
+ elif (type(v)==bool):
+ lst.append('%s = %r'%(k,bool(v)))
+ else:
+ try:
+ lst.append('%s = %r'%(k,int(v)))
+ except ValueError:
+ lst.append('%s = %s'%(k,v))
+
+ return ', '.join(lst)
+
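+# Keep only the xproto field options that Django understands, translating
+# the xproto 'modifier' option ('optional'/'required') into Django's 'null'.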
+def map_xproto_to_django(f):
+ allowed_keys=['help_text','default','max_length','modifier','blank']
+
+ m = {'modifier':{'optional':True, 'required':False, '_target':'null'}}
+ out = {}
+
+ for k,v in f['options'].items():
+ if (k in allowed_keys):
+            try:
+                # remap e.g. modifier='optional' to null=True
+                kv2 = m[k]
+                out[kv2['_target']] = kv2[v]
+            except KeyError:
+                # no remapping defined for this key; pass it through as-is
+                out[k] = v
+ return out
+
+
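+# Convenience wrapper: filter a field's options and render them as a Django
+# argument string.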
+def xproto_django_options_str(field):
+ output_dict = map_xproto_to_django(field)
+
+ return format_options_string(output_dict)
diff --git a/xos/genx/generator/parsetab.py b/xos/genx/generator/parsetab.py
new file mode 100644
index 0000000..fc82579
--- /dev/null
+++ b/xos/genx/generator/parsetab.py
@@ -0,0 +1,125 @@
+
+# parsetab.py
+# This file is automatically generated. Do not edit.
+_tabversion = '3.2'
+
+_lr_method = 'LALR'
+
+_lr_signature = '\xbb\xef\x0b\x04g\x1c>\xe6\x08G\xd4fj\xe8\x12\xb6'
+
+_lr_action_items = {'LPAR':([33,107,137,],[40,116,146,]),'DOUBLE':([50,53,54,59,],[-4,-2,-3,100,]),'OPTION':([1,3,6,7,10,13,14,17,18,19,21,22,23,26,27,37,39,68,70,72,75,79,81,106,108,110,118,125,],[-1,-90,11,-91,11,-87,-85,-94,-84,-88,-89,-86,-92,-76,-93,-77,11,11,-49,-50,-48,-83,-58,-75,-51,-54,-70,-47,]),'EXTEND':([1,3,6,7,10,13,14,17,18,19,21,22,23,26,27,36,37,49,51,52,56,57,58,60,61,62,78,79,80,81,106,110,112,118,126,138,161,],[-1,-90,12,-91,12,-87,-85,-94,-84,-88,-89,-86,-92,-76,-93,12,-77,12,-60,-66,-62,-63,-59,-61,-65,-64,12,-83,-67,-58,-75,-54,12,-70,-57,-46,-45,]),'FIXED32':([50,53,54,59,],[-4,-2,-3,83,]),'RPAR':([77,124,151,],[111,130,160,]),'REPEATED':([36,49,51,52,56,57,58,60,61,62,78,80,81,110,112,118,126,138,161,],[50,50,-60,-66,-62,-63,-59,-61,-65,-64,50,-67,-58,-54,50,-70,-57,-46,-45,]),'COLON':([129,],[134,]),'TRUE':([35,147,],[44,154,]),'DOT':([8,9,34,84,],[25,-36,-37,25,]),'ONETOONE':([50,53,54,59,],[-4,-2,-3,93,]),'INT32':([50,53,54,59,],[-4,-2,-3,94,]),'SEMI':([8,9,30,34,43,44,45,46,47,48,117,119,120,121,127,128,131,133,149,150,159,],[26,-36,37,-37,-80,-79,-81,-78,79,-82,125,-55,126,-56,-1,-23,138,-29,-30,-1,161,]),'OPTIONAL':([36,49,51,52,56,57,58,60,61,62,78,80,81,110,112,118,126,138,161,],[54,54,-60,-66,-62,-63,-59,-61,-65,-64,54,-67,-58,-54,54,-70,-57,-46,-45,]),'REQUIRED':([36,49,51,52,56,57,58,60,61,62,78,80,81,110,112,118,126,138,161,],[53,53,-60,-66,-62,-63,-59,-61,-65,-64,53,-67,-58,-54,53,-70,-57,-46,-45,]),'MANYTOMANY':([50,53,54,59,],[-4,-2,-3,87,]),'TO':([82,],[113,]),'RPC':([38,63,65,66,105,160,],[64,64,-73,-72,-74,-71,]),'NUM':([35,55,109,113,122,145,147,],[46,82,117,121,128,128,156,]),'EXTENSIONS':([36,49,51,52,56,57,58,60,61,62,78,80,81,110,112,118,126,138,161,],[55,55,-60,-66,-62,-63,-59,-61,-65,-64,55,-67,-58,-54,55,-70,-57,-46,-45,]),'FIXED64':([50,53,54,59,],[-4,-2,-3,89,]),'IMPORT':([1,3,6,7,10,13,14,17,18,19,21,22,23,26,27,37,79,81,106,110,118,],[-1,-90,15,-91,15,-87,-85,-94,-84,-88,-89,-86,-92,-76,-93,-77,-83,-58,-75,-54,-70,]),'STRING':([50,53,54,59,],[-4,-2,-3,85,]),'_SERVICE':([1,3,6,7,10,13,14,17,18,19,21,22,23,26,27,37,79,81,106,110,118,],[-1,-90,16,-91,16,-87,-85,-94,-84,-88,-89,-86,-92,-76,-93,-77,-83,-58,-75,-54,-70,]),'SFIXED64':([50,53,54,59,],[-4,-2,-3,102,]),'$end':([1,2,3,5,6,7,10,13,14,17,18,19,21,22,23,26,27,37,79,81,106,110,118,],[-1,0,-90,-96,-1,-91,-95,-87,-85,-94,-84,-88,-89,-86,-92,-76,-93,-77,-83,-58,-75,-54,-70,]),'RBRACK':([132,140,141,142,143,152,153,154,155,156,157,158,],[-1,-32,149,-34,-33,-26,-28,-25,-31,-24,-27,-35,]),'RBRACE':([36,38,39,49,51,52,56,57,58,60,61,62,63,65,66,68,70,72,73,75,76,78,79,80,81,105,108,110,112,118,125,126,138,160,161,],[-1,-1,-1,81,-60,-66,-62,-63,-59,-61,-65,-64,106,-73,-72,-53,-49,-50,110,-48,-52,-1,-83,-67,-58,-74,-51,-54,118,-70,-47,-57,-46,-71,-45,]),'PACKAGE':([1,],[4,]),'ENUM':([1,3,6,7,10,13,14,17,18,19,21,22,23,26,27,36,37,49,51,52,56,57,58,60,61,62,78,79,80,81,106,110,112,118,126,138,161,],[-1,-90,20,-91,20,-87,-85,-94,-84,-88,-89,-86,-92,-76,-93,20,-77,20,-60,-66,-62,-63,-59,-61,-65,-64,20,-83,-67,-58,-75,-54,20,-70,-57,-46,-45,]),'BYTES':([50,53,54,59,],[-4,-2,-3,95,]),'COMMA':([140,142,152,153,154,155,156,157,158,],[148,-34,-26,-28,-25,-31,-24,-27,-35,]),'RETURNS':([130,],[137,]),'INT64':([50,53,54,59,],[-4,-2,-3,96,]),'ARROW':([67,71,74,115,],[-38,-40,-39,123,]),'MANYTOONE':([50,53,54,59,],[-4,-2,-3,97,]),'EQ':([28,67,69,71,74,114,129,135,136,139,144,],[35,-38,109,-40,-39,122,-1,-44,145,147,-43,]),'STRING_LITERAL':([15,35,147,],[30,45,157,]),'UINT64':([50,53,54,59,],[-4,-2,-3,98,
]),'LBRACE':([29,31,32,33,41,42,111,],[36,38,39,-1,78,-69,-68,]),'FALSE':([35,147,],[43,152,]),'NAME':([4,9,11,12,16,20,24,25,34,35,39,40,50,53,54,59,64,68,70,72,75,79,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,108,116,123,125,132,134,146,147,148,],[9,-36,28,29,31,32,33,34,-37,48,67,77,-4,-2,-3,9,107,67,-49,-50,-48,-83,-13,-42,-18,67,-22,-41,-14,-9,67,-11,-20,-7,-19,-8,-21,-10,-12,-5,-6,-16,-15,-17,-51,124,129,-47,139,67,151,153,139,]),'SINT64':([50,53,54,59,],[-4,-2,-3,99,]),'STARTTOKEN':([0,],[1,]),'FLOAT':([50,53,54,59,],[-4,-2,-3,101,]),'LBRACK':([127,128,150,],[132,-23,132,]),'UINT32':([50,53,54,59,],[-4,-2,-3,90,]),'SFIXED32':([50,53,54,59,],[-4,-2,-3,103,]),'BOOL':([50,53,54,59,],[-4,-2,-3,104,]),'MAX':([9,34,39,68,70,72,75,79,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,108,113,125,134,],[-36,-37,71,71,-49,-50,-48,-83,-13,-42,-18,71,-22,-41,-14,-9,71,-11,-20,-7,-19,-8,-21,-10,-12,-5,-6,-16,-15,-17,-51,119,-47,71,]),'MESSAGE':([1,3,6,7,9,10,13,14,17,18,19,21,22,23,26,27,34,36,37,39,49,51,52,56,57,58,60,61,62,68,70,72,75,78,79,80,81,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,106,108,110,112,118,125,126,134,138,161,],[-1,-90,24,-91,-36,24,-87,-85,-94,-84,-88,-89,-86,-92,-76,-93,-37,24,-77,74,24,-60,-66,-62,-63,-59,-61,-65,-64,74,-49,-50,-48,24,-83,-67,-58,-13,-42,-18,74,-22,-41,-14,-9,74,-11,-20,-7,-19,-8,-21,-10,-12,-5,-6,-16,-15,-17,-75,-51,-54,24,-70,-47,-57,74,-46,-45,]),'SINT32':([50,53,54,59,],[-4,-2,-3,92,]),}
+
+_lr_action = { }
+for _k, _v in _lr_action_items.items():
+ for _x,_y in zip(_v[0],_v[1]):
+ if not _x in _lr_action: _lr_action[_x] = { }
+ _lr_action[_x][_k] = _y
+del _lr_action_items
+
+_lr_goto_items = {'message_body':([36,78,],[49,112,]),'base_definition':([33,],[41,]),'statements':([6,],[10,]),'field_directive_plus':([132,],[140,]),'field_directive_times':([132,],[141,]),'service_definition':([6,10,],[13,13,]),'link_definition':([36,49,78,112,],[51,51,51,51,]),'enum_body_part':([39,68,],[72,108,]),'message_body_part':([36,49,78,112,],[52,80,52,80,]),'dotname':([4,59,],[8,84,]),'field_modifier':([36,49,78,112,],[59,59,59,59,]),'field_type':([59,],[86,]),'goal':([0,],[2,]),'message_extension':([6,10,36,49,78,112,],[14,14,62,62,62,62,]),'method_definition_opt':([38,],[63,]),'enum_body':([39,],[68,]),'field_directives':([127,150,],[131,159,]),'primitive_type':([59,],[88,]),'extensions_to':([113,],[120,]),'colon_fieldname':([129,],[136,]),'empty':([1,6,33,36,38,39,78,127,129,132,150,],[7,17,42,61,66,76,61,133,135,143,133,]),'field_directive':([132,148,],[142,158,]),'message_definition':([6,10,36,49,78,112,],[18,18,56,56,56,56,]),'package_directive':([1,],[3,]),'import_directive':([6,10,],[19,19,]),'rvalue':([147,],[155,]),'field_id':([122,145,],[127,150,]),'option_directive':([6,10,39,68,],[21,21,70,70,]),'field_definition':([36,49,78,112,],[58,58,58,58,]),'method_definition':([38,63,],[65,105,]),'enum_body_opt':([39,],[73,]),'package_definition':([1,],[6,]),'field_name':([39,68,86,91,134,],[69,69,114,115,144,]),'extensions_definition':([36,49,78,112,],[57,57,57,57,]),'protofile':([1,],[5,]),'enum_definition':([6,10,36,49,78,112,],[22,22,60,60,60,60,]),'topLevel':([6,10,],[23,27,]),'link_type':([59,],[91,]),'option_rvalue':([35,],[47,]),'enum_field':([39,68,],[75,75,]),}
+
+_lr_goto = { }
+for _k, _v in _lr_goto_items.items():
+ for _x,_y in zip(_v[0],_v[1]):
+ if not _x in _lr_goto: _lr_goto[_x] = { }
+ _lr_goto[_x][_k] = _y
+del _lr_goto_items
+_lr_productions = [
+ ("S' -> goal","S'",1,None,None,None),
+ ('empty -> <empty>','empty',0,'p_empty','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',132),
+ ('field_modifier -> REQUIRED','field_modifier',1,'p_field_modifier','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',136),
+ ('field_modifier -> OPTIONAL','field_modifier',1,'p_field_modifier','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',137),
+ ('field_modifier -> REPEATED','field_modifier',1,'p_field_modifier','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',138),
+ ('primitive_type -> DOUBLE','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',142),
+ ('primitive_type -> FLOAT','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',143),
+ ('primitive_type -> INT32','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',144),
+ ('primitive_type -> INT64','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',145),
+ ('primitive_type -> UINT32','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',146),
+ ('primitive_type -> UINT64','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',147),
+ ('primitive_type -> SINT32','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',148),
+ ('primitive_type -> SINT64','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',149),
+ ('primitive_type -> FIXED32','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',150),
+ ('primitive_type -> FIXED64','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',151),
+ ('primitive_type -> SFIXED32','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',152),
+ ('primitive_type -> SFIXED64','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',153),
+ ('primitive_type -> BOOL','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',154),
+ ('primitive_type -> STRING','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',155),
+ ('primitive_type -> BYTES','primitive_type',1,'p_primitive_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',156),
+ ('link_type -> ONETOONE','link_type',1,'p_link_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',160),
+ ('link_type -> MANYTOONE','link_type',1,'p_link_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',161),
+ ('link_type -> MANYTOMANY','link_type',1,'p_link_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',162),
+ ('field_id -> NUM','field_id',1,'p_field_id','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',166),
+ ('rvalue -> NUM','rvalue',1,'p_rvalue','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',170),
+ ('rvalue -> TRUE','rvalue',1,'p_rvalue','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',171),
+ ('rvalue -> FALSE','rvalue',1,'p_rvalue','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',172),
+ ('rvalue -> STRING_LITERAL','rvalue',1,'p_rvalue3','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',176),
+ ('rvalue -> NAME','rvalue',1,'p_rvalue2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',182),
+ ('field_directives -> empty','field_directives',1,'p_field_directives2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',188),
+ ('field_directives -> LBRACK field_directive_times RBRACK','field_directives',3,'p_field_directives','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',192),
+ ('field_directive -> NAME EQ rvalue','field_directive',3,'p_field_directive','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',197),
+ ('field_directive_times -> field_directive_plus','field_directive_times',1,'p_field_directive_times','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',202),
+ ('field_directive_times -> empty','field_directive_times',1,'p_field_directive_times2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',206),
+ ('field_directive_plus -> field_directive','field_directive_plus',1,'p_field_directive_plus','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',210),
+ ('field_directive_plus -> field_directive_plus COMMA field_directive','field_directive_plus',3,'p_field_directive_plus','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',211),
+ ('dotname -> NAME','dotname',1,'p_dotname','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',218),
+ ('dotname -> dotname DOT NAME','dotname',3,'p_dotname','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',219),
+ ('field_name -> NAME','field_name',1,'p_fieldName','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',227),
+ ('field_name -> MESSAGE','field_name',1,'p_fieldName','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',228),
+ ('field_name -> MAX','field_name',1,'p_fieldName','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',229),
+ ('field_type -> primitive_type','field_type',1,'p_field_type','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',235),
+ ('field_type -> dotname','field_type',1,'p_field_type2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',240),
+ ('colon_fieldname -> COLON field_name','colon_fieldname',2,'p_colon_fieldname','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',246),
+ ('colon_fieldname -> empty','colon_fieldname',1,'p_colon_fieldname2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',251),
+ ('link_definition -> field_modifier link_type field_name ARROW NAME colon_fieldname EQ field_id field_directives SEMI','link_definition',10,'p_link_definition','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',256),
+ ('field_definition -> field_modifier field_type field_name EQ field_id field_directives SEMI','field_definition',7,'p_field_definition','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',265),
+ ('enum_field -> field_name EQ NUM SEMI','enum_field',4,'p_enum_field','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',271),
+ ('enum_body_part -> enum_field','enum_body_part',1,'p_enum_body_part','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',276),
+ ('enum_body_part -> option_directive','enum_body_part',1,'p_enum_body_part','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',277),
+ ('enum_body -> enum_body_part','enum_body',1,'p_enum_body','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',281),
+ ('enum_body -> enum_body enum_body_part','enum_body',2,'p_enum_body','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',282),
+ ('enum_body_opt -> empty','enum_body_opt',1,'p_enum_body_opt','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',289),
+ ('enum_body_opt -> enum_body','enum_body_opt',1,'p_enum_body_opt2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',293),
+ ('enum_definition -> ENUM NAME LBRACE enum_body_opt RBRACE','enum_definition',5,'p_enum_definition','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',299),
+ ('extensions_to -> MAX','extensions_to',1,'p_extensions_to','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',304),
+ ('extensions_to -> NUM','extensions_to',1,'p_extensions_to2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',309),
+ ('extensions_definition -> EXTENSIONS NUM TO extensions_to SEMI','extensions_definition',5,'p_extensions_definition','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',314),
+ ('message_extension -> EXTEND NAME LBRACE message_body RBRACE','message_extension',5,'p_message_extension','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',320),
+ ('message_body_part -> field_definition','message_body_part',1,'p_message_body_part','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',325),
+ ('message_body_part -> link_definition','message_body_part',1,'p_message_body_part','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',326),
+ ('message_body_part -> enum_definition','message_body_part',1,'p_message_body_part','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',327),
+ ('message_body_part -> message_definition','message_body_part',1,'p_message_body_part','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',328),
+ ('message_body_part -> extensions_definition','message_body_part',1,'p_message_body_part','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',329),
+ ('message_body_part -> message_extension','message_body_part',1,'p_message_body_part','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',330),
+ ('message_body -> empty','message_body',1,'p_message_body','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',335),
+ ('message_body -> message_body_part','message_body',1,'p_message_body2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',340),
+ ('message_body -> message_body message_body_part','message_body',2,'p_message_body2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',341),
+ ('base_definition -> LPAR NAME RPAR','base_definition',3,'p_base_definition','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',348),
+ ('base_definition -> empty','base_definition',1,'p_base_definition2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',352),
+ ('message_definition -> MESSAGE NAME base_definition LBRACE message_body RBRACE','message_definition',6,'p_message_definition','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',358),
+ ('method_definition -> RPC NAME LPAR NAME RPAR RETURNS LPAR NAME RPAR','method_definition',9,'p_method_definition','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',364),
+ ('method_definition_opt -> empty','method_definition_opt',1,'p_method_definition_opt','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',369),
+ ('method_definition_opt -> method_definition','method_definition_opt',1,'p_method_definition_opt2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',373),
+ ('method_definition_opt -> method_definition_opt method_definition','method_definition_opt',2,'p_method_definition_opt2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',374),
+ ('service_definition -> _SERVICE NAME LBRACE method_definition_opt RBRACE','service_definition',5,'p_service_definition','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',383),
+ ('package_directive -> PACKAGE dotname SEMI','package_directive',3,'p_package_directive','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',389),
+ ('import_directive -> IMPORT STRING_LITERAL SEMI','import_directive',3,'p_import_directive','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',395),
+ ('option_rvalue -> NUM','option_rvalue',1,'p_option_rvalue','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',400),
+ ('option_rvalue -> TRUE','option_rvalue',1,'p_option_rvalue','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',401),
+ ('option_rvalue -> FALSE','option_rvalue',1,'p_option_rvalue','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',402),
+ ('option_rvalue -> STRING_LITERAL','option_rvalue',1,'p_option_rvalue2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',406),
+ ('option_rvalue -> NAME','option_rvalue',1,'p_option_rvalue3','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',410),
+ ('option_directive -> OPTION NAME EQ option_rvalue SEMI','option_directive',5,'p_option_directive','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',415),
+ ('topLevel -> message_definition','topLevel',1,'p_topLevel','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',421),
+ ('topLevel -> message_extension','topLevel',1,'p_topLevel','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',422),
+ ('topLevel -> enum_definition','topLevel',1,'p_topLevel','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',423),
+ ('topLevel -> service_definition','topLevel',1,'p_topLevel','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',424),
+ ('topLevel -> import_directive','topLevel',1,'p_topLevel','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',425),
+ ('topLevel -> option_directive','topLevel',1,'p_topLevel','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',426),
+ ('package_definition -> package_directive','package_definition',1,'p_package_definition','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',430),
+ ('package_definition -> empty','package_definition',1,'p_packages2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',434),
+ ('statements -> topLevel','statements',1,'p_statements2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',438),
+ ('statements -> statements topLevel','statements',2,'p_statements2','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',439),
+ ('statements -> empty','statements',1,'p_statements','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',446),
+ ('protofile -> package_definition statements','protofile',2,'p_protofile','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',451),
+ ('goal -> STARTTOKEN protofile','goal',2,'p_goal','/Users/sapanbhatia/Projects/uberold/uber/plyproto/parser.py',457),
+]
diff --git a/xos/genx/generator/ply/__init__.py b/xos/genx/generator/ply/__init__.py
new file mode 100644
index 0000000..853a985
--- /dev/null
+++ b/xos/genx/generator/ply/__init__.py
@@ -0,0 +1,4 @@
+# PLY package
+# Author: David Beazley (dave@dabeaz.com)
+
+__all__ = ['lex','yacc']
diff --git a/xos/genx/generator/ply/cpp.py b/xos/genx/generator/ply/cpp.py
new file mode 100644
index 0000000..2f6a030
--- /dev/null
+++ b/xos/genx/generator/ply/cpp.py
@@ -0,0 +1,908 @@
+# -----------------------------------------------------------------------------
+# cpp.py
+#
+# Author: David Beazley (http://www.dabeaz.com)
+# Copyright (C) 2007
+# All rights reserved
+#
+# This module implements an ANSI-C style lexical preprocessor for PLY.
+# -----------------------------------------------------------------------------
+from __future__ import generators
+
+# -----------------------------------------------------------------------------
+# Default preprocessor lexer definitions. These tokens are enough to get
+# a basic preprocessor working. Other modules may import these if they want
+# -----------------------------------------------------------------------------
+
+tokens = (
+ 'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
+)
+
+literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
+
+# Whitespace
+def t_CPP_WS(t):
+ r'\s+'
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+t_CPP_POUND = r'\#'
+t_CPP_DPOUND = r'\#\#'
+
+# Identifier
+t_CPP_ID = r'[A-Za-z_][\w_]*'
+
+# Integer literal
+def CPP_INTEGER(t):
+ r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
+ return t
+
+t_CPP_INTEGER = CPP_INTEGER
+
+# Floating literal
+t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
+
+# String literal
+def t_CPP_STRING(t):
+ r'\"([^\\\n]|(\\(.|\n)))*?\"'
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+# Character constant 'c' or L'c'
+def t_CPP_CHAR(t):
+ r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+# Comment
+def t_CPP_COMMENT1(t):
+ r'(/\*(.|\n)*?\*/)'
+ ncr = t.value.count("\n")
+ t.lexer.lineno += ncr
+ # replace with one space or a number of '\n'
+ t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
+ return t
+
+# Line comment
+def t_CPP_COMMENT2(t):
+ r'(//.*?(\n|$))'
+    # replace with '\n'
+    t.type = 'CPP_WS'; t.value = '\n'
+    return t
+
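+# On a lexing error, pass the offending character through as a token whose
+# type is the character itself, and skip ahead by one.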
+def t_error(t):
+ t.type = t.value[0]
+ t.value = t.value[0]
+ t.lexer.skip(1)
+ return t
+
+import re
+import copy
+import time
+import os.path
+
+# -----------------------------------------------------------------------------
+# trigraph()
+#
+# Given an input string, this function replaces all trigraph sequences.
+# The following mapping is used:
+#
+# ??= #
+# ??/ \
+# ??' ^
+# ??( [
+# ??) ]
+# ??! |
+# ??< {
+# ??> }
+# ??- ~
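+#
+# e.g. trigraph('??=include') returns '#include'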
+# -----------------------------------------------------------------------------
+
+_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
+_trigraph_rep = {
+ '=':'#',
+ '/':'\\',
+ "'":'^',
+ '(':'[',
+ ')':']',
+ '!':'|',
+ '<':'{',
+ '>':'}',
+ '-':'~'
+}
+
+def trigraph(input):
+ return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
+
+# ------------------------------------------------------------------
+# Macro object
+#
+# This object holds information about preprocessor macros
+#
+# .name - Macro name (string)
+# .value - Macro value (a list of tokens)
+# .arglist - List of argument names
+# .variadic - Boolean indicating whether or not variadic macro
+# .vararg - Name of the variadic parameter
+#
+# When a macro is created, the macro replacement token sequence is
+# pre-scanned and used to create patch lists that are later used
+# during macro expansion
+# ------------------------------------------------------------------
+
+class Macro(object):
+ def __init__(self,name,value,arglist=None,variadic=False):
+ self.name = name
+ self.value = value
+ self.arglist = arglist
+ self.variadic = variadic
+ if variadic:
+ self.vararg = arglist[-1]
+ self.source = None
+
+# ------------------------------------------------------------------
+# Preprocessor object
+#
+# Object representing a preprocessor. Contains macro definitions,
+# include directories, and other information
+# ------------------------------------------------------------------
+
+class Preprocessor(object):
+ def __init__(self,lexer=None):
+ if lexer is None:
+ lexer = lex.lexer
+ self.lexer = lexer
+ self.macros = { }
+ self.path = []
+ self.temp_path = []
+
+ # Probe the lexer for selected tokens
+ self.lexprobe()
+
+ tm = time.localtime()
+ self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
+ self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
+ self.parser = None
+
+ # -----------------------------------------------------------------------------
+ # tokenize()
+ #
+ # Utility function. Given a string of text, tokenize into a list of tokens
+ # -----------------------------------------------------------------------------
+
+ def tokenize(self,text):
+ tokens = []
+ self.lexer.input(text)
+ while True:
+ tok = self.lexer.token()
+ if not tok: break
+ tokens.append(tok)
+ return tokens
+
+ # ---------------------------------------------------------------------
+ # error()
+ #
+ # Report a preprocessor error/warning of some kind
+ # ----------------------------------------------------------------------
+
+ def error(self,file,line,msg):
+ print("%s:%d %s" % (file,line,msg))
+
+ # ----------------------------------------------------------------------
+ # lexprobe()
+ #
+ # This method probes the preprocessor lexer object to discover
+ # the token types of symbols that are important to the preprocessor.
+ # If this works right, the preprocessor will simply "work"
+ # with any suitable lexer regardless of how tokens have been named.
+ # ----------------------------------------------------------------------
+
+ def lexprobe(self):
+
+ # Determine the token type for identifiers
+ self.lexer.input("identifier")
+ tok = self.lexer.token()
+ if not tok or tok.value != "identifier":
+ print("Couldn't determine identifier type")
+ else:
+ self.t_ID = tok.type
+
+ # Determine the token type for integers
+ self.lexer.input("12345")
+ tok = self.lexer.token()
+ if not tok or int(tok.value) != 12345:
+ print("Couldn't determine integer type")
+ else:
+ self.t_INTEGER = tok.type
+ self.t_INTEGER_TYPE = type(tok.value)
+
+ # Determine the token type for strings enclosed in double quotes
+ self.lexer.input("\"filename\"")
+ tok = self.lexer.token()
+ if not tok or tok.value != "\"filename\"":
+ print("Couldn't determine string type")
+ else:
+ self.t_STRING = tok.type
+
+ # Determine the token type for whitespace--if any
+ self.lexer.input(" ")
+ tok = self.lexer.token()
+ if not tok or tok.value != " ":
+ self.t_SPACE = None
+ else:
+ self.t_SPACE = tok.type
+
+ # Determine the token type for newlines
+ self.lexer.input("\n")
+ tok = self.lexer.token()
+ if not tok or tok.value != "\n":
+ self.t_NEWLINE = None
+ print("Couldn't determine token for newlines")
+ else:
+ self.t_NEWLINE = tok.type
+
+ self.t_WS = (self.t_SPACE, self.t_NEWLINE)
+
+ # Check for other characters used by the preprocessor
+ chars = [ '<','>','#','##','\\','(',')',',','.']
+ for c in chars:
+ self.lexer.input(c)
+ tok = self.lexer.token()
+ if not tok or tok.value != c:
+ print("Unable to lex '%s' required for preprocessor" % c)
+
+ # ----------------------------------------------------------------------
+ # add_path()
+ #
+ # Adds a search path to the preprocessor.
+ # ----------------------------------------------------------------------
+
+ def add_path(self,path):
+ self.path.append(path)
+
+ # ----------------------------------------------------------------------
+ # group_lines()
+ #
+ # Given an input string, this function splits it into lines. Trailing whitespace
+ # is removed. Any line ending with \ is grouped with the next line. This
+    # function forms the lowest level of the preprocessor---grouping the input text into
+    # a line-by-line format.
+ # ----------------------------------------------------------------------
+
+ def group_lines(self,input):
+ lex = self.lexer.clone()
+ lines = [x.rstrip() for x in input.splitlines()]
+ for i in xrange(len(lines)):
+ j = i+1
+ while lines[i].endswith('\\') and (j < len(lines)):
+ lines[i] = lines[i][:-1]+lines[j]
+ lines[j] = ""
+ j += 1
+
+ input = "\n".join(lines)
+ lex.input(input)
+ lex.lineno = 1
+
+ current_line = []
+ while True:
+ tok = lex.token()
+ if not tok:
+ break
+ current_line.append(tok)
+ if tok.type in self.t_WS and '\n' in tok.value:
+ yield current_line
+ current_line = []
+
+ if current_line:
+ yield current_line
+
+ # ----------------------------------------------------------------------
+ # tokenstrip()
+ #
+ # Remove leading/trailing whitespace tokens from a token list
+ # ----------------------------------------------------------------------
+
+ def tokenstrip(self,tokens):
+ i = 0
+ while i < len(tokens) and tokens[i].type in self.t_WS:
+ i += 1
+ del tokens[:i]
+ i = len(tokens)-1
+ while i >= 0 and tokens[i].type in self.t_WS:
+ i -= 1
+ del tokens[i+1:]
+ return tokens
+
+
+ # ----------------------------------------------------------------------
+ # collect_args()
+ #
+ # Collects comma separated arguments from a list of tokens. The arguments
+    # must be enclosed in parentheses. Returns a tuple (tokencount,args,positions)
+ # where tokencount is the number of tokens consumed, args is a list of arguments,
+ # and positions is a list of integers containing the starting index of each
+ # argument. Each argument is represented by a list of tokens.
+ #
+ # When collecting arguments, leading and trailing whitespace is removed
+ # from each argument.
+ #
+    # This function properly handles nested parentheses and commas---these do not
+ # define new arguments.
+ # ----------------------------------------------------------------------
+
+ def collect_args(self,tokenlist):
+ args = []
+ positions = []
+ current_arg = []
+ nesting = 1
+ tokenlen = len(tokenlist)
+
+ # Search for the opening '('.
+ i = 0
+ while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
+ i += 1
+
+ if (i < tokenlen) and (tokenlist[i].value == '('):
+ positions.append(i+1)
+ else:
+ self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
+ return 0, [], []
+
+ i += 1
+
+ while i < tokenlen:
+ t = tokenlist[i]
+ if t.value == '(':
+ current_arg.append(t)
+ nesting += 1
+ elif t.value == ')':
+ nesting -= 1
+ if nesting == 0:
+ if current_arg:
+ args.append(self.tokenstrip(current_arg))
+ positions.append(i)
+ return i+1,args,positions
+ current_arg.append(t)
+ elif t.value == ',' and nesting == 1:
+ args.append(self.tokenstrip(current_arg))
+ positions.append(i+1)
+ current_arg = []
+ else:
+ current_arg.append(t)
+ i += 1
+
+ # Missing end argument
+ self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
+ return 0, [],[]
+
+ # ----------------------------------------------------------------------
+ # macro_prescan()
+ #
+ # Examine the macro value (token sequence) and identify patch points
+ # This is used to speed up macro expansion later on---we'll know
+ # right away where to apply patches to the value to form the expansion
+ # ----------------------------------------------------------------------
+
+ def macro_prescan(self,macro):
+ macro.patch = [] # Standard macro arguments
+ macro.str_patch = [] # String conversion expansion
+ macro.var_comma_patch = [] # Variadic macro comma patch
+ i = 0
+ while i < len(macro.value):
+ if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
+ argnum = macro.arglist.index(macro.value[i].value)
+ # Conversion of argument to a string
+ if i > 0 and macro.value[i-1].value == '#':
+ macro.value[i] = copy.copy(macro.value[i])
+ macro.value[i].type = self.t_STRING
+ del macro.value[i-1]
+ macro.str_patch.append((argnum,i-1))
+ continue
+ # Concatenation
+ elif (i > 0 and macro.value[i-1].value == '##'):
+ macro.patch.append(('c',argnum,i-1))
+ del macro.value[i-1]
+ continue
+ elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
+ macro.patch.append(('c',argnum,i))
+ i += 1
+ continue
+ # Standard expansion
+ else:
+ macro.patch.append(('e',argnum,i))
+ elif macro.value[i].value == '##':
+ if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
+ ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
+ (macro.value[i+1].value == macro.vararg):
+ macro.var_comma_patch.append(i-1)
+ i += 1
+ macro.patch.sort(key=lambda x: x[2],reverse=True)
+
+ # ----------------------------------------------------------------------
+ # macro_expand_args()
+ #
+ # Given a Macro and list of arguments (each a token list), this method
+ # returns an expanded version of a macro. The return value is a token sequence
+ # representing the replacement macro tokens
+ # ----------------------------------------------------------------------
+
+ def macro_expand_args(self,macro,args):
+ # Make a copy of the macro token sequence
+ rep = [copy.copy(_x) for _x in macro.value]
+
+ # Make string expansion patches. These do not alter the length of the replacement sequence
+
+ str_expansion = {}
+ for argnum, i in macro.str_patch:
+ if argnum not in str_expansion:
+ str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
+ rep[i] = copy.copy(rep[i])
+ rep[i].value = str_expansion[argnum]
+
+    # Make the variadic macro comma patch. If the variadic macro argument is empty,
+    # we get rid of the comma that precedes it in the replacement sequence.
+ comma_patch = False
+ if macro.variadic and not args[-1]:
+ for i in macro.var_comma_patch:
+ rep[i] = None
+ comma_patch = True
+
+ # Make all other patches. The order of these matters. It is assumed that the patch list
+ # has been sorted in reverse order of patch location since replacements will cause the
+ # size of the replacement sequence to expand from the patch point.
+
+ expanded = { }
+ for ptype, argnum, i in macro.patch:
+ # Concatenation. Argument is left unexpanded
+ if ptype == 'c':
+ rep[i:i+1] = args[argnum]
+ # Normal expansion. Argument is macro expanded first
+ elif ptype == 'e':
+ if argnum not in expanded:
+ expanded[argnum] = self.expand_macros(args[argnum])
+ rep[i:i+1] = expanded[argnum]
+
+ # Get rid of removed comma if necessary
+ if comma_patch:
+ rep = [_i for _i in rep if _i]
+
+ return rep
+
+
+ # ----------------------------------------------------------------------
+ # expand_macros()
+ #
+ # Given a list of tokens, this function performs macro expansion.
+ # The expanded argument is a dictionary that contains macros already
+ # expanded. This is used to prevent infinite recursion.
+ # ----------------------------------------------------------------------
+
+ def expand_macros(self,tokens,expanded=None):
+ if expanded is None:
+ expanded = {}
+ i = 0
+ while i < len(tokens):
+ t = tokens[i]
+ if t.type == self.t_ID:
+ if t.value in self.macros and t.value not in expanded:
+ # Yes, we found a macro match
+ expanded[t.value] = True
+
+ m = self.macros[t.value]
+ if not m.arglist:
+ # A simple macro
+ ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
+ for e in ex:
+ e.lineno = t.lineno
+ tokens[i:i+1] = ex
+ i += len(ex)
+ else:
+ # A macro with arguments
+ j = i + 1
+ while j < len(tokens) and tokens[j].type in self.t_WS:
+ j += 1
+ if tokens[j].value == '(':
+ tokcount,args,positions = self.collect_args(tokens[j:])
+ if not m.variadic and len(args) != len(m.arglist):
+ self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
+ i = j + tokcount
+ elif m.variadic and len(args) < len(m.arglist)-1:
+ if len(m.arglist) > 2:
+ self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
+ else:
+ self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
+ i = j + tokcount
+ else:
+ if m.variadic:
+ if len(args) == len(m.arglist)-1:
+ args.append([])
+ else:
+ args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
+ del args[len(m.arglist):]
+
+ # Get macro replacement text
+ rep = self.macro_expand_args(m,args)
+ rep = self.expand_macros(rep,expanded)
+ for r in rep:
+ r.lineno = t.lineno
+ tokens[i:j+tokcount] = rep
+ i += len(rep)
+ del expanded[t.value]
+ continue
+ elif t.value == '__LINE__':
+ t.type = self.t_INTEGER
+ t.value = self.t_INTEGER_TYPE(t.lineno)
+
+ i += 1
+ return tokens
+
+ # ----------------------------------------------------------------------
+ # evalexpr()
+ #
+ # Evaluate an expression token sequence for the purposes of evaluating
+ # integral expressions.
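+    # For example, "defined(FOO) && VERSION > 2", with FOO defined and VERSION
+    # expanding to 3, becomes the Python expression "1 and 3 > 2" (integer
+    # suffixes are stripped and "&&" is translated to "and") before eval().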
+ # ----------------------------------------------------------------------
+
+ def evalexpr(self,tokens):
+        # Search for defined macros
+ i = 0
+ while i < len(tokens):
+ if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
+ j = i + 1
+ needparen = False
+ result = "0L"
+ while j < len(tokens):
+ if tokens[j].type in self.t_WS:
+ j += 1
+ continue
+ elif tokens[j].type == self.t_ID:
+ if tokens[j].value in self.macros:
+ result = "1L"
+ else:
+ result = "0L"
+ if not needparen: break
+ elif tokens[j].value == '(':
+ needparen = True
+ elif tokens[j].value == ')':
+ break
+ else:
+ self.error(self.source,tokens[i].lineno,"Malformed defined()")
+ j += 1
+ tokens[i].type = self.t_INTEGER
+ tokens[i].value = self.t_INTEGER_TYPE(result)
+ del tokens[i+1:j+1]
+ i += 1
+ tokens = self.expand_macros(tokens)
+ for i,t in enumerate(tokens):
+ if t.type == self.t_ID:
+ tokens[i] = copy.copy(t)
+ tokens[i].type = self.t_INTEGER
+ tokens[i].value = self.t_INTEGER_TYPE("0L")
+ elif t.type == self.t_INTEGER:
+ tokens[i] = copy.copy(t)
+ # Strip off any trailing suffixes
+ tokens[i].value = str(tokens[i].value)
+ while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
+ tokens[i].value = tokens[i].value[:-1]
+
+ expr = "".join([str(x.value) for x in tokens])
+        expr = expr.replace("&&"," and ")
+        expr = expr.replace("||"," or ")
+        expr = expr.replace("!"," not ")
+        # The "!" substitution above also mangles "!=" into " not ="; undo that
+        expr = expr.replace(" not ="," !=")
+ try:
+ result = eval(expr)
+        except Exception:   # StandardError is Python 2-only; Exception works on both
+ self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
+ result = 0
+ return result
+
+ # ----------------------------------------------------------------------
+ # parsegen()
+ #
+    # Parse an input string, yielding the resulting preprocessed tokens.
+ # ----------------------------------------------------------------------
+ def parsegen(self,input,source=None):
+
+ # Replace trigraph sequences
+ t = trigraph(input)
+ lines = self.group_lines(t)
+
+ if not source:
+ source = ""
+
+ self.define("__FILE__ \"%s\"" % source)
+
+ self.source = source
+ chunk = []
+ enable = True
+ iftrigger = False
+ ifstack = []
+
+ for x in lines:
+ for i,tok in enumerate(x):
+ if tok.type not in self.t_WS: break
+ if tok.value == '#':
+ # Preprocessor directive
+
+                # Re-emit the newline tokens that were consumed along with the
+                # directive so that line numbering stays intact
+ for tok in x:
+ if tok.type in self.t_WS and '\n' in tok.value:
+ chunk.append(tok)
+
+ dirtokens = self.tokenstrip(x[i+1:])
+ if dirtokens:
+ name = dirtokens[0].value
+ args = self.tokenstrip(dirtokens[1:])
+ else:
+ name = ""
+ args = []
+
+ if name == 'define':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ self.define(args)
+ elif name == 'include':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ oldfile = self.macros['__FILE__']
+ for tok in self.include(args):
+ yield tok
+ self.macros['__FILE__'] = oldfile
+ self.source = source
+ elif name == 'undef':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ self.undef(args)
+ elif name == 'ifdef':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ if not args[0].value in self.macros:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'ifndef':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ if args[0].value in self.macros:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'if':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ result = self.evalexpr(args)
+ if not result:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'elif':
+ if ifstack:
+ if ifstack[-1][0]: # We only pay attention if outer "if" allows this
+ if enable: # If already true, we flip enable False
+ enable = False
+ elif not iftrigger: # If False, but not triggered yet, we'll check expression
+ result = self.evalexpr(args)
+ if result:
+ enable = True
+ iftrigger = True
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
+
+ elif name == 'else':
+ if ifstack:
+ if ifstack[-1][0]:
+ if enable:
+ enable = False
+ elif not iftrigger:
+ enable = True
+ iftrigger = True
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
+
+ elif name == 'endif':
+ if ifstack:
+ enable,iftrigger = ifstack.pop()
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
+ else:
+ # Unknown preprocessor directive
+ pass
+
+ else:
+ # Normal text
+ if enable:
+ chunk.extend(x)
+
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+
+ # ----------------------------------------------------------------------
+ # include()
+ #
+ # Implementation of file-inclusion
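+    # For example, '#include <foo.h>' searches self.path (the system include
+    # path) before self.temp_path, while '#include "foo.h"' searches the
+    # directories of the including files (self.temp_path) first.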
+ # ----------------------------------------------------------------------
+
+ def include(self,tokens):
+ # Try to extract the filename and then process an include file
+ if not tokens:
+ return
+ if tokens:
+ if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
+ tokens = self.expand_macros(tokens)
+
+ if tokens[0].value == '<':
+ # Include <...>
+ i = 1
+ while i < len(tokens):
+ if tokens[i].value == '>':
+ break
+ i += 1
+ else:
+ print("Malformed #include <...>")
+ return
+ filename = "".join([x.value for x in tokens[1:i]])
+ path = self.path + [""] + self.temp_path
+ elif tokens[0].type == self.t_STRING:
+ filename = tokens[0].value[1:-1]
+ path = self.temp_path + [""] + self.path
+ else:
+ print("Malformed #include statement")
+ return
+ for p in path:
+ iname = os.path.join(p,filename)
+ try:
+ data = open(iname,"r").read()
+ dname = os.path.dirname(iname)
+ if dname:
+ self.temp_path.insert(0,dname)
+ for tok in self.parsegen(data,filename):
+ yield tok
+ if dname:
+ del self.temp_path[0]
+ break
+ except IOError:
+ pass
+ else:
+ print("Couldn't find '%s'" % filename)
+
+ # ----------------------------------------------------------------------
+ # define()
+ #
+ # Define a new macro
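+    # Both macro forms are accepted as a single definition string; for a
+    # Preprocessor instance p, for example:
+    #
+    #     p.define('DEBUG 1')                     # object-like macro
+    #     p.define('MAX(a,b) ((a)>(b)?(a):(b))')  # function-like macro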
+ # ----------------------------------------------------------------------
+
+ def define(self,tokens):
+        # Accept a raw definition string (the unicode check only applies on Python 2)
+        if isinstance(tokens,str) or (str is bytes and isinstance(tokens,unicode)):
+ tokens = self.tokenize(tokens)
+
+ linetok = tokens
+ try:
+ name = linetok[0]
+ if len(linetok) > 1:
+ mtype = linetok[1]
+ else:
+ mtype = None
+ if not mtype:
+ m = Macro(name.value,[])
+ self.macros[name.value] = m
+ elif mtype.type in self.t_WS:
+ # A normal macro
+ m = Macro(name.value,self.tokenstrip(linetok[2:]))
+ self.macros[name.value] = m
+ elif mtype.value == '(':
+ # A macro with arguments
+ tokcount, args, positions = self.collect_args(linetok[1:])
+ variadic = False
+ for a in args:
+ if variadic:
+ print("No more arguments may follow a variadic argument")
+ break
+ astr = "".join([str(_i.value) for _i in a])
+                    if astr == "...":
+                        variadic = True
+                        a[0].type = self.t_ID
+                        a[0].value = '__VA_ARGS__'
+                        del a[1:]
+                        continue
+ elif astr[-3:] == "..." and a[0].type == self.t_ID:
+ variadic = True
+ del a[1:]
+                        # If the "..." is fused onto the identifier (e.g. "args..."),
+                        # strip it off so the bare name is used for macro expansion
+ if a[0].value[-3:] == '...':
+ a[0].value = a[0].value[:-3]
+ continue
+ if len(a) > 1 or a[0].type != self.t_ID:
+ print("Invalid macro argument")
+ break
+ else:
+ mvalue = self.tokenstrip(linetok[1+tokcount:])
+ i = 0
+ while i < len(mvalue):
+ if i+1 < len(mvalue):
+ if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
+ del mvalue[i]
+ continue
+ elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
+ del mvalue[i+1]
+ i += 1
+ m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
+ self.macro_prescan(m)
+ self.macros[name.value] = m
+ else:
+ print("Bad macro definition")
+ except LookupError:
+ print("Bad macro definition")
+
+ # ----------------------------------------------------------------------
+ # undef()
+ #
+ # Undefine a macro
+ # ----------------------------------------------------------------------
+
+ def undef(self,tokens):
+ id = tokens[0].value
+ try:
+ del self.macros[id]
+ except LookupError:
+ pass
+
+ # ----------------------------------------------------------------------
+ # parse()
+ #
+ # Parse input text.
+ # ----------------------------------------------------------------------
+ def parse(self,input,source=None,ignore={}):
+ self.ignore = ignore
+ self.parser = self.parsegen(input,source)
+
+ # ----------------------------------------------------------------------
+ # token()
+ #
+ # Method to return individual tokens
+ # ----------------------------------------------------------------------
+ def token(self):
+ try:
+ while True:
+ tok = next(self.parser)
+ if tok.type not in self.ignore: return tok
+ except StopIteration:
+ self.parser = None
+ return None
+
+if __name__ == '__main__':
+ import ply.lex as lex
+ lexer = lex.lex()
+
+ # Run a preprocessor
+ import sys
+ f = open(sys.argv[1])
+ input = f.read()
+
+ p = Preprocessor(lexer)
+ p.parse(input,sys.argv[1])
+ while True:
+ tok = p.token()
+ if not tok: break
+ print(p.source, tok)
diff --git a/xos/genx/generator/ply/ctokens.py b/xos/genx/generator/ply/ctokens.py
new file mode 100644
index 0000000..f6f6952
--- /dev/null
+++ b/xos/genx/generator/ply/ctokens.py
@@ -0,0 +1,133 @@
+# ----------------------------------------------------------------------
+# ctokens.py
+#
+# Token specifications for symbols in ANSI C and C++. This file is
+# meant to be used as a library in other tokenizers.
+# ----------------------------------------------------------------------
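+# A lexer module can reuse these definitions wholesale; a minimal sketch:
+#
+#     from ctokens import *
+#     import ply.lex as lex
+#     lexer = lex.lex()    # picks up the tokens list and t_* rules above
+#
+# (A real module would also extend 'tokens' with 'COMMENT'/'CPPCOMMENT' or
+# drop those rules, and define its own t_error and t_ignore.)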
+
+# Reserved words
+
+tokens = [
+ # Literals (identifier, integer constant, float constant, string constant, char const)
+ 'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
+
+ # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
+ 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
+ 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
+ 'LOR', 'LAND', 'LNOT',
+ 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
+
+ # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
+ 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
+ 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
+
+ # Increment/decrement (++,--)
+ 'INCREMENT', 'DECREMENT',
+
+ # Structure dereference (->)
+ 'ARROW',
+
+ # Ternary operator (?)
+ 'TERNARY',
+
+    # Delimiters ( ) [ ] { } , . ; :
+ 'LPAREN', 'RPAREN',
+ 'LBRACKET', 'RBRACKET',
+ 'LBRACE', 'RBRACE',
+ 'COMMA', 'PERIOD', 'SEMI', 'COLON',
+
+ # Ellipsis (...)
+ 'ELLIPSIS',
+]
+
+# Operators
+t_PLUS = r'\+'
+t_MINUS = r'-'
+t_TIMES = r'\*'
+t_DIVIDE = r'/'
+t_MODULO = r'%'
+t_OR = r'\|'
+t_AND = r'&'
+t_NOT = r'~'
+t_XOR = r'\^'
+t_LSHIFT = r'<<'
+t_RSHIFT = r'>>'
+t_LOR = r'\|\|'
+t_LAND = r'&&'
+t_LNOT = r'!'
+t_LT = r'<'
+t_GT = r'>'
+t_LE = r'<='
+t_GE = r'>='
+t_EQ = r'=='
+t_NE = r'!='
+
+# Assignment operators
+
+t_EQUALS = r'='
+t_TIMESEQUAL = r'\*='
+t_DIVEQUAL = r'/='
+t_MODEQUAL = r'%='
+t_PLUSEQUAL = r'\+='
+t_MINUSEQUAL = r'-='
+t_LSHIFTEQUAL = r'<<='
+t_RSHIFTEQUAL = r'>>='
+t_ANDEQUAL = r'&='
+t_OREQUAL = r'\|='
+t_XOREQUAL = r'\^='
+
+# Increment/decrement
+t_INCREMENT = r'\+\+'
+t_DECREMENT = r'--'
+
+# ->
+t_ARROW = r'->'
+
+# ?
+t_TERNARY = r'\?'
+
+# Delimiters
+t_LPAREN = r'\('
+t_RPAREN = r'\)'
+t_LBRACKET = r'\['
+t_RBRACKET = r'\]'
+t_LBRACE = r'\{'
+t_RBRACE = r'\}'
+t_COMMA = r','
+t_PERIOD = r'\.'
+t_SEMI = r';'
+t_COLON = r':'
+t_ELLIPSIS = r'\.\.\.'
+
+# Identifiers
+t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
+
+# Integer literal
+t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
+
+# Floating literal
+t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
+
+# String literal
+t_STRING = r'\"([^\\\n]|(\\.))*?\"'
+
+# Character constant 'c' or L'c'
+t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
+
+# Comment (C-Style)
+def t_COMMENT(t):
+ r'/\*(.|\n)*?\*/'
+ t.lexer.lineno += t.value.count('\n')
+ return t
+
+# Comment (C++-Style)
+def t_CPPCOMMENT(t):
+ r'//.*\n'
+ t.lexer.lineno += 1
+ return t
+
diff --git a/xos/genx/generator/ply/lex.py b/xos/genx/generator/ply/lex.py
new file mode 100644
index 0000000..8f05537
--- /dev/null
+++ b/xos/genx/generator/ply/lex.py
@@ -0,0 +1,1063 @@
+# -----------------------------------------------------------------------------
+# ply: lex.py
+#
+# Copyright (C) 2001-2011,
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
+# endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
+
+__version__ = "3.5"
+__tabversion__ = "3.5" # Version of table file used
+
+import re, sys, types, copy, os, inspect
+
+# This tuple contains known string types
+try:
+ # Python 2.6
+ StringTypes = (types.StringType, types.UnicodeType)
+except AttributeError:
+ # Python 3.0
+ StringTypes = (str, bytes)
+
+# Extract the code attribute of a function. Different implementations
+# are for Python 2/3 compatibility.
+
+if sys.version_info[0] < 3:
+ def func_code(f):
+ return f.func_code
+else:
+ def func_code(f):
+ return f.__code__
+
+# This regular expression is used to match valid token names
+_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+
+# Exception thrown when invalid token encountered and no default error
+# handler is defined.
+
+class LexError(Exception):
+ def __init__(self,message,s):
+ self.args = (message,)
+ self.text = s
+
+# Token class. This class is used to represent the tokens produced.
+class LexToken(object):
+ def __str__(self):
+ return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
+ def __repr__(self):
+ return str(self)
+
+# This object is a stand-in for a logging object created by the
+# logging module.
+
+class PlyLogger(object):
+ def __init__(self,f):
+ self.f = f
+ def critical(self,msg,*args,**kwargs):
+ self.f.write((msg % args) + "\n")
+
+ def warning(self,msg,*args,**kwargs):
+ self.f.write("WARNING: "+ (msg % args) + "\n")
+
+ def error(self,msg,*args,**kwargs):
+ self.f.write("ERROR: " + (msg % args) + "\n")
+
+ info = critical
+ debug = critical
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+ def __getattribute__(self,name):
+ return self
+ def __call__(self,*args,**kwargs):
+ return self
+
+# -----------------------------------------------------------------------------
+# === Lexing Engine ===
+#
+# The following Lexer class implements the lexer runtime. There are only
+# a few public methods and attributes:
+#
+# input() - Store a new string in the lexer
+# token() - Get the next token
+# clone() - Clone the lexer
+#
+# lineno - Current line number
+# lexpos - Current position in the input string
+# -----------------------------------------------------------------------------
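+# Typical runtime use once a lexer has been built (a sketch):
+#
+#     lexer.input(data)
+#     while True:
+#         tok = lexer.token()
+#         if tok is None: break
+#         # each tok carries .type, .value, .lineno and .lexpos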
+
+class Lexer:
+ def __init__(self):
+ self.lexre = None # Master regular expression. This is a list of
+ # tuples (re,findex) where re is a compiled
+ # regular expression and findex is a list
+ # mapping regex group numbers to rules
+ self.lexretext = None # Current regular expression strings
+ self.lexstatere = {} # Dictionary mapping lexer states to master regexs
+ self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
+ self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
+ self.lexstate = "INITIAL" # Current lexer state
+ self.lexstatestack = [] # Stack of lexer states
+ self.lexstateinfo = None # State information
+ self.lexstateignore = {} # Dictionary of ignored characters for each state
+ self.lexstateerrorf = {} # Dictionary of error functions for each state
+ self.lexreflags = 0 # Optional re compile flags
+ self.lexdata = None # Actual input data (as a string)
+ self.lexpos = 0 # Current position in input text
+ self.lexlen = 0 # Length of the input text
+ self.lexerrorf = None # Error rule (if any)
+ self.lextokens = None # List of valid tokens
+ self.lexignore = "" # Ignored characters
+ self.lexliterals = "" # Literal characters that can be passed through
+ self.lexmodule = None # Module
+ self.lineno = 1 # Current line number
+ self.lexoptimize = 0 # Optimized mode
+
+ def clone(self,object=None):
+ c = copy.copy(self)
+
+ # If the object parameter has been supplied, it means we are attaching the
+ # lexer to a new object. In this case, we have to rebind all methods in
+ # the lexstatere and lexstateerrorf tables.
+
+ if object:
+ newtab = { }
+ for key, ritem in self.lexstatere.items():
+ newre = []
+ for cre, findex in ritem:
+ newfindex = []
+ for f in findex:
+ if not f or not f[0]:
+ newfindex.append(f)
+ continue
+ newfindex.append((getattr(object,f[0].__name__),f[1]))
+ newre.append((cre,newfindex))
+ newtab[key] = newre
+ c.lexstatere = newtab
+ c.lexstateerrorf = { }
+ for key, ef in self.lexstateerrorf.items():
+ c.lexstateerrorf[key] = getattr(object,ef.__name__)
+ c.lexmodule = object
+ return c
+
+ # ------------------------------------------------------------
+ # writetab() - Write lexer information to a table file
+ # ------------------------------------------------------------
+ def writetab(self,tabfile,outputdir=""):
+ if isinstance(tabfile,types.ModuleType):
+ return
+ basetabfilename = tabfile.split(".")[-1]
+ filename = os.path.join(outputdir,basetabfilename)+".py"
+ tf = open(filename,"w")
+ tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
+ tf.write("_tabversion = %s\n" % repr(__tabversion__))
+ tf.write("_lextokens = %s\n" % repr(self.lextokens))
+ tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
+ tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
+ tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
+
+ tabre = { }
+ # Collect all functions in the initial state
+ initial = self.lexstatere["INITIAL"]
+ initialfuncs = []
+ for part in initial:
+ for f in part[1]:
+ if f and f[0]:
+ initialfuncs.append(f)
+
+ for key, lre in self.lexstatere.items():
+ titem = []
+ for i in range(len(lre)):
+ titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
+ tabre[key] = titem
+
+ tf.write("_lexstatere = %s\n" % repr(tabre))
+ tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
+
+ taberr = { }
+ for key, ef in self.lexstateerrorf.items():
+ if ef:
+ taberr[key] = ef.__name__
+ else:
+ taberr[key] = None
+ tf.write("_lexstateerrorf = %s\n" % repr(taberr))
+ tf.close()
+
+ # ------------------------------------------------------------
+ # readtab() - Read lexer information from a tab file
+ # ------------------------------------------------------------
+ def readtab(self,tabfile,fdict):
+ if isinstance(tabfile,types.ModuleType):
+ lextab = tabfile
+ else:
+ if sys.version_info[0] < 3:
+ exec("import %s as lextab" % tabfile)
+ else:
+ env = { }
+ exec("import %s as lextab" % tabfile, env,env)
+ lextab = env['lextab']
+
+ if getattr(lextab,"_tabversion","0.0") != __tabversion__:
+ raise ImportError("Inconsistent PLY version")
+
+ self.lextokens = lextab._lextokens
+ self.lexreflags = lextab._lexreflags
+ self.lexliterals = lextab._lexliterals
+ self.lexstateinfo = lextab._lexstateinfo
+ self.lexstateignore = lextab._lexstateignore
+ self.lexstatere = { }
+ self.lexstateretext = { }
+ for key,lre in lextab._lexstatere.items():
+ titem = []
+ txtitem = []
+ for i in range(len(lre)):
+ titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
+ txtitem.append(lre[i][0])
+ self.lexstatere[key] = titem
+ self.lexstateretext[key] = txtitem
+ self.lexstateerrorf = { }
+ for key,ef in lextab._lexstateerrorf.items():
+ self.lexstateerrorf[key] = fdict[ef]
+ self.begin('INITIAL')
+
+ # ------------------------------------------------------------
+ # input() - Push a new string into the lexer
+ # ------------------------------------------------------------
+ def input(self,s):
+ # Pull off the first character to see if s looks like a string
+ c = s[:1]
+ if not isinstance(c,StringTypes):
+ raise ValueError("Expected a string")
+ self.lexdata = s
+ self.lexpos = 0
+ self.lexlen = len(s)
+
+ # ------------------------------------------------------------
+ # begin() - Changes the lexing state
+ # ------------------------------------------------------------
+ def begin(self,state):
+ if not state in self.lexstatere:
+ raise ValueError("Undefined state")
+ self.lexre = self.lexstatere[state]
+ self.lexretext = self.lexstateretext[state]
+ self.lexignore = self.lexstateignore.get(state,"")
+ self.lexerrorf = self.lexstateerrorf.get(state,None)
+ self.lexstate = state
+
+ # ------------------------------------------------------------
+ # push_state() - Changes the lexing state and saves old on stack
+ # ------------------------------------------------------------
+ def push_state(self,state):
+ self.lexstatestack.append(self.lexstate)
+ self.begin(state)
+
+ # ------------------------------------------------------------
+ # pop_state() - Restores the previous state
+ # ------------------------------------------------------------
+ def pop_state(self):
+ self.begin(self.lexstatestack.pop())
+
+ # ------------------------------------------------------------
+ # current_state() - Returns the current lexing state
+ # ------------------------------------------------------------
+ def current_state(self):
+ return self.lexstate
+
+ # ------------------------------------------------------------
+ # skip() - Skip ahead n characters
+ # ------------------------------------------------------------
+ def skip(self,n):
+ self.lexpos += n
+
+ # ------------------------------------------------------------
+    # token() - Return the next token from the Lexer
+ #
+ # Note: This function has been carefully implemented to be as fast
+ # as possible. Don't make changes unless you really know what
+ # you are doing
+ # ------------------------------------------------------------
+ def token(self):
+ # Make local copies of frequently referenced attributes
+ lexpos = self.lexpos
+ lexlen = self.lexlen
+ lexignore = self.lexignore
+ lexdata = self.lexdata
+
+ while lexpos < lexlen:
+ # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
+ if lexdata[lexpos] in lexignore:
+ lexpos += 1
+ continue
+
+ # Look for a regular expression match
+ for lexre,lexindexfunc in self.lexre:
+ m = lexre.match(lexdata,lexpos)
+ if not m: continue
+
+ # Create a token for return
+ tok = LexToken()
+ tok.value = m.group()
+ tok.lineno = self.lineno
+ tok.lexpos = lexpos
+
+ i = m.lastindex
+ func,tok.type = lexindexfunc[i]
+
+ if not func:
+ # If no token type was set, it's an ignored token
+ if tok.type:
+ self.lexpos = m.end()
+ return tok
+ else:
+ lexpos = m.end()
+ break
+
+ lexpos = m.end()
+
+ # If token is processed by a function, call it
+
+ tok.lexer = self # Set additional attributes useful in token rules
+ self.lexmatch = m
+ self.lexpos = lexpos
+
+ newtok = func(tok)
+
+                # Every rule function must return a token; if it returns
+                # nothing, we just move on to the next token
+ if not newtok:
+ lexpos = self.lexpos # This is here in case user has updated lexpos.
+ lexignore = self.lexignore # This is here in case there was a state change
+ break
+
+ # Verify type of the token. If not in the token map, raise an error
+ if not self.lexoptimize:
+ if not newtok.type in self.lextokens:
+ raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+ func_code(func).co_filename, func_code(func).co_firstlineno,
+ func.__name__, newtok.type),lexdata[lexpos:])
+
+ return newtok
+ else:
+ # No match, see if in literals
+ if lexdata[lexpos] in self.lexliterals:
+ tok = LexToken()
+ tok.value = lexdata[lexpos]
+ tok.lineno = self.lineno
+ tok.type = tok.value
+ tok.lexpos = lexpos
+ self.lexpos = lexpos + 1
+ return tok
+
+ # No match. Call t_error() if defined.
+ if self.lexerrorf:
+ tok = LexToken()
+ tok.value = self.lexdata[lexpos:]
+ tok.lineno = self.lineno
+ tok.type = "error"
+ tok.lexer = self
+ tok.lexpos = lexpos
+ self.lexpos = lexpos
+ newtok = self.lexerrorf(tok)
+ if lexpos == self.lexpos:
+ # Error method didn't change text position at all. This is an error.
+ raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+ lexpos = self.lexpos
+ if not newtok: continue
+ return newtok
+
+ self.lexpos = lexpos
+ raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
+
+ self.lexpos = lexpos + 1
+ if self.lexdata is None:
+ raise RuntimeError("No input string given with input()")
+ return None
+
+ # Iterator interface
+ def __iter__(self):
+ return self
+
+ def next(self):
+ t = self.token()
+ if t is None:
+ raise StopIteration
+ return t
+
+ __next__ = next
+
+# -----------------------------------------------------------------------------
+# === Lex Builder ===
+#
+# The functions and classes below are used to collect lexing information
+# and build a Lexer object from it.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# _get_regex(func)
+#
+# Returns the regular expression assigned to a function either as a doc string
+# or as a .regex attribute attached by the @TOKEN decorator.
+# -----------------------------------------------------------------------------
+
+def _get_regex(func):
+ return getattr(func,"regex",func.__doc__)
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack. This is used to get the environment
+# associated with the lex() call if none was provided.
+# -----------------------------------------------------------------------------
+
+def get_caller_module_dict(levels):
+ try:
+ raise RuntimeError
+ except RuntimeError:
+ e,b,t = sys.exc_info()
+ f = t.tb_frame
+ while levels > 0:
+ f = f.f_back
+ levels -= 1
+ ldict = f.f_globals.copy()
+ if f.f_globals != f.f_locals:
+ ldict.update(f.f_locals)
+
+ return ldict
+
+# -----------------------------------------------------------------------------
+# _funcs_to_names()
+#
+# Given a list of regular expression functions, this converts it to a list
+# suitable for output to a table file
+# -----------------------------------------------------------------------------
+
+def _funcs_to_names(funclist,namelist):
+ result = []
+ for f,name in zip(funclist,namelist):
+ if f and f[0]:
+ result.append((name, f[1]))
+ else:
+ result.append(f)
+ return result
+
+# -----------------------------------------------------------------------------
+# _names_to_funcs()
+#
+# Given a list of regular expression function names, this converts it back to
+# functions.
+# -----------------------------------------------------------------------------
+
+def _names_to_funcs(namelist,fdict):
+ result = []
+ for n in namelist:
+ if n and n[0]:
+ result.append((fdict[n[0]],n[1]))
+ else:
+ result.append(n)
+ return result
+
+# -----------------------------------------------------------------------------
+# _form_master_re()
+#
+# This function takes a list of all of the regex components and attempts to
+# form the master regular expression. Given limitations in the Python re
+# module, it may be necessary to break the master regex into separate expressions.
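+# For example, two string rules may be combined into an alternation such as
+# "(?P<t_NUMBER>\d+)|(?P<t_PLUS>\+)". If re.compile() rejects the combined
+# pattern (e.g. too many named groups), the list is split in half and each
+# half is compiled recursively.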
+# -----------------------------------------------------------------------------
+
+def _form_master_re(relist,reflags,ldict,toknames):
+ if not relist: return []
+ regex = "|".join(relist)
+ try:
+ lexre = re.compile(regex,re.VERBOSE | reflags)
+
+ # Build the index to function map for the matching engine
+ lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
+ lexindexnames = lexindexfunc[:]
+
+ for f,i in lexre.groupindex.items():
+ handle = ldict.get(f,None)
+ if type(handle) in (types.FunctionType, types.MethodType):
+ lexindexfunc[i] = (handle,toknames[f])
+ lexindexnames[i] = f
+ elif handle is not None:
+ lexindexnames[i] = f
+ if f.find("ignore_") > 0:
+ lexindexfunc[i] = (None,None)
+ else:
+ lexindexfunc[i] = (None, toknames[f])
+
+ return [(lexre,lexindexfunc)],[regex],[lexindexnames]
+ except Exception:
+ m = int(len(relist)/2)
+ if m == 0: m = 1
+ llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
+ rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
+ return llist+rlist, lre+rre, lnames+rnames
+
+# -----------------------------------------------------------------------------
+# def _statetoken(s,names)
+#
+# Given a declaration name s of the form "t_[state1_state2_...]NAME" and a
+# dictionary whose keys are state names, this function returns a tuple
+# (states,tokenname) where states
+# is a tuple of state names and tokenname is the name of the token. For example,
+# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
+# -----------------------------------------------------------------------------
+
+def _statetoken(s,names):
+ nonstate = 1
+ parts = s.split("_")
+ for i in range(1,len(parts)):
+ if not parts[i] in names and parts[i] != 'ANY': break
+ if i > 1:
+ states = tuple(parts[1:i])
+ else:
+ states = ('INITIAL',)
+
+ if 'ANY' in states:
+ states = tuple(names)
+
+ tokenname = "_".join(parts[i:])
+ return (states,tokenname)
+
+
+# -----------------------------------------------------------------------------
+# LexerReflect()
+#
+# This class represents information needed to build a lexer as extracted from a
+# user's input file.
+# -----------------------------------------------------------------------------
+class LexerReflect(object):
+ def __init__(self,ldict,log=None,reflags=0):
+ self.ldict = ldict
+ self.error_func = None
+ self.tokens = []
+ self.reflags = reflags
+ self.stateinfo = { 'INITIAL' : 'inclusive'}
+ self.modules = {}
+ self.error = 0
+
+ if log is None:
+ self.log = PlyLogger(sys.stderr)
+ else:
+ self.log = log
+
+ # Get all of the basic information
+ def get_all(self):
+ self.get_tokens()
+ self.get_literals()
+ self.get_states()
+ self.get_rules()
+
+ # Validate all of the information
+ def validate_all(self):
+ self.validate_tokens()
+ self.validate_literals()
+ self.validate_rules()
+ return self.error
+
+ # Get the tokens map
+ def get_tokens(self):
+ tokens = self.ldict.get("tokens",None)
+ if not tokens:
+ self.log.error("No token list is defined")
+ self.error = 1
+ return
+
+ if not isinstance(tokens,(list, tuple)):
+ self.log.error("tokens must be a list or tuple")
+ self.error = 1
+ return
+
+ if not tokens:
+ self.log.error("tokens is empty")
+ self.error = 1
+ return
+
+ self.tokens = tokens
+
+ # Validate the tokens
+ def validate_tokens(self):
+ terminals = {}
+ for n in self.tokens:
+ if not _is_identifier.match(n):
+ self.log.error("Bad token name '%s'",n)
+ self.error = 1
+ if n in terminals:
+ self.log.warning("Token '%s' multiply defined", n)
+ terminals[n] = 1
+
+ # Get the literals specifier
+ def get_literals(self):
+ self.literals = self.ldict.get("literals","")
+ if not self.literals:
+ self.literals = ""
+
+ # Validate literals
+ def validate_literals(self):
+ try:
+ for c in self.literals:
+ if not isinstance(c,StringTypes) or len(c) > 1:
+ self.log.error("Invalid literal %s. Must be a single character", repr(c))
+ self.error = 1
+
+ except TypeError:
+ self.log.error("Invalid literals specification. literals must be a sequence of characters")
+ self.error = 1
+
+ def get_states(self):
+ self.states = self.ldict.get("states",None)
+ # Build statemap
+ if self.states:
+ if not isinstance(self.states,(tuple,list)):
+ self.log.error("states must be defined as a tuple or list")
+ self.error = 1
+ else:
+ for s in self.states:
+ if not isinstance(s,tuple) or len(s) != 2:
+ self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
+ self.error = 1
+ continue
+ name, statetype = s
+ if not isinstance(name,StringTypes):
+ self.log.error("State name %s must be a string", repr(name))
+ self.error = 1
+ continue
+ if not (statetype == 'inclusive' or statetype == 'exclusive'):
+ self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
+ self.error = 1
+ continue
+ if name in self.stateinfo:
+ self.log.error("State '%s' already defined",name)
+ self.error = 1
+ continue
+ self.stateinfo[name] = statetype
+
+ # Get all of the symbols with a t_ prefix and sort them into various
+ # categories (functions, strings, error functions, and ignore characters)
+
+ def get_rules(self):
+ tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
+
+ # Now build up a list of functions and a list of strings
+
+ self.toknames = { } # Mapping of symbols to token names
+ self.funcsym = { } # Symbols defined as functions
+ self.strsym = { } # Symbols defined as strings
+ self.ignore = { } # Ignore strings by state
+ self.errorf = { } # Error functions by state
+
+ for s in self.stateinfo:
+ self.funcsym[s] = []
+ self.strsym[s] = []
+
+ if len(tsymbols) == 0:
+ self.log.error("No rules of the form t_rulename are defined")
+ self.error = 1
+ return
+
+ for f in tsymbols:
+ t = self.ldict[f]
+ states, tokname = _statetoken(f,self.stateinfo)
+ self.toknames[f] = tokname
+
+ if hasattr(t,"__call__"):
+ if tokname == 'error':
+ for s in states:
+ self.errorf[s] = t
+ elif tokname == 'ignore':
+ line = func_code(t).co_firstlineno
+ file = func_code(t).co_filename
+ self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
+ self.error = 1
+ else:
+ for s in states:
+ self.funcsym[s].append((f,t))
+ elif isinstance(t, StringTypes):
+ if tokname == 'ignore':
+ for s in states:
+ self.ignore[s] = t
+ if "\\" in t:
+ self.log.warning("%s contains a literal backslash '\\'",f)
+
+ elif tokname == 'error':
+ self.log.error("Rule '%s' must be defined as a function", f)
+ self.error = 1
+ else:
+ for s in states:
+ self.strsym[s].append((f,t))
+ else:
+ self.log.error("%s not defined as a function or string", f)
+ self.error = 1
+
+ # Sort the functions by line number
+ for f in self.funcsym.values():
+ if sys.version_info[0] < 3:
+ f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
+ else:
+ # Python 3.0
+ f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
+
+ # Sort the strings by regular expression length
+ for s in self.strsym.values():
+ if sys.version_info[0] < 3:
+ s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+ else:
+ # Python 3.0
+ s.sort(key=lambda x: len(x[1]),reverse=True)
+
+ # Validate all of the t_rules collected
+ def validate_rules(self):
+ for state in self.stateinfo:
+            # Validate all rules defined by functions
+
+ for fname, f in self.funcsym[state]:
+ line = func_code(f).co_firstlineno
+ file = func_code(f).co_filename
+ module = inspect.getmodule(f)
+ self.modules[module] = 1
+
+ tokname = self.toknames[fname]
+ if isinstance(f, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ nargs = func_code(f).co_argcount
+ if nargs > reqargs:
+ self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
+ self.error = 1
+ continue
+
+ if nargs < reqargs:
+ self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
+ self.error = 1
+ continue
+
+ if not _get_regex(f):
+ self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
+ self.error = 1
+ continue
+
+ try:
+ c = re.compile("(?P<%s>%s)" % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
+ if c.match(""):
+ self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
+ self.error = 1
+ except re.error:
+ _etype, e, _etrace = sys.exc_info()
+ self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
+ if '#' in _get_regex(f):
+ self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
+ self.error = 1
+
+ # Validate all rules defined by strings
+ for name,r in self.strsym[state]:
+ tokname = self.toknames[name]
+ if tokname == 'error':
+ self.log.error("Rule '%s' must be defined as a function", name)
+ self.error = 1
+ continue
+
+ if not tokname in self.tokens and tokname.find("ignore_") < 0:
+ self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
+ self.error = 1
+ continue
+
+ try:
+ c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
+ if (c.match("")):
+ self.log.error("Regular expression for rule '%s' matches empty string",name)
+ self.error = 1
+ except re.error:
+ _etype, e, _etrace = sys.exc_info()
+ self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
+ if '#' in r:
+ self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
+ self.error = 1
+
+ if not self.funcsym[state] and not self.strsym[state]:
+ self.log.error("No rules defined for state '%s'",state)
+ self.error = 1
+
+ # Validate the error function
+ efunc = self.errorf.get(state,None)
+ if efunc:
+ f = efunc
+ line = func_code(f).co_firstlineno
+ file = func_code(f).co_filename
+ module = inspect.getmodule(f)
+ self.modules[module] = 1
+
+ if isinstance(f, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ nargs = func_code(f).co_argcount
+ if nargs > reqargs:
+ self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
+ self.error = 1
+
+ if nargs < reqargs:
+ self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
+ self.error = 1
+
+ for module in self.modules:
+ self.validate_module(module)
+
+
+ # -----------------------------------------------------------------------------
+ # validate_module()
+ #
+ # This checks to see if there are duplicated t_rulename() functions or strings
+ # in the parser input file. This is done using a simple regular expression
+ # match on each line in the source code of the given module.
+ # -----------------------------------------------------------------------------
+
+ def validate_module(self, module):
+ lines, linen = inspect.getsourcelines(module)
+
+ fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
+ sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+
+ counthash = { }
+ linen += 1
+ for l in lines:
+ m = fre.match(l)
+ if not m:
+ m = sre.match(l)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ filename = inspect.getsourcefile(module)
+ self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
+ self.error = 1
+ linen += 1
+
+# -----------------------------------------------------------------------------
+# lex(module)
+#
+# Build all of the regular expression rules from definitions in the supplied module
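+# Minimal usage sketch (the token names and rules here are illustrative):
+#
+#     import ply.lex as lex
+#     tokens = ('NUMBER', 'PLUS')
+#     t_PLUS   = r'\+'
+#     t_ignore = ' \t'
+#     def t_NUMBER(t):
+#         r'\d+'
+#         t.value = int(t.value)
+#         return t
+#     def t_error(t):
+#         t.lexer.skip(1)
+#     lexer = lex.lex()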
+# -----------------------------------------------------------------------------
+def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
+ global lexer
+ ldict = None
+ stateinfo = { 'INITIAL' : 'inclusive'}
+ lexobj = Lexer()
+ lexobj.lexoptimize = optimize
+ global token,input
+
+ if errorlog is None:
+ errorlog = PlyLogger(sys.stderr)
+
+ if debug:
+ if debuglog is None:
+ debuglog = PlyLogger(sys.stderr)
+
+ # Get the module dictionary used for the lexer
+ if object: module = object
+
+ if module:
+ _items = [(k,getattr(module,k)) for k in dir(module)]
+ ldict = dict(_items)
+ else:
+ ldict = get_caller_module_dict(2)
+
+ # Collect parser information from the dictionary
+ linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
+ linfo.get_all()
+ if not optimize:
+ if linfo.validate_all():
+ raise SyntaxError("Can't build lexer")
+
+ if optimize and lextab:
+ try:
+ lexobj.readtab(lextab,ldict)
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+ return lexobj
+
+ except ImportError:
+ pass
+
+ # Dump some basic debugging information
+ if debug:
+ debuglog.info("lex: tokens = %r", linfo.tokens)
+ debuglog.info("lex: literals = %r", linfo.literals)
+ debuglog.info("lex: states = %r", linfo.stateinfo)
+
+ # Build a dictionary of valid token names
+ lexobj.lextokens = { }
+ for n in linfo.tokens:
+ lexobj.lextokens[n] = 1
+
+ # Get literals specification
+ if isinstance(linfo.literals,(list,tuple)):
+ lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
+ else:
+ lexobj.lexliterals = linfo.literals
+
+ # Get the stateinfo dictionary
+ stateinfo = linfo.stateinfo
+
+ regexs = { }
+ # Build the master regular expressions
+ for state in stateinfo:
+ regex_list = []
+
+ # Add rules defined by functions first
+ for fname, f in linfo.funcsym[state]:
+ line = func_code(f).co_firstlineno
+ file = func_code(f).co_filename
+ regex_list.append("(?P<%s>%s)" % (fname,_get_regex(f)))
+ if debug:
+ debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,_get_regex(f), state)
+
+ # Now add all of the simple rules
+ for name,r in linfo.strsym[state]:
+ regex_list.append("(?P<%s>%s)" % (name,r))
+ if debug:
+ debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
+
+ regexs[state] = regex_list
+
+ # Build the master regular expressions
+
+ if debug:
+ debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
+
+ for state in regexs:
+ lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
+ lexobj.lexstatere[state] = lexre
+ lexobj.lexstateretext[state] = re_text
+ lexobj.lexstaterenames[state] = re_names
+ if debug:
+ for i in range(len(re_text)):
+ debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
+
+ # For inclusive states, we need to add the regular expressions from the INITIAL state
+ for state,stype in stateinfo.items():
+ if state != "INITIAL" and stype == 'inclusive':
+ lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
+ lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+ lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
+
+ lexobj.lexstateinfo = stateinfo
+ lexobj.lexre = lexobj.lexstatere["INITIAL"]
+ lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
+ lexobj.lexreflags = reflags
+
+ # Set up ignore variables
+ lexobj.lexstateignore = linfo.ignore
+ lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
+
+ # Set up error functions
+ lexobj.lexstateerrorf = linfo.errorf
+ lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
+ if not lexobj.lexerrorf:
+ errorlog.warning("No t_error rule is defined")
+
+ # Check state information for ignore and error rules
+ for s,stype in stateinfo.items():
+ if stype == 'exclusive':
+ if not s in linfo.errorf:
+ errorlog.warning("No error rule is defined for exclusive state '%s'", s)
+ if not s in linfo.ignore and lexobj.lexignore:
+ errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
+ elif stype == 'inclusive':
+ if not s in linfo.errorf:
+ linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
+ if not s in linfo.ignore:
+ linfo.ignore[s] = linfo.ignore.get("INITIAL","")
+
+ # Create global versions of the token() and input() functions
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+
+ # If in optimize mode, we write the lextab
+ if lextab and optimize:
+ lexobj.writetab(lextab,outputdir)
+
+ return lexobj
+
+# -----------------------------------------------------------------------------
+# runmain()
+#
+# This runs the lexer as a main program
+# -----------------------------------------------------------------------------
+
+def runmain(lexer=None,data=None):
+ if not data:
+ try:
+ filename = sys.argv[1]
+ f = open(filename)
+ data = f.read()
+ f.close()
+ except IndexError:
+ sys.stdout.write("Reading from standard input (type EOF to end):\n")
+ data = sys.stdin.read()
+
+ if lexer:
+ _input = lexer.input
+ else:
+ _input = input
+ _input(data)
+ if lexer:
+ _token = lexer.token
+ else:
+ _token = token
+
+ while 1:
+ tok = _token()
+ if not tok: break
+ sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
+
+# -----------------------------------------------------------------------------
+# @TOKEN(regex)
+#
+# This decorator function can be used to attach the regular expression to a
+# function when supplying it through the docstring is impractical (e.g. when
+# the pattern is built at runtime)
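+#
+# For example (illustrative):
+#
+#     identifier = r'[A-Za-z_][A-Za-z0-9_]*'
+#
+#     @TOKEN(identifier)
+#     def t_ID(t):
+#         return t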
+# -----------------------------------------------------------------------------
+
+def TOKEN(r):
+ def set_regex(f):
+ if hasattr(r,"__call__"):
+ f.regex = _get_regex(r)
+ else:
+ f.regex = r
+ return f
+ return set_regex
+
+# Alternative spelling of the TOKEN decorator
+Token = TOKEN
+
diff --git a/xos/genx/generator/ply/yacc.py b/xos/genx/generator/ply/yacc.py
new file mode 100644
index 0000000..49d83d7
--- /dev/null
+++ b/xos/genx/generator/ply/yacc.py
@@ -0,0 +1,3331 @@
+# -----------------------------------------------------------------------------
+# ply: yacc.py
+#
+# Copyright (C) 2001-2011,
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
+# endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
+#
+# This implements an LR parser that is constructed from grammar rules defined
+# as Python functions. The grammar is specified by supplying the BNF inside
+# Python documentation strings. The inspiration for this technique was borrowed
+# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
+# Spark and the GNU bison utility.
+#
+# The current implementation is only somewhat object-oriented. The
+# LR parser itself is defined in terms of an object (which allows multiple
+# parsers to co-exist). However, most of the variables used during table
+# construction are defined in terms of global variables. Users shouldn't
+# notice unless they are trying to define multiple parsers at the same
+# time using threads (in which case they should have their head examined).
+#
+# This implementation supports both SLR and LALR(1) parsing. LALR(1)
+# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
+# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
+# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
+# by the more efficient DeRemer and Pennello algorithm.
+#
+# :::::::: WARNING :::::::
+#
+# Construction of LR parsing tables is fairly complicated and expensive.
+# To make this module run fast, a *LOT* of work has been put into
+# optimization---often at the expense of readability and what one might
+# consider to be good Python "coding style." Modify the code at your
+# own risk!
+# ----------------------------------------------------------------------------
+
+__version__ = "3.5"
+__tabversion__ = "3.2" # Table version
+
+#-----------------------------------------------------------------------------
+# === User configurable parameters ===
+#
+# Change these to modify the default behavior of yacc (if you wish)
+#-----------------------------------------------------------------------------
+
+yaccdebug = 1                # Debugging mode. If set, yacc generates a
+                             # 'parser.out' file in the current directory
+
+debug_file = 'parser.out' # Default name of the debugging file
+tab_module = 'parsetab' # Default name of the table module
+default_lr = 'LALR' # Default LR table generation method
+
+error_count = 3 # Number of symbols that must be shifted to leave recovery mode
+
+yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
+ # implementations of certain functions.
+
+resultlimit = 40 # Size limit of results when running in debug mode.
+
+pickle_protocol = 0 # Protocol to use when writing pickle files
+
+import re, types, sys, os.path, inspect
+
+# Compatibility function for python 2.6/3.0
+if sys.version_info[0] < 3:
+ def func_code(f):
+ return f.func_code
+else:
+ def func_code(f):
+ return f.__code__
+
+# String type-checking compatibility
+if sys.version_info[0] < 3:
+ string_types = basestring
+else:
+ string_types = str
+
+# Compatibility
+try:
+ MAXINT = sys.maxint
+except AttributeError:
+ MAXINT = sys.maxsize
+
+# Python 2.x/3.0 compatibility.
+def load_ply_lex():
+ if sys.version_info[0] < 3:
+ import lex
+ else:
+ import ply.lex as lex
+ return lex
+
+# This object is a stand-in for a logging object created by the
+# logging module. PLY will use this by default to create things
+# such as the parser.out file. If a user wants more detailed
+# information, they can create their own logging object and pass
+# it into PLY.
+
+class PlyLogger(object):
+ def __init__(self,f):
+ self.f = f
+ def debug(self,msg,*args,**kwargs):
+ self.f.write((msg % args) + "\n")
+ info = debug
+
+ def warning(self,msg,*args,**kwargs):
+ self.f.write("WARNING: "+ (msg % args) + "\n")
+
+ def error(self,msg,*args,**kwargs):
+ self.f.write("ERROR: " + (msg % args) + "\n")
+
+ critical = debug
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+ def __getattribute__(self,name):
+ return self
+ def __call__(self,*args,**kwargs):
+ return self
+
+# Exception raised for yacc-related errors
+class YaccError(Exception): pass
+
+# Format the result message that the parser produces when running in debug mode.
+def format_result(r):
+ repr_str = repr(r)
+ if '\n' in repr_str: repr_str = repr(repr_str)
+ if len(repr_str) > resultlimit:
+ repr_str = repr_str[:resultlimit]+" ..."
+ result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str)
+ return result
+
+
+# Format stack entries when the parser is running in debug mode
+def format_stack_entry(r):
+ repr_str = repr(r)
+ if '\n' in repr_str: repr_str = repr(repr_str)
+ if len(repr_str) < 16:
+ return repr_str
+ else:
+ return "<%s @ 0x%x>" % (type(r).__name__,id(r))
+
+# Panic mode error recovery support. This feature is being reworked--much of the
+# code here is to offer a deprecation/backwards compatible transition
+
+_errok = None
+_token = None
+_restart = None
+_warnmsg = """PLY: Don't use global functions errok(), token(), and restart() in p_error().
+Instead, invoke the methods on the associated parser instance:
+
+ def p_error(p):
+ ...
+ # Use parser.errok(), parser.token(), parser.restart()
+ ...
+
+ parser = yacc.yacc()
+"""
+import warnings
+def errok():
+ warnings.warn(_warnmsg)
+ return _errok()
+
+def restart():
+ warnings.warn(_warnmsg)
+ return _restart()
+
+def token():
+ warnings.warn(_warnmsg)
+ return _token()
+
+# Utility function to call the p_error() function with some deprecation hacks
+def call_errorfunc(errorfunc,token,parser):
+ global _errok, _token, _restart
+ _errok = parser.errok
+ _token = parser.token
+ _restart = parser.restart
+    r = errorfunc(token)
+    del _errok, _token, _restart
+    # Return the handler's result so the parsing engine can act on it
+    return r
+
+#-----------------------------------------------------------------------------
+# === LR Parsing Engine ===
+#
+# The following classes are used for the LR parser itself. These are not
+# used during table construction and are independent of the actual LR
+# table generation algorithm
+#-----------------------------------------------------------------------------
+
+# This class is used to hold non-terminal grammar symbols during parsing.
+# It normally has the following attributes set:
+# .type = Grammar symbol type
+# .value = Symbol value
+# .lineno = Starting line number
+# .endlineno = Ending line number (optional, set automatically)
+# .lexpos = Starting lex position
+# .endlexpos = Ending lex position (optional, set automatically)
+
+class YaccSymbol:
+ def __str__(self): return self.type
+ def __repr__(self): return str(self)
+
+# This class is a wrapper around the objects actually passed to each
+# grammar rule. Index lookup and assignment actually assign the
+# .value attribute of the underlying YaccSymbol object.
+# The lineno() method returns the line number of a given
+# item (or 0 if not defined). The linespan() method returns
+# a tuple of (startline,endline) representing the range of lines
+# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
+# representing the range of positional information for a symbol.
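+#
+# Inside a grammar rule, a YaccProduction is used like this (a sketch):
+#
+#     def p_expr_plus(p):
+#         'expr : expr PLUS term'
+#         p[0] = p[1] + p[3]    # index access reads/writes symbol values
+#         line = p.lineno(2)    # line number of the PLUS token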
+
+class YaccProduction:
+ def __init__(self,s,stack=None):
+ self.slice = s
+ self.stack = stack
+ self.lexer = None
+ self.parser= None
+ def __getitem__(self,n):
+ if isinstance(n, slice):
+ return [s.value for s in self.slice[n]]
+ elif n >= 0:
+ return self.slice[n].value
+ else:
+ return self.stack[n].value
+
+ def __setitem__(self,n,v):
+ self.slice[n].value = v
+
+ def __getslice__(self,i,j):
+ return [s.value for s in self.slice[i:j]]
+
+ def __len__(self):
+ return len(self.slice)
+
+ def lineno(self,n):
+ return getattr(self.slice[n],"lineno",0)
+
+ def set_lineno(self,n,lineno):
+ self.slice[n].lineno = lineno
+
+ def linespan(self,n):
+ startline = getattr(self.slice[n],"lineno",0)
+ endline = getattr(self.slice[n],"endlineno",startline)
+ return startline,endline
+
+ def lexpos(self,n):
+ return getattr(self.slice[n],"lexpos",0)
+
+ def lexspan(self,n):
+ startpos = getattr(self.slice[n],"lexpos",0)
+ endpos = getattr(self.slice[n],"endlexpos",startpos)
+ return startpos,endpos
+
+ def error(self):
+ raise SyntaxError
+
+
+# -----------------------------------------------------------------------------
+# == LRParser ==
+#
+# The LR Parsing engine.
+# -----------------------------------------------------------------------------
+
+class LRParser:
+ def __init__(self,lrtab,errorf):
+ self.productions = lrtab.lr_productions
+ self.action = lrtab.lr_action
+ self.goto = lrtab.lr_goto
+ self.errorfunc = errorf
+
+ def errok(self):
+ self.errorok = 1
+
+ def restart(self):
+ del self.statestack[:]
+ del self.symstack[:]
+ sym = YaccSymbol()
+ sym.type = '$end'
+ self.symstack.append(sym)
+ self.statestack.append(0)
+
+ def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+ if debug or yaccdevel:
+ if isinstance(debug,int):
+ debug = PlyLogger(sys.stderr)
+ return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
+ elif tracking:
+ return self.parseopt(input,lexer,debug,tracking,tokenfunc)
+ else:
+ return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
+
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parsedebug().
+ #
+ # This is the debugging enabled version of parse(). All changes made to the
+ # parsing engine should be made here. For the non-debugging version,
+ # copy this code to a method parseopt() and delete all of the sections
+ # enclosed in:
+ #
+ # #--! DEBUG
+ # statements
+ # #--! DEBUG
+ #
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [ ] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+ # --! DEBUG
+ debug.info("PLY: PARSE DEBUG START")
+ # --! DEBUG
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ lex = load_ply_lex()
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+ # Set the parser.token() method (sometimes used in error recovery)
+ self.token = get_token
+
+ # Set up the state and symbol stacks
+
+ statestack = [ ] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [ ] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Put in the production
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = "$end"
+ symstack.append(sym)
+ state = 0
+ while 1:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+ # --! DEBUG
+ debug.debug('')
+ debug.debug('State : %s', state)
+ # --! DEBUG
+
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = "$end"
+
+ # --! DEBUG
+ debug.debug('Stack : %s',
+ ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+ # --! DEBUG
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+ # --! DEBUG
+ debug.debug("Action : Shift and goto state %s", t)
+ # --! DEBUG
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount: errorcount -=1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+ # --! DEBUG
+ if plen:
+ debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
+ else:
+ debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
+
+ # --! DEBUG
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+ # --! TRACKING
+ if tracking:
+ t1 = targ[1]
+ sym.lineno = t1.lineno
+ sym.lexpos = t1.lexpos
+ t1 = targ[-1]
+ sym.endlineno = getattr(t1,"endlineno",t1.lineno)
+ sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
+
+ # --! TRACKING
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ del statestack[-plen:]
+ p.callable(pslice)
+ # --! DEBUG
+ debug.info("Result : %s", format_result(pslice[0]))
+ # --! DEBUG
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+ # --! TRACKING
+ if tracking:
+ sym.lineno = lexer.lineno
+ sym.lexpos = lexer.lexpos
+ # --! TRACKING
+
+ targ = [ sym ]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ p.callable(pslice)
+ # --! DEBUG
+ debug.info("Result : %s", format_result(pslice[0]))
+ # --! DEBUG
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ result = getattr(n,"value",None)
+ # --! DEBUG
+ debug.info("Done : Returning %s", format_result(result))
+ debug.info("PLY: PARSE DEBUG END")
+ # --! DEBUG
+ return result
+
+ if t is None:
+
+ # --! DEBUG
+ debug.error('Error : %s',
+ ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+ # --! DEBUG
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the tokenstack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+ # In addition to pushing the error token, we call the
+ # user-defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = 0
+ errtoken = lookahead
+ if errtoken.type == "$end":
+ errtoken = None # End of file!
+ if self.errorfunc:
+ if errtoken and not hasattr(errtoken,'lexer'):
+ errtoken.lexer = lexer
+ tok = call_errorfunc(self.errorfunc, errtoken, self)
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+ else: lineno = 0
+ if lineno:
+ sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+ else:
+ sys.stderr.write("yacc: Syntax error, token=%s\n" % errtoken.type)
+ else:
+ sys.stderr.write("yacc: Parse error in input. EOF\n")
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != "$end":
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == "$end":
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ if tracking:
+ sym.endlineno = getattr(lookahead,"lineno", sym.lineno)
+ sym.endlexpos = getattr(lookahead,"lexpos", sym.lexpos)
+ lookahead = None
+ continue
+ t = YaccSymbol()
+ t.type = 'error'
+ if hasattr(lookahead,"lineno"):
+ t.lineno = lookahead.lineno
+ if hasattr(lookahead,"lexpos"):
+ t.lexpos = lookahead.lexpos
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ sym = symstack.pop()
+ if tracking:
+ lookahead.lineno = sym.lineno
+ lookahead.lexpos = sym.lexpos
+ statestack.pop()
+ state = statestack[-1] # Potential bug fix
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError("yacc: internal parser error!!!\n")
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parseopt().
+ #
+ # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
+ # Edit the debug version above, then copy any modifications to the method
+ # below while removing #--! DEBUG sections.
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+ def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [ ] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ lex = load_ply_lex()
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+ # Set the parser.token() method (sometimes used in error recovery)
+ self.token = get_token
+
+ # Set up the state and symbol stacks
+
+ statestack = [ ] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [ ] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Put in the production
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while 1:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount: errorcount -=1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+ # --! TRACKING
+ if tracking:
+ t1 = targ[1]
+ sym.lineno = t1.lineno
+ sym.lexpos = t1.lexpos
+ t1 = targ[-1]
+ sym.endlineno = getattr(t1,"endlineno",t1.lineno)
+ sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
+
+ # --! TRACKING
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ del statestack[-plen:]
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+ # --! TRACKING
+ if tracking:
+ sym.lineno = lexer.lineno
+ sym.lexpos = lexer.lexpos
+ # --! TRACKING
+
+ targ = [ sym ]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ return getattr(n,"value",None)
+
+ if t is None:
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the tokenstack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+ # In addition to pushing the error token, we call the
+ # user-defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = 0
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ if errtoken and not hasattr(errtoken,'lexer'):
+ errtoken.lexer = lexer
+ tok = call_errorfunc(self.errorfunc, errtoken, self)
+
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+ else: lineno = 0
+ if lineno:
+ sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+ else:
+ sys.stderr.write("yacc: Syntax error, token=%s\n" % errtoken.type)
+ else:
+ sys.stderr.write("yacc: Parse error in input. EOF\n")
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ if tracking:
+ sym.endlineno = getattr(lookahead,"lineno", sym.lineno)
+ sym.endlexpos = getattr(lookahead,"lexpos", sym.lexpos)
+ lookahead = None
+ continue
+ t = YaccSymbol()
+ t.type = 'error'
+ if hasattr(lookahead,"lineno"):
+ t.lineno = lookahead.lineno
+ if hasattr(lookahead,"lexpos"):
+ t.lexpos = lookahead.lexpos
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ sym = symstack.pop()
+ if tracking:
+ lookahead.lineno = sym.lineno
+ lookahead.lexpos = sym.lexpos
+ statestack.pop()
+ state = statestack[-1] # Potential bug fix
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError("yacc: internal parser error!!!\n")
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parseopt_notrack().
+ #
+ # Optimized version of parseopt() with line number tracking removed.
+ # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
+ # code in the #--! TRACKING sections
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [ ] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ lex = load_ply_lex()
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+ # Set the parser.token() method (sometimes used in error recovery)
+ self.token = get_token
+
+ # Set up the state and symbol stacks
+
+ statestack = [ ] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [ ] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Put in the production
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while 1:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount: errorcount -=1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ del statestack[-plen:]
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+ targ = [ sym ]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead)
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+ sym.type = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = 0
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ return getattr(n,"value",None)
+
+ if t is None:
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the tokenstack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+ # In addition to pushing the error token, we call the
+ # user-defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = 0
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ if errtoken and not hasattr(errtoken,'lexer'):
+ errtoken.lexer = lexer
+ tok = call_errorfunc(self.errorfunc, errtoken, self)
+
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+ else: lineno = 0
+ if lineno:
+ sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+ else:
+ sys.stderr.write("yacc: Syntax error, token=%s\n" % errtoken.type)
+ else:
+ sys.stderr.write("yacc: Parse error in input. EOF\n")
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ lookahead = None
+ continue
+ t = YaccSymbol()
+ t.type = 'error'
+ if hasattr(lookahead,"lineno"):
+ t.lineno = lookahead.lineno
+ if hasattr(lookahead,"lexpos"):
+ t.lexpos = lookahead.lexpos
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ symstack.pop()
+ statestack.pop()
+ state = statestack[-1] # Potential bug fix
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError("yacc: internal parser error!!!\n")
+
+# -----------------------------------------------------------------------------
+# === Grammar Representation ===
+#
+# The following functions, classes, and variables are used to represent and
+# manipulate the rules that make up a grammar.
+# -----------------------------------------------------------------------------
+
+import re
+
+# regex matching identifiers
+_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
+
+# -----------------------------------------------------------------------------
+# class Production:
+#
+# This class stores the raw information about a single production or grammar rule.
+# A grammar rule refers to a specification such as this:
+#
+# expr : expr PLUS term
+#
+# Here are the basic attributes defined on all productions
+#
+# name - Name of the production. For example 'expr'
+# prod - A list of symbols on the right side ['expr','PLUS','term']
+# prec - Production precedence level
+# number - Production number.
+# func - Function that executes on reduce
+# file - File where production function is defined
+# lineno - Line number where production function is defined
+#
+# The following attributes are also defined:
+#
+# len - Length of the production (number of symbols on right hand side)
+# usyms - Set of unique symbols found in the production
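+#
+# For example (hypothetical rule), 'expr : expr PLUS term' is stored as:
+#
+# p = Production(1, 'expr', ['expr','PLUS','term'])
+# str(p) # -> "expr -> expr PLUS term"
+# p.len # -> 3
+# p.usyms # -> ['expr', 'PLUS', 'term']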
+# -----------------------------------------------------------------------------
+
+class Production(object):
+ reduced = 0
+ def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
+ self.name = name
+ self.prod = tuple(prod)
+ self.number = number
+ self.func = func
+ self.callable = None
+ self.file = file
+ self.line = line
+ self.prec = precedence
+
+ # Internal settings used during table construction
+
+ self.len = len(self.prod) # Length of the production
+
+ # Create a list of unique production symbols used in the production
+ self.usyms = [ ]
+ for s in self.prod:
+ if s not in self.usyms:
+ self.usyms.append(s)
+
+ # List of all LR items for the production
+ self.lr_items = []
+ self.lr_next = None
+
+ # Create a string representation
+ if self.prod:
+ self.str = "%s -> %s" % (self.name," ".join(self.prod))
+ else:
+ self.str = "%s -> <empty>" % self.name
+
+ def __str__(self):
+ return self.str
+
+ def __repr__(self):
+ return "Production("+str(self)+")"
+
+ def __len__(self):
+ return len(self.prod)
+
+ def __nonzero__(self):
+ return 1
+
+ def __getitem__(self,index):
+ return self.prod[index]
+
+ # Return the nth lr_item from the production (or None if at the end)
+ def lr_item(self,n):
+ if n > len(self.prod): return None
+ p = LRItem(self,n)
+
+ # Precompute the list of productions immediately following. Hack. Remove later
+ try:
+ # Note: Prodnames is not defined at module scope, so NameError is
+ # caught as well and the empty-list fallback below is taken.
+ p.lr_after = Prodnames[p.prod[n+1]]
+ except (IndexError,KeyError,NameError):
+ p.lr_after = []
+ try:
+ p.lr_before = p.prod[n-1]
+ except IndexError:
+ p.lr_before = None
+
+ return p
+
+ # Bind the production function name to a callable
+ def bind(self,pdict):
+ if self.func:
+ self.callable = pdict[self.func]
+
+# This class serves as a minimal stand-in for Production objects when
+# reading table data from files. It contains only the information
+# actually used by the LR parsing engine, plus some additional
+# debugging information.
+class MiniProduction(object):
+ def __init__(self,str,name,len,func,file,line):
+ self.name = name
+ self.len = len
+ self.func = func
+ self.callable = None
+ self.file = file
+ self.line = line
+ self.str = str
+ def __str__(self):
+ return self.str
+ def __repr__(self):
+ return "MiniProduction(%s)" % self.str
+
+ # Bind the production function name to a callable
+ def bind(self,pdict):
+ if self.func:
+ self.callable = pdict[self.func]
+
+
+# -----------------------------------------------------------------------------
+# class LRItem
+#
+# This class represents a specific stage of parsing a production rule. For
+# example:
+#
+# expr : expr . PLUS term
+#
+# In the above, the "." represents the current location of the parse. Here
+# are the basic attributes:
+#
+# name - Name of the production. For example 'expr'
+# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
+# number - Production number.
+#
+# lr_next - Next LR item. For example, if we are at 'expr -> expr . PLUS term'
+# then lr_next refers to 'expr -> expr PLUS . term'
+# lr_index - LR item index (location of the ".") in the prod list.
+# lookaheads - LALR lookahead symbols for this item
+# len - Length of the production (number of symbols on right hand side)
+# lr_after - List of all productions that immediately follow
+# lr_before - Grammar symbol immediately before
+# -----------------------------------------------------------------------------
+
+class LRItem(object):
+ def __init__(self,p,n):
+ self.name = p.name
+ self.prod = list(p.prod)
+ self.number = p.number
+ self.lr_index = n
+ self.lookaheads = { }
+ self.prod.insert(n,".")
+ self.prod = tuple(self.prod)
+ self.len = len(self.prod)
+ self.usyms = p.usyms
+
+ def __str__(self):
+ if self.prod:
+ s = "%s -> %s" % (self.name," ".join(self.prod))
+ else:
+ s = "%s -> <empty>" % self.name
+ return s
+
+ def __repr__(self):
+ return "LRItem("+str(self)+")"
+
+# -----------------------------------------------------------------------------
+# rightmost_terminal()
+#
+# Return the rightmost terminal from a list of symbols. Used in add_production()
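+#
+# For example, with PLUS as the only terminal among the symbols:
+#
+# rightmost_terminal(['expr','PLUS','term'], {'PLUS': []}) # -> 'PLUS'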
+# -----------------------------------------------------------------------------
+def rightmost_terminal(symbols, terminals):
+ i = len(symbols) - 1
+ while i >= 0:
+ if symbols[i] in terminals:
+ return symbols[i]
+ i -= 1
+ return None
+
+# -----------------------------------------------------------------------------
+# === GRAMMAR CLASS ===
+#
+# The following class represents the contents of the specified grammar along
+# with various computed properties such as first sets, follow sets, LR items, etc.
+# This data is used for critical parts of the table generation process later.
+# -----------------------------------------------------------------------------
+
+class GrammarError(YaccError): pass
+
+class Grammar(object):
+ def __init__(self,terminals):
+ self.Productions = [None] # A list of all of the productions. The first
+ # entry is always reserved for the purpose of
+ # building an augmented grammar
+
+ self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
+ # productions of that nonterminal.
+
+ self.Prodmap = { } # A dictionary that is only used to detect duplicate
+ # productions.
+
+ self.Terminals = { } # A dictionary mapping the names of terminal symbols to a
+ # list of the rules where they are used.
+
+ for term in terminals:
+ self.Terminals[term] = []
+
+ self.Terminals['error'] = []
+
+ self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list
+ # of rule numbers where they are used.
+
+ self.First = { } # A dictionary of precomputed FIRST(x) symbols
+
+ self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
+
+ self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the
+ # form ('right',level) or ('nonassoc', level) or ('left',level)
+
+ self.UsedPrecedence = { } # Precedence rules that were actually used by the grammar.
+ # This is only used to provide error checking and to generate
+ # a warning about unused precedence rules.
+
+ self.Start = None # Starting symbol for the grammar
+
+
+ def __len__(self):
+ return len(self.Productions)
+
+ def __getitem__(self,index):
+ return self.Productions[index]
+
+ # -----------------------------------------------------------------------------
+ # set_precedence()
+ #
+ # Sets the precedence for a given terminal. assoc is the associativity such as
+ # 'left','right', or 'nonassoc'. level is a numeric level.
+ #
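+ # For example (hypothetical grammar):
+ #
+ # g.set_precedence('PLUS', 'left', 1)
+ # g.set_precedence('TIMES', 'left', 2) # binds tighter than PLUS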
+ # -----------------------------------------------------------------------------
+
+ def set_precedence(self,term,assoc,level):
+ assert self.Productions == [None],"Must call set_precedence() before add_production()"
+ if term in self.Precedence:
+ raise GrammarError("Precedence already specified for terminal %r" % term)
+ if assoc not in ['left','right','nonassoc']:
+ raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
+ self.Precedence[term] = (assoc,level)
+
+ # -----------------------------------------------------------------------------
+ # add_production()
+ #
+ # Given an action function, this function assembles a production rule and
+ # computes its precedence level.
+ #
+ # The production rule is supplied as a list of symbols. For example,
+ # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
+ # symbols ['expr','PLUS','term'].
+ #
+ # Precedence is determined by the precedence of the rightmost terminal
+ # symbol in the rule, or by the precedence of a terminal specified by %prec.
+ #
+ # A variety of error checks are performed to make sure production symbols
+ # are valid and that %prec is used correctly.
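+ #
+ # For example (hypothetical rule, with the action function's name given
+ # as a string and bound to a callable later via bind()):
+ #
+ # g.add_production('expr', ['expr','PLUS','term'], func='p_expr_plus',
+ # file='calc.py', line=12)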
+ # -----------------------------------------------------------------------------
+
+ def add_production(self,prodname,syms,func=None,file='',line=0):
+
+ if prodname in self.Terminals:
+ raise GrammarError("%s:%d: Illegal rule name %r. Already defined as a token" % (file,line,prodname))
+ if prodname == 'error':
+ raise GrammarError("%s:%d: Illegal rule name %r. error is a reserved word" % (file,line,prodname))
+ if not _is_identifier.match(prodname):
+ raise GrammarError("%s:%d: Illegal rule name %r" % (file,line,prodname))
+
+ # Look for literal tokens
+ for n,s in enumerate(syms):
+ if s[0] in "'\"":
+ try:
+ c = eval(s)
+ if (len(c) > 1):
+ raise GrammarError("%s:%d: Literal token %s in rule %r may only be a single character" % (file,line,s, prodname))
+ if not c in self.Terminals:
+ self.Terminals[c] = []
+ syms[n] = c
+ continue
+ except SyntaxError:
+ pass
+ if not _is_identifier.match(s) and s != '%prec':
+ raise GrammarError("%s:%d: Illegal name %r in rule %r" % (file,line,s, prodname))
+
+ # Determine the precedence level
+ if '%prec' in syms:
+ if syms[-1] == '%prec':
+ raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
+ if syms[-2] != '%prec':
+ raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
+ precname = syms[-1]
+ prodprec = self.Precedence.get(precname)
+ if not prodprec:
+ raise GrammarError("%s:%d: Nothing known about the precedence of %r" % (file,line,precname))
+ else:
+ self.UsedPrecedence[precname] = 1
+ del syms[-2:] # Drop %prec from the rule
+ else:
+ # If no %prec, precedence is determined by the rightmost terminal symbol
+ precname = rightmost_terminal(syms,self.Terminals)
+ prodprec = self.Precedence.get(precname,('right',0))
+
+ # See if the rule is already in the rulemap
+ map = "%s -> %s" % (prodname,syms)
+ if map in self.Prodmap:
+ m = self.Prodmap[map]
+ raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
+ "Previous definition at %s:%d" % (m.file, m.line))
+
+ # From this point on, everything is valid. Create a new Production instance
+ pnumber = len(self.Productions)
+ if not prodname in self.Nonterminals:
+ self.Nonterminals[prodname] = [ ]
+
+ # Add the production number to Terminals and Nonterminals
+ for t in syms:
+ if t in self.Terminals:
+ self.Terminals[t].append(pnumber)
+ else:
+ if not t in self.Nonterminals:
+ self.Nonterminals[t] = [ ]
+ self.Nonterminals[t].append(pnumber)
+
+ # Create a production and add it to the list of productions
+ p = Production(pnumber,prodname,syms,prodprec,func,file,line)
+ self.Productions.append(p)
+ self.Prodmap[map] = p
+
+ # Add to the global productions list
+ try:
+ self.Prodnames[prodname].append(p)
+ except KeyError:
+ self.Prodnames[prodname] = [ p ]
+ return 0
+
+ # -----------------------------------------------------------------------------
+ # set_start()
+ #
+ # Sets the starting symbol and creates the augmented grammar. Production
+ # rule 0 is S' -> start where start is the start symbol.
+ # -----------------------------------------------------------------------------
+
+ def set_start(self,start=None):
+ if not start:
+ start = self.Productions[1].name
+ if start not in self.Nonterminals:
+ raise GrammarError("start symbol %s undefined" % start)
+ self.Productions[0] = Production(0,"S'",[start])
+ self.Nonterminals[start].append(0)
+ self.Start = start
+
+ # -----------------------------------------------------------------------------
+ # find_unreachable()
+ #
+ # Find all of the nonterminal symbols that can't be reached from the starting
+ # symbol. Returns a list of nonterminals that can't be reached.
+ # -----------------------------------------------------------------------------
+
+ def find_unreachable(self):
+
+ # Mark all symbols that are reachable from a symbol s
+ def mark_reachable_from(s):
+ if reachable[s]:
+ # We've already reached symbol s.
+ return
+ reachable[s] = 1
+ for p in self.Prodnames.get(s,[]):
+ for r in p.prod:
+ mark_reachable_from(r)
+
+ reachable = { }
+ for s in list(self.Terminals) + list(self.Nonterminals):
+ reachable[s] = 0
+
+ mark_reachable_from( self.Productions[0].prod[0] )
+
+ return [s for s in list(self.Nonterminals)
+ if not reachable[s]]
+
+ # -----------------------------------------------------------------------------
+ # infinite_cycles()
+ #
+ # This function looks at the various parsing rules and tries to detect
+ # infinite recursion cycles (grammar rules where there is no possible way
+ # to derive a string of only terminals).
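+ #
+ # For example (hypothetical grammar), if the only rule for 'a' is
+ #
+ # a : a PLUS a
+ #
+ # then 'a' can never derive a terminal-only string and is reported here.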
+ # -----------------------------------------------------------------------------
+
+ def infinite_cycles(self):
+ terminates = {}
+
+ # Terminals:
+ for t in self.Terminals:
+ terminates[t] = 1
+
+ terminates['$end'] = 1
+
+ # Nonterminals:
+
+ # Initialize to false:
+ for n in self.Nonterminals:
+ terminates[n] = 0
+
+ # Then propagate termination until no change:
+ while 1:
+ some_change = 0
+ for (n,pl) in self.Prodnames.items():
+ # Nonterminal n terminates iff any of its productions terminates.
+ for p in pl:
+ # Production p terminates iff all of its rhs symbols terminate.
+ for s in p.prod:
+ if not terminates[s]:
+ # The symbol s does not terminate,
+ # so production p does not terminate.
+ p_terminates = 0
+ break
+ else:
+ # didn't break from the loop,
+ # so every symbol s terminates
+ # so production p terminates.
+ p_terminates = 1
+
+ if p_terminates:
+ # symbol n terminates!
+ if not terminates[n]:
+ terminates[n] = 1
+ some_change = 1
+ # Don't need to consider any more productions for this n.
+ break
+
+ if not some_change:
+ break
+
+ infinite = []
+ for (s,term) in terminates.items():
+ if not term:
+ if not s in self.Prodnames and not s in self.Terminals and s != 'error':
+ # s is used-but-not-defined, and we've already warned of that,
+ # so it would be overkill to say that it's also non-terminating.
+ pass
+ else:
+ infinite.append(s)
+
+ return infinite
+
+
+ # -----------------------------------------------------------------------------
+ # undefined_symbols()
+ #
+ # Find all symbols that were used in the grammar, but not defined as tokens or
+ # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
+ # and prod is the production where the symbol was used.
+ # -----------------------------------------------------------------------------
+ def undefined_symbols(self):
+ result = []
+ for p in self.Productions:
+ if not p: continue
+
+ for s in p.prod:
+ if not s in self.Prodnames and not s in self.Terminals and s != 'error':
+ result.append((s,p))
+ return result
+
+ # -----------------------------------------------------------------------------
+ # unused_terminals()
+ #
+ # Find all terminals that were defined, but not used by the grammar. Returns
+ # a list of all symbols.
+ # -----------------------------------------------------------------------------
+ def unused_terminals(self):
+ unused_tok = []
+ for s,v in self.Terminals.items():
+ if s != 'error' and not v:
+ unused_tok.append(s)
+
+ return unused_tok
+
+ # ------------------------------------------------------------------------------
+ # unused_rules()
+ #
+ # Find all grammar rules that were defined, but never used (possibly unreachable).
+ # Returns a list of productions.
+ # ------------------------------------------------------------------------------
+
+ def unused_rules(self):
+ unused_prod = []
+ for s,v in self.Nonterminals.items():
+ if not v:
+ p = self.Prodnames[s][0]
+ unused_prod.append(p)
+ return unused_prod
+
+ # -----------------------------------------------------------------------------
+ # unused_precedence()
+ #
+ # Returns a list of tuples (term,precedence) corresponding to precedence
+ # rules that were never used by the grammar. term is the name of the terminal
+ # on which precedence was applied and precedence is a string such as 'left' or
+ # 'right' corresponding to the type of precedence.
+ # -----------------------------------------------------------------------------
+
+ def unused_precedence(self):
+ unused = []
+ for termname in self.Precedence:
+ if not (termname in self.Terminals or termname in self.UsedPrecedence):
+ unused.append((termname,self.Precedence[termname][0]))
+
+ return unused
+
+ # -------------------------------------------------------------------------
+ # _first()
+ #
+ # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+ #
+ # During execution of compute_first(), the result may be incomplete.
+ # Afterward (e.g., when called from compute_follow()), it will be complete.
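+ #
+ # For example (hypothetical First sets), with First['empty'] = ['<empty>']
+ # and First['NUM'] = ['NUM']:
+ #
+ # self._first(('empty','NUM')) # -> ['NUM']
+ # self._first(('empty','empty')) # -> ['<empty>']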
+ # -------------------------------------------------------------------------
+ def _first(self,beta):
+
+ # We are computing First(x1,x2,x3,...,xn)
+ result = [ ]
+ for x in beta:
+ x_produces_empty = 0
+
+ # Add all the non-<empty> symbols of First[x] to the result.
+ for f in self.First[x]:
+ if f == '<empty>':
+ x_produces_empty = 1
+ else:
+ if f not in result: result.append(f)
+
+ if x_produces_empty:
+ # We have to consider the next x in beta,
+ # i.e. stay in the loop.
+ pass
+ else:
+ # We don't have to consider any further symbols in beta.
+ break
+ else:
+ # There was no 'break' from the loop,
+ # so x_produces_empty was true for all x in beta,
+ # so beta produces empty as well.
+ result.append('<empty>')
+
+ return result
+
+ # -------------------------------------------------------------------------
+ # compute_first()
+ #
+ # Compute the value of FIRST1(X) for all symbols
+ # -------------------------------------------------------------------------
+ def compute_first(self):
+ if self.First:
+ return self.First
+
+ # Terminals:
+ for t in self.Terminals:
+ self.First[t] = [t]
+
+ self.First['$end'] = ['$end']
+
+ # Nonterminals:
+
+ # Initialize to the empty set:
+ for n in self.Nonterminals:
+ self.First[n] = []
+
+ # Then propagate symbols until no change:
+ while 1:
+ some_change = 0
+ for n in self.Nonterminals:
+ for p in self.Prodnames[n]:
+ for f in self._first(p.prod):
+ if f not in self.First[n]:
+ self.First[n].append( f )
+ some_change = 1
+ if not some_change:
+ break
+
+ return self.First
+
+ # ---------------------------------------------------------------------
+ # compute_follow()
+ #
+ # Computes all of the follow sets for every non-terminal symbol. The
+ # follow set is the set of all symbols that might follow a given
+ # non-terminal. See the Dragon book, 2nd Ed. p. 189.
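+ #
+ # For example (hypothetical grammar), with start symbol 'expr' and the
+ # single rule 'expr : LPAREN expr RPAREN', the result is
+ #
+ # Follow['expr'] == ['$end', 'RPAREN']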
+ # ---------------------------------------------------------------------
+ def compute_follow(self,start=None):
+ # If already computed, return the result
+ if self.Follow:
+ return self.Follow
+
+ # If first sets not computed yet, do that first.
+ if not self.First:
+ self.compute_first()
+
+ # Add '$end' to the follow list of the start symbol
+ for k in self.Nonterminals:
+ self.Follow[k] = [ ]
+
+ if not start:
+ start = self.Productions[1].name
+
+ self.Follow[start] = [ '$end' ]
+
+ while 1:
+ didadd = 0
+ for p in self.Productions[1:]:
+ # Here is the production set
+ for i in range(len(p.prod)):
+ B = p.prod[i]
+ if B in self.Nonterminals:
+ # Okay. We got a non-terminal in a production
+ fst = self._first(p.prod[i+1:])
+ hasempty = 0
+ for f in fst:
+ if f != '<empty>' and f not in self.Follow[B]:
+ self.Follow[B].append(f)
+ didadd = 1
+ if f == '<empty>':
+ hasempty = 1
+ if hasempty or i == (len(p.prod)-1):
+ # Add elements of Follow(p.name) to Follow(B)
+ for f in self.Follow[p.name]:
+ if f not in self.Follow[B]:
+ self.Follow[B].append(f)
+ didadd = 1
+ if not didadd: break
+ return self.Follow
+
+
+ # -----------------------------------------------------------------------------
+ # build_lritems()
+ #
+ # This function walks the list of productions and builds a complete set of the
+ # LR items. The LR items are stored in two ways: First, they are uniquely
+ # numbered and placed in the list _lritems. Second, a linked list of LR items
+ # is built for each production. For example:
+ #
+ # E -> E PLUS E
+ #
+ # Creates the list
+ #
+ # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
+ # -----------------------------------------------------------------------------
+
+ def build_lritems(self):
+ for p in self.Productions:
+ lastlri = p
+ i = 0
+ lr_items = []
+ while 1:
+ if i > len(p):
+ lri = None
+ else:
+ lri = LRItem(p,i)
+ # Precompute the list of productions immediately following
+ try:
+ lri.lr_after = self.Prodnames[lri.prod[i+1]]
+ except (IndexError,KeyError):
+ lri.lr_after = []
+ try:
+ lri.lr_before = lri.prod[i-1]
+ except IndexError:
+ lri.lr_before = None
+
+ lastlri.lr_next = lri
+ if not lri: break
+ lr_items.append(lri)
+ lastlri = lri
+ i += 1
+ p.lr_items = lr_items
+
+# -----------------------------------------------------------------------------
+# == Class LRTable ==
+#
+# This class represents a basic table of LR parsing information.
+# Methods for generating the tables are not defined here. They are defined
+# in the derived class LRGeneratedTable.
+# -----------------------------------------------------------------------------
+
+class VersionError(YaccError): pass
+
+class LRTable(object):
+ def __init__(self):
+ self.lr_action = None
+ self.lr_goto = None
+ self.lr_productions = None
+ self.lr_method = None
+
+ def read_table(self,module):
+ if isinstance(module,types.ModuleType):
+ parsetab = module
+ else:
+ if sys.version_info[0] < 3:
+ exec("import %s as parsetab" % module)
+ else:
+ env = { }
+ exec("import %s as parsetab" % module, env, env)
+ parsetab = env['parsetab']
+
+ if parsetab._tabversion != __tabversion__:
+ raise VersionError("yacc table file version is out of date")
+
+ self.lr_action = parsetab._lr_action
+ self.lr_goto = parsetab._lr_goto
+
+ self.lr_productions = []
+ for p in parsetab._lr_productions:
+ self.lr_productions.append(MiniProduction(*p))
+
+ self.lr_method = parsetab._lr_method
+ return parsetab._lr_signature
+
+ def read_pickle(self,filename):
+ try:
+ import cPickle as pickle
+ except ImportError:
+ import pickle
+
+ in_f = open(filename,"rb")
+
+ tabversion = pickle.load(in_f)
+ if tabversion != __tabversion__:
+ raise VersionError("yacc table file version is out of date")
+ self.lr_method = pickle.load(in_f)
+ signature = pickle.load(in_f)
+ self.lr_action = pickle.load(in_f)
+ self.lr_goto = pickle.load(in_f)
+ productions = pickle.load(in_f)
+
+ self.lr_productions = []
+ for p in productions:
+ self.lr_productions.append(MiniProduction(*p))
+
+ in_f.close()
+ return signature
+
+ # Bind all production function names to callable objects in pdict
+ def bind_callables(self,pdict):
+ for p in self.lr_productions:
+ p.bind(pdict)
+
+# -----------------------------------------------------------------------------
+# === LR Generator ===
+#
+# The following classes and functions are used to generate LR parsing tables on
+# a grammar.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# digraph()
+# traverse()
+#
+# The following two functions are used to compute set valued functions
+# of the form:
+#
+# F(x) = F'(x) U U{F(y) | x R y}
+#
+# This is used to compute the values of Read() sets as well as FOLLOW sets
+# in LALR(1) generation.
+#
+# Inputs: X - An input set
+# R - A relation
+# FP - Set-valued function
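+#
+# For example (hypothetical inputs), with 'a' related only to 'b':
+#
+# R = lambda x: ['b'] if x == 'a' else []
+# FP = lambda x: {'a': [1], 'b': [2]}[x]
+# digraph(['a','b'], R, FP) # -> {'a': [1, 2], 'b': [2]}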
+# ------------------------------------------------------------------------------
+
+def digraph(X,R,FP):
+ N = { }
+ for x in X:
+ N[x] = 0
+ stack = []
+ F = { }
+ for x in X:
+ if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
+ return F
+
+def traverse(x,N,stack,F,X,R,FP):
+ stack.append(x)
+ d = len(stack)
+ N[x] = d
+ F[x] = FP(x) # F(X) <- F'(x)
+
+ rel = R(x) # Get y's related to x
+ for y in rel:
+ if N[y] == 0:
+ traverse(y,N,stack,F,X,R,FP)
+ N[x] = min(N[x],N[y])
+ for a in F.get(y,[]):
+ if a not in F[x]: F[x].append(a)
+ if N[x] == d:
+ N[stack[-1]] = MAXINT
+ F[stack[-1]] = F[x]
+ element = stack.pop()
+ while element != x:
+ N[stack[-1]] = MAXINT
+ F[stack[-1]] = F[x]
+ element = stack.pop()
+
+class LALRError(YaccError): pass
+
+# -----------------------------------------------------------------------------
+# == LRGeneratedTable ==
+#
+# This class implements the LR table generation algorithm. There are no
+# public methods except for write()
+# -----------------------------------------------------------------------------
+
+class LRGeneratedTable(LRTable):
+ def __init__(self,grammar,method='LALR',log=None):
+ if method not in ['SLR','LALR']:
+ raise LALRError("Unsupported method %s" % method)
+
+ self.grammar = grammar
+ self.lr_method = method
+
+ # Set up the logger
+ if not log:
+ log = NullLogger()
+ self.log = log
+
+ # Internal attributes
+ self.lr_action = {} # Action table
+ self.lr_goto = {} # Goto table
+ self.lr_productions = grammar.Productions # Copy of grammar Production array
+ self.lr_goto_cache = {} # Cache of computed gotos
+ self.lr0_cidhash = {} # Cache of closures
+
+ self._add_count = 0 # Internal counter used to detect cycles
+
+ # Diagnostic information filled in by the table generator
+ self.sr_conflict = 0
+ self.rr_conflict = 0
+ self.conflicts = [] # List of conflicts
+
+ self.sr_conflicts = []
+ self.rr_conflicts = []
+
+ # Build the tables
+ self.grammar.build_lritems()
+ self.grammar.compute_first()
+ self.grammar.compute_follow()
+ self.lr_parse_table()
+
+ # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
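+ #
+ # For example (hypothetical grammar), closing over [S' -> . expr] pulls in
+ # 'expr -> . expr PLUS term' and every other item with the dot at the start
+ # of an expr production, then repeats for any nonterminal that now appears
+ # immediately after a dot.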
+
+ def lr0_closure(self,I):
+ self._add_count += 1
+
+ # Add everything in I to J
+ J = I[:]
+ didadd = 1
+ while didadd:
+ didadd = 0
+ for j in J:
+ for x in j.lr_after:
+ if getattr(x,"lr0_added",0) == self._add_count: continue
+ # Add B --> .G to J
+ J.append(x.lr_next)
+ x.lr0_added = self._add_count
+ didadd = 1
+
+ return J
+
+ # Compute the LR(0) goto function goto(I,X) where I is a set
+ # of LR(0) items and X is a grammar symbol. This function is written
+ # in a way that guarantees uniqueness of the generated goto sets
+ # (i.e. the same goto set will never be returned as two different Python
+ # objects). With uniqueness, we can later do fast set comparisons using
+ # id(obj) instead of element-wise comparison.
+
+ def lr0_goto(self,I,x):
+ # First we look for a previously cached entry
+ g = self.lr_goto_cache.get((id(I),x))
+ if g: return g
+
+ # Now we generate the goto set in a way that guarantees uniqueness
+ # of the result
+
+ s = self.lr_goto_cache.get(x)
+ if not s:
+ s = { }
+ self.lr_goto_cache[x] = s
+
+ gs = [ ]
+ for p in I:
+ n = p.lr_next
+ if n and n.lr_before == x:
+ s1 = s.get(id(n))
+ if not s1:
+ s1 = { }
+ s[id(n)] = s1
+ gs.append(n)
+ s = s1
+ g = s.get('$end')
+ if not g:
+ if gs:
+ g = self.lr0_closure(gs)
+ s['$end'] = g
+ else:
+ s['$end'] = gs
+ self.lr_goto_cache[(id(I),x)] = g
+ return g
+
+ # Compute the LR(0) sets of item function
+ def lr0_items(self):
+
+ C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
+ i = 0
+ for I in C:
+ self.lr0_cidhash[id(I)] = i
+ i += 1
+
+ # Loop over the items in C and each grammar symbol
+ i = 0
+ while i < len(C):
+ I = C[i]
+ i += 1
+
+ # Collect all of the symbols that could possibly be in the goto(I,X) sets
+ asyms = { }
+ for ii in I:
+ for s in ii.usyms:
+ asyms[s] = None
+
+ for x in asyms:
+ g = self.lr0_goto(I,x)
+ if not g: continue
+ if id(g) in self.lr0_cidhash: continue
+ self.lr0_cidhash[id(g)] = len(C)
+ C.append(g)
+
+ return C
+
+ # -----------------------------------------------------------------------------
+ # ==== LALR(1) Parsing ====
+ #
+ # LALR(1) parsing is almost exactly the same as SLR except that instead of
+ # relying upon Follow() sets when performing reductions, a more selective
+ # lookahead set that incorporates the state of the LR(0) machine is utilized.
+ # Thus, we mainly just have to focus on calculating the lookahead sets.
+ #
+ # The method used here is due to DeRemer and Pennello (1982).
+ #
+ # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
+ # Lookahead Sets", ACM Transactions on Programming Languages and Systems,
+ # Vol. 4, No. 4, Oct. 1982, pp. 615-649
+ #
+ # Further details can also be found in:
+ #
+ # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
+ # McGraw-Hill Book Company, (1985).
+ #
+ # -----------------------------------------------------------------------------
+
+ # -----------------------------------------------------------------------------
+ # compute_nullable_nonterminals()
+ #
+ # Creates a dictionary containing all of the non-terminals that might produce
+ # an empty production.
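+ #
+ # For example (hypothetical grammar), given the rules
+ #
+ # empty :
+ # opt_comma : COMMA
+ # opt_comma : empty
+ #
+ # both 'empty' and 'opt_comma' are nullable.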
+ # -----------------------------------------------------------------------------
+
+ def compute_nullable_nonterminals(self):
+ nullable = {}
+ num_nullable = 0
+ while 1:
+ for p in self.grammar.Productions[1:]:
+ if p.len == 0:
+ nullable[p.name] = 1
+ continue
+ for t in p.prod:
+ if not t in nullable: break
+ else:
+ nullable[p.name] = 1
+ if len(nullable) == num_nullable: break
+ num_nullable = len(nullable)
+ return nullable
+
+ # -----------------------------------------------------------------------------
+ # find_nonterminal_transitions(C)
+ #
+ # Given a set of LR(0) items, this function finds all of the non-terminal
+ # transitions. These are transitions in which a dot appears immediately before
+ # a non-terminal. Returns a list of tuples of the form (state,N) where state
+ # is the state number and N is the nonterminal symbol.
+ #
+ # The input C is the set of LR(0) items.
+ # -----------------------------------------------------------------------------
+
+ def find_nonterminal_transitions(self,C):
+ trans = []
+ for state in range(len(C)):
+ for p in C[state]:
+ if p.lr_index < p.len - 1:
+ t = (state,p.prod[p.lr_index+1])
+ if t[1] in self.grammar.Nonterminals:
+ if t not in trans: trans.append(t)
+ state = state + 1
+ return trans
+
+ # -----------------------------------------------------------------------------
+ # dr_relation()
+ #
+ # Computes the DR(p,A) relationships for non-terminal transitions. The input
+ # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
+ #
+ # Returns a list of terminals.
+ # -----------------------------------------------------------------------------
+
+ def dr_relation(self,C,trans,nullable):
+ dr_set = { }
+ state,N = trans
+ terms = []
+
+ g = self.lr0_goto(C[state],N)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index+1]
+ if a in self.grammar.Terminals:
+ if a not in terms: terms.append(a)
+
+ # This extra bit is to handle the start state
+ if state == 0 and N == self.grammar.Productions[0].prod[0]:
+ terms.append('$end')
+
+ return terms
+
+ # -----------------------------------------------------------------------------
+ # reads_relation()
+ #
+ # Computes the READS() relation (p,A) READS (t,C).
+ # -----------------------------------------------------------------------------
+
+ def reads_relation(self,C, trans, empty):
+ # Look for empty transitions
+ rel = []
+ state, N = trans
+
+ g = self.lr0_goto(C[state],N)
+ j = self.lr0_cidhash.get(id(g),-1)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index + 1]
+ if a in empty:
+ rel.append((j,a))
+
+ return rel
+
+ # -----------------------------------------------------------------------------
+ # compute_lookback_includes()
+ #
+ # Determines the lookback and includes relations
+ #
+ # LOOKBACK:
+ #
+ # This relation is determined by running the LR(0) state machine forward.
+ # For example, starting with a production "N : . A B C", we run it forward
+ # to obtain "N : A B C ." We then build a relationship between this final
+ # state and the starting state. These relationships are stored in a dictionary
+ # lookdict.
+ #
+ # INCLUDES:
+ #
+ # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
+ #
+ # This relation is used to determine non-terminal transitions that occur
+ # inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
+ # if the following holds:
+ #
+ # B -> LAT, where T -> epsilon and p' -L-> p
+ #
+ # L is essentially a prefix (which may be empty), T is a suffix that must be
+ # able to derive an empty string. State p' must lead to state p with the string L.
+ #
+ # -----------------------------------------------------------------------------
+
+ def compute_lookback_includes(self,C,trans,nullable):
+
+ lookdict = {} # Dictionary of lookback relations
+ includedict = {} # Dictionary of include relations
+
+ # Make a dictionary of non-terminal transitions
+ dtrans = {}
+ for t in trans:
+ dtrans[t] = 1
+
+ # Loop over all transitions and compute lookbacks and includes
+ for state,N in trans:
+ lookb = []
+ includes = []
+ for p in C[state]:
+ if p.name != N: continue
+
+ # Okay, we have a name match. We now follow the production all the way
+ # through the state machine until we get the . on the right hand side
+
+ lr_index = p.lr_index
+ j = state
+ while lr_index < p.len - 1:
+ lr_index = lr_index + 1
+ t = p.prod[lr_index]
+
+ # Check to see if this symbol and state are a non-terminal transition
+ if (j,t) in dtrans:
+ # Yes. Okay, there is some chance that this is an includes relation;
+ # the only way to know for certain is to check whether the rest of the
+ # production derives the empty string
+
+ li = lr_index + 1
+ while li < p.len:
+ if p.prod[li] in self.grammar.Terminals: break # No, forget it
+ if not p.prod[li] in nullable: break
+ li = li + 1
+ else:
+ # Appears to be a relation between (j,t) and (state,N)
+ includes.append((j,t))
+
+ g = self.lr0_goto(C[j],t) # Go to next set
+ j = self.lr0_cidhash.get(id(g),-1) # Go to next state
+
+ # When we get here, j is the final state; now we have to locate the production
+ for r in C[j]:
+ if r.name != p.name: continue
+ if r.len != p.len: continue
+ i = 0
+ # This loop compares a production ". A B C" with "A B C ."
+ while i < r.lr_index:
+ if r.prod[i] != p.prod[i+1]: break
+ i = i + 1
+ else:
+ lookb.append((j,r))
+ for i in includes:
+ if not i in includedict: includedict[i] = []
+ includedict[i].append((state,N))
+ lookdict[(state,N)] = lookb
+
+ return lookdict,includedict
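+
+ # Worked example (illustration only, not part of the algorithm): for the
+ # toy grammar
+ #
+ # S -> A T
+ # A -> a
+ # T -> <empty>
+ #
+ # the transition (0,A) INCLUDES (0,S): S -> A T places A in front of the
+ # nullable suffix T, so any lookahead that can follow S can also follow A.
+ # LOOKBACK ties the state reached after running "A -> a ." forward back to
+ # the state where the decision to reduce by "A -> a" is made.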
+
+ # -----------------------------------------------------------------------------
+ # compute_read_sets()
+ #
+ # Given a set of LR(0) items, this function computes the read sets.
+ #
+ # Inputs: C = Set of LR(0) items
+ # ntrans = Set of nonterminal transitions
+ # nullable = Set of empty transitions
+ #
+ # Returns a set containing the read sets
+ # -----------------------------------------------------------------------------
+
+ def compute_read_sets(self,C, ntrans, nullable):
+ FP = lambda x: self.dr_relation(C,x,nullable)
+ R = lambda x: self.reads_relation(C,x,nullable)
+ F = digraph(ntrans,R,FP)
+ return F
+
+ # -----------------------------------------------------------------------------
+ # compute_follow_sets()
+ #
+ # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
+ # and an include set, this function computes the follow sets
+ #
+ # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
+ #
+ # Inputs:
+ # ntrans = Set of nonterminal transitions
+ # readsets = Readset (previously computed)
+ # inclsets = Include sets (previously computed)
+ #
+ # Returns a set containing the follow sets
+ # -----------------------------------------------------------------------------
+
+ def compute_follow_sets(self,ntrans,readsets,inclsets):
+ FP = lambda x: readsets[x]
+ R = lambda x: inclsets.get(x,[])
+ F = digraph(ntrans,R,FP)
+ return F
+
+ # -----------------------------------------------------------------------------
+ # add_lookaheads()
+ #
+ # Attaches the lookahead symbols to grammar rules.
+ #
+ # Inputs: lookbacks - Set of lookback relations
+ # followset - Computed follow set
+ #
+ # This function directly attaches the lookaheads to productions contained
+ # in the lookbacks set
+ # -----------------------------------------------------------------------------
+
+ def add_lookaheads(self,lookbacks,followset):
+ for trans,lb in lookbacks.items():
+ # Loop over productions in lookback
+ for state,p in lb:
+ if not state in p.lookaheads:
+ p.lookaheads[state] = []
+ f = followset.get(trans,[])
+ for a in f:
+ if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
+
+ # -----------------------------------------------------------------------------
+ # add_lalr_lookaheads()
+ #
+ # This function does all of the work of adding lookahead information for use
+ # with LALR parsing
+ # -----------------------------------------------------------------------------
+
+ def add_lalr_lookaheads(self,C):
+ # Determine all of the nullable nonterminals
+ nullable = self.compute_nullable_nonterminals()
+
+ # Find all non-terminal transitions
+ trans = self.find_nonterminal_transitions(C)
+
+ # Compute read sets
+ readsets = self.compute_read_sets(C,trans,nullable)
+
+ # Compute lookback/includes relations
+ lookd, included = self.compute_lookback_includes(C,trans,nullable)
+
+ # Compute LALR FOLLOW sets
+ followsets = self.compute_follow_sets(trans,readsets,included)
+
+ # Add all of the lookaheads
+ self.add_lookaheads(lookd,followsets)
+
+ # -----------------------------------------------------------------------------
+ # lr_parse_table()
+ #
+ # This function constructs the parse tables for SLR or LALR
+ # -----------------------------------------------------------------------------
+ def lr_parse_table(self):
+ Productions = self.grammar.Productions
+ Precedence = self.grammar.Precedence
+ goto = self.lr_goto # Goto array
+ action = self.lr_action # Action array
+ log = self.log # Logger for output
+
+ actionp = { } # Action production array (temporary)
+
+ log.info("Parsing method: %s", self.lr_method)
+
+ # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
+ # This determines the number of states
+
+ C = self.lr0_items()
+
+ if self.lr_method == 'LALR':
+ self.add_lalr_lookaheads(C)
+
+ # Build the parser table, state by state
+ st = 0
+ for I in C:
+ # Loop over each production in I
+ actlist = [ ] # List of actions
+ st_action = { }
+ st_actionp = { }
+ st_goto = { }
+ log.info("")
+ log.info("state %d", st)
+ log.info("")
+ for p in I:
+ log.info(" (%d) %s", p.number, str(p))
+ log.info("")
+
+ for p in I:
+ if p.len == p.lr_index + 1:
+ if p.name == "S'":
+ # Start symbol. Accept!
+ st_action["$end"] = 0
+ st_actionp["$end"] = p
+ else:
+ # We are at the end of a production. Reduce!
+ if self.lr_method == 'LALR':
+ laheads = p.lookaheads[st]
+ else:
+ laheads = self.grammar.Follow[p.name]
+ for a in laheads:
+ actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
+ r = st_action.get(a)
+ if r is not None:
+ # Whoa. Have a shift/reduce or reduce/reduce conflict
+ if r > 0:
+ # Need to decide on shift or reduce here
+ # By default we favor shifting. Need to add
+ # some precedence rules here.
+ sprec,slevel = Productions[st_actionp[a].number].prec
+ rprec,rlevel = Precedence.get(a,('right',0))
+ if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
+ # We really need to reduce here.
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ if not slevel and not rlevel:
+ log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
+ self.sr_conflicts.append((st,a,'reduce'))
+ Productions[p.number].reduced += 1
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ st_action[a] = None
+ else:
+ # Hmmm. Guess we'll keep the shift
+ if not rlevel:
+ log.info(" ! shift/reduce conflict for %s resolved as shift",a)
+ self.sr_conflicts.append((st,a,'shift'))
+ elif r < 0:
+ # Reduce/reduce conflict. In this case, we favor the rule
+ # that was defined first in the grammar file
+ oldp = Productions[-r]
+ pp = Productions[p.number]
+ if oldp.line > pp.line:
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ chosenp,rejectp = pp,oldp
+ Productions[p.number].reduced += 1
+ Productions[oldp.number].reduced -= 1
+ else:
+ chosenp,rejectp = oldp,pp
+ self.rr_conflicts.append((st,chosenp,rejectp))
+ log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
+ else:
+ raise LALRError("Unknown conflict in state %d" % st)
+ else:
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ Productions[p.number].reduced += 1
+ else:
+ i = p.lr_index
+ a = p.prod[i+1] # Get symbol right after the "."
+ if a in self.grammar.Terminals:
+ g = self.lr0_goto(I,a)
+ j = self.lr0_cidhash.get(id(g),-1)
+ if j >= 0:
+ # We are in a shift state
+ actlist.append((a,p,"shift and go to state %d" % j))
+ r = st_action.get(a)
+ if r is not None:
+ # Whoa. Have a shift/reduce or shift/shift conflict
+ if r > 0:
+ if r != j:
+ raise LALRError("Shift/shift conflict in state %d" % st)
+ elif r < 0:
+ # Do a precedence check.
+ # - if precedence of reduce rule is higher, we reduce.
+ # - if precedence of reduce is same and left assoc, we reduce.
+ # - otherwise we shift
+ rprec,rlevel = Productions[st_actionp[a].number].prec
+ sprec,slevel = Precedence.get(a,('right',0))
+ if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
+ # We decide to shift here... highest precedence to shift
+ Productions[st_actionp[a].number].reduced -= 1
+ st_action[a] = j
+ st_actionp[a] = p
+ if not rlevel:
+ log.info(" ! shift/reduce conflict for %s resolved as shift",a)
+ self.sr_conflicts.append((st,a,'shift'))
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ st_action[a] = None
+ else:
+ # Hmmm. Guess we'll keep the reduce
+ if not slevel and not rlevel:
+ log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
+ self.sr_conflicts.append((st,a,'reduce'))
+
+ else:
+ raise LALRError("Unknown conflict in state %d" % st)
+ else:
+ st_action[a] = j
+ st_actionp[a] = p
+
+ # Print the actions associated with each terminal
+ _actprint = { }
+ for a,p,m in actlist:
+ if a in st_action:
+ if p is st_actionp[a]:
+ log.info(" %-15s %s",a,m)
+ _actprint[(a,m)] = 1
+ log.info("")
+ # Print the actions that were not used. (debugging)
+ not_used = 0
+ for a,p,m in actlist:
+ if a in st_action:
+ if p is not st_actionp[a]:
+ if not (a,m) in _actprint:
+ log.debug(" ! %-15s [ %s ]",a,m)
+ not_used = 1
+ _actprint[(a,m)] = 1
+ if not_used:
+ log.debug("")
+
+ # Construct the goto table for this state
+
+ nkeys = { }
+ for ii in I:
+ for s in ii.usyms:
+ if s in self.grammar.Nonterminals:
+ nkeys[s] = None
+ for n in nkeys:
+ g = self.lr0_goto(I,n)
+ j = self.lr0_cidhash.get(id(g),-1)
+ if j >= 0:
+ st_goto[n] = j
+ log.info(" %-30s shift and go to state %d",n,j)
+
+ action[st] = st_action
+ actionp[st] = st_actionp
+ goto[st] = st_goto
+ st += 1
+
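+ # Example of the conflict resolution above (illustration only): given a
+ # hypothetical ambiguous rule "expr : expr PLUS expr" and the input
+ # a + b + c, the parser hits a shift/reduce conflict on PLUS. Declaring
+ #
+ # precedence = (('left', 'PLUS'),)
+ #
+ # gives the rule and the token the same precedence level with left
+ # associativity, so the builder resolves the conflict as a reduce and the
+ # input groups as (a + b) + c.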
+
+ # -----------------------------------------------------------------------------
+ # write_table()
+ #
+ # This function writes the LR parsing tables to a file
+ # -----------------------------------------------------------------------------
+
+ def write_table(self,modulename,outputdir='',signature=""):
+ basemodulename = modulename.split(".")[-1]
+ filename = os.path.join(outputdir,basemodulename) + ".py"
+ try:
+ f = open(filename,"w")
+
+ f.write("""
+# %s
+# This file is automatically generated. Do not edit.
+_tabversion = %r
+
+_lr_method = %r
+
+_lr_signature = %r
+ """ % (filename, __tabversion__, self.lr_method, signature))
+
+ # Change smaller to 0 to go back to original tables
+ smaller = 1
+
+ # Factor out names to try and make smaller
+ if smaller:
+ items = { }
+
+ for s,nd in self.lr_action.items():
+ for name,v in nd.items():
+ i = items.get(name)
+ if not i:
+ i = ([],[])
+ items[name] = i
+ i[0].append(s)
+ i[1].append(v)
+
+ f.write("\n_lr_action_items = {")
+ for k,v in items.items():
+ f.write("%r:([" % k)
+ for i in v[0]:
+ f.write("%r," % i)
+ f.write("],[")
+ for i in v[1]:
+ f.write("%r," % i)
+
+ f.write("]),")
+ f.write("}\n")
+
+ f.write("""
+_lr_action = { }
+for _k, _v in _lr_action_items.items():
+ for _x,_y in zip(_v[0],_v[1]):
+ if not _x in _lr_action: _lr_action[_x] = { }
+ _lr_action[_x][_k] = _y
+del _lr_action_items
+""")
+
+ else:
+ f.write("\n_lr_action = { ");
+ for k,v in self.lr_action.items():
+ f.write("(%r,%r):%r," % (k[0],k[1],v))
+ f.write("}\n");
+
+ if smaller:
+ # Factor out names to try and make smaller
+ items = { }
+
+ for s,nd in self.lr_goto.items():
+ for name,v in nd.items():
+ i = items.get(name)
+ if not i:
+ i = ([],[])
+ items[name] = i
+ i[0].append(s)
+ i[1].append(v)
+
+ f.write("\n_lr_goto_items = {")
+ for k,v in items.items():
+ f.write("%r:([" % k)
+ for i in v[0]:
+ f.write("%r," % i)
+ f.write("],[")
+ for i in v[1]:
+ f.write("%r," % i)
+
+ f.write("]),")
+ f.write("}\n")
+
+ f.write("""
+_lr_goto = { }
+for _k, _v in _lr_goto_items.items():
+ for _x,_y in zip(_v[0],_v[1]):
+ if not _x in _lr_goto: _lr_goto[_x] = { }
+ _lr_goto[_x][_k] = _y
+del _lr_goto_items
+""")
+ else:
+ f.write("\n_lr_goto = { ");
+ for k,v in self.lr_goto.items():
+ f.write("(%r,%r):%r," % (k[0],k[1],v))
+ f.write("}\n");
+
+ # Write production table
+ f.write("_lr_productions = [\n")
+ for p in self.lr_productions:
+ if p.func:
+ f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
+ else:
+ f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
+ f.write("]\n")
+ f.close()
+
+ except IOError:
+ e = sys.exc_info()[1]
+ sys.stderr.write("Unable to create %r\n" % filename)
+ sys.stderr.write(str(e)+"\n")
+ return
+
+
+ # -----------------------------------------------------------------------------
+ # pickle_table()
+ #
+ # This function pickles the LR parsing tables to a supplied file object
+ # -----------------------------------------------------------------------------
+
+ def pickle_table(self,filename,signature=""):
+ try:
+ import cPickle as pickle
+ except ImportError:
+ import pickle
+ outf = open(filename,"wb")
+ pickle.dump(__tabversion__,outf,pickle_protocol)
+ pickle.dump(self.lr_method,outf,pickle_protocol)
+ pickle.dump(signature,outf,pickle_protocol)
+ pickle.dump(self.lr_action,outf,pickle_protocol)
+ pickle.dump(self.lr_goto,outf,pickle_protocol)
+
+ outp = []
+ for p in self.lr_productions:
+ if p.func:
+ outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
+ else:
+ outp.append((str(p),p.name,p.len,None,None,None))
+ pickle.dump(outp,outf,pickle_protocol)
+ outf.close()
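+
+ # Round-trip sketch (illustration only): tables pickled here are read back
+ # the way yacc() below does it, with the version and signature written
+ # first deciding whether the cached tables still match the grammar:
+ #
+ # lr = LRTable()
+ # sig = lr.read_pickle("parser.p") # "parser.p" is a hypothetical filename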
+
+# -----------------------------------------------------------------------------
+# === INTROSPECTION ===
+#
+# The following functions and classes are used to implement the PLY
+# introspection features followed by the yacc() function itself.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack. This is used to get the environment
+# associated with the yacc() call if none was provided.
+# -----------------------------------------------------------------------------
+
+def get_caller_module_dict(levels):
+ try:
+ raise RuntimeError
+ except RuntimeError:
+ e,b,t = sys.exc_info()
+ f = t.tb_frame
+ while levels > 0:
+ f = f.f_back
+ levels -= 1
+ ldict = f.f_globals.copy()
+ if f.f_globals != f.f_locals:
+ ldict.update(f.f_locals)
+
+ return ldict
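+
+# Minimal sketch of how this is used (see yacc() below):
+#
+# pdict = get_caller_module_dict(2)
+#
+# walks two frames up the stack and returns the caller's globals merged
+# with its locals, so grammar rules defined at module level are found
+# without an explicit module= argument.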
+
+# -----------------------------------------------------------------------------
+# parse_grammar()
+#
+# This takes a raw grammar rule string and parses it into production data
+# -----------------------------------------------------------------------------
+def parse_grammar(doc,file,line):
+ grammar = []
+ # Split the doc string into lines
+ pstrings = doc.splitlines()
+ lastp = None
+ dline = line
+ for ps in pstrings:
+ dline += 1
+ p = ps.split()
+ if not p: continue
+ try:
+ if p[0] == '|':
+ # This is a continuation of a previous rule
+ if not lastp:
+ raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
+ prodname = lastp
+ syms = p[1:]
+ else:
+ prodname = p[0]
+ lastp = prodname
+ syms = p[2:]
+ assign = p[1]
+ if assign != ':' and assign != '::=':
+ raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))
+
+ grammar.append((file,dline,prodname,syms))
+ except SyntaxError:
+ raise
+ except Exception:
+ raise SyntaxError("%s:%d: Syntax error in rule %r" % (file,dline,ps.strip()))
+
+ return grammar
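+
+# Example input (illustration only): a rule docstring such as
+#
+# expression : expression PLUS term
+# | term
+#
+# yields [(file, line, 'expression', ['expression', 'PLUS', 'term']),
+# (file, line, 'expression', ['term'])], with '|' continuing the
+# previously named rule.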
+
+# -----------------------------------------------------------------------------
+# ParserReflect()
+#
+# This class represents information extracted for building a parser including
+# start symbol, error function, tokens, precedence list, action functions,
+# etc.
+# -----------------------------------------------------------------------------
+class ParserReflect(object):
+ def __init__(self,pdict,log=None):
+ self.pdict = pdict
+ self.start = None
+ self.error_func = None
+ self.tokens = None
+ self.modules = {}
+ self.grammar = []
+ self.error = 0
+
+ if log is None:
+ self.log = PlyLogger(sys.stderr)
+ else:
+ self.log = log
+
+ # Get all of the basic information
+ def get_all(self):
+ self.get_start()
+ self.get_error_func()
+ self.get_tokens()
+ self.get_precedence()
+ self.get_pfunctions()
+
+ # Validate all of the information
+ def validate_all(self):
+ self.validate_start()
+ self.validate_error_func()
+ self.validate_tokens()
+ self.validate_precedence()
+ self.validate_pfunctions()
+ self.validate_modules()
+ return self.error
+
+ # Compute a signature over the grammar
+ def signature(self):
+ try:
+ from hashlib import md5
+ except ImportError:
+ from md5 import md5
+ try:
+ sig = md5()
+ if self.start:
+ sig.update(self.start.encode('latin-1'))
+ if self.prec:
+ sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
+ if self.tokens:
+ sig.update(" ".join(self.tokens).encode('latin-1'))
+ for f in self.pfuncs:
+ if f[3]:
+ sig.update(f[3].encode('latin-1'))
+ except (TypeError,ValueError):
+ pass
+ return sig.digest()
+
+ # -----------------------------------------------------------------------------
+ # validate_modules()
+ #
+ # This method checks to see if there are duplicated p_rulename() functions
+ # in the parser module file. Without this function, it is really easy for
+ # users to make mistakes by cutting and pasting code fragments (and it's a real
+ # bugger to try and figure out why the resulting parser doesn't work). Therefore,
+ # we just do a little regular expression pattern matching of def statements
+ # to try and detect duplicates.
+ # -----------------------------------------------------------------------------
+
+ def validate_modules(self):
+ # Match def p_funcname(
+ fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
+
+ for module in self.modules.keys():
+ lines, linen = inspect.getsourcelines(module)
+
+ counthash = { }
+ for linen,l in enumerate(lines):
+ linen += 1
+ m = fre.match(l)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ filename = inspect.getsourcefile(module)
+ self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
+
+ # Get the start symbol
+ def get_start(self):
+ self.start = self.pdict.get('start')
+
+ # Validate the start symbol
+ def validate_start(self):
+ if self.start is not None:
+ if not isinstance(self.start, string_types):
+ self.log.error("'start' must be a string")
+
+ # Look for error handler
+ def get_error_func(self):
+ self.error_func = self.pdict.get('p_error')
+
+ # Validate the error function
+ def validate_error_func(self):
+ if self.error_func:
+ if isinstance(self.error_func,types.FunctionType):
+ ismethod = 0
+ elif isinstance(self.error_func, types.MethodType):
+ ismethod = 1
+ else:
+ self.log.error("'p_error' defined, but is not a function or method")
+ self.error = 1
+ return
+
+ eline = func_code(self.error_func).co_firstlineno
+ efile = func_code(self.error_func).co_filename
+ module = inspect.getmodule(self.error_func)
+ self.modules[module] = 1
+
+ argcount = func_code(self.error_func).co_argcount - ismethod
+ if argcount != 1:
+ self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
+ self.error = 1
+
+ # Get the tokens map
+ def get_tokens(self):
+ tokens = self.pdict.get("tokens")
+ if not tokens:
+ self.log.error("No token list is defined")
+ self.error = 1
+ return
+
+ if not isinstance(tokens,(list, tuple)):
+ self.log.error("tokens must be a list or tuple")
+ self.error = 1
+ return
+
+ if not tokens:
+ self.log.error("tokens is empty")
+ self.error = 1
+ return
+
+ self.tokens = tokens
+
+ # Validate the tokens
+ def validate_tokens(self):
+ # Validate the tokens.
+ if 'error' in self.tokens:
+ self.log.error("Illegal token name 'error'. Is a reserved word")
+ self.error = 1
+ return
+
+ terminals = {}
+ for n in self.tokens:
+ if n in terminals:
+ self.log.warning("Token %r multiply defined", n)
+ terminals[n] = 1
+
+ # Get the precedence map (if any)
+ def get_precedence(self):
+ self.prec = self.pdict.get("precedence")
+
+ # Validate and parse the precedence map
+ def validate_precedence(self):
+ preclist = []
+ if self.prec:
+ if not isinstance(self.prec,(list,tuple)):
+ self.log.error("precedence must be a list or tuple")
+ self.error = 1
+ return
+ for level,p in enumerate(self.prec):
+ if not isinstance(p,(list,tuple)):
+ self.log.error("Bad precedence table")
+ self.error = 1
+ return
+
+ if len(p) < 2:
+ self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
+ self.error = 1
+ return
+ assoc = p[0]
+ if not isinstance(assoc, string_types):
+ self.log.error("precedence associativity must be a string")
+ self.error = 1
+ return
+ for term in p[1:]:
+ if not isinstance(term, string_types):
+ self.log.error("precedence items must be strings")
+ self.error = 1
+ return
+ preclist.append((term, assoc, level+1))
+ self.preclist = preclist
+
+ # Get all p_functions from the grammar
+ def get_pfunctions(self):
+ p_functions = []
+ for name, item in self.pdict.items():
+ if not name.startswith('p_'): continue
+ if name == 'p_error': continue
+ if isinstance(item,(types.FunctionType,types.MethodType)):
+ line = func_code(item).co_firstlineno
+ module = inspect.getmodule(item)
+ p_functions.append((line,module,name,item.__doc__))
+
+ # Sort all of the actions by line number
+ p_functions.sort()
+ self.pfuncs = p_functions
+
+
+ # Validate all of the p_functions
+ def validate_pfunctions(self):
+ grammar = []
+ # Check for non-empty symbols
+ if len(self.pfuncs) == 0:
+ self.log.error("no rules of the form p_rulename are defined")
+ self.error = 1
+ return
+
+ for line, module, name, doc in self.pfuncs:
+ file = inspect.getsourcefile(module)
+ func = self.pdict[name]
+ if isinstance(func, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ if func_code(func).co_argcount > reqargs:
+ self.log.error("%s:%d: Rule %r has too many arguments",file,line,func.__name__)
+ self.error = 1
+ elif func_code(func).co_argcount < reqargs:
+ self.log.error("%s:%d: Rule %r requires an argument",file,line,func.__name__)
+ self.error = 1
+ elif not func.__doc__:
+ self.log.warning("%s:%d: No documentation string specified in function %r (ignored)",file,line,func.__name__)
+ else:
+ try:
+ parsed_g = parse_grammar(doc,file,line)
+ for g in parsed_g:
+ grammar.append((name, g))
+ except SyntaxError:
+ e = sys.exc_info()[1]
+ self.log.error(str(e))
+ self.error = 1
+
+ # Looks like a valid grammar rule
+ # Mark the file in which defined.
+ self.modules[module] = 1
+
+ # Secondary validation step that looks for p_ definitions that are not functions
+ # or functions that look like they might be grammar rules.
+
+ for n,v in self.pdict.items():
+ if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)): continue
+ if n.startswith('t_'): continue
+ if n.startswith('p_') and n != 'p_error':
+ self.log.warning("%r not defined as a function", n)
+ if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
+ (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
+ try:
+ doc = v.__doc__.split(" ")
+ if doc[1] == ':':
+ self.log.warning("%s:%d: Possible grammar rule %r defined without p_ prefix",
+ func_code(v).co_filename, func_code(v).co_firstlineno,n)
+ except Exception:
+ pass
+
+ self.grammar = grammar
+
+# -----------------------------------------------------------------------------
+# yacc(module)
+#
+# Build a parser
+# -----------------------------------------------------------------------------
+
+def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
+ check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
+ debuglog=None, errorlog = None, picklefile=None):
+
+ global parse # Reference to the parsing method of the last built parser
+
+ # If pickling is enabled, table files are not created
+
+ if picklefile:
+ write_tables = 0
+
+ if errorlog is None:
+ errorlog = PlyLogger(sys.stderr)
+
+ # Get the module dictionary used for the parser
+ if module:
+ _items = [(k,getattr(module,k)) for k in dir(module)]
+ pdict = dict(_items)
+ else:
+ pdict = get_caller_module_dict(2)
+
+ # Collect parser information from the dictionary
+ pinfo = ParserReflect(pdict,log=errorlog)
+ pinfo.get_all()
+
+ if pinfo.error:
+ raise YaccError("Unable to build parser")
+
+ # Check signature against table files (if any)
+ signature = pinfo.signature()
+
+ # Read the tables
+ try:
+ lr = LRTable()
+ if picklefile:
+ read_signature = lr.read_pickle(picklefile)
+ else:
+ read_signature = lr.read_table(tabmodule)
+ if optimize or (read_signature == signature):
+ try:
+ lr.bind_callables(pinfo.pdict)
+ parser = LRParser(lr,pinfo.error_func)
+ parse = parser.parse
+ return parser
+ except Exception:
+ e = sys.exc_info()[1]
+ errorlog.warning("There was a problem loading the table file: %s", repr(e))
+ except VersionError:
+ e = sys.exc_info()[1]
+ errorlog.warning(str(e))
+ except Exception:
+ pass
+
+ if debuglog is None:
+ if debug:
+ debuglog = PlyLogger(open(debugfile,"w"))
+ else:
+ debuglog = NullLogger()
+
+ debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
+
+
+ errors = 0
+
+ # Validate the parser information
+ if pinfo.validate_all():
+ raise YaccError("Unable to build parser")
+
+ if not pinfo.error_func:
+ errorlog.warning("no p_error() function is defined")
+
+ # Create a grammar object
+ grammar = Grammar(pinfo.tokens)
+
+ # Set precedence level for terminals
+ for term, assoc, level in pinfo.preclist:
+ try:
+ grammar.set_precedence(term,assoc,level)
+ except GrammarError:
+ e = sys.exc_info()[1]
+ errorlog.warning("%s",str(e))
+
+ # Add productions to the grammar
+ for funcname, gram in pinfo.grammar:
+ file, line, prodname, syms = gram
+ try:
+ grammar.add_production(prodname,syms,funcname,file,line)
+ except GrammarError:
+ e = sys.exc_info()[1]
+ errorlog.error("%s",str(e))
+ errors = 1
+
+ # Set the grammar start symbols
+ try:
+ if start is None:
+ grammar.set_start(pinfo.start)
+ else:
+ grammar.set_start(start)
+ except GrammarError:
+ e = sys.exc_info()[1]
+ errorlog.error(str(e))
+ errors = 1
+
+ if errors:
+ raise YaccError("Unable to build parser")
+
+ # Verify the grammar structure
+ undefined_symbols = grammar.undefined_symbols()
+ for sym, prod in undefined_symbols:
+ errorlog.error("%s:%d: Symbol %r used, but not defined as a token or a rule",prod.file,prod.line,sym)
+ errors = 1
+
+ unused_terminals = grammar.unused_terminals()
+ if unused_terminals:
+ debuglog.info("")
+ debuglog.info("Unused terminals:")
+ debuglog.info("")
+ for term in unused_terminals:
+ errorlog.warning("Token %r defined, but not used", term)
+ debuglog.info(" %s", term)
+
+ # Print out all productions to the debug log
+ if debug:
+ debuglog.info("")
+ debuglog.info("Grammar")
+ debuglog.info("")
+ for n,p in enumerate(grammar.Productions):
+ debuglog.info("Rule %-5d %s", n, p)
+
+ # Find unused non-terminals
+ unused_rules = grammar.unused_rules()
+ for prod in unused_rules:
+ errorlog.warning("%s:%d: Rule %r defined, but not used", prod.file, prod.line, prod.name)
+
+ if len(unused_terminals) == 1:
+ errorlog.warning("There is 1 unused token")
+ if len(unused_terminals) > 1:
+ errorlog.warning("There are %d unused tokens", len(unused_terminals))
+
+ if len(unused_rules) == 1:
+ errorlog.warning("There is 1 unused rule")
+ if len(unused_rules) > 1:
+ errorlog.warning("There are %d unused rules", len(unused_rules))
+
+ if debug:
+ debuglog.info("")
+ debuglog.info("Terminals, with rules where they appear")
+ debuglog.info("")
+ terms = list(grammar.Terminals)
+ terms.sort()
+ for term in terms:
+ debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
+
+ debuglog.info("")
+ debuglog.info("Nonterminals, with rules where they appear")
+ debuglog.info("")
+ nonterms = list(grammar.Nonterminals)
+ nonterms.sort()
+ for nonterm in nonterms:
+ debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
+ debuglog.info("")
+
+ if check_recursion:
+ unreachable = grammar.find_unreachable()
+ for u in unreachable:
+ errorlog.warning("Symbol %r is unreachable",u)
+
+ infinite = grammar.infinite_cycles()
+ for inf in infinite:
+ errorlog.error("Infinite recursion detected for symbol %r", inf)
+ errors = 1
+
+ unused_prec = grammar.unused_precedence()
+ for term, assoc in unused_prec:
+ errorlog.error("Precedence rule %r defined for unknown symbol %r", assoc, term)
+ errors = 1
+
+ if errors:
+ raise YaccError("Unable to build parser")
+
+ # Run the LRGeneratedTable on the grammar
+ if debug:
+ errorlog.debug("Generating %s tables", method)
+
+ lr = LRGeneratedTable(grammar,method,debuglog)
+
+ if debug:
+ num_sr = len(lr.sr_conflicts)
+
+ # Report shift/reduce and reduce/reduce conflicts
+ if num_sr == 1:
+ errorlog.warning("1 shift/reduce conflict")
+ elif num_sr > 1:
+ errorlog.warning("%d shift/reduce conflicts", num_sr)
+
+ num_rr = len(lr.rr_conflicts)
+ if num_rr == 1:
+ errorlog.warning("1 reduce/reduce conflict")
+ elif num_rr > 1:
+ errorlog.warning("%d reduce/reduce conflicts", num_rr)
+
+ # Write out conflicts to the output file
+ if debug and (lr.sr_conflicts or lr.rr_conflicts):
+ debuglog.warning("")
+ debuglog.warning("Conflicts:")
+ debuglog.warning("")
+
+ for state, tok, resolution in lr.sr_conflicts:
+ debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
+
+ already_reported = {}
+ for state, rule, rejected in lr.rr_conflicts:
+ if (state,id(rule),id(rejected)) in already_reported:
+ continue
+ debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
+ debuglog.warning("rejected rule (%s) in state %d", rejected,state)
+ errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
+ errorlog.warning("rejected rule (%s) in state %d", rejected, state)
+ already_reported[state,id(rule),id(rejected)] = 1
+
+ warned_never = []
+ for state, rule, rejected in lr.rr_conflicts:
+ if not rejected.reduced and (rejected not in warned_never):
+ debuglog.warning("Rule (%s) is never reduced", rejected)
+ errorlog.warning("Rule (%s) is never reduced", rejected)
+ warned_never.append(rejected)
+
+ # Write the table file if requested
+ if write_tables:
+ lr.write_table(tabmodule,outputdir,signature)
+
+ # Write a pickled version of the tables
+ if picklefile:
+ lr.pickle_table(picklefile,signature)
+
+ # Build the parser
+ lr.bind_callables(pinfo.pdict)
+ parser = LRParser(lr,pinfo.error_func)
+
+ parse = parser.parse
+ return parser
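+
+# Minimal usage sketch (illustration only; the NUMBER/PLUS tokens and the
+# mylexer object are assumptions, not part of this module):
+#
+# tokens = ('NUMBER', 'PLUS')
+#
+# def p_expr_plus(p):
+# 'expr : expr PLUS expr'
+# p[0] = p[1] + p[3]
+#
+# def p_expr_number(p):
+# 'expr : NUMBER'
+# p[0] = p[1]
+#
+# precedence = (('left', 'PLUS'),)
+#
+# parser = yacc() # reflects over the calling module
+# result = parser.parse('1+2+3', lexer=mylexer)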
diff --git a/xos/genx/generator/plyproto/__init__.py b/xos/genx/generator/plyproto/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/genx/generator/plyproto/__init__.py
diff --git a/xos/genx/generator/plyproto/model.py b/xos/genx/generator/plyproto/model.py
new file mode 100644
index 0000000..bfa5e95
--- /dev/null
+++ b/xos/genx/generator/plyproto/model.py
@@ -0,0 +1,455 @@
+__author__ = "Dusan (Ph4r05) Klinec"
+__copyright__ = "Copyright (C) 2014 Dusan (ph4r05) Klinec"
+__license__ = "Apache License, Version 2.0"
+__version__ = "1.0"
+
+class Visitor(object):
+
+ def __init__(self, verbose=False):
+ self.verbose = verbose
+
+ def __getattr__(self, name):
+ if not name.startswith('visit_'):
+ raise AttributeError('name must start with visit_ but was {}'.format(name))
+
+ def f(element):
+ if self.verbose:
+ msg = 'unimplemented call to {}; ignoring ({})'
+ print(msg.format(name, element))
+ return True
+ return f
+
+ # visitor.visit_PackageStatement(self)
+ # visitor.visit_ImportStatement(self)
+ # visitor.visit_OptionStatement(self)
+ # visitor.visit_FieldDirective(self)
+ # visitor.visit_FieldType(self)
+ # visitor.visit_FieldDefinition(self)
+ # visitor.visit_EnumFieldDefinition(self)
+ # visitor.visit_EnumDefinition(self)
+ # visitor.visit_MessageDefinition(self)
+ # visitor.visit_MessageExtension(self)
+ # visitor.visit_MethodDefinition(self)
+ # visitor.visit_ServiceDefinition(self)
+ # visitor.visit_ExtensionsDirective(self)
+ # visitor.visit_Literal(self)
+ # visitor.visit_Name(self)
+ # visitor.visit_Proto(self)
+ # visitor.visit_LU(self)
+
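+# Example subclass (illustration only): any visit_ method left undefined
+# falls back to __getattr__ above, which returns a no-op that answers True
+# so traversal continues into child nodes.
+#
+# class MessageNames(Visitor):
+# def visit_MessageDefinition(self, m):
+# print(m.name)
+# return True # descend into the message body
+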
+class Base(object):
+ parent = None
+ lexspan = None
+ linespan = None
+
+ def v(self, obj, visitor):
+ # Dispatch obj (or each element of a list) to the visitor.
+ if obj is None:
+ return
+ elif hasattr(obj, "accept"):
+ obj.accept(visitor)
+ elif isinstance(obj, list):
+ for s in obj:
+ self.v(s, visitor)
+
+ @staticmethod
+ def p(obj, parent):
+ if isinstance(obj, list):
+ for s in obj:
+ Base.p(s, parent)
+
+ if hasattr(obj, "parent"):
+ obj.parent = parent
+
+# Lexical unit - contains lexspan and linespan for later analysis.
+class LU(Base):
+ def __init__(self, p, idx):
+ self.p = p
+ self.idx = idx
+ self.pval = p[idx]
+ self.lexspan = p.lexspan(idx)
+ self.linespan = p.linespan(idx)
+
+ # If the raw value is a string and the lexspan's start equals its stop,
+ # compute the real span from the string length.
+ if isinstance(self.pval, str) \
+ and self.lexspan is not None \
+ and self.lexspan[0] == self.lexspan[1] \
+ and self.lexspan[0] != 0:
+ self.lexspan = tuple([self.lexspan[0], self.lexspan[0] + len(self.pval)])
+ super(LU, self).__init__()
+
+ @staticmethod
+ def i(p, idx):
+ if isinstance(p[idx], LU): return p[idx]
+ if isinstance(p[idx], str): return LU(p, idx)
+ return p[idx]
+
+ def describe(self):
+ return "LU(%s,%s)" % (self.pval, self.lexspan)
+
+ def __str__(self):
+ return self.pval
+
+ def __repr__(self):
+ return self.describe()
+
+ def accept(self, visitor):
+ self.v(self.pval, visitor)
+
+ def __iter__(self):
+ for x in self.pval:
+ yield x
+
+# Base node
+class SourceElement(Base):
+ '''
+ A SourceElement is the base class for all elements that occur in a Protocol Buffers
+ file parsed by plyproto.
+ '''
+ def __init__(self, linespan=[], lexspan=[], p=None):
+ super(SourceElement, self).__init__()
+ self._fields = [] # ['linespan', 'lexspan']
+ self.linespan = linespan
+ self.lexspan = lexspan
+ self.p = p
+
+ def __repr__(self):
+ equals = ("{0}={1!r}".format(k, getattr(self, k))
+ for k in self._fields)
+ args = ", ".join(equals)
+ return "{0}({1})".format(self.__class__.__name__, args)
+
+ def __eq__(self, other):
+ try:
+ return self.__dict__ == other.__dict__
+ except AttributeError:
+ return False
+
+ def __ne__(self, other):
+ return not self == other
+
+ def setLexData(self, linespan, lexspan):
+ self.linespan = linespan
+ self.lexspan = lexspan
+
+ def setLexObj(self, p):
+ self.p = p
+
+ def accept(self, visitor):
+ pass
+
+class PackageStatement(SourceElement):
+ def __init__(self, name, linespan=None, lexspan=None, p=None):
+ super(PackageStatement, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name']
+ self.name = name
+ Base.p(self.name, self)
+
+ def accept(self, visitor):
+ visitor.visit_PackageStatement(self)
+
+class ImportStatement(SourceElement):
+ def __init__(self, name, linespan=None, lexspan=None, p=None):
+ super(ImportStatement, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name']
+ self.name = name
+ Base.p(self.name, self)
+
+ def accept(self, visitor):
+ visitor.visit_ImportStatement(self)
+
+class OptionStatement(SourceElement):
+ def __init__(self, name, value, linespan=None, lexspan=None, p=None):
+ super(OptionStatement, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name', 'value']
+ self.name = name
+ Base.p(self.name, self)
+ self.value = value
+ Base.p(self.value, self)
+
+ def accept(self, visitor):
+ visitor.visit_OptionStatement(self)
+
+class FieldDirective(SourceElement):
+ def __init__(self, name, value, linespan=None, lexspan=None, p=None):
+ super(FieldDirective, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name', 'value']
+ self.name = name
+ Base.p(self.name, self)
+ self.value = value
+ Base.p(self.value, self)
+
+ def accept(self, visitor):
+ if visitor.visit_FieldDirective(self):
+ self.v(self.name, visitor)
+ self.v(self.value, visitor)
+ visitor.visit_FieldDirective_post(self)
+
+class FieldType(SourceElement):
+ def __init__(self, name, linespan=None, lexspan=None, p=None):
+ super(FieldType, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name']
+ self.name = name
+ Base.p(self.name, self)
+
+ def accept(self, visitor):
+ if visitor.visit_FieldType(self):
+ self.v(self.name, visitor)
+
+class LinkDefinition(SourceElement):
+ def __init__(self, link_type, src_port, name, dst_port, linespan=None, lexspan=None, p=None):
+ super(LinkDefinition, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['link_type', 'src_port', 'name', 'dst_port']
+ self.link_type = link_type
+ Base.p(self.link_type, self)
+ self.src_port = src_port
+ Base.p(self.src_port, self)
+ self.name = name
+ Base.p(self.name, self)
+ self.dst_port = dst_port
+ Base.p(self.dst_port, self)
+
+ def accept(self, visitor):
+ visitor.visit_LinkDefinition(self)
+
+class FieldDefinition(SourceElement):
+ def __init__(self, field_modifier, ftype, name, fieldId, fieldDirective, linespan=None, lexspan=None, p=None):
+ super(FieldDefinition, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['field_modifier', 'ftype', 'name', 'fieldId', 'fieldDirective']
+ self.name = name
+ Base.p(self.name, self)
+ self.field_modifier = field_modifier
+ Base.p(self.field_modifier, self)
+ self.ftype = ftype
+ Base.p(self.ftype, self)
+ self.fieldId = fieldId
+ Base.p(self.fieldId, self)
+ self.fieldDirective = fieldDirective
+ Base.p(self.fieldDirective, self)
+
+ def accept(self, visitor):
+ if visitor.visit_FieldDefinition(self):
+ self.v(self.name, visitor)
+ self.v(self.field_modifier, visitor)
+ self.v(self.ftype, visitor)
+ self.v(self.fieldId, visitor)
+ self.v(self.fieldDirective, visitor)
+ visitor.visit_FieldDefinition_post(self)
+
+class EnumFieldDefinition(SourceElement):
+ def __init__(self, name, fieldId, linespan=None, lexspan=None, p=None):
+ super(EnumFieldDefinition, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name', 'fieldId']
+ self.name = name
+ Base.p(self.name, self)
+ self.fieldId = fieldId
+ Base.p(self.fieldId, self)
+
+ def accept(self, visitor):
+ if visitor.visit_EnumFieldDefinition(self):
+ self.v(self.name, visitor)
+ self.v(self.fieldId, visitor)
+
+class EnumDefinition(SourceElement):
+ def __init__(self, name, body, linespan=None, lexspan=None, p=None):
+ super(EnumDefinition, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name', 'body']
+ self.name = name
+ Base.p(self.name, self)
+ self.body = body
+ Base.p(self.body, self)
+
+ def accept(self, visitor):
+ if visitor.visit_EnumDefinition(self):
+ self.v(self.name, visitor)
+ self.v(self.body, visitor)
+
+class LinkSpec(SourceElement):
+ def __init__(self, field_spec, link_spec, linespan=None, lexspan=None, p=None):
+ super(LinkSpec, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['link_def', 'field_def']
+ self.link_def = link_spec
+ Base.p(self.link_def, self)
+ self.field_def = field_spec
+ Base.p(self.field_def, self)
+
+ def accept(self, visitor):
+ if visitor.visit_LinkSpec(self):
+ self.v(self.link_def, visitor)
+ self.v(self.field_def, visitor)
+ visitor.visit_LinkSpec_post(self)
+
+class MessageDefinition(SourceElement):
+ def __init__(self, name, bclass, body, linespan=None, lexspan=None, p=None):
+ super(MessageDefinition, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name', 'bclass', 'body']
+ self.name = name
+ Base.p(self.name, self)
+ self.bclass = bclass
+ Base.p(self.bclass, self)
+ self.body = body
+ Base.p(self.body, self)
+
+ def accept(self, visitor):
+ if visitor.visit_MessageDefinition(self):
+ self.v(self.name, visitor)
+ self.v(self.bclass, visitor)
+ self.v(self.body, visitor)
+ visitor.visit_MessageDefinition_post(self)
+
+
+"""
+class MessageDefinition(SourceElement):
+ def __init__(self, name, body, linespan=None, lexspan=None, p=None):
+ super(MessageDefinition, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name', 'body']
+ self.name = name
+ Base.p(self.name, self)
+ self.body = body
+ Base.p(self.body, self)
+
+ def accept(self, visitor):
+ if visitor.visit_MessageDefinition(self):
+ self.v(self.name, visitor)
+ self.v(self.body, visitor)
+ visitor.visit_MessageDefinition_post(self)
+"""
+
+class MessageExtension(SourceElement):
+ def __init__(self, name, body, linespan=None, lexspan=None, p=None):
+ super(MessageExtension, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name', 'body']
+ self.name = name
+ Base.p(self.name, self)
+ self.body = body
+ Base.p(self.body, self)
+
+ def accept(self, visitor):
+ if visitor.visit_MessageExtension(self):
+ self.v(self.name, visitor)
+ self.v(self.body, visitor)
+ visitor.visit_MessageExtension_post(self)
+
+class MethodDefinition(SourceElement):
+ def __init__(self, name, name2, name3, linespan=None, lexspan=None, p=None):
+ super(MethodDefinition, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name', 'name2', 'name3']
+ self.name = name
+ Base.p(self.name, self)
+ self.name2 = name2
+ Base.p(self.name2, self)
+ self.name3 = name3
+ Base.p(self.name3, self)
+
+ def accept(self, visitor):
+ if visitor.visit_MethodDefinition(self):
+ self.v(self.name, visitor)
+ self.v(self.name2, visitor)
+ self.v(self.name3, visitor)
+ visitor.visit_MethodDefinition_post(self)
+
+class ServiceDefinition(SourceElement):
+ def __init__(self, name, body, linespan=None, lexspan=None, p=None):
+ super(ServiceDefinition, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['name', 'body']
+ self.name = name
+ Base.p(self.name, self)
+ self.body = body
+ Base.p(self.body, self)
+
+ def accept(self, visitor):
+ if visitor.visit_ServiceDefinition(self):
+ self.v(self.name, visitor)
+ self.v(self.body, visitor)
+ visitor.visit_ServiceDefinition_post(self)
+
+class ExtensionsMax(SourceElement):
+ pass
+
+class ExtensionsDirective(SourceElement):
+ def __init__(self, fromVal, toVal, linespan=None, lexspan=None, p=None):
+ super(ExtensionsDirective, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['fromVal', 'toVal']
+ self.fromVal = fromVal
+ Base.p(self.fromVal, self)
+ self.toVal = toVal
+ Base.p(self.toVal, self)
+
+ def accept(self, visitor):
+ if visitor.visit_ExtensionsDirective(self):
+ self.v(self.fromVal, visitor)
+ self.v(self.toVal, visitor)
+ visitor.visit_ExtensionsDirective_post(self)
+
+class Literal(SourceElement):
+
+ def __init__(self, value, linespan=None, lexspan=None, p=None):
+ super(Literal, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['value']
+ self.value = value
+
+ def accept(self, visitor):
+ visitor.visit_Literal(self)
+
+class Name(SourceElement):
+
+ def __init__(self, value, linespan=None, lexspan=None, p=None):
+ super(Name, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['value']
+ self.value = value
+ self.deriveLex()
+
+ def append_name(self, name):
+ try:
+ self.value = self.value + '.' + name.value
+ except AttributeError:
+ self.value = self.value + '.' + name
+
+ def deriveLex(self):
+ if hasattr(self.value, "lexspan"):
+ self.lexspan = self.value.lexspan
+ self.linespan = self.value.linespan
+ else:
+ return
+
+ def accept(self, visitor):
+ visitor.visit_Name(self)
+
+class DotName(Name):
+ def __init__(self, elements, linespan=None, lexspan=None, p=None):
+ super(DotName, self).__init__('.'.join([str(x) for x in elements]), linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['elements']
+ self.elements = elements
+ self.deriveLex()
+
+ def deriveLex(self):
+ if isinstance(self.elements, list) and len(self.elements)>0:
+ self.lexspan = (min([x.lexspan[0] for x in self.elements if x.lexspan[0] != 0]), max([x.lexspan[1] for x in self.elements if x.lexspan[1] != 0]))
+ self.linespan = (min([x.linespan[0] for x in self.elements if x.linespan[0] != 0]), max([x.linespan[1] for x in self.elements if x.linespan[1] != 0]))
+ elif hasattr(self.elements, "lexspan"):
+ self.lexspan = self.elements.lexspan
+ self.linespan = self.elements.linespan
+ else:
+ return
+
+ def accept(self, visitor):
+ visitor.visit_DotName(self)
+
+class ProtoFile(SourceElement):
+
+ def __init__(self, pkg, body, linespan=None, lexspan=None, p=None):
+ super(ProtoFile, self).__init__(linespan=linespan, lexspan=lexspan, p=p)
+ self._fields += ['pkg', 'body']
+ self.pkg = pkg
+ Base.p(self.pkg, self)
+ self.body = body
+ Base.p(self.body, self)
+
+ def accept(self, visitor):
+ if visitor.visit_Proto(self):
+ self.v(self.pkg, visitor)
+ self.v(self.body, visitor)
+ visitor.visit_Proto_post(self)
diff --git a/xos/genx/generator/plyproto/parser-old.py b/xos/genx/generator/plyproto/parser-old.py
new file mode 100755
index 0000000..8986cb8
--- /dev/null
+++ b/xos/genx/generator/plyproto/parser-old.py
@@ -0,0 +1,343 @@
+__author__ = "Dusan (Ph4r05) Klinec"
+__copyright__ = "Copyright (C) 2014 Dusan (ph4r05) Klinec"
+__license__ = "Apache License, Version 2.0"
+__version__ = "1.0"
+
+import ply.lex as lex
+import ply.yacc as yacc
+from .model import *
+
+class ProtobufLexer(object):
+ keywords = ('double', 'float', 'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
+ 'fixed32', 'fixed64', 'sfixed32', 'sfixed64', 'bool', 'string', 'bytes',
+ 'message', 'required', 'optional', 'repeated', 'enum', 'extensions', 'max', 'extends', 'extend',
+ 'to', 'package', 'service', 'rpc', 'returns', 'true', 'false', 'option', 'import', 'onetomany', 'manytomany', 'onetoone')
+
+ tokens = [
+ 'NAME',
+ 'NUM',
+ 'STRING_LITERAL',
+ 'LINE_COMMENT', 'BLOCK_COMMENT',
+
+ 'LBRACE', 'RBRACE', 'LBRACK', 'RBRACK',
+ 'LPAR', 'RPAR', 'EQ', 'SEMI', 'DOT',
+ 'ARROW', 'COLON',
+ 'STARTTOKEN'
+
+ ] + [k.upper() for k in keywords]
+ literals = '()+-*/=?:,.^|&~!=[]{};<>@%'
+
+ t_NUM = r'[+-]?\d+'
+ t_STRING_LITERAL = r'\"([^\\\n]|(\\.))*?\"'
+
+ t_ignore_LINE_COMMENT = '//.*'
+ def t_BLOCK_COMMENT(self, t):
+ r'/\*(.|\n)*?\*/'
+ t.lexer.lineno += t.value.count('\n')
+
+ t_LBRACE = '{'
+ t_RBRACE = '}'
+ t_LBRACK = '\\['
+ t_RBRACK = '\\]'
+ t_LPAR = '\\('
+ t_RPAR = '\\)'
+ t_EQ = '='
+ t_COLON = ':'
+ t_ARROW = '\\-\\>'
+ t_SEMI = ';'
+ t_DOT = '\\.'
+ t_ignore = ' \t\f'
+ t_STARTTOKEN = '\\+'
+
+ def t_NAME(self, t):
+ '[A-Za-z_$][A-Za-z0-9_$]*'
+ if t.value in ProtobufLexer.keywords:
+ #print "type: %s val %s t %s" % (t.type, t.value, t)
+ t.type = t.value.upper()
+ return t
+
+ def t_newline(self, t):
+ r'\n+'
+ t.lexer.lineno += len(t.value)
+
+ def t_newline2(self, t):
+ r'(\r\n)+'
+ t.lexer.lineno += len(t.value) // 2
+
+ def t_error(self, t):
+ print("Illegal character '{}' ({}) in line {}".format(t.value[0], hex(ord(t.value[0])), t.lexer.lineno))
+ t.lexer.skip(1)
+
+class LexHelper:
+ offset = 0
+ def get_max_linespan(self, p):
+ defSpan=[1e60, -1]
+ mSpan=[1e60, -1]
+ for sp in range(0, len(p)):
+ csp = p.linespan(sp)
+ if csp[0] == 0 and csp[1] == 0:
+ if hasattr(p[sp], "linespan"):
+ csp = p[sp].linespan
+ else:
+ continue
+ if csp is None or len(csp) != 2: continue
+ if csp[0] == 0 and csp[1] == 0: continue
+ if csp[0] < mSpan[0]: mSpan[0] = csp[0]
+ if csp[1] > mSpan[1]: mSpan[1] = csp[1]
+ if defSpan == mSpan: return (0,0)
+ return tuple([mSpan[0]-self.offset, mSpan[1]-self.offset])
+
+ def get_max_lexspan(self, p):
+ defSpan=[1e60, -1]
+ mSpan=[1e60, -1]
+ for sp in range(0, len(p)):
+ csp = p.lexspan(sp)
+ if csp[0] == 0 and csp[1] == 0:
+ if hasattr(p[sp], "lexspan"):
+ csp = p[sp].lexspan
+ else:
+ continue
+ if csp is None or len(csp) != 2: continue
+ if csp[0] == 0 and csp[1] == 0: continue
+ if csp[0] < mSpan[0]: mSpan[0] = csp[0]
+ if csp[1] > mSpan[1]: mSpan[1] = csp[1]
+ if defSpan == mSpan: return (0,0)
+ return tuple([mSpan[0]-self.offset, mSpan[1]-self.offset])
+
+ def set_parse_object(self, dst, p):
+ dst.setLexData(linespan=self.get_max_linespan(p), lexspan=self.get_max_lexspan(p))
+ dst.setLexObj(p)
+
+class ProtobufParser(object):
+ tokens = ProtobufLexer.tokens
+ offset = 0
+ lh = LexHelper()
+
+ def setOffset(self, of):
+ self.offset = of
+ self.lh.offset = of
+
+ def p_empty(self, p):
+ '''empty :'''
+ pass
+
+ def p_field_modifier(self,p):
+ '''field_modifier : REQUIRED
+ | OPTIONAL'''
+ p[0] = LU.i(p,1)
+
+ def p_primitive_type(self, p):
+ '''primitive_type : DOUBLE
+ | FLOAT
+ | INT32
+ | INT64
+ | UINT32
+ | UINT64
+ | SINT32
+ | SINT64
+ | FIXED32
+ | FIXED64
+ | SFIXED32
+ | SFIXED64
+ | BOOL
+ | STRING
+ | BYTES'''
+ p[0] = LU.i(p,1)
+
+ def p_link_type(self, p):
+ '''link_type : ONETOONE
+ | ONETOMANY
+ | MANYTOMANY'''
+ p[0] = LU.i(p,1)
+
+ def p_field_id(self, p):
+ '''field_id : NUM'''
+ p[0] = LU.i(p,1)
+
+ def p_rvalue(self, p):
+ '''rvalue : NUM
+ | TRUE
+ | FALSE'''
+ p[0] = LU.i(p,1)
+
+ def p_rvalue2(self, p):
+ '''rvalue : NAME'''
+ p[0] = Name(LU.i(p, 1))
+ self.lh.set_parse_object(p[0], p)
+ p[0].deriveLex()
+
+ def p_field_directive(self, p):
+ '''field_directive : LBRACK NAME EQ rvalue RBRACK'''
+ p[0] = FieldDirective(Name(LU.i(p, 2)), LU.i(p,4))
+ self.lh.set_parse_object(p[0], p)
+
+ def p_field_directive_times(self, p):
+ '''field_directive_times : field_directive_plus'''
+ p[0] = p[1]
+
+ def p_field_directive_times2(self, p):
+ '''field_directive_times : empty'''
+ p[0] = []
+
+ def p_field_directive_plus(self, p):
+ '''field_directive_plus : field_directive
+ | field_directive_plus field_directive'''
+ if len(p) == 2:
+ p[0] = [LU(p,1)]
+ else:
+ p[0] = p[1] + [LU(p,2)]
+
+ def p_dotname(self, p):
+ '''dotname : NAME
+ | dotname DOT NAME'''
+ if len(p) == 2:
+ p[0] = [LU(p,1)]
+ else:
+ p[0] = p[1] + [LU(p,3)]
+
+ # Hack for cases when there is a field named 'message' or 'max'
+ def p_fieldName(self, p):
+ '''field_name : NAME
+ | MESSAGE
+ | MAX'''
+ p[0] = Name(LU.i(p,1))
+ self.lh.set_parse_object(p[0], p)
+ p[0].deriveLex()
+
+ def p_field_type(self, p):
+ '''field_type : primitive_type'''
+ p[0] = FieldType(LU.i(p,1))
+ self.lh.set_parse_object(p[0], p)
+
+ def p_field_type2(self, p):
+ '''field_type : dotname'''
+ p[0] = DotName(LU.i(p, 1))
+ self.lh.set_parse_object(p[0], p)
+ p[0].deriveLex()
+
+ # Root of the field declaration.
+ def p_field_definition(self, p):
+ '''field_definition : field_modifier field_type field_name EQ field_id field_directive_times SEMI'''
+ p[0] = FieldDefinition(LU.i(p,1), LU.i(p,2), LU.i(p, 3), LU.i(p,5), LU.i(p,6))
+ self.lh.set_parse_object(p[0], p)
+
+ # Link definition
+ #def p_link_definition(self, p):
+ # '''link_definition : link_type field_modifier field_name ARROW NAME COLON field_name EQ field_id SEMI'''
+ # #p[0] = FieldDefinition(LU.i(p,1), FieldType(INT32), LU.i(p, 3), LU.i(p, 9), [FieldDirective('type', 'link'), FieldDirective('model',LU.i(p,5)), FieldDirective('port',LU.i(p,7))])
+ # p[0] = FieldDefinition(LU.i(p,2), FieldType(INT32), LU.i(p, 3), LU.i(p, 9), [])
+ # self.lh.set_parse_object(p[0], p)
+
+ def p_message_body_part(self, p):
+ '''message_body_part : field_definition
+ | message_definition'''
+
+ #'''message_body_part : field_definition
+ # | link_definition
+ # | message_definition'''
+
+ p[0] = p[1]
+
+ # message_body ::= { field_definition | enum_definition | message_definition | extensions_definition | message_extension }*
+ def p_message_body(self, p):
+ '''message_body : empty'''
+ p[0] = []
+
+ # message_body ::= { field_definition | enum_definition | message_definition | extensions_definition | message_extension }*
+ def p_message_body2(self, p):
+ '''message_body : message_body_part
+ | message_body message_body_part'''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1] + [p[2]]
+
+ # Root of the message declaration.
+ # message_definition = MESSAGE_ - ident("messageId") + LBRACE + message_body("body") + RBRACE
+ def p_message_definition(self, p):
+ '''message_definition : MESSAGE NAME LBRACE message_body RBRACE'''
+ p[0] = MessageDefinition(Name(LU.i(p, 2)), LU.i(p,4))
+ self.lh.set_parse_object(p[0], p)
+
+ # package_directive ::= 'package' ident [ '.' ident]* ';'
+ def p_package_directive(self,p):
+ '''package_directive : PACKAGE dotname SEMI'''
+ p[0] = PackageStatement(Name(LU.i(p, 2)))
+ self.lh.set_parse_object(p[0], p)
+
+ # import_directive = IMPORT_ - quotedString("importFileSpec") + SEMI
+ def p_import_directive(self, p):
+ '''import_directive : IMPORT STRING_LITERAL SEMI'''
+ p[0] = ImportStatement(Literal(LU.i(p,2)))
+ self.lh.set_parse_object(p[0], p)
+
+ # topLevelStatement = Group(message_definition | message_extension | enum_definition | service_definition | import_directive | option_directive)
+ def p_topLevel(self,p):
+ '''topLevel : message_definition'''
+ p[0] = p[1]
+
+ def p_package_definition(self, p):
+ '''package_definition : package_directive'''
+ p[0] = p[1]
+
+ def p_packages2(self, p):
+ '''package_definition : empty'''
+ p[0] = []
+
+ def p_statements2(self, p):
+ '''statements : topLevel
+ | statements topLevel'''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1] + [p[2]]
+
+ def p_statements(self, p):
+ '''statements : empty'''
+ p[0] = []
+
+ # parser = Optional(package_directive) + ZeroOrMore(topLevelStatement)
+ def p_protofile(self, p):
+ '''protofile : package_definition statements'''
+ p[0] = ProtoFile(LU.i(p,1), LU.i(p,2))
+ self.lh.set_parse_object(p[0], p)
+
+ # Parsing starting point
+ def p_goal(self, p):
+ '''goal : STARTTOKEN protofile'''
+ p[0] = p[2]
+
+ def p_error(self, p):
+ print('error: {}'.format(p))
+
+class ProtobufAnalyzer(object):
+
+ def __init__(self):
+ self.lexer = lex.lex(module=ProtobufLexer(), optimize=1)
+ self.parser = yacc.yacc(module=ProtobufParser(), start='goal', optimize=1)
+
+ def tokenize_string(self, code):
+ self.lexer.input(code)
+ for token in self.lexer:
+ print(token)
+
+ def tokenize_file(self, _file):
+ if isinstance(_file, str):
+ _file = open(_file)
+ content = ''
+ for line in _file:
+ content += line
+ return self.tokenize_string(content)
+
+ def parse_string(self, code, debug=0, lineno=1, prefix='+'):
+ self.lexer.lineno = lineno
+ self.parser.offset = len(prefix)
+ return self.parser.parse(prefix + code, lexer=self.lexer, debug=debug)
+
+ def parse_file(self, _file, debug=0):
+ if isinstance(_file, str):
+ _file = open(_file)
+ content = ''
+ for line in _file:
+ content += line
+ return self.parse_string(content, debug=debug)
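+
+ # Usage sketch (illustration only): parse_string() prepends the '+' start
+ # token expected by the 'goal' rule, so callers pass plain protobuf text:
+ #
+ # analyzer = ProtobufAnalyzer()
+ # ast = analyzer.parse_string("package foo; message Bar { required int32 id = 1; }")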
diff --git a/xos/genx/generator/plyproto/parser.py b/xos/genx/generator/plyproto/parser.py
new file mode 100755
index 0000000..eb2d776
--- /dev/null
+++ b/xos/genx/generator/plyproto/parser.py
@@ -0,0 +1,493 @@
+__author__ = "Dusan (Ph4r05) Klinec"
+
+__copyright__ = "Copyright (C) 2014 Dusan (ph4r05) Klinec"
+__license__ = "Apache License, Version 2.0"
+__version__ = "1.0"
+
+import ply.lex as lex
+import ply.yacc as yacc
+from .model import *
+
+class ProtobufLexer(object):
+ keywords = ('double', 'float', 'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
+ 'fixed32', 'fixed64', 'sfixed32', 'sfixed64', 'bool', 'string', 'bytes',
+ 'message', 'required', 'optional', 'repeated', 'enum', 'extensions', 'max', 'extends', 'extend',
+ 'to', 'package', '_service', 'rpc', 'returns', 'true', 'false', 'option', 'import', 'manytoone', 'manytomany', 'onetoone')
+
+ tokens = [
+ 'NAME',
+ 'NUM',
+ 'STRING_LITERAL',
+ 'LINE_COMMENT', 'BLOCK_COMMENT',
+
+ 'LBRACE', 'RBRACE', 'LBRACK', 'RBRACK',
+ 'LPAR', 'RPAR', 'EQ', 'SEMI', 'DOT',
+ 'ARROW', 'COLON', 'COMMA',
+ 'STARTTOKEN'
+
+ ] + [k.upper() for k in keywords]
+ literals = '()+-*/=?:,.^|&~!=[]{};<>@%'
+
+ t_NUM = r'[+-]?\d+'
+ t_STRING_LITERAL = r'\"([^\\\n]|(\\.))*?\"'
+
+ t_ignore_LINE_COMMENT = '//.*'
+ def t_BLOCK_COMMENT(self, t):
+ r'/\*(.|\n)*?\*/'
+ t.lexer.lineno += t.value.count('\n')
+
+ t_LBRACE = '{'
+ t_RBRACE = '}'
+ t_LBRACK = '\\['
+ t_RBRACK = '\\]'
+ t_LPAR = '\\('
+ t_RPAR = '\\)'
+ t_EQ = '='
+ t_SEMI = ';'
+ t_ARROW = '\\-\\>'
+ t_COLON = '\\:'
+ t_COMMA = '\\,'
+ t_DOT = '\\.'
+ t_ignore = ' \t\f'
+ t_STARTTOKEN = '\\+'
+
+ def t_NAME(self, t):
+ '[A-Za-z_$][A-Za-z0-9_$]*'
+ if t.value in ProtobufLexer.keywords:
+ #print "type: %s val %s t %s" % (t.type, t.value, t)
+ t.type = t.value.upper()
+ return t
+
+ def t_newline(self, t):
+ r'\n+'
+ t.lexer.lineno += len(t.value)
+
+ def t_newline2(self, t):
+ r'(\r\n)+'
+ t.lexer.lineno += len(t.value) // 2
+
+ def t_error(self, t):
+ print("Illegal character '{}' ({}) in line {}".format(t.value[0], hex(ord(t.value[0])), t.lexer.lineno))
+ t.lexer.skip(1)
+
+class LexHelper:
+ offset = 0
+ def get_max_linespan(self, p):
+ defSpan=[1e60, -1]
+ mSpan=[1e60, -1]
+ for sp in range(0, len(p)):
+ csp = p.linespan(sp)
+ if csp[0] == 0 and csp[1] == 0:
+ if hasattr(p[sp], "linespan"):
+ csp = p[sp].linespan
+ else:
+ continue
+ if csp is None or len(csp) != 2: continue
+ if csp[0] == 0 and csp[1] == 0: continue
+ if csp[0] < mSpan[0]: mSpan[0] = csp[0]
+ if csp[1] > mSpan[1]: mSpan[1] = csp[1]
+ if defSpan == mSpan: return (0,0)
+ return tuple([mSpan[0]-self.offset, mSpan[1]-self.offset])
+
+ def get_max_lexspan(self, p):
+ defSpan=[1e60, -1]
+ mSpan=[1e60, -1]
+ for sp in range(0, len(p)):
+ csp = p.lexspan(sp)
+ if csp[0] == 0 and csp[1] == 0:
+ if hasattr(p[sp], "lexspan"):
+ csp = p[sp].lexspan
+ else:
+ continue
+ if csp is None or len(csp) != 2: continue
+ if csp[0] == 0 and csp[1] == 0: continue
+ if csp[0] < mSpan[0]: mSpan[0] = csp[0]
+ if csp[1] > mSpan[1]: mSpan[1] = csp[1]
+ if defSpan == mSpan: return (0,0)
+ return tuple([mSpan[0]-self.offset, mSpan[1]-self.offset])
+
+ def set_parse_object(self, dst, p):
+ dst.setLexData(linespan=self.get_max_linespan(p), lexspan=self.get_max_lexspan(p))
+ dst.setLexObj(p)
+
+def srcPort(x):
+ if (x):
+ return [FieldDirective(Name('port'),x)]
+ else:
+ return []
+
+
+class ProtobufParser(object):
+ tokens = ProtobufLexer.tokens
+ offset = 0
+ lh = LexHelper()
+
+ def setOffset(self, of):
+ self.offset = of
+ self.lh.offset = of
+
+ def p_empty(self, p):
+ '''empty :'''
+ pass
+
+ def p_field_modifier(self,p):
+ '''field_modifier : REQUIRED
+ | OPTIONAL
+ | REPEATED'''
+ p[0] = LU.i(p,1)
+
+ def p_primitive_type(self, p):
+ '''primitive_type : DOUBLE
+ | FLOAT
+ | INT32
+ | INT64
+ | UINT32
+ | UINT64
+ | SINT32
+ | SINT64
+ | FIXED32
+ | FIXED64
+ | SFIXED32
+ | SFIXED64
+ | BOOL
+ | STRING
+ | BYTES'''
+ p[0] = LU.i(p,1)
+
+ def p_link_type(self, p):
+ '''link_type : ONETOONE
+ | MANYTOONE
+ | MANYTOMANY'''
+ p[0] = LU.i(p,1)
+
+ def p_field_id(self, p):
+ '''field_id : NUM'''
+ p[0] = LU.i(p,1)
+
+ def p_rvalue(self, p):
+ '''rvalue : NUM
+ | TRUE
+ | FALSE'''
+ p[0] = LU.i(p,1)
+
+ def p_rvalue3(self, p):
+ '''rvalue : STRING_LITERAL'''
+ p[0] = Name(LU.i(p, 1))
+ self.lh.set_parse_object(p[0], p)
+ p[0].deriveLex()
+
+ def p_rvalue2(self, p):
+ '''rvalue : NAME'''
+ p[0] = Name(LU.i(p, 1))
+ self.lh.set_parse_object(p[0], p)
+ p[0].deriveLex()
+
+ def p_field_directives2(self, p):
+ '''field_directives : empty'''
+ p[0] = []
+
+ def p_field_directives(self, p):
+ '''field_directives : LBRACK field_directive_times RBRACK'''
+ p[0] = p[2]
+ #self.lh.set_parse_object(p[0], p)
+
+ def p_field_directive(self, p):
+ '''field_directive : NAME EQ rvalue'''
+ p[0] = FieldDirective(Name(LU.i(p, 1)), LU.i(p, 3))
+ self.lh.set_parse_object(p[0], p)
+
+ def p_field_directive_times(self, p):
+ '''field_directive_times : field_directive_plus'''
+ p[0] = p[1]
+
+ def p_field_directive_times2(self, p):
+ '''field_directive_times : empty'''
+ p[0] = []
+
+ def p_field_directive_plus(self, p):
+ '''field_directive_plus : field_directive
+ | field_directive_plus COMMA field_directive'''
+ if len(p) == 2:
+ p[0] = [LU(p,1)]
+ else:
+ p[0] = p[1] + [LU(p,3)]
+
+ def p_dotname(self, p):
+ '''dotname : NAME
+ | dotname DOT NAME'''
+ if len(p) == 2:
+ p[0] = [LU(p,1)]
+ else:
+ p[0] = p[1] + [LU(p,3)]
+
+ # Hack for cases when there is a field named 'message' or 'max'
+ def p_fieldName(self, p):
+ '''field_name : NAME
+ | MESSAGE
+ | MAX'''
+ p[0] = Name(LU.i(p,1))
+ self.lh.set_parse_object(p[0], p)
+ p[0].deriveLex()
+
+ def p_field_type(self, p):
+ '''field_type : primitive_type'''
+ p[0] = FieldType(LU.i(p,1))
+ self.lh.set_parse_object(p[0], p)
+
+ def p_field_type2(self, p):
+ '''field_type : dotname'''
+ p[0] = DotName(LU.i(p, 1))
+ self.lh.set_parse_object(p[0], p)
+ p[0].deriveLex()
+
+ def p_colon_fieldname(self, p):
+ '''colon_fieldname : COLON field_name'''
+ p[0] = p[2]
+ self.lh.set_parse_object(p[0], p)
+
+ def p_colon_fieldname2(self, p):
+ '''colon_fieldname : empty'''
+ p[0] = None
+
+ # TODO: Add directives to link definition
+ def p_link_definition(self, p):
+ '''link_definition : field_modifier link_type field_name ARROW NAME colon_fieldname EQ field_id field_directives SEMI'''
+ p[0] = LinkSpec(
+ FieldDefinition(LU.i(p,1), Name('int32'), LU.i(p, 3), LU.i(p, 8), [FieldDirective(Name('type'), Name('link')), FieldDirective(Name('model'),LU.i(p, 5))] + srcPort(LU.i(p,6)) + LU.i(p,9)),
+ LinkDefinition(LU.i(p,2), LU.i(p,3), LU.i(p,5), LU.i(p,6)))
+
+ self.lh.set_parse_object(p[0], p)
+
+ # Root of the field declaration.
+ def p_field_definition(self, p):
+ '''field_definition : field_modifier field_type field_name EQ field_id field_directives SEMI'''
+ p[0] = FieldDefinition(LU.i(p,1), LU.i(p,2), LU.i(p, 3), LU.i(p,5), LU.i(p,6))
+ self.lh.set_parse_object(p[0], p)
+
+ # Root of the enum field declaration.
+ def p_enum_field(self, p):
+ '''enum_field : field_name EQ NUM SEMI'''
+ p[0] = EnumFieldDefinition(LU.i(p, 1), LU.i(p,3))
+ self.lh.set_parse_object(p[0], p)
+
+ def p_enum_body_part(self, p):
+ '''enum_body_part : enum_field
+ | option_directive'''
+ p[0] = p[1]
+
+ def p_enum_body(self, p):
+ '''enum_body : enum_body_part
+ | enum_body enum_body_part'''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1] + [p[2]]
+
+ def p_enum_body_opt(self, p):
+ '''enum_body_opt : empty'''
+ p[0] = []
+
+ def p_enum_body_opt2(self, p):
+ '''enum_body_opt : enum_body'''
+ p[0] = p[1]
+
+ # Root of the enum declaration.
+ # enum_definition ::= 'enum' ident '{' { ident '=' integer ';' }* '}'
+ def p_enum_definition(self, p):
+ '''enum_definition : ENUM NAME LBRACE enum_body_opt RBRACE'''
+ p[0] = EnumDefinition(Name(LU.i(p, 2)), LU.i(p,4))
+ self.lh.set_parse_object(p[0], p)
+
+ def p_extensions_to(self, p):
+ '''extensions_to : MAX'''
+ p[0] = ExtensionsMax()
+ self.lh.set_parse_object(p[0], p)
+
+ def p_extensions_to2(self, p):
+ '''extensions_to : NUM'''
+ p[0] = LU.i(p, 1)
+
+ # extensions_definition ::= 'extensions' integer 'to' integer ';'
+ def p_extensions_definition(self, p):
+ '''extensions_definition : EXTENSIONS NUM TO extensions_to SEMI'''
+ p[0] = ExtensionsDirective(LU.i(p,2), LU.i(p,4))
+ self.lh.set_parse_object(p[0], p)
+
+ # message_extension ::= 'extend' ident '{' message_body '}'
+ def p_message_extension(self, p):
+ '''message_extension : EXTEND NAME LBRACE message_body RBRACE'''
+ p[0] = MessageExtension(Name(LU.i(p, 2)), LU.i(p,4))
+ self.lh.set_parse_object(p[0], p)
+
+ def p_message_body_part(self, p):
+ '''message_body_part : field_definition
+ | link_definition
+ | enum_definition
+ | message_definition
+ | extensions_definition
+ | message_extension'''
+ p[0] = p[1]
+
+ # message_body ::= { field_definition | enum_definition | message_definition | extensions_definition | message_extension }*
+ def p_message_body(self, p):
+ '''message_body : empty'''
+ p[0] = []
+
+ # message_body ::= { field_definition | enum_definition | message_definition | extensions_definition | message_extension }*
+ def p_message_body2(self, p):
+ '''message_body : message_body_part
+ | message_body message_body_part'''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1] + [p[2]]
+
+ def p_base_definition(self, p):
+ '''base_definition : LPAR NAME RPAR'''
+ p[0] = p[2]
+
+ def p_base_definition2(self, p):
+ '''base_definition : empty'''
+ p[0] = None
+
+ # Root of the message declaration.
+ # message_definition = MESSAGE_ - ident("messageId") + LBRACE + message_body("body") + RBRACE
+ def p_message_definition(self, p):
+ '''message_definition : MESSAGE NAME base_definition LBRACE message_body RBRACE'''
+ p[0] = MessageDefinition(Name(LU.i(p, 2)), LU.i(p, 3), LU.i(p,5))
+ self.lh.set_parse_object(p[0], p)
+
+ # method_definition ::= 'rpc' ident '(' [ ident ] ')' 'returns' '(' [ ident ] ')' ';'
+ def p_method_definition(self, p):
+ '''method_definition : RPC NAME LPAR NAME RPAR RETURNS LPAR NAME RPAR'''
+ p[0] = MethodDefinition(Name(LU.i(p, 2)), Name(LU.i(p, 4)), Name(LU.i(p, 8)))
+ self.lh.set_parse_object(p[0], p)
+
+ def p_method_definition_opt(self, p):
+ '''method_definition_opt : empty'''
+ p[0] = []
+
+ def p_method_definition_opt2(self, p):
+ '''method_definition_opt : method_definition
+ | method_definition_opt method_definition'''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1] + [p[2]]
+
+ # service_definition ::= 'service' ident '{' method_definition* '}'
+ # service_definition = SERVICE_ - ident("serviceName") + LBRACE + ZeroOrMore(Group(method_definition)) + RBRACE
+ def p_service_definition(self, p):
+ '''service_definition : _SERVICE NAME LBRACE method_definition_opt RBRACE'''
+ p[0] = ServiceDefinition(Name(LU.i(p, 2)), LU.i(p,4))
+ self.lh.set_parse_object(p[0], p)
+
+ # package_directive ::= 'package' ident [ '.' ident]* ';'
+ def p_package_directive(self,p):
+ '''package_directive : PACKAGE dotname SEMI'''
+ p[0] = PackageStatement(Name(LU.i(p, 2)))
+ self.lh.set_parse_object(p[0], p)
+
+ # import_directive = IMPORT_ - quotedString("importFileSpec") + SEMI
+ def p_import_directive(self, p):
+ '''import_directive : IMPORT STRING_LITERAL SEMI'''
+ p[0] = ImportStatement(Literal(LU.i(p,2)))
+ self.lh.set_parse_object(p[0], p)
+
+ def p_option_rvalue(self, p):
+ '''option_rvalue : NUM
+ | TRUE
+ | FALSE'''
+ p[0] = LU(p, 1)
+
+ def p_option_rvalue2(self, p):
+ '''option_rvalue : STRING_LITERAL'''
+ p[0] = Literal(LU(p,1))
+
+ def p_option_rvalue3(self, p):
+ '''option_rvalue : NAME'''
+ p[0] = Name(LU.i(p,1))
+
+ # option_directive = OPTION_ - ident("optionName") + EQ + quotedString("optionValue") + SEMI
+ def p_option_directive(self, p):
+ '''option_directive : OPTION NAME EQ option_rvalue SEMI'''
+ p[0] = OptionStatement(Name(LU.i(p, 2)), LU.i(p,4))
+ self.lh.set_parse_object(p[0], p)
+
+ # topLevelStatement = Group(message_definition | message_extension | enum_definition | service_definition | import_directive | option_directive)
+ def p_topLevel(self,p):
+ '''topLevel : message_definition
+ | message_extension
+ | enum_definition
+ | service_definition
+ | import_directive
+ | option_directive'''
+ p[0] = p[1]
+
+ def p_package_definition(self, p):
+ '''package_definition : package_directive'''
+ p[0] = p[1]
+
+ def p_packages2(self, p):
+ '''package_definition : empty'''
+ p[0] = []
+
+ def p_statements2(self, p):
+ '''statements : topLevel
+ | statements topLevel'''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1] + [p[2]]
+
+ def p_statements(self, p):
+ '''statements : empty'''
+ p[0] = []
+
+ # parser = Optional(package_directive) + ZeroOrMore(topLevelStatement)
+ def p_protofile(self, p):
+ '''protofile : package_definition statements'''
+ p[0] = ProtoFile(LU.i(p,1), LU.i(p,2))
+ self.lh.set_parse_object(p[0], p)
+
+ # Parsing starting point
+ def p_goal(self, p):
+ '''goal : STARTTOKEN protofile'''
+ p[0] = p[2]
+
+ def p_error(self, p):
+ print('error: {}'.format(p))
+
+class ProtobufAnalyzer(object):
+
+ def __init__(self):
+ # optimize=1 is left disabled here so grammar edits take effect without stale tables
+ self.lexer = lex.lex(module=ProtobufLexer())
+ self.parser = yacc.yacc(module=ProtobufParser(), start='goal', debug=0)
+
+ def tokenize_string(self, code):
+ self.lexer.input(code)
+ for token in self.lexer:
+ print(token)
+
+ def tokenize_file(self, _file):
+ if isinstance(_file, str):
+ _file = open(_file)
+ content = ''
+ for line in _file:
+ content += line
+ return self.tokenize_string(content)
+
+ def parse_string(self, code, debug=0, lineno=1, prefix='+'):
+ self.lexer.lineno = lineno
+ self.parser.offset = len(prefix)
+ return self.parser.parse(prefix + code, lexer=self.lexer, debug=debug)
+
+ def parse_file(self, _file, debug=0):
+ if isinstance(_file, str):
+ _file = open(_file)
+ content = ''
+ for line in _file:
+ content += line
+ return self.parse_string(content, debug=debug)
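
The link_definition rule is the main xproto extension over stock protobuf syntax. A sketch of the input it accepts, with illustrative model and port names:

import plyproto.parser as plyproto

link_example = """package demo;
message Slice {
    required manytoone owner -> Site:slices = 1;
    required string name = 2;
}
"""
# Each '->' line is desugared by p_link_definition into a synthetic int32
# field carrying [type = link, model = Site, port = slices] directives,
# paired with a LinkDefinition inside a LinkSpec.
ast = plyproto.ProtobufAnalyzer().parse_string(link_example)
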
diff --git a/xos/genx/generator/plyproto/uber b/xos/genx/generator/plyproto/uber
new file mode 100644
index 0000000..b00158a
--- /dev/null
+++ b/xos/genx/generator/plyproto/uber
@@ -0,0 +1,17 @@
+from .model import *
+import pdb
+import argparse
+import plyproto.parser as plyproto
+
+parse = argparse.ArgumentParser(description='XOS code generator')
+parse.add_argument('--xosproto', dest='input', action='store',default=None, help='Filename in XOS-enhanced Protobufs')
+parse.add_argument('--proto', dest='input', action='store',default=None, help='Filename in Protobufs')
+parse.add_argument('--output', dest='output', action='store',default=None, help='Output format, corresponding to <output>.yaml file')
+
+args = parse.parse_args()
+
+parser = plyproto.ProtobufAnalyzer()
+input = open(args.input).read()
+
+ast = parser.parse_string(input)
+print(ast)
diff --git a/xos/genx/generator/proto2xproto.py b/xos/genx/generator/proto2xproto.py
new file mode 100644
index 0000000..f2c7620
--- /dev/null
+++ b/xos/genx/generator/proto2xproto.py
@@ -0,0 +1,151 @@
+import plyproto.model as m
+import pdb
+import argparse
+import plyproto.parser as plyproto
+import traceback
+import sys
+import jinja2
+import os
+from xos2jinja import Stack  # Stack is defined alongside this module in xos2jinja.py
+
+
+''' Proto2XProto overrides the underlying visitor pattern to transform the tree
+ in addition to traversing it '''
+class Proto2XProto(m.Visitor):
+ stack = Stack()
+ count_stack = Stack()
+ content=""
+ offset=0
+ statementsChanged=0
+
+ def map_field(self, obj, s):
+ if 'model' in s:
+ link = m.LinkDefinition('onetoone','src','name','dst', obj.linespan, obj.lexspan, obj.p)
+ lspec = m.LinkSpec(link, obj)
+ else:
+ lspec = obj
+ return lspec
+
+
+ def get_stack(self):
+ return self.stack
+
+ def __init__(self):
+ super(Proto2XProto, self).__init__()
+
+ self.verbose = 0
+ self.first_field = True
+ self.first_method = True
+
+ def visit_PackageStatement(self, obj):
+ '''Ignore'''
+ return True
+
+ def visit_ImportStatement(self, obj):
+ '''Ignore'''
+ return True
+
+ def visit_OptionStatement(self, obj):
+ '''Ignore'''
+ return True
+
+ def visit_LU(self, obj):
+ return True
+
+ def visit_default(self, obj):
+ return True
+
+ def visit_FieldDirective(self, obj):
+ return True
+
+ def visit_FieldDirective_post(self, obj):
+ try:
+ name = obj.name.value.pval
+ except AttributeError:
+ name = obj.name.value
+
+ try:
+ value = obj.value.value.pval
+ except AttributeError:
+ try:
+ value = obj.value.value
+ except AttributeError:
+ value = obj.value.pval
+
+ self.stack.push([name,value])
+ return True
+
+ def visit_FieldType(self, obj):
+ return True
+
+ def visit_LinkDefinition(self, obj):
+ return True
+
+ def visit_FieldDefinition(self, obj):
+ self.count_stack.push(len(obj.fieldDirective))
+ return True
+
+ def visit_FieldDefinition_post(self, obj):
+ opts = {}
+ n = self.count_stack.pop()
+ for i in range(0, n):
+ k,v = self.stack.pop()
+ opts[k] = v
+
+ f = self.map_field(obj, opts)
+ self.stack.push(f)
+ return True
+
+ def visit_EnumFieldDefinition(self, obj):
+ return True
+
+ def visit_EnumDefinition(self, obj):
+ return True
+
+ def visit_MessageDefinition(self, obj):
+ self.count_stack.push(len(obj.body))
+ return True
+
+ def visit_MessageDefinition_post(self, obj):
+ stack_num = self.count_stack.pop()
+ fields = []
+ links = []
+ for i in range(0,stack_num):
+ f = self.stack.pop()
+ if (f['_type']=='link'):
+ links.insert(0,f)
+ else:
+ fields.insert(0,f)
+
+ self.stack.push({'name':obj.name.value.pval,'fields':fields,'links':links})
+ return True
+
+ def visit_MessageExtension(self, obj):
+ return True
+
+ def visit_MethodDefinition(self, obj):
+ return True
+
+ def visit_ServiceDefinition(self, obj):
+ return True
+
+ def visit_ExtensionsDirective(self, obj):
+ return True
+
+ def visit_Literal(self, obj):
+ return True
+
+ def visit_Name(self, obj):
+ return True
+
+ def visit_DotName(self, obj):
+ return True
+
+ def visit_Proto(self, obj):
+ self.count_stack.push(len(obj.body))
+ return True
+
+ def visit_Proto_post(self, obj):
+ return True
+
+ def visit_LinkSpec(self, obj):
+ return False
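
A sketch of how Proto2XProto is meant to be driven (the import path is assumed): map_field() re-wraps any field whose directives carry a 'model' key into a LinkSpec, reversing the desugaring that p_link_definition applies to '->' links.

import plyproto.parser as plyproto
from proto2xproto import Proto2XProto  # module path assumed

ast = plyproto.ProtobufAnalyzer().parse_string(
    'package demo; message M { required int32 owner = 1 [model = Site]; }')
v = Proto2XProto()
ast.accept(v)  # fields carrying a model directive come back out as LinkSpecs
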
diff --git a/xos/genx/generator/uber b/xos/genx/generator/uber
new file mode 100755
index 0000000..b60071b
--- /dev/null
+++ b/xos/genx/generator/uber
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+
+import plyproto.model as m
+import pdb
+import argparse
+import plyproto.parser as plyproto
+import traceback
+import sys
+import jinja2
+import os
+import json
+import re
+from xos2jinja import XOS2Jinja
+import lib
+
+parse = argparse.ArgumentParser(description='XOS code generator')
+parse.add_argument('--xosproto', dest='input', action='store', default=None, help='Filename in XOS-enhanced Protobufs')
+parse.add_argument('--input', dest='input', action='store', default=None, help='Filename in Protobufs')
+parse.add_argument('--output', dest='output', action='store', default=None, help='Output format, corresponding to <output>.yaml file')
+parse.add_argument('--dict', dest='dict', action='append', default=[], help='JSON file mapping identifiers to replacements in rendered output')
+
+args = parse.parse_args()
+
+
+def include_file(name):
+ print name
+ return jinja2.Markup(loader.get_source(env, name)[0])
+
+loader = jinja2.PackageLoader(__name__, 'templates')
+env = jinja2.Environment(loader=loader)
+
+def main():
+ try:
+ v = XOS2Jinja()
+ parser = plyproto.ProtobufAnalyzer()
+ input = open(args.input).read()
+
+ ast = parser.parse_string(input,debug=0)
+ ast.accept(v)
+
+ template_name = os.path.abspath(args.output)
+
+ os_template_loader = jinja2.FileSystemLoader( searchpath=[os.path.split(template_name)[0]])
+ os_template_env = jinja2.Environment(loader=os_template_loader)
+ os_template_env.globals['include_file'] = include_file
+
+ for f in dir(lib):
+ if f.startswith('xproto'):
+ os_template_env.globals[f] = getattr(lib, f)
+
+ template = os_template_env.get_template(os.path.split(template_name)[1])
+ rendered = template.render({"proto": {'messages':v.messages}})
+
+ lines = rendered.splitlines()
+ current_buffer = []
+ for l in lines:
+ if (l.startswith('+++')):
+ filename = l[4:]
+ fil = open(filename,'w')
+ buf = '\n'.join(current_buffer)
+
+ obuf = buf
+ for d in args.dict:
+ df = open(d).read()
+ d = json.loads(df)
+
+ pattern = re.compile(r'\b(' + '|'.join(d.keys()) + r')\b')
+ obuf = pattern.sub(lambda x: d[x.group()], obuf)
+ fil.write(obuf)
+ fil.close()
+
+ print 'Written to file %s'%filename
+ current_buffer = []
+ else:
+ current_buffer.append(l)
+
+ if (current_buffer):
+ print '\n'.join(current_buffer)
+
+
+ except Exception as e:
+ print " Error occurred! file[%s]" % (args.input), e
+ print '-'*60
+ traceback.print_exc(file=sys.stdout)
+ print '-'*60
+ sys.exit(1)
+
+if __name__=='__main__':
+ main()
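
The '+++ <filename>' convention in main() deserves isolating: rendered template output is one stream, and each line beginning with '+++' flushes the lines buffered so far into the named file. The same splitting step as a standalone sketch:

def split_rendered(rendered):
    """Split a rendered template on '+++ <path>' marker lines."""
    outputs, current = {}, []
    for l in rendered.splitlines():
        if l.startswith('+++'):
            outputs[l[4:]] = '\n'.join(current)
            current = []
        else:
            current.append(l)
    return outputs, current  # 'current' holds any trailing unnamed output
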
diff --git a/xos/genx/generator/xos2jinja.py b/xos/genx/generator/xos2jinja.py
new file mode 100644
index 0000000..6e7771d
--- /dev/null
+++ b/xos/genx/generator/xos2jinja.py
@@ -0,0 +1,191 @@
+import plyproto.model as m
+import pdb
+import argparse
+import plyproto.parser as plyproto
+import traceback
+import sys
+import jinja2
+import os
+
+class Stack(list):
+ def push(self,x):
+ self.append(x)
+
+''' XOS2Jinja overrides the underlying visitor pattern to transform the tree
+ in addition to traversing it '''
+class XOS2Jinja(m.Visitor):
+ stack = Stack()
+ count_stack = Stack()
+ content=""
+ offset=0
+ doNameSanitization=False
+ statementsChanged=0
+ prefix=""
+
+ def get_stack(self):
+ return self.stack
+
+ def __init__(self):
+ super(XOS2Jinja, self).__init__()
+
+ self.verbose = 0
+ self.first_field = True
+ self.first_method = True
+
+ def visit_PackageStatement(self, obj):
+ '''Ignore'''
+ return True
+
+ def visit_ImportStatement(self, obj):
+ '''Ignore'''
+ return True
+
+ def visit_OptionStatement(self, obj):
+ '''Ignore'''
+ return True
+
+ def visit_LU(self, obj):
+ return True
+
+ def visit_default(self, obj):
+ return True
+
+ def visit_FieldDirective(self, obj):
+ return True
+
+ def visit_FieldDirective_post(self, obj):
+ try:
+ name = obj.name.value.pval
+ except AttributeError:
+ name = obj.name.value
+
+ try:
+ value = obj.value.value.pval
+ except AttributeError:
+ try:
+ value = obj.value.value
+ except AttributeError:
+ value = obj.value.pval
+
+ self.stack.push([name,value])
+ return True
+
+ def visit_FieldType(self, obj):
+ '''Field type, if type is name, then it may need refactoring consistent with refactoring rules according to the table'''
+ return True
+
+ def visit_LinkDefinition(self, obj):
+ s={}
+ s['link_type'] = obj.link_type.pval
+ s['src_port'] = obj.src_port.value.pval
+ s['name'] = obj.src_port.value.pval
+ try:
+ s['dst_port'] = obj.dst_port.value
+ except AttributeError:
+ s['dst_port'] = ''
+ s['peer'] = obj.name
+ s['_type'] = 'link'
+ s['options'] = {'modifier':'optional'}
+
+ self.stack.push(s)
+ return True
+
+ def visit_FieldDefinition(self, obj):
+ self.count_stack.push(len(obj.fieldDirective))
+ return True
+
+ def visit_FieldDefinition_post(self, obj):
+ s={'_type':'field'}
+ if isinstance(obj.ftype, m.Name):
+ s['type'] = obj.ftype.value
+ else:
+ s['type'] = obj.ftype.name.pval
+ s['name'] = obj.name.value.pval
+ s['modifier'] = obj.field_modifier.pval
+ s['id'] = obj.fieldId.pval
+
+ opts = {'modifier':s['modifier']}
+ n = self.count_stack.pop()
+ for i in range(0, n):
+ k,v = self.stack.pop()
+ opts[k] = v
+
+ s['options'] = opts
+ self.stack.push(s)
+ return True
+
+ def visit_EnumFieldDefinition(self, obj):
+ if self.verbose > 4:
+ print "\tEnumField: name=%s, %s" % (obj.name, obj)
+
+ return True
+
+ def visit_EnumDefinition(self, obj):
+ '''New enum definition, refactor name'''
+ if self.verbose > 3:
+ print "Enum, [%s] body=%s\n\n" % (obj.name, obj.body)
+
+ if self.doNameSanitization:
+ self.prefixize(obj.name, obj.name.value)
+ return True
+
+ def visit_MessageDefinition(self, obj):
+ self.count_stack.push(len(obj.body))
+ return True
+
+ def visit_MessageDefinition_post(self, obj):
+ stack_num = self.count_stack.pop()
+ fields = []
+ links = []
+ last_field = None
+ for i in range(0,stack_num):
+ f = self.stack.pop()
+ if (f['_type']=='link'):
+ f['options']={i:d[i] for d in [f['options'],last_field['options']] for i in d}
+
+ links.insert(0,f)
+ else:
+ fields.insert(0,f)
+ last_field = f
+
+ self.stack.push({'name':obj.name.value.pval,'fields':fields,'links':links, 'bclass':obj.bclass})
+ return True
+
+ def visit_MessageExtension(self, obj):
+ return True
+
+ def visit_MethodDefinition(self, obj):
+ return True
+
+ def visit_ServiceDefinition(self, obj):
+ return True
+
+ def visit_ExtensionsDirective(self, obj):
+ return True
+
+ def visit_Literal(self, obj):
+ return True
+
+ def visit_Name(self, obj):
+ return True
+
+ def visit_DotName(self, obj):
+ return True
+
+ def visit_Proto(self, obj):
+ self.count_stack.push(len(obj.body))
+ return True
+
+ def visit_Proto_post(self, obj):
+ count = self.count_stack.pop()
+ messages = []
+ for i in range(0,count):
+ m = self.stack.pop()
+ messages.insert(0,m)
+
+ self.messages = messages
+ return True
+
+ def visit_LinkSpec(self, obj):
+ count = self.count_stack.pop()
+ self.count_stack.push(count+1)
+ return True
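
For reference, the shape XOS2Jinja leaves in v.messages after ast.accept(v). The values below are illustrative; the keys are the ones the .xtarget templates consume:

example_message = {
    'name': 'Slice',
    'bclass': 'XOSBase',  # from the optional (Base) suffix on the message
    'fields': [
        {'_type': 'field', 'name': 'name', 'type': 'string',
         'modifier': 'required', 'id': '2',
         'options': {'modifier': 'required'}},
    ],
    'links': [
        {'_type': 'link', 'link_type': 'manytoone', 'src_port': 'owner',
         'name': 'owner', 'dst_port': 'slices', 'peer': 'Site',
         'options': {'modifier': 'required'}},
    ],
}
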
diff --git a/xos/genx/generator/xos_demo.py b/xos/genx/generator/xos_demo.py
new file mode 100755
index 0000000..2c10ac6
--- /dev/null
+++ b/xos/genx/generator/xos_demo.py
@@ -0,0 +1,18 @@
+#!/usr/bin/python
+
+import plyproto.parser as plyproto
+
+
+test2 = """package tutorial;
+
+message Person {
+ required onetoone brother -> Person:email = 1;
+ required int32 id = 2;
+ optional string email = 3;
+}
+
+"""
+
+parser = plyproto.ProtobufAnalyzer()
+
+print(parser.parse_string(test2))
diff --git a/xos/genx/targets/django.xtarget b/xos/genx/targets/django.xtarget
new file mode 100644
index 0000000..b35d0be
--- /dev/null
+++ b/xos/genx/targets/django.xtarget
@@ -0,0 +1,21 @@
+{% for m in proto.messages %}
+
+{%- for l in m.links %}
+from core.models.{{ l.peer | lower }} import {{ l.peer }}
+{%- endfor %}
+
+{{ include_file('header.py') }}
+{{ include_file(m.name+'_header.py') }}
+
+class {{ m.name }}({{ m.bclass }}):
+ # Primitive Fields (Not Relations)
+ {%- for f in m.fields %}
+ {{ f.name }} = {{ xproto_django_type(f.type, f.options) }}( {{ xproto_django_options_str(f) }} );
+ {%- endfor %}
+
+ # Relations
+ {%- for l in m.links %}
+ {{ l.src_port }} = {{ xproto_django_link_type(l) }}( {{ l.peer }}, {{ xproto_django_options_str(l) }} );
+ {%- endfor %}
+{% endfor %}
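
Assuming the xproto_django_* helpers expand the way their names suggest, the template above should render roughly the following for a Slice message with one string field and one manytoone link (a sketch, not verified output):

# Illustrative rendering only; the helper output below is assumed.
expected_django = '''
from core.models.site import Site

class Slice(XOSBase):
    # Primitive Fields (Not Relations)
    name = models.CharField( max_length=256 );

    # Relations
    owner = models.ForeignKey( Site, related_name='slices' );
'''
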
diff --git a/xos/genx/targets/dot.xtarget b/xos/genx/targets/dot.xtarget
new file mode 100644
index 0000000..d0e0369
--- /dev/null
+++ b/xos/genx/targets/dot.xtarget
@@ -0,0 +1,6 @@
+digraph {
+{% for m in proto.messages %}
+ {%- for l in m.links %}
+ {{ l.src_port }} -> {{ l.dst_port }};
+ {%- endfor %}
+{% endfor %}
+}
diff --git a/xos/genx/targets/proto.xtarget b/xos/genx/targets/proto.xtarget
new file mode 100644
index 0000000..9a168ca
--- /dev/null
+++ b/xos/genx/targets/proto.xtarget
@@ -0,0 +1,11 @@
+{% for m in proto.messages %}
+message {{ m.name }} {
+ {%- for f in m.fields %}
+ {{ f.modifier }} {{ f.type }} {{ f.name }} = {{ f.id }}{% if f.options %} [{% for k,v in f.options.iteritems() %} {{ k }} = {{ v }}{% if not loop.last %},{% endif %} {% endfor %}]{% endif %};
+ {%- endfor %}
+
+ {%- for l in m.links %}
+ {{ l.link_type }}
+ {%- endfor %}
+}
+{% endfor %}
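
And the round trip through proto.xtarget for the same message comes out roughly as below; note that links currently render only their bare link_type:

# Illustrative output only, based on the template above.
expected_proto = '''
message Slice {
    required string name = 2 [ modifier = required ];

    manytoone
}
'''
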