Chameleon swagger support

Change-Id: I63b8dc7b31d5e87aa0e5153da302537d90ff733e
diff --git a/grpc_client/grpc_client.py b/grpc_client/grpc_client.py
index e9ce5c6..790bab3 100644
--- a/grpc_client/grpc_client.py
+++ b/grpc_client/grpc_client.py
@@ -190,8 +190,11 @@
         :return: None
         """
         google_api_dir = os.path.abspath(os.path.join(
-            os.path.dirname(__file__),
-            '../protos/third_party'
+            os.path.dirname(__file__), '../protos/third_party'
+        ))
+
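+        # root of the chameleon tree, two levels up from this file; used
+        # below together with google_api_dir, presumably so the new swagger
+        # plugin can resolve the chameleon.* imports it relies on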
+        chameleon_base_dir = os.path.abspath(os.path.join(
+            os.path.dirname(__file__), '../..'
         ))
 
         for fname in [f for f in os.listdir(self.work_dir)
@@ -208,11 +211,14 @@
                 '--grpc_python_out=. '
                 '--plugin=protoc-gen-gw=%s/gw_gen.py '
                 '--gw_out=. '
+                '--plugin=protoc-gen-swagger=%s/swagger_gen.py '
+                '--swagger_out=. '
                 '%s' % (
                     self.work_dir,
                     ':'.join([os.environ['PATH'], self.plugin_dir]),
+                    ':'.join([google_api_dir, chameleon_base_dir]),
                     google_api_dir,
-                    google_api_dir,
+                    self.plugin_dir,
                     self.plugin_dir,
                     fname)
             )
diff --git a/protoc_plugins/descriptor.desc b/protoc_plugins/descriptor.desc
new file mode 100644
index 0000000..ac12c36
--- /dev/null
+++ b/protoc_plugins/descriptor.desc
Binary files differ
diff --git a/protoc_plugins/descriptor_parser.py b/protoc_plugins/descriptor_parser.py
new file mode 100644
index 0000000..c23f497
--- /dev/null
+++ b/protoc_plugins/descriptor_parser.py
@@ -0,0 +1,164 @@
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+from collections import OrderedDict
+
+from google.protobuf import descriptor_pb2
+from google.protobuf.descriptor import FieldDescriptor, Descriptor
+from google.protobuf.message import Message
+
+
+class InvalidDescriptorError(Exception): pass
+
+
+class DescriptorParser(object):
+    """
+    Used to parse protobuf FileDescriptor objects into native Python
+    data structures (nested dict/list/intrinsic values). Two of the typical
+    sources of FileDescriptor objects are:
+    1. CodeGeneratorRequest, used as binary input to any protoc plugin,
+       contains a list of these FileDescriptor objects (under the
+       proto_file attribute)
+    2. FileDescriptorSet, as saved by protoc when using the -o option.
+
+    An important feature of the parser is that it can process the source
+    code annotations and can fold comments into the relevant definitions
+    present in the proto file.
+
+    Usage (in a protoc plugin):
+    >>> request = plugin.CodeGeneratorRequest()
+    >>> request.ParseFromString(sys.stdin.read())
+    >>> parser = DescriptorParser()
+    >>> for proto_file in request.proto_file:
+    >>>     parsed_data = parser.parse_file_descriptor(proto_file)
+    >>>     print json.dumps(parsed_data, indent=4)
+    """
+
+    meta = None
+
+    def __init__(self):
+        if DescriptorParser.meta is None:
+            DescriptorParser.meta = self.load_meta_descriptor()
+
+    def load_meta_descriptor(self):
+        """
+        Load the protobuf version of descriptor.proto to use it in
+        decoding protobuf paths.
+        """
+        fpath = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                             'descriptor.desc'))
+        with open(fpath, 'rb') as f:
+            blob = f.read()
+        proto = descriptor_pb2.FileDescriptorSet()
+        proto.ParseFromString(blob)
+        assert len(proto.file) == 1
+        return proto.file[0]
+
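+    # Note: descriptor.desc is a serialized FileDescriptorSet describing
+    # descriptor.proto itself; it can be regenerated with stock protoc,
+    # e.g. `protoc -o descriptor.desc descriptor.proto` (the path to
+    # descriptor.proto depends on the local protobuf installation).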
+    parser_table = {
+        unicode: lambda x: x,
+        int: lambda x: x,
+        long: lambda x: x,  # int64 values surface as long under Python 2
+        bool: lambda x: x,
+    }
+
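+    # parse() dispatches on runtime type: protobuf Message instances recurse
+    # through parse_message(), while plain scalar values pass through the
+    # identity lambdas above, e.g. parse(u'abc') == u'abc'.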
+    def parse(self, o, type_tag_name=None):
+        if isinstance(o, Message):
+            return self.parse_message(o, type_tag_name)
+        else:
+            return self.parser_table[type(o)](o)
+
+    def parse_message(self, m, type_tag_name=None):
+        assert isinstance(m, Message)
+        d = OrderedDict()
+        for field, value in m.ListFields():
+            assert isinstance(field, FieldDescriptor)
+            if field.label in (1, 2):
+                d[field.name] = self.parse(value, type_tag_name)
+            elif field.label == 3:
+                d[field.name] = [self.parse(x, type_tag_name) for x in
+                                 value]
+            else:
+                raise InvalidDescriptorError()
+
+        if type_tag_name is not None:
+            d[type_tag_name] = m.DESCRIPTOR.full_name.strip('.')
+
+        return d
+
+    def parse_file_descriptor(self, descriptor,
+                              type_tag_name=None,
+                              fold_comments=False):
+
+        d = self.parse(descriptor, type_tag_name=type_tag_name)
+
+        if fold_comments:
+            locations = d.get('source_code_info', {}).get('location', [])
+            for location in locations:
+                path = location.get('path', [])
+                comments = ''.join([
+                    location.get('leading_comments', '').strip(' '),
+                    location.get('trailing_comments', '').strip(' '),
+                    ''.join(block.strip(' ') for block in
+                            location.get('leading_detached_comments', ''))
+                ]).strip()
+
+                # ignore locations with no comments
+                if not comments:
+                    continue
+
+                # we ignore path with odd number of entries, since these do
+                # not address our schema nodes, but rather the meta schema
+                if (len(path) % 2 == 0):
+                    node = self.find_node_by_path(
+                        path, self.meta.DESCRIPTOR, d)
+                    assert isinstance(node, dict)
+                    node['_description'] = comments
+
+            # remove source_code_info
+            del d['source_code_info']
+
+        return d
+
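+    # Usage sketch (assuming `fds` is a FileDescriptorSet read from a file
+    # generated with `protoc -o out.desc --include_source_info foo.proto`):
+    #
+    #     parser = DescriptorParser()
+    #     d = parser.parse_file_descriptor(fds.file[0],
+    #                                      type_tag_name='_type',
+    #                                      fold_comments=True)
+    #
+    # comments from foo.proto are folded in as '_description' entries.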
+    def parse_file_descriptors(self, descriptors,
+                               type_tag_name=None,
+                               fold_comments=False):
+        return [self.parse_file_descriptor(descriptor,
+                                           type_tag_name=type_tag_name,
+                                           fold_comments=fold_comments)
+                for descriptor in descriptors]
+
+    def find_node_by_path(self, path, meta, o):
+        # stop recursion when path is empty
+        if not path:
+            return o
+
+        # sanity check
+        assert len(path) >= 2
+        assert isinstance(meta, Descriptor)
+        assert isinstance(o, dict)
+
+        # find field name, then actual field
+        field_number = path.pop(0)
+        field_def = meta.fields_by_number[field_number]
+        field = o[field_def.name]
+
+        # field must be a list, extract entry with given index
+        assert isinstance(field, list)  # expected to be a list field
+        index = path.pop(0)
+        child_o = field[index]
+
+        child_meta = field_def.message_type
+        return self.find_node_by_path(path, child_meta, child_o)
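+
+# For reference, find_node_by_path() walks the standard source_code_info
+# path convention: e.g. path [4, 0, 2, 1] means field number 4 of
+# FileDescriptorProto (message_type), entry 0, then field number 2 of
+# DescriptorProto (field), entry 1 -- the second field of the first
+# message declared in the file.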
diff --git a/protoc_plugins/gw_gen.py b/protoc_plugins/gw_gen.py
index e6dac24..77f74fa 100755
--- a/protoc_plugins/gw_gen.py
+++ b/protoc_plugins/gw_gen.py
@@ -14,19 +14,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import os
 import sys
 
-from google.protobuf import descriptor as _descriptor
 from google.protobuf.compiler import plugin_pb2 as plugin
-from google.protobuf.descriptor import FieldDescriptor
-from google.protobuf.descriptor_pb2 import ServiceDescriptorProto, MethodOptions
-from google.protobuf.message import Message
+from google.protobuf.descriptor_pb2 import ServiceDescriptorProto, \
+    MethodOptions
 from jinja2 import Template
 from simplejson import dumps
 
-# without this import, http method annotations would not be recognized:
-from google.api import annotations_pb2, http_pb2
+from chameleon.protos.third_party.google.api import annotations_pb2, http_pb2
+_ = annotations_pb2, http_pb2  # to keep import line from being optimized out
 
 
 template = Template("""
diff --git a/protoc_plugins/protobuf_introspect.py b/protoc_plugins/protobuf_introspect.py
new file mode 100755
index 0000000..408a6da
--- /dev/null
+++ b/protoc_plugins/protobuf_introspect.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Load a protobuf description file or protoc CodeGeneratorRequest and make
+sense of it.
+"""
+
+import os
+import inspect
+import sys
+from collections import OrderedDict
+
+from google.protobuf.compiler.plugin_pb2 import CodeGeneratorRequest
+from google.protobuf.descriptor import FieldDescriptor, Descriptor
+from google.protobuf.descriptor_pb2 import FileDescriptorProto, MethodOptions
+from google.protobuf.message import Message, DecodeError
+from simplejson import dumps
+
+from google.protobuf import descriptor_pb2
+
+
+class InvalidDescriptorError(Exception): pass
+
+
+class DescriptorParser(object):
+
+    def __init__(self, ignore_empty_source_code_info=True):
+        self.ignore_empty_source_code_info = ignore_empty_source_code_info
+        self.catalog = {}
+        self.meta, blob = self.load_root_descriptor()
+        self.load_descriptor(blob)
+
+    def load_root_descriptor(self):
+        """Load descriptor.desc to make things more data driven"""
+        fpath = os.path.join(os.path.dirname(__file__), 'descriptor.desc')
+        with open(fpath, 'rb') as f:
+            blob = f.read()
+        proto = descriptor_pb2.FileDescriptorSet()
+        proto.ParseFromString(blob)
+        assert len(proto.file) == 1
+        fdp = proto.file[0]
+
+        # for i, (fd, v) in enumerate(fdp.ListFields()):
+        #     assert isinstance(fd, FieldDescriptor)
+        #     print fd.name, fd.full_name, fd.number, fd.type, fd.label, fd.message_type, type(v)
+
+        return fdp, blob
+
+    def get_catalog(self):
+        return self.catalog
+
+    def load_descriptor(self, descriptor_blob,
+                        fold_comments=True,
+                        type_tag_name='_type'):
+
+        # decode file descriptor set or if that is not possible,
+        # try plugin request
+        try:
+            message = descriptor_pb2.FileDescriptorSet()
+            message.ParseFromString(descriptor_blob)
+        except DecodeError:
+            message = CodeGeneratorRequest()
+            message.ParseFromString(descriptor_blob)
+
+        d = self.parse(message, type_tag_name=type_tag_name)
+        for _file in d.get('file', None) or d['proto_file']:
+            if fold_comments:
+                self.fold_comments_in(_file)
+            self.catalog[_file['package']] = _file
+
+    def parse_message(self, m, type_tag_name=None):
+        assert isinstance(m, Message)
+        d = OrderedDict()
+        for fd, v in m.ListFields():
+            assert isinstance(fd, FieldDescriptor)
+            if fd.label in (1, 2):
+                d[fd.name] = self.parse(v, type_tag_name)
+            elif fd.label == 3:
+                d[fd.name] = [self.parse(x, type_tag_name) for x in v]
+            else:
+                raise InvalidDescriptorError()
+
+        if type_tag_name is not None:
+            d[type_tag_name] = m.DESCRIPTOR.full_name
+
+        return d
+
+    parser_table = {
+        unicode: lambda x: x,
+        int: lambda x: x,
+        long: lambda x: x,  # int64 values surface as long under Python 2
+        bool: lambda x: x,
+    }
+
+    def parse(self, o, type_tag_name=None):
+        if isinstance(o, Message):
+            return self.parse_message(o, type_tag_name)
+        else:
+            return self.parser_table[type(o)](o)
+
+    def fold_comments_in(self, descriptor):
+        assert isinstance(descriptor, dict)
+
+        locations = descriptor.get('source_code_info', {}).get('location', [])
+        for location in locations:
+            path = location.get('path', [])
+            comments = ''.join([
+                location.get('leading_comments', '').strip(' '),
+                location.get('trailing_comments', '').strip(' '),
+                ''.join(block.strip(' ') for block in
+                        location.get('leading_detached_comments', ''))
+            ]).strip()
+
+            # ignore locations with no comments
+            if not comments:
+                continue
+
+            # we ignore path with odd number of entries, since these do
+            # not address our schema nodes, but rather the meta schema
+            if (len(path) % 2 == 0):
+                node = self.find_node_by_path(
+                    path, self.meta.DESCRIPTOR, descriptor)
+                assert isinstance(node, dict)
+                node['_description'] = comments
+
+        # remove source_code_info
+        del descriptor['source_code_info']
+
+    def find_node_by_path(self, path, meta, o):
+
+        # stop recursion when path is empty
+        if not path:
+            return o
+
+        # sanity check
+        assert len(path) >= 2
+        assert isinstance(meta, Descriptor)
+        assert isinstance(o, dict)
+
+        # find field name, then actual field
+        field_number = path.pop(0)
+        field_def = meta.fields_by_number[field_number]
+        field = o[field_def.name]
+
+        # field must be a list, extract entry with given index
+        assert isinstance(field, list)  # expected to be a list field
+        index = path.pop(0)
+        child_o = field[index]
+
+        child_meta = field_def.message_type
+        return self.find_node_by_path(path, child_meta, child_o)
+
+
+if __name__ == '__main__':
+
+    # try loading voltha descriptor and turn it into JSON data as a preparation
+    # for generating JSON Schema / swagger file (to be done later)
+    from voltha.protos import voltha_pb2
+
+    if len(sys.argv) >= 2:
+        desc_file = sys.argv[1]
+    else:
+        # voltha_pb2 must already be imported for this fallback to work
+        desc_dir = os.path.dirname(inspect.getfile(voltha_pb2))
+        desc_file = os.path.join(desc_dir, 'voltha.desc')
+
+    with open(desc_file, 'rb') as f:
+        descriptor_blob = f.read()
+
+    parser = DescriptorParser()
+    parser.save_file_desc = '/tmp/grpc_introspection.out'
+
+    parser.load_descriptor(descriptor_blob)
+    print dumps(parser.get_catalog(), indent=4)
+    sys.exit(0)
+
+    # scratch code below is left unreachable behind the sys.exit(0) above:
+    # try to see if we can decode binary data into JSON automatically
+    from random import seed, randint
+    seed(0)
+
+    def make_mc(name, n_children=0):
+        mc = voltha_pb2.MoreComplex(
+            name=name,
+            foo_counter=randint(0, 10000),
+            health=voltha_pb2.HealthStatus(
+                state=voltha_pb2.HealthStatus.OVERLOADED
+            ),
+            address=voltha_pb2.Address(
+                street='1383 N McDowell Blvd',
+                city='Petaluma',
+                zip=94954,
+                state='CA'
+            ),
+            children=[make_mc('child%d' % (i + 1)) for i in xrange(n_children)]
+        )
+        return mc
+
+    mc = make_mc('root', 3)
+    blob = mc.SerializeToString()
+    print len(blob), 'bytes'
+    mc2 = voltha_pb2.MoreComplex()
+    mc2.ParseFromString(blob)
+    assert mc == mc2
+
+    print dumps(parser.parse(mc, type_tag_name='_type'), indent=4)
diff --git a/protoc_plugins/swagger_gen.py b/protoc_plugins/swagger_gen.py
new file mode 100755
index 0000000..0342d87
--- /dev/null
+++ b/protoc_plugins/swagger_gen.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import sys
+
+from google.protobuf.compiler import plugin_pb2 as plugin
+from simplejson import dumps
+
+# without this import, http method annotations would not be recognized:
+from google.api import annotations_pb2, http_pb2
+
+from chameleon.protoc_plugins.descriptor_parser import DescriptorParser
+from swagger_template import native_descriptors_to_swagger
+
+
+def generate_code(request, response):
+
+    assert isinstance(request, plugin.CodeGeneratorRequest)
+
+    parser = DescriptorParser()
+    native_data = parser.parse_file_descriptors(request.proto_file,
+                                                type_tag_name='_type',
+                                                fold_comments=True)
+    swagger = native_descriptors_to_swagger(native_data)
+
+    # generate the native decoded schema as json
+    # f = response.file.add()
+    # f.name = proto_file.name.replace('.proto', '.native.json')
+    # f.content = dumps(data)
+
+    # generate the real swagger.json file
+    f = response.file.add()
+    f.name = 'swagger.json'
+    f.content = dumps(swagger)
+
+
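+# Like any protoc plugin, this script is normally launched by protoc itself,
+# which passes a serialized CodeGeneratorRequest on stdin and expects a
+# serialized CodeGeneratorResponse on stdout; a sketch of an invocation
+# (paths assumed):
+#
+#   protoc -I. --plugin=protoc-gen-swagger=./swagger_gen.py \
+#       --swagger_out=. foo.proto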
+if __name__ == '__main__':
+
+    if len(sys.argv) >= 2:
+        # read input from file, to allow troubleshooting
+        with open(sys.argv[1], 'r') as f:
+            data = f.read()
+    else:
+        # read input from stdin
+        data = sys.stdin.read()
+
+    # parse request
+    request = plugin.CodeGeneratorRequest()
+    request.ParseFromString(data)
+
+    # create response object
+    response = plugin.CodeGeneratorResponse()
+
+    # generate the output and the response
+    generate_code(request, response)
+
+    # serialize the response
+    output = response.SerializeToString()
+
+    # write response to stdout
+    sys.stdout.write(output)
diff --git a/protoc_plugins/swagger_template.py b/protoc_plugins/swagger_template.py
new file mode 100644
index 0000000..d93c8da
--- /dev/null
+++ b/protoc_plugins/swagger_template.py
@@ -0,0 +1,468 @@
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import re
+from collections import OrderedDict
+from copy import copy
+
+from google.protobuf.descriptor import FieldDescriptor
+
+re_path_param = re.compile(r'/{([^{]+)}')
+re_segment = re.compile(r'/(?P<absolute>[^{}/]+)|(?P<symbolic>{[^}]+})')
+
+
+class DuplicateMethodAndPathError(Exception): pass
+class ProtobufCompilationFailedError(Exception): pass
+class InvalidPathArgumentError(Exception): pass
+
+
+def native_descriptors_to_swagger(native_descriptors):
+    """
+    Generate a swagger data dict from the native descriptors extracted
+    from protobuf file(s).
+    :param native_descriptors:
+        List of dicts as extracted from proto file descriptors.
+        See DescriptorParser and its parse_file_descriptors() method.
+    :return: dict ready to be serialized to JSON as swagger.json file.
+    """
+
+    # gather all top-level and nested message type definitions and build map
+    message_types_dict = gather_all_message_types(native_descriptors)
+    message_type_names = set(message_types_dict.iterkeys())
+
+    # create similar map for all top-level and nested enum definitions
+    enum_types_dict = gather_all_enum_types(native_descriptors)
+    enum_type_names = set(enum_types_dict.iterkeys())
+
+    # make sure no names clash and build a set of all names (sanity checks)
+    assert not message_type_names.intersection(enum_type_names)
+    all_type_names = message_type_names.union(enum_type_names)
+    all_types = {}
+    all_types.update(message_types_dict)
+    all_types.update(enum_types_dict)
+
+    # gather all method definitions and collect all referenced input/output
+    # types
+    types_referenced, methods_dict = gather_all_methods(native_descriptors)
+
+    # process all directly and indirectly referenced types into JSON schema
+    # type definitions
+    definitions = generate_definitions(types_referenced, all_types)
+
+    # process all methods and generate the swagger path entries
+    paths = generate_paths(methods_dict, definitions)
+
+    # static part
+    # last descriptor is assumed to be the top-most one
+    root_descriptor = native_descriptors[-1]
+    swagger = {
+        'swagger': "2.0",
+        'info': {
+            'title': root_descriptor['name'],
+            'version': "version not set"
+        },
+        'schemes': ["http", "https"],
+        'consumes': ["application/json"],
+        'produces': ["application/json"],
+        'paths': paths,
+        'definitions': definitions
+    }
+
+    return swagger
+
+
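+# Usage sketch, assuming `native` is the list produced by
+# DescriptorParser.parse_file_descriptors(..., type_tag_name='_type',
+# fold_comments=True):
+#
+#     swagger = native_descriptors_to_swagger(native)
+#     with open('swagger.json', 'w') as f:
+#         f.write(dumps(swagger))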
+def gather_all_message_types(descriptors):
+    return dict(
+        (full_name, message_type)
+        for full_name, message_type
+        in iterate_message_types(descriptors)
+    )
+
+
+def gather_all_enum_types(descriptors):
+    return dict(
+        (full_name, enum_type)
+        for full_name, enum_type
+        in iterate_enum_types(descriptors)
+    )
+
+
+def gather_all_methods(descriptors):
+    types_referenced = set()
+    methods = OrderedDict()
+    for full_name, service, method in iterate_methods(descriptors):
+        methods[full_name] = (service, method)
+        types_referenced.add(method['input_type'].strip('.'))
+        types_referenced.add(method['output_type'].strip('.'))
+    return types_referenced, methods
+
+
+def iterate_methods(descriptors):
+    for descriptor in descriptors:
+        package = descriptor['package']
+        for service in descriptor.get('service', []):
+            service_prefix = package + '.' + service['name']
+            for method in service.get('method', []):
+                # skip methods that do not have http options
+                options = method['options']
+                if 'http' in options:
+                    full_name = service_prefix + '.' + method['name']
+                    yield full_name, service, method
+
+
+def iterate_for_type_in(message_types, prefix):
+    for message_type in message_types:
+        full_name = prefix + '.' + message_type['name']
+        yield full_name, message_type
+        for nested_full_name, nested in iterate_for_type_in(
+                message_type.get('nested_type', []), full_name):
+            yield nested_full_name, nested
+
+
+def iterate_message_types(descriptors):
+    for descriptor in descriptors:
+        package = descriptor['package']
+        top_types = descriptor.get('message_type', [])
+        for full_name, message_type in iterate_for_type_in(top_types, package):
+            yield full_name, message_type
+
+
+def iterate_enum_types(descriptors):
+    for descriptor in descriptors:
+        package = descriptor['package']
+        for enum in descriptor.get('enum_type', []):
+            enum_full_name = package + '.' + enum['name']
+            yield enum_full_name, enum
+        top_types = descriptor.get('message_type', [])
+        for full_name, message_type in iterate_for_type_in(top_types, package):
+            for enum in message_type.get('enum_type', []):
+                enum_full_name = full_name + '.' + enum['name']
+                yield enum_full_name, enum
+
+
+def generate_definitions(types_referenced, types):
+    """Walk all the referenced types and for each, generate a JSON schema
+       definition. These may also refer to other types, so keep the needed
+       set up-to-date.
+    """
+    definitions = {}
+    wanted = copy(types_referenced)
+    while wanted:
+        full_name = wanted.pop()
+        type = types[full_name]
+        definition, types_referenced = make_definition(type, types)
+        definitions[full_name] = definition
+        for type_referenced in types_referenced:
+            if type_referenced not in definitions:
+                wanted.add(type_referenced)
+    return definitions
+
+
+def make_definition(type, types):
+    if type['_type'] == 'google.protobuf.EnumDescriptorProto':
+        return make_enum_definition(type), set()
+    else:
+        return make_object_definition(type, types)
+
+
+def make_enum_definition(type):
+
+    def make_value_desc(enum_value):
+        txt = ' - {}'.format(enum_value['name'])
+        description = enum_value.get('_description', '')
+        if description:
+            txt += ': {}'.format(description)
+        return txt
+
+    string_values = [v['name'] for v in type['value']]
+    default = type['value'][0]['name']
+    description = (
+        (type.get('_description', '') or type['name'])
+        + '\nValid values:\n'
+        + '\n'.join(make_value_desc(v) for v in type['value'])
+    )
+
+    definition = {
+        'type': 'string',
+        'enum': string_values,
+        'default': default,
+        'description': description
+    }
+
+    return definition
+
+
+def make_object_definition(type, types):
+
+    definition = {
+        'type': 'object'
+    }
+
+    referenced = set()
+    properties = {}
+    for field in type.get('field', []):
+        field_name, property, referenced_by_field = make_property(field, types)
+        properties[field_name] = property
+        referenced.update(referenced_by_field)
+
+    if properties:
+        definition['properties'] = properties
+
+    if '_description' in type:
+        definition['description'] = type['_description']
+
+    return definition, referenced
+
+
+def make_property(field, types):
+
+    referenced = set()
+
+    repeated = field['label'] == FieldDescriptor.LABEL_REPEATED
+
+    def check_if_map_entry(type_name):
+        type = types[type_name]
+        if type.get('options', {}).get('map_entry', False):
+            _, property, __ = make_property(type['field'][1], types)
+            return property
+
+    if field['type'] == FieldDescriptor.TYPE_MESSAGE:
+
+        type_name = field['type_name'].strip('.')
+
+        maybe_map_value_type = check_if_map_entry(type_name)
+        if maybe_map_value_type:
+            # map-entries are inlined
+            repeated = False
+            property = {
+                'type': 'object',
+                'additionalProperties': maybe_map_value_type
+            }
+
+        elif type_name == 'google.protobuf.Timestamp':
+            # time-stamp is mapped back to JSON schema date-time string
+            property = {
+                'type': 'string',
+                'format': 'date-time'
+            }
+
+        else:
+            # normal nested object field
+            property = {
+                '$ref': '#/definitions/{}'.format(type_name)
+            }
+            referenced.add(type_name)
+
+    elif field['type'] == FieldDescriptor.TYPE_ENUM:
+        type_name = field['type_name'].strip('.')
+        property = {
+            '$ref': '#/definitions/{}'.format(type_name)
+        }
+        referenced.add(type_name)
+
+    elif field['type'] == FieldDescriptor.TYPE_GROUP:
+        raise NotImplementedError()
+
+    else:
+        _type, format = TYPE_MAP[field['type']]
+        property = {
+            'type': _type,
+            'format': format
+        }
+
+    if repeated:
+        property = {
+            'type': 'array',
+            'items': property
+        }
+
+    if '_description' in field:
+        property['description'] = field['_description']
+
+    return field['name'], property, referenced
+
+
+def generate_paths(methods_dict, definitions):
+
+    paths = {}
+
+    def _iterate():
+        for full_name, (service, method) in methods_dict.iteritems():
+            http_option = method['options']['http']
+            yield service, method, http_option
+            for binding in http_option.get('additional_bindings', []):
+                yield service, method, binding
+
+    def prune_path(path):
+        """rid '=<stuff>' pattern from path symbolic segments"""
+        segments = re_segment.findall(path)
+        pruned_segments = []
+        for absolute, symbolic in segments:
+            if symbolic:
+                full_symbol = symbolic[1:-1]
+                pruned_symbol = full_symbol.split('=', 2)[0]
+                pruned_segments.append('{' + pruned_symbol + '}')
+            else:
+                pruned_segments.append(absolute)
+
+        return '/' + '/'.join(pruned_segments)
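+    # e.g. prune_path('/v1/{id=projects/*}/items') -> '/v1/{id}/items'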
+
+    def lookup_input_type(input_type_name):
+        return definitions[input_type_name.strip('.')]
+
+    def lookup_type(input_type, field_name):
+        local_field_name, _, rest = field_name.partition('.')
+        properties = input_type['properties']
+        if local_field_name not in properties:
+            raise InvalidPathArgumentError(
+                'Input type has no field {}'.format(field_name))
+        field = properties[local_field_name]
+        if rest:
+            field_type = field.get('type', 'object')
+            assert field_type == 'object', (
+                'Nested field name "%s" refers to a field of type "%s" '
+                '(.%s should be a nested object field)'
+                % (field_name, field_type, local_field_name))
+            ref = field['$ref']
+            assert ref.startswith('#/definitions/')
+            type_name = ref.replace('#/definitions/', '')
+            nested_input_type = lookup_input_type(type_name)
+            return lookup_type(nested_input_type, rest)
+        else:
+            return field['type'], field['format']
+
+    def make_entry(service, method, http):
+        parameters = []
+        verb = None
+        for verb_candidate in ('get', 'delete', 'patch', 'post', 'put'):
+            if verb_candidate in http:
+                verb, path = verb_candidate, http[verb_candidate]
+                break
+        if 'custom' in http:
+            assert verb is None
+            verb = http['custom']['kind']
+            path = http['custom']['path']
+        assert verb is not None
+        path = prune_path(path)
+
+        # for each symbolic segment in path, add a path parameter entry
+        input_type = lookup_input_type(method['input_type'])
+        for segment in re_path_param.findall(path):
+            symbol = segment.split('=')[0]
+            _type, format = lookup_type(input_type, symbol)
+            parameters.append({
+                'in': 'path',
+                'name': symbol,
+                'required': True,
+                'type': _type,
+                'format': format
+            })
+
+        if 'body' in http:  # TODO validate if body lists fields
+            parameters.append({
+                'in': 'body',
+                'name': 'body',
+                'required': True,
+                'schema': {'$ref': '#/definitions/{}'.format(
+                    method['input_type'].strip('.'))}
+            })
+
+        entry = {
+            'operationId': method['name'],
+            'tags': [service['name']],
+            'responses': {
+                '200': {  # TODO: code is 201 and 209 in POST/DELETE?
+                    'description': unicode(""),  # TODO: ever filled by proto?
+                    'schema': {
+                        '$ref': '#/definitions/{}'.format(
+                            method['output_type'].strip('.'))
+                    }
+                },
+                # TODO shall we prefill with standard error (verb specific),
+                # such as 400, 403, 404, 409, 509, 500, 503 etc.
+            }
+        }
+
+        if parameters:
+            entry['parameters'] = parameters
+
+        summary, description = extract_summary_and_description(method)
+        if summary:
+            entry['summary'] = summary
+        if description:
+            entry['description'] = description
+
+        return path, verb, entry
+
+    for service, method, http in _iterate():
+        path, verb, entry = make_entry(service, method, http)
+        path_dict = paths.setdefault(path, {})
+        if verb in path_dict:
+            raise DuplicateMethodAndPathError(
+                'There is already a {} method defined for path ({})'.format(
+                    verb, path))
+        path_dict[verb] = entry
+
+    return paths
+
+
+def extract_summary_and_description(obj):
+    """
+    Break raw _description field (if present) into a summary line and/or
+    detailed description text as follows:
+    * if the text is a single line (not counting white-space), then it is a
+      summary and there is no detailed description.
+    * if the text starts with a non-empty line followed by an empty line
+      followed by at least one non-empty line, then the first line is the
+      summary and the lines after the empty line are the description.
+    * in all other cases the text is considered a description and no summary
+      is generated.
+    """
+    assert isinstance(obj, dict)
+    summary, description = None, None
+    text = obj.get('_description', '')
+    if text:
+        s, blank, d = (text.split('\n', 2) + ['', ''])[:3]  # so we can demux
+        if not blank.strip():
+            summary = s
+            if d.strip():
+                description = d
+        else:
+            description = text
+
+    return summary, description
+
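+# For example (a sketch of the rules above):
+#   {'_description': 'One line'}           -> ('One line', None)
+#   {'_description': 'Title\n\nBody text'} -> ('Title', 'Body text')
+#   {'_description': 'Line 1\nLine 2'}     -> (None, 'Line 1\nLine 2')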
+
+TYPE_MAP = {
+        FieldDescriptor.TYPE_BOOL: ('boolean', 'boolean'),
+        FieldDescriptor.TYPE_BYTES: ('string', 'byte'),
+        FieldDescriptor.TYPE_DOUBLE: ('number', 'double'),
+        FieldDescriptor.TYPE_ENUM: ('string', 'string'),
+        FieldDescriptor.TYPE_FIXED32: ('integer', 'int64'),
+        FieldDescriptor.TYPE_FIXED64: ('string', 'uint64'),
+        FieldDescriptor.TYPE_FLOAT: ('number', 'float'),
+        FieldDescriptor.TYPE_INT32: ('integer', 'int32'),
+        FieldDescriptor.TYPE_INT64: ('string', 'int64'),
+        FieldDescriptor.TYPE_SFIXED32: ('integer', 'int32'),
+        FieldDescriptor.TYPE_SFIXED64: ('string', 'int64'),
+        FieldDescriptor.TYPE_STRING: ('string', 'string'),
+        FieldDescriptor.TYPE_SINT32: ('integer', 'int32'),
+        FieldDescriptor.TYPE_SINT64: ('string', 'int64'),
+        FieldDescriptor.TYPE_UINT32: ('integer', 'int64'),
+        FieldDescriptor.TYPE_UINT64: ('string', 'uint64'),
+        # FieldDescriptor.TYPE_MESSAGE:
+        # FieldDescriptor.TYPE_GROUP:
+}
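+
+# Note: the 64-bit integer kinds map to JSON type 'string', matching the
+# standard proto3 JSON mapping, since plain JSON numbers cannot safely carry
+# the full 64-bit range.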
diff --git a/protos/third_party/__init__.py b/protos/third_party/__init__.py
index e69de29..19192c4 100644
--- a/protos/third_party/__init__.py
+++ b/protos/third_party/__init__.py
@@ -0,0 +1,50 @@
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+This helps loading http_pb2 and annotations_pb2.
+Without this, the Python importer will not be able to process the lines:
+from google.api import http_pb2 or
+from google.api import annotations_pb2
+(Without importing these, the protobuf loader will not recognize http options
+in the protobuf definitions.)
+"""
+
+from importlib import import_module
+import os
+import sys
+
+
+class GoogleApiImporter(object):
+
+    def find_module(self, full_name, path=None):
+        if full_name == 'google.api':
+            self.path = [os.path.dirname(__file__)]
+            return self
+
+    def load_module(self, name):
+        if name in sys.modules:
+            return sys.modules[name]
+        full_name = 'chameleon.protos.third_party.' + name
+        import_module(full_name)
+        module = sys.modules[full_name]
+        sys.modules[name] = module
+        return module
+
+
+sys.meta_path.append(GoogleApiImporter())
+from google.api import http_pb2, annotations_pb2
+_ = http_pb2, annotations_pb2
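+
+# With the importer registered, e.g.:
+#
+#     from google.api import annotations_pb2
+#
+# resolves to the copy bundled under chameleon/protos/third_party.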
diff --git a/protos/third_party/google/api/__init__.py b/protos/third_party/google/api/__init__.py
index e69de29..4484f93 100644
--- a/protos/third_party/google/api/__init__.py
+++ b/protos/third_party/google/api/__init__.py
@@ -0,0 +1 @@
+import http_pb2, annotations_pb2