Initial Contribution
diff --git a/froofle/protobuf/__init__.py b/froofle/protobuf/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/froofle/protobuf/__init__.py
diff --git a/froofle/protobuf/descriptor.py b/froofle/protobuf/descriptor.py
new file mode 100644
index 0000000..e74cf25
--- /dev/null
+++ b/froofle/protobuf/descriptor.py
@@ -0,0 +1,433 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# TODO(robinson): We probably need to provide deep-copy methods for
+# descriptor types. When a FieldDescriptor is passed into
+# Descriptor.__init__(), we should make a deep copy and then set
+# containing_type on it. Alternatively, we could just get
+# rid of containing_type (it's not needed for reflection.py, at least).
+#
+# TODO(robinson): Print method?
+#
+# TODO(robinson): Useful __repr__?
+
+"""Descriptors essentially contain exactly the information found in a .proto
+file, in types that make this information accessible in Python.
+"""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+class DescriptorBase(object):
+
+ """Descriptors base class.
+
+ This class is the base of all descriptor classes. It provides common options
+ related functionaility.
+ """
+
+ def __init__(self, options, options_class_name):
+ """Initialize the descriptor given its options message and the name of the
+ class of the options message. The name of the class is required in case
+ the options message is None and has to be created.
+ """
+ self._options = options
+ self._options_class_name = options_class_name
+
+ def GetOptions(self):
+ """Retrieves descriptor options.
+
+ This method returns the options set or creates the default options for the
+ descriptor.
+ """
+ if self._options:
+ return self._options
+ from froofle.protobuf import descriptor_pb2
+ try:
+ options_class = getattr(descriptor_pb2, self._options_class_name)
+ except AttributeError:
+ raise RuntimeError('Unknown options class name %s!' %
+ (self._options_class_name))
+ self._options = options_class()
+ return self._options
+
+
+class Descriptor(DescriptorBase):
+
+ """Descriptor for a protocol message type.
+
+ A Descriptor instance has the following attributes:
+
+ name: (str) Name of this protocol message type.
+ full_name: (str) Fully-qualified name of this protocol message type,
+ which will include protocol "package" name and the name of any
+ enclosing types.
+
+ filename: (str) Name of the .proto file containing this message.
+
+ containing_type: (Descriptor) Reference to the descriptor of the
+ type containing us, or None if we have no containing type.
+
+ fields: (list of FieldDescriptors) Field descriptors for all
+ fields in this type.
+ fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
+ objects as in |fields|, but indexed by "number" attribute in each
+ FieldDescriptor.
+ fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
+ objects as in |fields|, but indexed by "name" attribute in each
+ FieldDescriptor.
+
+ nested_types: (list of Descriptors) Descriptor references
+ for all protocol message types nested within this one.
+ nested_types_by_name: (dict str -> Descriptor) Same Descriptor
+ objects as in |nested_types|, but indexed by "name" attribute
+ in each Descriptor.
+
+ enum_types: (list of EnumDescriptors) EnumDescriptor references
+ for all enums contained within this type.
+ enum_types_by_name: (dict str -> EnumDescriptor) Same EnumDescriptor
+ objects as in |enum_types|, but indexed by "name" attribute
+ in each EnumDescriptor.
+ enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping
+ from enum value name to EnumValueDescriptor for that value.
+
+ extensions: (list of FieldDescriptor) All extensions defined directly
+ within this message type (NOT within a nested type).
+ extensions_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
+ objects as |extensions|, but indexed by "name" attribute of each
+ FieldDescriptor.
+
+ options: (descriptor_pb2.MessageOptions) Protocol message options or None
+ to use default message options.
+ """
+
+ def __init__(self, name, full_name, filename, containing_type,
+ fields, nested_types, enum_types, extensions, options=None):
+ """Arguments to __init__() are as described in the description
+ of Descriptor fields above.
+ """
+ super(Descriptor, self).__init__(options, 'MessageOptions')
+ self.name = name
+ self.full_name = full_name
+ self.filename = filename
+ self.containing_type = containing_type
+
+ # We have fields in addition to fields_by_name and fields_by_number,
+ # so that:
+ # 1. Clients can index fields by "order in which they're listed."
+ # 2. Clients can easily iterate over all fields with the terse
+ # syntax: for f in descriptor.fields: ...
+ self.fields = fields
+ for field in self.fields:
+ field.containing_type = self
+ self.fields_by_number = dict((f.number, f) for f in fields)
+ self.fields_by_name = dict((f.name, f) for f in fields)
+
+ self.nested_types = nested_types
+ self.nested_types_by_name = dict((t.name, t) for t in nested_types)
+
+ self.enum_types = enum_types
+ for enum_type in self.enum_types:
+ enum_type.containing_type = self
+ self.enum_types_by_name = dict((t.name, t) for t in enum_types)
+ self.enum_values_by_name = dict(
+ (v.name, v) for t in enum_types for v in t.values)
+
+ self.extensions = extensions
+ for extension in self.extensions:
+ extension.extension_scope = self
+ self.extensions_by_name = dict((f.name, f) for f in extensions)
+
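+# Illustrative sketch of how the derived indexes relate to the |fields| list;
+# the message and field names here are hypothetical, not part of this library.
+#
+#   _ID = FieldDescriptor(
+#       name='id', full_name='pkg.Example.id', index=0, number=1,
+#       type=FieldDescriptor.TYPE_INT32, cpp_type=FieldDescriptor.CPPTYPE_INT32,
+#       label=FieldDescriptor.LABEL_OPTIONAL, default_value=0,
+#       message_type=None, enum_type=None, containing_type=None,
+#       is_extension=False, extension_scope=None)
+#   _EXAMPLE = Descriptor(
+#       name='Example', full_name='pkg.Example', filename='pkg/example.proto',
+#       containing_type=None, fields=[_ID], nested_types=[], enum_types=[],
+#       extensions=[])
+#   assert _EXAMPLE.fields_by_name['id'] is _EXAMPLE.fields[0]
+#   assert _EXAMPLE.fields_by_number[1] is _EXAMPLE.fields[0]
+#   assert _ID.containing_type is _EXAMPLE
+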
+
+# TODO(robinson): We should have aggressive checking here,
+# for example:
+# * If you specify a repeated field, you should not be allowed
+# to specify a default value.
+# * [Other examples here as needed].
+#
+# TODO(robinson): for this and other *Descriptor classes, we
+# might also want to lock things down aggressively (e.g.,
+# prevent clients from setting the attributes). Having
+# stronger invariants here in general will reduce the number
+# of runtime checks we must do in reflection.py...
+class FieldDescriptor(DescriptorBase):
+
+ """Descriptor for a single field in a .proto file.
+
+ A FieldDescriptor instance has the following attributes:
+
+ name: (str) Name of this field, exactly as it appears in .proto.
+ full_name: (str) Name of this field, including containing scope. This is
+ particularly relevant for extensions.
+ index: (int) Dense, 0-indexed index giving the order that this
+ field textually appears within its message in the .proto file.
+ number: (int) Tag number declared for this field in the .proto file.
+
+ type: (One of the TYPE_* constants below) Declared type.
+ cpp_type: (One of the CPPTYPE_* constants below) C++ type used to
+ represent this field.
+
+ label: (One of the LABEL_* constants below) Tells whether this
+ field is optional, required, or repeated.
+ default_value: (Varies) Default value of this field. Only
+ meaningful for non-repeated scalar fields. Repeated fields
+ should always set this to [], and non-repeated composite
+ fields should always set this to None.
+
+ containing_type: (Descriptor) Descriptor of the protocol message
+ type that contains this field. Set by the Descriptor constructor
+ if we're passed into one.
+ Somewhat confusingly, for extension fields, this is the
+ descriptor of the EXTENDED message, not the descriptor
+ of the message containing this field. (See is_extension and
+ extension_scope below).
+ message_type: (Descriptor) If a composite field, a descriptor
+ of the message type contained in this field. Otherwise, this is None.
+ enum_type: (EnumDescriptor) If this field contains an enum, a
+ descriptor of that enum. Otherwise, this is None.
+
+ is_extension: True iff this describes an extension field.
+ extension_scope: (Descriptor) Only meaningful if is_extension is True.
+ Gives the message that immediately contains this extension field.
+ Will be None iff we're a top-level (file-level) extension field.
+
+ options: (descriptor_pb2.FieldOptions) Protocol message field options or
+ None to use default field options.
+ """
+
+ # Must be consistent with C++ FieldDescriptor::Type enum in
+ # descriptor.h.
+ #
+ # TODO(robinson): Find a way to eliminate this repetition.
+ TYPE_DOUBLE = 1
+ TYPE_FLOAT = 2
+ TYPE_INT64 = 3
+ TYPE_UINT64 = 4
+ TYPE_INT32 = 5
+ TYPE_FIXED64 = 6
+ TYPE_FIXED32 = 7
+ TYPE_BOOL = 8
+ TYPE_STRING = 9
+ TYPE_GROUP = 10
+ TYPE_MESSAGE = 11
+ TYPE_BYTES = 12
+ TYPE_UINT32 = 13
+ TYPE_ENUM = 14
+ TYPE_SFIXED32 = 15
+ TYPE_SFIXED64 = 16
+ TYPE_SINT32 = 17
+ TYPE_SINT64 = 18
+ MAX_TYPE = 18
+
+ # Must be consistent with C++ FieldDescriptor::CppType enum in
+ # descriptor.h.
+ #
+ # TODO(robinson): Find a way to eliminate this repetition.
+ CPPTYPE_INT32 = 1
+ CPPTYPE_INT64 = 2
+ CPPTYPE_UINT32 = 3
+ CPPTYPE_UINT64 = 4
+ CPPTYPE_DOUBLE = 5
+ CPPTYPE_FLOAT = 6
+ CPPTYPE_BOOL = 7
+ CPPTYPE_ENUM = 8
+ CPPTYPE_STRING = 9
+ CPPTYPE_MESSAGE = 10
+ MAX_CPPTYPE = 10
+
+ # Must be consistent with C++ FieldDescriptor::Label enum in
+ # descriptor.h.
+ #
+ # TODO(robinson): Find a way to eliminate this repetition.
+ LABEL_OPTIONAL = 1
+ LABEL_REQUIRED = 2
+ LABEL_REPEATED = 3
+ MAX_LABEL = 3
+
+ def __init__(self, name, full_name, index, number, type, cpp_type, label,
+ default_value, message_type, enum_type, containing_type,
+ is_extension, extension_scope, options=None):
+ """The arguments are as described in the description of FieldDescriptor
+ attributes above.
+
+ Note that containing_type may be None, and may be set later if necessary
+ (to deal with circular references between message types, for example).
+ Likewise for extension_scope.
+ """
+ super(FieldDescriptor, self).__init__(options, 'FieldOptions')
+ self.name = name
+ self.full_name = full_name
+ self.index = index
+ self.number = number
+ self.type = type
+ self.cpp_type = cpp_type
+ self.label = label
+ self.default_value = default_value
+ self.containing_type = containing_type
+ self.message_type = message_type
+ self.enum_type = enum_type
+ self.is_extension = is_extension
+ self.extension_scope = extension_scope
+
+
+class EnumDescriptor(DescriptorBase):
+
+ """Descriptor for an enum defined in a .proto file.
+
+ An EnumDescriptor instance has the following attributes:
+
+ name: (str) Name of the enum type.
+ full_name: (str) Full name of the type, including package name
+ and any enclosing type(s).
+ filename: (str) Name of the .proto file in which this appears.
+
+ values: (list of EnumValueDescriptors) List of the values
+ in this enum.
+ values_by_name: (dict str -> EnumValueDescriptor) Same as |values|,
+ but indexed by the "name" field of each EnumValueDescriptor.
+ values_by_number: (dict int -> EnumValueDescriptor) Same as |values|,
+ but indexed by the "number" field of each EnumValueDescriptor.
+ containing_type: (Descriptor) Descriptor of the immediate containing
+ type of this enum, or None if this is an enum defined at the
+ top level in a .proto file. Set by Descriptor's constructor
+ if we're passed into one.
+ options: (descriptor_pb2.EnumOptions) Enum options message or
+ None to use default enum options.
+ """
+
+ def __init__(self, name, full_name, filename, values,
+ containing_type=None, options=None):
+ """Arguments are as described in the attribute description above."""
+ super(EnumDescriptor, self).__init__(options, 'EnumOptions')
+ self.name = name
+ self.full_name = full_name
+ self.filename = filename
+ self.values = values
+ for value in self.values:
+ value.type = self
+ self.values_by_name = dict((v.name, v) for v in values)
+ self.values_by_number = dict((v.number, v) for v in values)
+ self.containing_type = containing_type
+
+
+class EnumValueDescriptor(DescriptorBase):
+
+ """Descriptor for a single value within an enum.
+
+ name: (str) Name of this value.
+ index: (int) Dense, 0-indexed index giving the order that this
+ value appears textually within its enum in the .proto file.
+ number: (int) Actual number assigned to this enum value.
+ type: (EnumDescriptor) EnumDescriptor to which this value
+ belongs. Set by EnumDescriptor's constructor if we're
+ passed into one.
+ options: (descriptor_pb2.EnumValueOptions) Enum value options message or
+ None to use default enum value options.
+ """
+
+ def __init__(self, name, index, number, type=None, options=None):
+ """Arguments are as described in the attribute description above."""
+ super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions')
+ self.name = name
+ self.index = index
+ self.number = number
+ self.type = type
+
+
+class ServiceDescriptor(DescriptorBase):
+
+ """Descriptor for a service.
+
+ name: (str) Name of the service.
+ full_name: (str) Full name of the service, including package name.
+ index: (int) 0-indexed index giving the order that this service's
+ definition appears within the .proto file.
+ methods: (list of MethodDescriptor) List of methods provided by this
+ service.
+ options: (descriptor_pb2.ServiceOptions) Service options message or
+ None to use default service options.
+ """
+
+ def __init__(self, name, full_name, index, methods, options=None):
+ super(ServiceDescriptor, self).__init__(options, 'ServiceOptions')
+ self.name = name
+ self.full_name = full_name
+ self.index = index
+ self.methods = methods
+ # Set the containing service for each method in this service.
+ for method in self.methods:
+ method.containing_service = self
+
+ def FindMethodByName(self, name):
+ """Searches for the specified method, and returns its descriptor."""
+ for method in self.methods:
+ if name == method.name:
+ return method
+ return None
+
+
+class MethodDescriptor(DescriptorBase):
+
+ """Descriptor for a method in a service.
+
+ name: (str) Name of the method within the service.
+ full_name: (str) Full name of method.
+ index: (int) 0-indexed index of the method inside the service.
+ containing_service: (ServiceDescriptor) The service that contains this
+ method.
+ input_type: The descriptor of the message that this method accepts.
+ output_type: The descriptor of the message that this method returns.
+ options: (descriptor_pb2.MethodOptions) Method options message or
+ None to use default method options.
+ """
+
+ def __init__(self, name, full_name, index, containing_service,
+ input_type, output_type, options=None):
+ """The arguments are as described in the description of MethodDescriptor
+ attributes above.
+
+ Note that containing_service may be None, and may be set later if necessary.
+ """
+ super(MethodDescriptor, self).__init__(options, 'MethodOptions')
+ self.name = name
+ self.full_name = full_name
+ self.index = index
+ self.containing_service = containing_service
+ self.input_type = input_type
+ self.output_type = output_type
+
+
+def _ParseOptions(message, string):
+ """Parses serialized options.
+
+ This helper function is used to parse serialized options in generated
+ proto2 files. It must not be used outside proto2.
+ """
+ message.ParseFromString(string)
+ return message
diff --git a/froofle/protobuf/descriptor_pb2.py b/froofle/protobuf/descriptor_pb2.py
new file mode 100644
index 0000000..1687383
--- /dev/null
+++ b/froofle/protobuf/descriptor_pb2.py
@@ -0,0 +1,950 @@
+#!/usr/bin/python2.4
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+
+from froofle.protobuf import descriptor
+from froofle.protobuf import message
+from froofle.protobuf import reflection
+from froofle.protobuf import service
+from froofle.protobuf import service_reflection
+
+
+_FIELDDESCRIPTORPROTO_TYPE = descriptor.EnumDescriptor(
+ name='Type',
+ full_name='froofle.protobuf.FieldDescriptorProto.Type',
+ filename='Type',
+ values=[
+ descriptor.EnumValueDescriptor(
+ name='TYPE_DOUBLE', index=0, number=1,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_FLOAT', index=1, number=2,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_INT64', index=2, number=3,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_UINT64', index=3, number=4,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_INT32', index=4, number=5,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_FIXED64', index=5, number=6,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_FIXED32', index=6, number=7,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_BOOL', index=7, number=8,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_STRING', index=8, number=9,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_GROUP', index=9, number=10,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_MESSAGE', index=10, number=11,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_BYTES', index=11, number=12,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_UINT32', index=12, number=13,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_ENUM', index=13, number=14,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_SFIXED32', index=14, number=15,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_SFIXED64', index=15, number=16,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_SINT32', index=16, number=17,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='TYPE_SINT64', index=17, number=18,
+ options=None,
+ type=None),
+ ],
+ options=None,
+)
+
+_FIELDDESCRIPTORPROTO_LABEL = descriptor.EnumDescriptor(
+ name='Label',
+ full_name='froofle.protobuf.FieldDescriptorProto.Label',
+ filename='Label',
+ values=[
+ descriptor.EnumValueDescriptor(
+ name='LABEL_OPTIONAL', index=0, number=1,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='LABEL_REQUIRED', index=1, number=2,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='LABEL_REPEATED', index=2, number=3,
+ options=None,
+ type=None),
+ ],
+ options=None,
+)
+
+_FILEOPTIONS_OPTIMIZEMODE = descriptor.EnumDescriptor(
+ name='OptimizeMode',
+ full_name='froofle.protobuf.FileOptions.OptimizeMode',
+ filename='OptimizeMode',
+ values=[
+ descriptor.EnumValueDescriptor(
+ name='SPEED', index=0, number=1,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='CODE_SIZE', index=1, number=2,
+ options=None,
+ type=None),
+ ],
+ options=None,
+)
+
+_FIELDOPTIONS_CTYPE = descriptor.EnumDescriptor(
+ name='CType',
+ full_name='froofle.protobuf.FieldOptions.CType',
+ filename='CType',
+ values=[
+ descriptor.EnumValueDescriptor(
+ name='CORD', index=0, number=1,
+ options=None,
+ type=None),
+ descriptor.EnumValueDescriptor(
+ name='STRING_PIECE', index=1, number=2,
+ options=None,
+ type=None),
+ ],
+ options=None,
+)
+
+
+_FILEDESCRIPTORSET = descriptor.Descriptor(
+ name='FileDescriptorSet',
+ full_name='froofle.protobuf.FileDescriptorSet',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='file', full_name='froofle.protobuf.FileDescriptorSet.file', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_FILEDESCRIPTORPROTO = descriptor.Descriptor(
+ name='FileDescriptorProto',
+ full_name='froofle.protobuf.FileDescriptorProto',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='name', full_name='froofle.protobuf.FileDescriptorProto.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='package', full_name='froofle.protobuf.FileDescriptorProto.package', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='dependency', full_name='froofle.protobuf.FileDescriptorProto.dependency', index=2,
+ number=3, type=9, cpp_type=9, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='message_type', full_name='froofle.protobuf.FileDescriptorProto.message_type', index=3,
+ number=4, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='enum_type', full_name='froofle.protobuf.FileDescriptorProto.enum_type', index=4,
+ number=5, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='service', full_name='froofle.protobuf.FileDescriptorProto.service', index=5,
+ number=6, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='extension', full_name='froofle.protobuf.FileDescriptorProto.extension', index=6,
+ number=7, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='options', full_name='froofle.protobuf.FileDescriptorProto.options', index=7,
+ number=8, type=11, cpp_type=10, label=1,
+ default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_DESCRIPTORPROTO_EXTENSIONRANGE = descriptor.Descriptor(
+ name='ExtensionRange',
+ full_name='froofle.protobuf.DescriptorProto.ExtensionRange',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='start', full_name='froofle.protobuf.DescriptorProto.ExtensionRange.start', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='end', full_name='froofle.protobuf.DescriptorProto.ExtensionRange.end', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+_DESCRIPTORPROTO = descriptor.Descriptor(
+ name='DescriptorProto',
+ full_name='froofle.protobuf.DescriptorProto',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='name', full_name='froofle.protobuf.DescriptorProto.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='field', full_name='froofle.protobuf.DescriptorProto.field', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='extension', full_name='froofle.protobuf.DescriptorProto.extension', index=2,
+ number=6, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='nested_type', full_name='froofle.protobuf.DescriptorProto.nested_type', index=3,
+ number=3, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='enum_type', full_name='froofle.protobuf.DescriptorProto.enum_type', index=4,
+ number=4, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='extension_range', full_name='froofle.protobuf.DescriptorProto.extension_range', index=5,
+ number=5, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='options', full_name='froofle.protobuf.DescriptorProto.options', index=6,
+ number=7, type=11, cpp_type=10, label=1,
+ default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_FIELDDESCRIPTORPROTO = descriptor.Descriptor(
+ name='FieldDescriptorProto',
+ full_name='froofle.protobuf.FieldDescriptorProto',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='name', full_name='froofle.protobuf.FieldDescriptorProto.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='number', full_name='froofle.protobuf.FieldDescriptorProto.number', index=1,
+ number=3, type=5, cpp_type=1, label=1,
+ default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='label', full_name='froofle.protobuf.FieldDescriptorProto.label', index=2,
+ number=4, type=14, cpp_type=8, label=1,
+ default_value=1,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='type', full_name='froofle.protobuf.FieldDescriptorProto.type', index=3,
+ number=5, type=14, cpp_type=8, label=1,
+ default_value=1,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='type_name', full_name='froofle.protobuf.FieldDescriptorProto.type_name', index=4,
+ number=6, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='extendee', full_name='froofle.protobuf.FieldDescriptorProto.extendee', index=5,
+ number=2, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='default_value', full_name='froofle.protobuf.FieldDescriptorProto.default_value', index=6,
+ number=7, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='options', full_name='froofle.protobuf.FieldDescriptorProto.options', index=7,
+ number=8, type=11, cpp_type=10, label=1,
+ default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ _FIELDDESCRIPTORPROTO_TYPE,
+ _FIELDDESCRIPTORPROTO_LABEL,
+ ],
+ options=None)
+
+
+_ENUMDESCRIPTORPROTO = descriptor.Descriptor(
+ name='EnumDescriptorProto',
+ full_name='froofle.protobuf.EnumDescriptorProto',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='name', full_name='froofle.protobuf.EnumDescriptorProto.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='value', full_name='froofle.protobuf.EnumDescriptorProto.value', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='options', full_name='froofle.protobuf.EnumDescriptorProto.options', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_ENUMVALUEDESCRIPTORPROTO = descriptor.Descriptor(
+ name='EnumValueDescriptorProto',
+ full_name='froofle.protobuf.EnumValueDescriptorProto',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='name', full_name='froofle.protobuf.EnumValueDescriptorProto.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='number', full_name='froofle.protobuf.EnumValueDescriptorProto.number', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='options', full_name='froofle.protobuf.EnumValueDescriptorProto.options', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_SERVICEDESCRIPTORPROTO = descriptor.Descriptor(
+ name='ServiceDescriptorProto',
+ full_name='froofle.protobuf.ServiceDescriptorProto',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='name', full_name='froofle.protobuf.ServiceDescriptorProto.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='method', full_name='froofle.protobuf.ServiceDescriptorProto.method', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='options', full_name='froofle.protobuf.ServiceDescriptorProto.options', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_METHODDESCRIPTORPROTO = descriptor.Descriptor(
+ name='MethodDescriptorProto',
+ full_name='froofle.protobuf.MethodDescriptorProto',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='name', full_name='froofle.protobuf.MethodDescriptorProto.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='input_type', full_name='froofle.protobuf.MethodDescriptorProto.input_type', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='output_type', full_name='froofle.protobuf.MethodDescriptorProto.output_type', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='options', full_name='froofle.protobuf.MethodDescriptorProto.options', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_FILEOPTIONS = descriptor.Descriptor(
+ name='FileOptions',
+ full_name='froofle.protobuf.FileOptions',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='java_package', full_name='froofle.protobuf.FileOptions.java_package', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='java_outer_classname', full_name='froofle.protobuf.FileOptions.java_outer_classname', index=1,
+ number=8, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='java_multiple_files', full_name='froofle.protobuf.FileOptions.java_multiple_files', index=2,
+ number=10, type=8, cpp_type=7, label=1,
+ default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='optimize_for', full_name='froofle.protobuf.FileOptions.optimize_for', index=3,
+ number=9, type=14, cpp_type=8, label=1,
+ default_value=2,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='uninterpreted_option', full_name='froofle.protobuf.FileOptions.uninterpreted_option', index=4,
+ number=999, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ _FILEOPTIONS_OPTIMIZEMODE,
+ ],
+ options=None)
+
+
+_MESSAGEOPTIONS = descriptor.Descriptor(
+ name='MessageOptions',
+ full_name='froofle.protobuf.MessageOptions',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='message_set_wire_format', full_name='froofle.protobuf.MessageOptions.message_set_wire_format', index=0,
+ number=1, type=8, cpp_type=7, label=1,
+ default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='uninterpreted_option', full_name='froofle.protobuf.MessageOptions.uninterpreted_option', index=1,
+ number=999, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_FIELDOPTIONS = descriptor.Descriptor(
+ name='FieldOptions',
+ full_name='froofle.protobuf.FieldOptions',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='ctype', full_name='froofle.protobuf.FieldOptions.ctype', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ default_value=1,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='experimental_map_key', full_name='froofle.protobuf.FieldOptions.experimental_map_key', index=1,
+ number=9, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='uninterpreted_option', full_name='froofle.protobuf.FieldOptions.uninterpreted_option', index=2,
+ number=999, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ _FIELDOPTIONS_CTYPE,
+ ],
+ options=None)
+
+
+_ENUMOPTIONS = descriptor.Descriptor(
+ name='EnumOptions',
+ full_name='froofle.protobuf.EnumOptions',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='uninterpreted_option', full_name='froofle.protobuf.EnumOptions.uninterpreted_option', index=0,
+ number=999, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_ENUMVALUEOPTIONS = descriptor.Descriptor(
+ name='EnumValueOptions',
+ full_name='froofle.protobuf.EnumValueOptions',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='uninterpreted_option', full_name='froofle.protobuf.EnumValueOptions.uninterpreted_option', index=0,
+ number=999, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_SERVICEOPTIONS = descriptor.Descriptor(
+ name='ServiceOptions',
+ full_name='froofle.protobuf.ServiceOptions',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='uninterpreted_option', full_name='froofle.protobuf.ServiceOptions.uninterpreted_option', index=0,
+ number=999, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_METHODOPTIONS = descriptor.Descriptor(
+ name='MethodOptions',
+ full_name='froofle.protobuf.MethodOptions',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='uninterpreted_option', full_name='froofle.protobuf.MethodOptions.uninterpreted_option', index=0,
+ number=999, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_UNINTERPRETEDOPTION_NAMEPART = descriptor.Descriptor(
+ name='NamePart',
+ full_name='froofle.protobuf.UninterpretedOption.NamePart',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='name_part', full_name='froofle.protobuf.UninterpretedOption.NamePart.name_part', index=0,
+ number=1, type=9, cpp_type=9, label=2,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='is_extension', full_name='froofle.protobuf.UninterpretedOption.NamePart.is_extension', index=1,
+ number=2, type=8, cpp_type=7, label=2,
+ default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+_UNINTERPRETEDOPTION = descriptor.Descriptor(
+ name='UninterpretedOption',
+ full_name='froofle.protobuf.UninterpretedOption',
+ filename='froofle/protobuf/descriptor.proto',
+ containing_type=None,
+ fields=[
+ descriptor.FieldDescriptor(
+ name='name', full_name='froofle.protobuf.UninterpretedOption.name', index=0,
+ number=2, type=11, cpp_type=10, label=3,
+ default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='identifier_value', full_name='froofle.protobuf.UninterpretedOption.identifier_value', index=1,
+ number=3, type=9, cpp_type=9, label=1,
+ default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='positive_int_value', full_name='froofle.protobuf.UninterpretedOption.positive_int_value', index=2,
+ number=4, type=4, cpp_type=4, label=1,
+ default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='negative_int_value', full_name='froofle.protobuf.UninterpretedOption.negative_int_value', index=3,
+ number=5, type=3, cpp_type=2, label=1,
+ default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='double_value', full_name='froofle.protobuf.UninterpretedOption.double_value', index=4,
+ number=6, type=1, cpp_type=5, label=1,
+ default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ descriptor.FieldDescriptor(
+ name='string_value', full_name='froofle.protobuf.UninterpretedOption.string_value', index=5,
+ number=7, type=12, cpp_type=9, label=1,
+ default_value="",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[], # TODO(robinson): Implement.
+ enum_types=[
+ ],
+ options=None)
+
+
+_FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO
+_FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO
+_FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO
+_FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO
+_FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO
+_FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS
+_DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO
+_DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO
+_DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO
+_DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO
+_DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE
+_DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS
+_FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = _FIELDDESCRIPTORPROTO_LABEL
+_FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE
+_FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS
+_ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO
+_ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS
+_ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS
+_SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO
+_SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS
+_METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS
+_FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE
+_FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
+_MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
+_FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE
+_FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
+_ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
+_ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
+_SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
+_METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
+_UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART
+
+class FileDescriptorSet(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _FILEDESCRIPTORSET
+
+class FileDescriptorProto(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _FILEDESCRIPTORPROTO
+
+class DescriptorProto(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+
+ class ExtensionRange(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _DESCRIPTORPROTO_EXTENSIONRANGE
+ DESCRIPTOR = _DESCRIPTORPROTO
+
+class FieldDescriptorProto(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _FIELDDESCRIPTORPROTO
+
+class EnumDescriptorProto(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _ENUMDESCRIPTORPROTO
+
+class EnumValueDescriptorProto(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _ENUMVALUEDESCRIPTORPROTO
+
+class ServiceDescriptorProto(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _SERVICEDESCRIPTORPROTO
+
+class MethodDescriptorProto(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _METHODDESCRIPTORPROTO
+
+class FileOptions(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _FILEOPTIONS
+
+class MessageOptions(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _MESSAGEOPTIONS
+
+class FieldOptions(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _FIELDOPTIONS
+
+class EnumOptions(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _ENUMOPTIONS
+
+class EnumValueOptions(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _ENUMVALUEOPTIONS
+
+class ServiceOptions(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _SERVICEOPTIONS
+
+class MethodOptions(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _METHODOPTIONS
+
+class UninterpretedOption(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+
+ class NamePart(message.Message):
+ __metaclass__ = reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _UNINTERPRETEDOPTION_NAMEPART
+ DESCRIPTOR = _UNINTERPRETEDOPTION
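+
+# Illustrative usage sketch: the classes above get their message API (field
+# attributes, SerializeToString/ParseFromString, etc.) from
+# reflection.GeneratedProtocolMessageType, assuming the usual generated-message
+# behaviour. The field values below are hypothetical.
+#
+#   proto = FieldDescriptorProto()
+#   proto.name = 'id'
+#   proto.number = 1
+#   data = proto.SerializeToString()
+#
+#   parsed = FieldDescriptorProto()
+#   parsed.ParseFromString(data)
+#   assert parsed.name == 'id'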
+
diff --git a/froofle/protobuf/internal/__init__.py b/froofle/protobuf/internal/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/froofle/protobuf/internal/__init__.py
diff --git a/froofle/protobuf/internal/decoder.py b/froofle/protobuf/internal/decoder.py
new file mode 100644
index 0000000..2dd4c96
--- /dev/null
+++ b/froofle/protobuf/internal/decoder.py
@@ -0,0 +1,209 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Class for decoding protocol buffer primitives.
+
+Contains the logic for decoding every logical protocol field type
+from one of the 5 physical wire types.
+"""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import struct
+from froofle.protobuf import message
+from froofle.protobuf.internal import input_stream
+from froofle.protobuf.internal import wire_format
+
+
+
+# Note that much of this code is ported from //net/proto/ProtocolBuffer, and
+# that the interface is strongly inspired by WireFormat from the C++ proto2
+# implementation.
+
+
+class Decoder(object):
+
+ """Decodes logical protocol buffer fields from the wire."""
+
+ def __init__(self, s):
+ """Initializes the decoder to read from s.
+
+ Args:
+ s: An immutable sequence of bytes, which must be accessible
+ via the Python buffer() primitive (i.e., buffer(s)).
+ """
+ self._stream = input_stream.InputStream(s)
+
+ def EndOfStream(self):
+ """Returns true iff we've reached the end of the bytes we're reading."""
+ return self._stream.EndOfStream()
+
+ def Position(self):
+ """Returns the 0-indexed position in |s|."""
+ return self._stream.Position()
+
+ def ReadFieldNumberAndWireType(self):
+ """Reads a tag from the wire. Returns a (field_number, wire_type) pair."""
+ tag_and_type = self.ReadUInt32()
+ return wire_format.UnpackTag(tag_and_type)
+
+ def SkipBytes(self, bytes):
+ """Skips the specified number of bytes on the wire."""
+ self._stream.SkipBytes(bytes)
+
+ # Note that the Read*() methods below are not exactly symmetrical with the
+ # corresponding Encoder.Append*() methods. Those Encoder methods first
+ # encode a tag, but the Read*() methods below assume that the tag has already
+ # been read, and that the client wishes to read a field of the specified type
+ # starting at the current position.
+
+ def ReadInt32(self):
+ """Reads and returns a signed, varint-encoded, 32-bit integer."""
+ return self._stream.ReadVarint32()
+
+ def ReadInt64(self):
+ """Reads and returns a signed, varint-encoded, 64-bit integer."""
+ return self._stream.ReadVarint64()
+
+ def ReadUInt32(self):
+ """Reads and returns an signed, varint-encoded, 32-bit integer."""
+ return self._stream.ReadVarUInt32()
+
+ def ReadUInt64(self):
+ """Reads and returns an signed, varint-encoded,64-bit integer."""
+ return self._stream.ReadVarUInt64()
+
+ def ReadSInt32(self):
+ """Reads and returns a signed, zigzag-encoded, varint-encoded,
+ 32-bit integer."""
+ return wire_format.ZigZagDecode(self._stream.ReadVarUInt32())
+
+ def ReadSInt64(self):
+ """Reads and returns a signed, zigzag-encoded, varint-encoded,
+ 64-bit integer."""
+ return wire_format.ZigZagDecode(self._stream.ReadVarUInt64())
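+
+ # For reference, the two methods above rely on the standard zigzag mapping,
+ # which keeps small negative values small on the wire: 0 -> 0, 1 -> -1,
+ # 2 -> 1, 3 -> -2, 4 -> 2, and so on. Assuming wire_format.ZigZagDecode
+ # implements that mapping, for example:
+ #
+ #   wire_format.ZigZagDecode(0)           # 0
+ #   wire_format.ZigZagDecode(1)           # -1
+ #   wire_format.ZigZagDecode(4294967294)  # 2147483647
+ #   wire_format.ZigZagDecode(4294967295)  # -2147483648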
+
+ def ReadFixed32(self):
+ """Reads and returns an unsigned, fixed-width, 32-bit integer."""
+ return self._stream.ReadLittleEndian32()
+
+ def ReadFixed64(self):
+ """Reads and returns an unsigned, fixed-width, 64-bit integer."""
+ return self._stream.ReadLittleEndian64()
+
+ def ReadSFixed32(self):
+ """Reads and returns a signed, fixed-width, 32-bit integer."""
+ value = self._stream.ReadLittleEndian32()
+ if value >= (1 << 31):
+ value -= (1 << 32)
+ return value
+
+ def ReadSFixed64(self):
+ """Reads and returns a signed, fixed-width, 64-bit integer."""
+ value = self._stream.ReadLittleEndian64()
+ if value >= (1 << 63):
+ value -= (1 << 64)
+ return value
+
+ def ReadFloat(self):
+ """Reads and returns a 4-byte floating-point number."""
+ serialized = self._stream.ReadBytes(4)
+ return struct.unpack('f', serialized)[0]
+
+ def ReadDouble(self):
+ """Reads and returns an 8-byte floating-point number."""
+ serialized = self._stream.ReadBytes(8)
+ return struct.unpack('d', serialized)[0]
+
+ def ReadBool(self):
+ """Reads and returns a bool."""
+ i = self._stream.ReadVarUInt32()
+ return bool(i)
+
+ def ReadEnum(self):
+ """Reads and returns an enum value."""
+ return self._stream.ReadVarUInt32()
+
+ def ReadString(self):
+ """Reads and returns a length-delimited string."""
+ bytes = self.ReadBytes()
+ return unicode(bytes, 'utf-8')
+
+ def ReadBytes(self):
+ """Reads and returns a length-delimited byte sequence."""
+ length = self._stream.ReadVarUInt32()
+ return self._stream.ReadBytes(length)
+
+ def ReadMessageInto(self, msg):
+ """Calls msg.MergeFromString() to merge
+ length-delimited serialized message data into |msg|.
+
+ REQUIRES: The decoder must be positioned at the serialized "length"
+ prefix to a length-delimited serialized message.
+
+ POSTCONDITION: The decoder is positioned just after the
+ serialized message, and we have merged those serialized
+ contents into |msg|.
+ """
+ length = self._stream.ReadVarUInt32()
+ sub_buffer = self._stream.GetSubBuffer(length)
+ num_bytes_used = msg.MergeFromString(sub_buffer)
+ if num_bytes_used != length:
+ raise message.DecodeError(
+ 'Submessage told to deserialize from %d-byte encoding, '
+ 'but used only %d bytes' % (length, num_bytes_used))
+ self._stream.SkipBytes(num_bytes_used)
+
+ def ReadGroupInto(self, expected_field_number, group):
+ """Calls group.MergeFromString() to merge
+ END_GROUP-delimited serialized message data into |group|.
+ We'll raise an exception if we don't find an END_GROUP
+ tag immediately after the serialized message contents.
+
+ REQUIRES: The decoder is positioned just after the START_GROUP
+ tag for this group.
+
+ POSTCONDITION: The decoder is positioned just after the
+ END_GROUP tag for this group, and we have merged
+ the contents of the group into |group|.
+ """
+ sub_buffer = self._stream.GetSubBuffer() # No a priori length limit.
+ num_bytes_used = group.MergeFromString(sub_buffer)
+ if num_bytes_used < 0:
+ raise message.DecodeError('Group message reported negative bytes read.')
+ self._stream.SkipBytes(num_bytes_used)
+ field_number, field_type = self.ReadFieldNumberAndWireType()
+ if field_type != wire_format.WIRETYPE_END_GROUP:
+ raise message.DecodeError('Group message did not end with an END_GROUP.')
+ if field_number != expected_field_number:
+ raise message.DecodeError('END_GROUP tag had field '
+ 'number %d, was expecting field number %d' % (
+ field_number, expected_field_number))
+ # We're now positioned just after the END_GROUP tag. Perfect.
diff --git a/froofle/protobuf/internal/encoder.py b/froofle/protobuf/internal/encoder.py
new file mode 100644
index 0000000..8b924b3
--- /dev/null
+++ b/froofle/protobuf/internal/encoder.py
@@ -0,0 +1,206 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Class for encoding protocol message primitives.
+
+Contains the logic for encoding every logical protocol field type
+into one of the 5 physical wire types.
+"""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import struct
+from froofle.protobuf import message
+from froofle.protobuf.internal import wire_format
+from froofle.protobuf.internal import output_stream
+
+
+# Note that much of this code is ported from //net/proto/ProtocolBuffer, and
+# that the interface is strongly inspired by WireFormat from the C++ proto2
+# implementation.
+
+
+class Encoder(object):
+
+ """Encodes logical protocol buffer fields to the wire format."""
+
+ def __init__(self):
+ self._stream = output_stream.OutputStream()
+
+ def ToString(self):
+ """Returns all values encoded in this object as a string."""
+ return self._stream.ToString()
+
+ # All the Append*() methods below first append a tag+type pair to the buffer
+ # before appending the specified value.
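+ # Worked example: on a fresh Encoder, AppendInt32(1, 150) first appends the
+ # tag byte 0x08 (field 1, WIRETYPE_VARINT) and then the varint 0x96 0x01, so
+ # ToString() returns '\x08\x96\x01'.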
+
+ def AppendInt32(self, field_number, value):
+ """Appends a 32-bit integer to our buffer, varint-encoded."""
+ self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
+ self._stream.AppendVarint32(value)
+
+ def AppendInt64(self, field_number, value):
+ """Appends a 64-bit integer to our buffer, varint-encoded."""
+ self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
+ self._stream.AppendVarint64(value)
+
+ def AppendUInt32(self, field_number, unsigned_value):
+ """Appends an unsigned 32-bit integer to our buffer, varint-encoded."""
+ self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
+ self._stream.AppendVarUInt32(unsigned_value)
+
+ def AppendUInt64(self, field_number, unsigned_value):
+ """Appends an unsigned 64-bit integer to our buffer, varint-encoded."""
+ self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
+ self._stream.AppendVarUInt64(unsigned_value)
+
+ def AppendSInt32(self, field_number, value):
+ """Appends a 32-bit integer to our buffer, zigzag-encoded and then
+ varint-encoded.
+ """
+ self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
+ zigzag_value = wire_format.ZigZagEncode(value)
+ self._stream.AppendVarUInt32(zigzag_value)
+
+ def AppendSInt64(self, field_number, value):
+ """Appends a 64-bit integer to our buffer, zigzag-encoded and then
+ varint-encoded.
+ """
+ self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
+ zigzag_value = wire_format.ZigZagEncode(value)
+ self._stream.AppendVarUInt64(zigzag_value)
+
+ def AppendFixed32(self, field_number, unsigned_value):
+ """Appends an unsigned 32-bit integer to our buffer, in little-endian
+ byte-order.
+ """
+ self._AppendTag(field_number, wire_format.WIRETYPE_FIXED32)
+ self._stream.AppendLittleEndian32(unsigned_value)
+
+ def AppendFixed64(self, field_number, unsigned_value):
+ """Appends an unsigned 64-bit integer to our buffer, in little-endian
+ byte-order.
+ """
+ self._AppendTag(field_number, wire_format.WIRETYPE_FIXED64)
+ self._stream.AppendLittleEndian64(unsigned_value)
+
+ def AppendSFixed32(self, field_number, value):
+ """Appends a signed 32-bit integer to our buffer, in little-endian
+ byte-order.
+ """
+ sign = (value & 0x80000000) and -1 or 0
+ if value >> 32 != sign:
+ raise message.EncodeError('SFixed32 out of range: %d' % value)
+ self._AppendTag(field_number, wire_format.WIRETYPE_FIXED32)
+ self._stream.AppendLittleEndian32(value & 0xffffffff)
+
+ def AppendSFixed64(self, field_number, value):
+ """Appends a signed 64-bit integer to our buffer, in little-endian
+ byte-order.
+ """
+ sign = (value & 0x8000000000000000) and -1 or 0
+ if value >> 64 != sign:
+ raise message.EncodeError('SFixed64 out of range: %d' % value)
+ self._AppendTag(field_number, wire_format.WIRETYPE_FIXED64)
+ self._stream.AppendLittleEndian64(value & 0xffffffffffffffff)
+
+ def AppendFloat(self, field_number, value):
+ """Appends a floating-point number to our buffer."""
+ self._AppendTag(field_number, wire_format.WIRETYPE_FIXED32)
+ self._stream.AppendRawBytes(struct.pack('f', value))
+
+ def AppendDouble(self, field_number, value):
+ """Appends a double-precision floating-point number to our buffer."""
+ self._AppendTag(field_number, wire_format.WIRETYPE_FIXED64)
+ self._stream.AppendRawBytes(struct.pack('d', value))
+
+ def AppendBool(self, field_number, value):
+ """Appends a boolean to our buffer."""
+ self.AppendInt32(field_number, value)
+
+ def AppendEnum(self, field_number, value):
+ """Appends an enum value to our buffer."""
+ self.AppendInt32(field_number, value)
+
+ def AppendString(self, field_number, value):
+ """Appends a length-prefixed unicode string, encoded as UTF-8 to our buffer,
+ with the length varint-encoded.
+ """
+ self.AppendBytes(field_number, value.encode('utf-8'))
+
+ def AppendBytes(self, field_number, value):
+ """Appends a length-prefixed sequence of bytes to our buffer, with the
+ length varint-encoded.
+ """
+ self._AppendTag(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
+ self._stream.AppendVarUInt32(len(value))
+ self._stream.AppendRawBytes(value)
+
+ # TODO(robinson): For AppendGroup() and AppendMessage(), we'd really like to
+ # avoid the extra string copy here. We can do so if we widen the Message
+ # interface to be able to serialize to a stream in addition to a string. The
+ # challenge when thinking ahead to the Python/C API implementation of Message
+ # is finding a stream-like Python thing to which we can write raw bytes
+ # from C. I'm not sure such a thing exists(?). (array.array is pretty much
+ # what we want, but it's not directly exposed in the Python/C API).
+
+ def AppendGroup(self, field_number, group):
+ """Appends a group to our buffer.
+ """
+ self._AppendTag(field_number, wire_format.WIRETYPE_START_GROUP)
+ self._stream.AppendRawBytes(group.SerializeToString())
+ self._AppendTag(field_number, wire_format.WIRETYPE_END_GROUP)
+
+ def AppendMessage(self, field_number, msg):
+ """Appends a nested message to our buffer.
+ """
+ self._AppendTag(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
+ self._stream.AppendVarUInt32(msg.ByteSize())
+ self._stream.AppendRawBytes(msg.SerializeToString())
+
+ def AppendMessageSetItem(self, field_number, msg):
+ """Appends an item using the message set wire format.
+
+ The message set message looks like this:
+ message MessageSet {
+ repeated group Item = 1 {
+ required int32 type_id = 2;
+ required string message = 3;
+ }
+ }
+ """
+ self._AppendTag(1, wire_format.WIRETYPE_START_GROUP)
+ self.AppendInt32(2, field_number)
+ self.AppendMessage(3, msg)
+ self._AppendTag(1, wire_format.WIRETYPE_END_GROUP)
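+ # Illustration of the wire layout produced above: 0x0b (START_GROUP, field 1),
+ # then 0x10 plus the varint-encoded type_id, then 0x1a plus the length-prefixed
+ # message bytes, and finally 0x0c (END_GROUP, field 1).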
+
+ def _AppendTag(self, field_number, wire_type):
+ """Appends a tag containing field number and wire type information."""
+ self._stream.AppendVarUInt32(wire_format.PackTag(field_number, wire_type))
diff --git a/froofle/protobuf/internal/input_stream.py b/froofle/protobuf/internal/input_stream.py
new file mode 100644
index 0000000..26a26dc
--- /dev/null
+++ b/froofle/protobuf/internal/input_stream.py
@@ -0,0 +1,326 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""InputStream is the primitive interface for reading bits from the wire.
+
+All protocol buffer deserialization can be expressed in terms of
+the InputStream primitives provided here.
+"""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import struct
+from array import array
+from froofle.protobuf import message
+from froofle.protobuf.internal import wire_format
+
+
+# Note that much of this code is ported from //net/proto/ProtocolBuffer, and
+# that the interface is strongly inspired by CodedInputStream from the C++
+# proto2 implementation.
+
+
+class InputStreamBuffer(object):
+
+ """Contains all logic for reading bits, and dealing with stream position.
+
+ If an InputStream method ever raises an exception, the stream is left
+ in an indeterminate state and is not safe for further use.
+ """
+
+ def __init__(self, s):
+ # What we really want is something like array('B', s), where elements we
+ # read from the array are already given to us as one-byte integers. BUT
+ # using array() instead of buffer() would force full string copies to result
+ # from each GetSubBuffer() call.
+ #
+ # So, if the N serialized bytes of a single protocol buffer object are
+ # split evenly between 2 child messages, and so on recursively, using
+ # array('B', s) instead of buffer() would incur an additional N*logN bytes
+ # copied during deserialization.
+ #
+ # The higher constant overhead of having to ord() for every byte we read
+ # from the buffer in _ReadVarintHelper() could definitely lead to worse
+ # performance in many real-world scenarios, even if the asymptotic
+ # complexity is better. However, our real answer is that the mythical
+ # Python/C extension module output mode for the protocol compiler will
+ # be blazing-fast and will eliminate most use of this class anyway.
+ self._buffer = buffer(s)
+ self._pos = 0
+
+ def EndOfStream(self):
+ """Returns true iff we're at the end of the stream.
+ If this returns true, then a call to any other InputStream method
+ will raise an exception.
+ """
+ return self._pos >= len(self._buffer)
+
+ def Position(self):
+ """Returns the current position in the stream, or equivalently, the
+ number of bytes read so far.
+ """
+ return self._pos
+
+ def GetSubBuffer(self, size=None):
+ """Returns a sequence-like object that represents a portion of our
+ underlying sequence.
+
+ Position 0 in the returned object corresponds to self.Position()
+ in this stream.
+
+ If size is specified, then the returned object ends after the
+ next "size" bytes in this stream. If size is not specified,
+ then the returned object ends at the end of this stream.
+
+ We guarantee that the returned object R supports the Python buffer
+ interface (and thus that the call buffer(R) will work).
+
+ Note that the returned buffer is read-only.
+
+ The intended use for this method is for nested-message and nested-group
+ deserialization, where we want to make a recursive MergeFromString()
+ call on the portion of the original sequence that contains the serialized
+ nested message. (And we'd like to do so without making unnecessary string
+ copies).
+
+ REQUIRES: size is nonnegative.
+ """
+ # Note that buffer() doesn't perform any actual string copy.
+ if size is None:
+ return buffer(self._buffer, self._pos)
+ else:
+ if size < 0:
+ raise message.DecodeError('Negative size %d' % size)
+ return buffer(self._buffer, self._pos, size)
+
+ def SkipBytes(self, num_bytes):
+ """Skip num_bytes bytes ahead, or go to the end of the stream, whichever
+ comes first.
+
+ REQUIRES: num_bytes is nonnegative.
+ """
+ if num_bytes < 0:
+ raise message.DecodeError('Negative num_bytes %d' % num_bytes)
+ self._pos += num_bytes
+ self._pos = min(self._pos, len(self._buffer))
+
+ def ReadBytes(self, size):
+ """Reads up to 'size' bytes from the stream, stopping early
+ only if we reach the end of the stream. Returns the bytes read
+ as a string.
+ """
+ if size < 0:
+ raise message.DecodeError('Negative size %d' % size)
+ s = (self._buffer[self._pos : self._pos + size])
+ self._pos += len(s) # Only advance by the number of bytes actually read.
+ return s
+
+ def ReadLittleEndian32(self):
+ """Interprets the next 4 bytes of the stream as a little-endian
+ encoded, unsigned 32-bit integer, and returns that integer.
+ """
+ try:
+ i = struct.unpack(wire_format.FORMAT_UINT32_LITTLE_ENDIAN,
+ self._buffer[self._pos : self._pos + 4])
+ self._pos += 4
+ return i[0] # unpack() result is a 1-element tuple.
+ except struct.error, e:
+ raise message.DecodeError(e)
+
+ def ReadLittleEndian64(self):
+ """Interprets the next 8 bytes of the stream as a little-endian
+ encoded, unsigned 64-bit integer, and returns that integer.
+ """
+ try:
+ i = struct.unpack(wire_format.FORMAT_UINT64_LITTLE_ENDIAN,
+ self._buffer[self._pos : self._pos + 8])
+ self._pos += 8
+ return i[0] # unpack() result is a 1-element tuple.
+ except struct.error, e:
+ raise message.DecodeError(e)
+
+ def ReadVarint32(self):
+ """Reads a varint from the stream, interprets this varint
+ as a signed, 32-bit integer, and returns the integer.
+ """
+ i = self.ReadVarint64()
+ if not wire_format.INT32_MIN <= i <= wire_format.INT32_MAX:
+ raise message.DecodeError('Value out of range for int32: %d' % i)
+ return int(i)
+
+ def ReadVarUInt32(self):
+ """Reads a varint from the stream, interprets this varint
+ as an unsigned, 32-bit integer, and returns the integer.
+ """
+ i = self.ReadVarUInt64()
+ if i > wire_format.UINT32_MAX:
+ raise message.DecodeError('Value out of range for uint32: %d' % i)
+ return i
+
+ def ReadVarint64(self):
+ """Reads a varint from the stream, interprets this varint
+ as a signed, 64-bit integer, and returns the integer.
+ """
+ i = self.ReadVarUInt64()
+ if i > wire_format.INT64_MAX:
+ i -= (1 << 64)
+ return i
+
+ def ReadVarUInt64(self):
+ """Reads a varint from the stream, interprets this varint
+ as an unsigned, 64-bit integer, and returns the integer.
+ """
+ i = self._ReadVarintHelper()
+ if not 0 <= i <= wire_format.UINT64_MAX:
+ raise message.DecodeError('Value out of range for uint64: %d' % i)
+ return i
+
+ def _ReadVarintHelper(self):
+ """Helper for the various varint-reading methods above.
+ Reads an unsigned, varint-encoded integer from the stream and
+ returns this integer.
+
+ Does no bounds checking except to ensure that we read at most as many bytes
+ as could possibly be present in a varint-encoded 64-bit number.
+ """
+ result = 0
+ shift = 0
+ while 1:
+ if shift >= 64:
+ raise message.DecodeError('Too many bytes when decoding varint.')
+ try:
+ b = ord(self._buffer[self._pos])
+ except IndexError:
+ raise message.DecodeError('Truncated varint.')
+ self._pos += 1
+ result |= ((b & 0x7f) << shift)
+ shift += 7
+ if not (b & 0x80):
+ return result
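+ # Worked example: the two bytes 0x96 0x01 decode to
+ # (0x96 & 0x7f) | (0x01 << 7) = 22 + 128 = 150.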
+
+class InputStreamArray(object):
+ def __init__(self, s):
+ self._buffer = array('B', s)
+ self._pos = 0
+
+ def EndOfStream(self):
+ return self._pos >= len(self._buffer)
+
+ def Position(self):
+ return self._pos
+
+ def GetSubBuffer(self, size=None):
+ if size is None:
+ return self._buffer[self._pos : ].tostring()
+ else:
+ if size < 0:
+ raise message.DecodeError('Negative size %d' % size)
+ return self._buffer[self._pos : self._pos + size].tostring()
+
+ def SkipBytes(self, num_bytes):
+ if num_bytes < 0:
+ raise message.DecodeError('Negative num_bytes %d' % num_bytes)
+ self._pos += num_bytes
+ self._pos = min(self._pos, len(self._buffer))
+
+ def ReadBytes(self, size):
+ if size < 0:
+ raise message.DecodeError('Negative size %d' % size)
+ s = self._buffer[self._pos : self._pos + size].tostring()
+ self._pos += len(s) # Only advance by the number of bytes actually read.
+ return s
+
+ def ReadLittleEndian32(self):
+ try:
+ i = struct.unpack(wire_format.FORMAT_UINT32_LITTLE_ENDIAN,
+ self._buffer[self._pos : self._pos + 4])
+ self._pos += 4
+ return i[0] # unpack() result is a 1-element tuple.
+ except struct.error, e:
+ raise message.DecodeError(e)
+
+ def ReadLittleEndian64(self):
+ try:
+ i = struct.unpack(wire_format.FORMAT_UINT64_LITTLE_ENDIAN,
+ self._buffer[self._pos : self._pos + 8])
+ self._pos += 8
+ return i[0] # unpack() result is a 1-element tuple.
+ except struct.error, e:
+ raise message.DecodeError(e)
+
+ def ReadVarint32(self):
+ i = self.ReadVarint64()
+ if not wire_format.INT32_MIN <= i <= wire_format.INT32_MAX:
+ raise message.DecodeError('Value out of range for int32: %d' % i)
+ return int(i)
+
+ def ReadVarUInt32(self):
+ i = self.ReadVarUInt64()
+ if i > wire_format.UINT32_MAX:
+ raise message.DecodeError('Value out of range for uint32: %d' % i)
+ return i
+
+ def ReadVarint64(self):
+ i = self.ReadVarUInt64()
+ if i > wire_format.INT64_MAX:
+ i -= (1 << 64)
+ return i
+
+ def ReadVarUInt64(self):
+ i = self._ReadVarintHelper()
+ if not 0 <= i <= wire_format.UINT64_MAX:
+ raise message.DecodeError('Value out of range for uint64: %d' % i)
+ return i
+
+ def _ReadVarintHelper(self):
+ result = 0
+ shift = 0
+ while 1:
+ if shift >= 64:
+ raise message.DecodeError('Too many bytes when decoding varint.')
+ try:
+ b = self._buffer[self._pos]
+ except IndexError:
+ raise message.DecodeError('Truncated varint.')
+ self._pos += 1
+ result |= ((b & 0x7f) << shift)
+ shift += 7
+ if not (b & 0x80):
+ return result
+
+try:
+ buffer("")
+ InputStream = InputStreamBuffer
+except NotImplementedError:
+ # Google App Engine: dev_appserver.py
+ InputStream = InputStreamArray
+except RuntimeError:
+ # Google App Engine: production
+ InputStream = InputStreamArray
diff --git a/froofle/protobuf/internal/message_listener.py b/froofle/protobuf/internal/message_listener.py
new file mode 100644
index 0000000..4397895
--- /dev/null
+++ b/froofle/protobuf/internal/message_listener.py
@@ -0,0 +1,69 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Defines a listener interface for observing certain
+state transitions on Message objects.
+
+Also defines a null implementation of this interface.
+"""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+
+class MessageListener(object):
+
+ """Listens for transitions to nonempty and for invalidations of cached
+ byte sizes. Meant to be registered via Message._SetListener().
+ """
+
+ def TransitionToNonempty(self):
+ """Called the *first* time that this message becomes nonempty.
+ Implementations are free (but not required) to call this method multiple
+ times after the message has become nonempty.
+ """
+ raise NotImplementedError
+
+ def ByteSizeDirty(self):
+ """Called *every* time the cached byte size value
+ for this object is invalidated (transitions from being
+ "clean" to "dirty").
+ """
+ raise NotImplementedError
+
+
+class NullMessageListener(object):
+
+ """No-op MessageListener implementation."""
+
+ def TransitionToNonempty(self):
+ pass
+
+ def ByteSizeDirty(self):
+ pass
diff --git a/froofle/protobuf/internal/output_stream.py b/froofle/protobuf/internal/output_stream.py
new file mode 100644
index 0000000..f62cd1c
--- /dev/null
+++ b/froofle/protobuf/internal/output_stream.py
@@ -0,0 +1,125 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""OutputStream is the primitive interface for sticking bits on the wire.
+
+All protocol buffer serialization can be expressed in terms of
+the OutputStream primitives provided here.
+"""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import array
+import struct
+from froofle.protobuf import message
+from froofle.protobuf.internal import wire_format
+
+
+
+# Note that much of this code is ported from //net/proto/ProtocolBuffer, and
+# that the interface is strongly inspired by CodedOutputStream from the C++
+# proto2 implementation.
+
+
+class OutputStream(object):
+
+ """Contains all logic for writing bits, and ToString() to get the result."""
+
+ def __init__(self):
+ self._buffer = array.array('B')
+
+ def AppendRawBytes(self, raw_bytes):
+ """Appends raw_bytes to our internal buffer."""
+ self._buffer.fromstring(raw_bytes)
+
+ def AppendLittleEndian32(self, unsigned_value):
+ """Appends an unsigned 32-bit integer to the internal buffer,
+ in little-endian byte order.
+ """
+ if not 0 <= unsigned_value <= wire_format.UINT32_MAX:
+ raise message.EncodeError(
+ 'Unsigned 32-bit out of range: %d' % unsigned_value)
+ self._buffer.fromstring(struct.pack(
+ wire_format.FORMAT_UINT32_LITTLE_ENDIAN, unsigned_value))
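+ # For example, AppendLittleEndian32(1) appends the four bytes
+ # 0x01 0x00 0x00 0x00.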
+
+ def AppendLittleEndian64(self, unsigned_value):
+ """Appends an unsigned 64-bit integer to the internal buffer,
+ in little-endian byte order.
+ """
+ if not 0 <= unsigned_value <= wire_format.UINT64_MAX:
+ raise message.EncodeError(
+ 'Unsigned 64-bit out of range: %d' % unsigned_value)
+ self._buffer.fromstring(struct.pack(
+ wire_format.FORMAT_UINT64_LITTLE_ENDIAN, unsigned_value))
+
+ def AppendVarint32(self, value):
+ """Appends a signed 32-bit integer to the internal buffer,
+ encoded as a varint. (Note that a negative varint32 will
+ always require 10 bytes of space.)
+ """
+ if not wire_format.INT32_MIN <= value <= wire_format.INT32_MAX:
+ raise message.EncodeError('Value out of range: %d' % value)
+ self.AppendVarint64(value)
+
+ def AppendVarUInt32(self, value):
+ """Appends an unsigned 32-bit integer to the internal buffer,
+ encoded as a varint.
+ """
+ if not 0 <= value <= wire_format.UINT32_MAX:
+ raise message.EncodeError('Value out of range: %d' % value)
+ self.AppendVarUInt64(value)
+
+ def AppendVarint64(self, value):
+ """Appends a signed 64-bit integer to the internal buffer,
+ encoded as a varint.
+ """
+ if not wire_format.INT64_MIN <= value <= wire_format.INT64_MAX:
+ raise message.EncodeError('Value out of range: %d' % value)
+ if value < 0:
+ value += (1 << 64)
+ self.AppendVarUInt64(value)
+
+ def AppendVarUInt64(self, unsigned_value):
+ """Appends an unsigned 64-bit integer to the internal buffer,
+ encoded as a varint.
+ """
+ if not 0 <= unsigned_value <= wire_format.UINT64_MAX:
+ raise message.EncodeError('Value out of range: %d' % unsigned_value)
+ while True:
+ bits = unsigned_value & 0x7f
+ unsigned_value >>= 7
+ if not unsigned_value:
+ self._buffer.append(bits)
+ break
+ self._buffer.append(0x80|bits)
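+ # Worked example: AppendVarUInt64(300) appends the two bytes 0xac 0x02;
+ # values 0-127 fit in a single byte.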
+
+ def ToString(self):
+ """Returns a string containing the bytes in our internal buffer."""
+ return self._buffer.tostring()
diff --git a/froofle/protobuf/internal/type_checkers.py b/froofle/protobuf/internal/type_checkers.py
new file mode 100644
index 0000000..aaf7a84
--- /dev/null
+++ b/froofle/protobuf/internal/type_checkers.py
@@ -0,0 +1,268 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Provides type checking routines.
+
+This module defines type checking utilities in the forms of dictionaries:
+
+VALUE_CHECKERS: A dictionary of field types and a value validation object.
+TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
+ function.
+TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
+ function.
+FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
+ corresponding wire types.
+TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
+ function.
+"""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+from froofle.protobuf.internal import decoder
+from froofle.protobuf.internal import encoder
+from froofle.protobuf.internal import wire_format
+from froofle.protobuf import descriptor
+
+_FieldDescriptor = descriptor.FieldDescriptor
+
+
+def GetTypeChecker(cpp_type, field_type):
+ """Returns a type checker for a message field of the specified types.
+
+ Args:
+ cpp_type: C++ type of the field (see descriptor.py).
+ field_type: Protocol message field type (see descriptor.py).
+
+ Returns:
+ An instance of TypeChecker which can be used to verify the types
+ of values assigned to a field of the specified type.
+ """
+ if (cpp_type == _FieldDescriptor.CPPTYPE_STRING and
+ field_type == _FieldDescriptor.TYPE_STRING):
+ return UnicodeValueChecker()
+ return _VALUE_CHECKERS[cpp_type]
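+ # For example, GetTypeChecker(_FieldDescriptor.CPPTYPE_INT32,
+ # _FieldDescriptor.TYPE_INT32) returns an Int32ValueChecker, whose
+ # CheckValue() raises TypeError for non-integers and ValueError for values
+ # outside [-2**31, 2**31 - 1].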
+
+
+# None of the typecheckers below make any attempt to guard against people
+# subclassing builtin types and doing weird things. We're not trying to
+# protect against malicious clients here, just people accidentally shooting
+# themselves in the foot in obvious ways.
+
+class TypeChecker(object):
+
+ """Type checker used to catch type errors as early as possible
+ when the client is setting scalar fields in protocol messages.
+ """
+
+ def __init__(self, *acceptable_types):
+ self._acceptable_types = acceptable_types
+
+ def CheckValue(self, proposed_value):
+ if not isinstance(proposed_value, self._acceptable_types):
+ message = ('%.1024r has type %s, but expected one of: %s' %
+ (proposed_value, type(proposed_value), self._acceptable_types))
+ raise TypeError(message)
+
+
+# IntValueChecker and its subclasses perform integer type-checks
+# and bounds-checks.
+class IntValueChecker(object):
+
+ """Checker used for integer fields. Performs type-check and range check."""
+
+ def CheckValue(self, proposed_value):
+ if not isinstance(proposed_value, (int, long)):
+ message = ('%.1024r has type %s, but expected one of: %s' %
+ (proposed_value, type(proposed_value), (int, long)))
+ raise TypeError(message)
+ if not self._MIN <= proposed_value <= self._MAX:
+ raise ValueError('Value out of range: %d' % proposed_value)
+
+
+class UnicodeValueChecker(object):
+
+ """Checker used for string fields."""
+
+ def CheckValue(self, proposed_value):
+ if not isinstance(proposed_value, (str, unicode)):
+ message = ('%.1024r has type %s, but expected one of: %s' %
+ (proposed_value, type(proposed_value), (str, unicode)))
+ raise TypeError(message)
+
+ # If the value is of type 'str' make sure that it is in 7-bit ASCII
+ # encoding.
+ if isinstance(proposed_value, str):
+ try:
+ unicode(proposed_value, 'ascii')
+ except UnicodeDecodeError:
+ raise ValueError('%.1024r isn\'t in 7-bit ASCII encoding.'
+ % (proposed_value))
+
+
+class Int32ValueChecker(IntValueChecker):
+ # We make sure to use ints instead of longs here, since comparisons may be
+ # more efficient.
+ _MIN = -2147483648
+ _MAX = 2147483647
+
+
+class Uint32ValueChecker(IntValueChecker):
+ _MIN = 0
+ _MAX = (1 << 32) - 1
+
+
+class Int64ValueChecker(IntValueChecker):
+ _MIN = -(1 << 63)
+ _MAX = (1 << 63) - 1
+
+
+class Uint64ValueChecker(IntValueChecker):
+ _MIN = 0
+ _MAX = (1 << 64) - 1
+
+
+# Type-checkers for all scalar CPPTYPEs.
+_VALUE_CHECKERS = {
+ _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
+ _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
+ _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
+ _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
+ _FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
+ float, int, long),
+ _FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
+ float, int, long),
+ _FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
+ _FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
+ _FieldDescriptor.CPPTYPE_STRING: TypeChecker(str),
+ }
+
+
+# Map from field type to a function F, such that F(field_num, value)
+# gives the total byte size for a value of the given type. This
+# byte size includes tag information and any other additional space
+# associated with serializing "value".
+TYPE_TO_BYTE_SIZE_FN = {
+ _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
+ _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
+ _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
+ _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
+ _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
+ _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
+ _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
+ _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
+ _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
+ _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
+ _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
+ _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
+ _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
+ _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
+ _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
+ _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
+ _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
+ _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
+ }
+
+
+# Maps from field type to an unbound Encoder method F, such that
+# F(encoder, field_number, value) will append the serialization
+# of a value of this type to the encoder.
+_Encoder = encoder.Encoder
+TYPE_TO_SERIALIZE_METHOD = {
+ _FieldDescriptor.TYPE_DOUBLE: _Encoder.AppendDouble,
+ _FieldDescriptor.TYPE_FLOAT: _Encoder.AppendFloat,
+ _FieldDescriptor.TYPE_INT64: _Encoder.AppendInt64,
+ _FieldDescriptor.TYPE_UINT64: _Encoder.AppendUInt64,
+ _FieldDescriptor.TYPE_INT32: _Encoder.AppendInt32,
+ _FieldDescriptor.TYPE_FIXED64: _Encoder.AppendFixed64,
+ _FieldDescriptor.TYPE_FIXED32: _Encoder.AppendFixed32,
+ _FieldDescriptor.TYPE_BOOL: _Encoder.AppendBool,
+ _FieldDescriptor.TYPE_STRING: _Encoder.AppendString,
+ _FieldDescriptor.TYPE_GROUP: _Encoder.AppendGroup,
+ _FieldDescriptor.TYPE_MESSAGE: _Encoder.AppendMessage,
+ _FieldDescriptor.TYPE_BYTES: _Encoder.AppendBytes,
+ _FieldDescriptor.TYPE_UINT32: _Encoder.AppendUInt32,
+ _FieldDescriptor.TYPE_ENUM: _Encoder.AppendEnum,
+ _FieldDescriptor.TYPE_SFIXED32: _Encoder.AppendSFixed32,
+ _FieldDescriptor.TYPE_SFIXED64: _Encoder.AppendSFixed64,
+ _FieldDescriptor.TYPE_SINT32: _Encoder.AppendSInt32,
+ _FieldDescriptor.TYPE_SINT64: _Encoder.AppendSInt64,
+ }
+
+
+# Maps from field type to expected wiretype.
+FIELD_TYPE_TO_WIRE_TYPE = {
+ _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
+ _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
+ _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
+ _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
+ _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
+ _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
+ _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
+ _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
+ _FieldDescriptor.TYPE_STRING:
+ wire_format.WIRETYPE_LENGTH_DELIMITED,
+ _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
+ _FieldDescriptor.TYPE_MESSAGE:
+ wire_format.WIRETYPE_LENGTH_DELIMITED,
+ _FieldDescriptor.TYPE_BYTES:
+ wire_format.WIRETYPE_LENGTH_DELIMITED,
+ _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
+ _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
+ _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
+ _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
+ _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
+ _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
+ }
+
+
+# Maps from field type to an unbound Decoder method F,
+# such that F(decoder) will read a field of the requested type.
+#
+# Note that Message and Group are intentionally missing here.
+# They're handled by _RecursivelyMerge().
+_Decoder = decoder.Decoder
+TYPE_TO_DESERIALIZE_METHOD = {
+ _FieldDescriptor.TYPE_DOUBLE: _Decoder.ReadDouble,
+ _FieldDescriptor.TYPE_FLOAT: _Decoder.ReadFloat,
+ _FieldDescriptor.TYPE_INT64: _Decoder.ReadInt64,
+ _FieldDescriptor.TYPE_UINT64: _Decoder.ReadUInt64,
+ _FieldDescriptor.TYPE_INT32: _Decoder.ReadInt32,
+ _FieldDescriptor.TYPE_FIXED64: _Decoder.ReadFixed64,
+ _FieldDescriptor.TYPE_FIXED32: _Decoder.ReadFixed32,
+ _FieldDescriptor.TYPE_BOOL: _Decoder.ReadBool,
+ _FieldDescriptor.TYPE_STRING: _Decoder.ReadString,
+ _FieldDescriptor.TYPE_BYTES: _Decoder.ReadBytes,
+ _FieldDescriptor.TYPE_UINT32: _Decoder.ReadUInt32,
+ _FieldDescriptor.TYPE_ENUM: _Decoder.ReadEnum,
+ _FieldDescriptor.TYPE_SFIXED32: _Decoder.ReadSFixed32,
+ _FieldDescriptor.TYPE_SFIXED64: _Decoder.ReadSFixed64,
+ _FieldDescriptor.TYPE_SINT32: _Decoder.ReadSInt32,
+ _FieldDescriptor.TYPE_SINT64: _Decoder.ReadSInt64,
+ }
diff --git a/froofle/protobuf/internal/wire_format.py b/froofle/protobuf/internal/wire_format.py
new file mode 100644
index 0000000..4d823c8
--- /dev/null
+++ b/froofle/protobuf/internal/wire_format.py
@@ -0,0 +1,236 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Constants and static functions to support protocol buffer wire format."""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import struct
+from froofle.protobuf import message
+
+
+TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
+_TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
+
+# These numbers identify the wire type of a protocol buffer value.
+# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
+# tag-and-type to store one of these WIRETYPE_* constants.
+# These values must match WireType enum in //net/proto2/public/wire_format.h.
+WIRETYPE_VARINT = 0
+WIRETYPE_FIXED64 = 1
+WIRETYPE_LENGTH_DELIMITED = 2
+WIRETYPE_START_GROUP = 3
+WIRETYPE_END_GROUP = 4
+WIRETYPE_FIXED32 = 5
+_WIRETYPE_MAX = 5
+
+
+# Bounds for various integer types.
+INT32_MAX = int((1 << 31) - 1)
+INT32_MIN = int(-(1 << 31))
+UINT32_MAX = (1 << 32) - 1
+
+INT64_MAX = (1 << 63) - 1
+INT64_MIN = -(1 << 63)
+UINT64_MAX = (1 << 64) - 1
+
+# "struct" format strings that will encode/decode the specified formats.
+FORMAT_UINT32_LITTLE_ENDIAN = '<I'
+FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
+
+
+# We'll have to provide alternate implementations of AppendLittleEndian*() on
+# any architectures where these checks fail.
+if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
+ raise AssertionError('Format "I" is not a 32-bit number.')
+if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
+ raise AssertionError('Format "Q" is not a 64-bit number.')
+
+
+def PackTag(field_number, wire_type):
+ """Returns an unsigned 32-bit integer that encodes the field number and
+ wire type information in standard protocol message wire format.
+
+ Args:
+ field_number: Expected to be an integer in the range [1, 1 << 29)
+ wire_type: One of the WIRETYPE_* constants.
+ """
+ if not 0 <= wire_type <= _WIRETYPE_MAX:
+ raise message.EncodeError('Unknown wire type: %d' % wire_type)
+ return (field_number << TAG_TYPE_BITS) | wire_type
+
+
+def UnpackTag(tag):
+ """The inverse of PackTag(). Given an unsigned 32-bit number,
+ returns a (field_number, wire_type) tuple.
+ """
+ return (tag >> TAG_TYPE_BITS), (tag & _TAG_TYPE_MASK)
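+ # For example, PackTag(1, WIRETYPE_VARINT) == 0x08 and UnpackTag(0x08) == (1, 0);
+ # PackTag(2, WIRETYPE_LENGTH_DELIMITED) == 0x12.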
+
+
+def ZigZagEncode(value):
+ """ZigZag Transform: Encodes signed integers so that they can be
+ effectively used with varint encoding. See wire_format.h for
+ more details.
+ """
+ if value >= 0:
+ return value << 1
+ return (value << 1) ^ (~0)
+
+
+def ZigZagDecode(value):
+ """Inverse of ZigZagEncode()."""
+ if not value & 0x1:
+ return value >> 1
+ return (value >> 1) ^ (~0)
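+ # Worked examples: ZigZagEncode maps 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4,
+ # ..., 2147483647 -> 4294967294 and -2147483648 -> 4294967295; ZigZagDecode
+ # inverts each of these mappings.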
+
+
+
+# The *ByteSize() functions below return the number of bytes required to
+# serialize "field number + type" information and then serialize the value.
+
+
+def Int32ByteSize(field_number, int32):
+ return Int64ByteSize(field_number, int32)
+
+
+def Int64ByteSize(field_number, int64):
+ # Have to convert to uint before calling UInt64ByteSize().
+ return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
+
+
+def UInt32ByteSize(field_number, uint32):
+ return UInt64ByteSize(field_number, uint32)
+
+
+def UInt64ByteSize(field_number, uint64):
+ return _TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
+
+
+def SInt32ByteSize(field_number, int32):
+ return UInt32ByteSize(field_number, ZigZagEncode(int32))
+
+
+def SInt64ByteSize(field_number, int64):
+ return UInt64ByteSize(field_number, ZigZagEncode(int64))
+
+
+def Fixed32ByteSize(field_number, fixed32):
+ return _TagByteSize(field_number) + 4
+
+
+def Fixed64ByteSize(field_number, fixed64):
+ return _TagByteSize(field_number) + 8
+
+
+def SFixed32ByteSize(field_number, sfixed32):
+ return _TagByteSize(field_number) + 4
+
+
+def SFixed64ByteSize(field_number, sfixed64):
+ return _TagByteSize(field_number) + 8
+
+
+def FloatByteSize(field_number, flt):
+ return _TagByteSize(field_number) + 4
+
+
+def DoubleByteSize(field_number, double):
+ return _TagByteSize(field_number) + 8
+
+
+def BoolByteSize(field_number, b):
+ return _TagByteSize(field_number) + 1
+
+
+def EnumByteSize(field_number, enum):
+ return UInt32ByteSize(field_number, enum)
+
+
+def StringByteSize(field_number, string):
+ return BytesByteSize(field_number, string.encode('utf-8'))
+
+
+def BytesByteSize(field_number, b):
+ return (_TagByteSize(field_number)
+ + _VarUInt64ByteSizeNoTag(len(b))
+ + len(b))
+
+
+def GroupByteSize(field_number, message):
+ return (2 * _TagByteSize(field_number) # START and END group.
+ + message.ByteSize())
+
+
+def MessageByteSize(field_number, message):
+ return (_TagByteSize(field_number)
+ + _VarUInt64ByteSizeNoTag(message.ByteSize())
+ + message.ByteSize())
+
+
+def MessageSetItemByteSize(field_number, msg):
+ # First compute the sizes of the tags.
+ # There are two tags for the beginning and end of the repeated group (field
+ # number 1), one tag for field number 2 (type_id) and one tag for field
+ # number 3 (message).
+ total_size = (2 * _TagByteSize(1) + _TagByteSize(2) + _TagByteSize(3))
+
+ # Add the number of bytes for type_id.
+ total_size += _VarUInt64ByteSizeNoTag(field_number)
+
+ message_size = msg.ByteSize()
+
+ # The number of bytes for encoding the length of the message.
+ total_size += _VarUInt64ByteSizeNoTag(message_size)
+
+ # The size of the message.
+ total_size += message_size
+ return total_size
+
+
+# Private helper functions for the *ByteSize() functions above.
+
+
+def _TagByteSize(field_number):
+ """Returns the bytes required to serialize a tag with this field number."""
+ # Just pass in type 0, since the type won't affect the tag+type size.
+ return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
+
+
+def _VarUInt64ByteSizeNoTag(uint64):
+ """Returns the bytes required to serialize a single varint.
+ uint64 must be unsigned.
+ """
+ if uint64 > UINT64_MAX:
+ raise message.EncodeError('Value out of range: %d' % uint64)
+ bytes = 1
+ while uint64 > 0x7f:
+ bytes += 1
+ uint64 >>= 7
+ return bytes
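+ # For reference: values 0-127 take 1 byte, 128-16383 take 2 bytes, and each
+ # additional 7 significant bits adds another byte, up to 10 bytes for the
+ # largest uint64 values.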
diff --git a/froofle/protobuf/message.py b/froofle/protobuf/message.py
new file mode 100644
index 0000000..ed71485
--- /dev/null
+++ b/froofle/protobuf/message.py
@@ -0,0 +1,246 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# TODO(robinson): We should just make these methods all "pure-virtual" and move
+# all implementation out, into reflection.py for now.
+
+
+"""Contains an abstract base class for protocol messages."""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+from froofle.protobuf import text_format
+
+class Error(Exception): pass
+class DecodeError(Error): pass
+class EncodeError(Error): pass
+
+
+class Message(object):
+
+ """Abstract base class for protocol messages.
+
+ Protocol message classes are almost always generated by the protocol
+ compiler. These generated types subclass Message and implement the methods
+ shown below.
+
+ TODO(robinson): Link to an HTML document here.
+
+ TODO(robinson): Document that instances of this class will also
+ have an Extensions attribute with __getitem__ and __setitem__.
+ Again, not sure how to best convey this.
+
+ TODO(robinson): Document that the class must also have a static
+ RegisterExtension(extension_field) method.
+ Not sure how to best express at this point.
+ """
+
+ # TODO(robinson): Document these fields and methods.
+
+ __slots__ = []
+
+ DESCRIPTOR = None
+
+ def __eq__(self, other_msg):
+ raise NotImplementedError
+
+ def __ne__(self, other_msg):
+ # Can't just say self != other_msg, since that would infinitely recurse. :)
+ return not self == other_msg
+
+ def __str__(self):
+ return text_format.MessageToString(self)
+
+ def MergeFrom(self, other_msg):
+ """Merges the contents of the specified message into current message.
+
+ This method merges the contents of the specified message into the current
+ message. Singular fields that are set in the specified message overwrite
+ the corresponding fields in the current message. Repeated fields are
+ appended. Singular sub-messages and groups are recursively merged.
+
+ Args:
+ other_msg: Message to merge into the current message.
+ """
+ raise NotImplementedError
+
+ def CopyFrom(self, other_msg):
+ """Copies the content of the specified message into the current message.
+
+ The method clears the current message and then merges the specified
+ message using MergeFrom.
+
+ Args:
+ other_msg: Message to copy into the current one.
+ """
+ if self == other_msg:
+ return
+ self.Clear()
+ self.MergeFrom(other_msg)
+
+ def Clear(self):
+ """Clears all data that was set in the message."""
+ raise NotImplementedError
+
+ def IsInitialized(self):
+ """Checks if the message is initialized.
+
+ Returns:
+ The method returns True if the message is initialized (i.e. all of its
+ required fields are set).
+ """
+ raise NotImplementedError
+
+ # TODO(robinson): MergeFromString() should probably return None and be
+ # implemented in terms of a helper that returns the # of bytes read. Our
+ # deserialization routines would use the helper when recursively
+ # deserializing, but the end user would almost always just want the no-return
+ # MergeFromString().
+
+ def MergeFromString(self, serialized):
+ """Merges serialized protocol buffer data into this message.
+
+ When we find a field in |serialized| that is already present
+ in this message:
+ - If it's a "repeated" field, we append to the end of our list.
+ - Else, if it's a scalar, we overwrite our field.
+ - Else, (it's a nonrepeated composite), we recursively merge
+ into the existing composite.
+
+ TODO(robinson): Document handling of unknown fields.
+
+ Args:
+ serialized: Any object that allows us to call buffer(serialized)
+ to access a string of bytes using the buffer interface.
+
+ TODO(robinson): When we switch to a helper, this will return None.
+
+ Returns:
+ The number of bytes read from |serialized|.
+ For non-group messages, this will always be len(serialized),
+ but for messages which are actually groups, this will
+ generally be less than len(serialized), since we must
+ stop when we reach an END_GROUP tag. Note that if
+ we *do* stop because of an END_GROUP tag, the number
+ of bytes returned does not include the bytes
+ for the END_GROUP tag information.
+ """
+ raise NotImplementedError
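+ # For illustration: merging the serialization '\x08\x96\x01' (field 1,
+ # varint 150) into a message whose field number 1 is an optional int32 sets
+ # that field to 150 and returns 3.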
+
+ def ParseFromString(self, serialized):
+ """Like MergeFromString(), except we clear the object first."""
+ self.Clear()
+ self.MergeFromString(serialized)
+
+ def SerializeToString(self):
+ """Serializes the protocol message to a binary string.
+
+ Returns:
+ A binary string representation of the message if all of the required
+ fields in the message are set (i.e. the message is initialized).
+
+ Raises:
+ message.EncodeError if the message isn't initialized.
+ """
+ raise NotImplementedError
+
+ def SerializePartialToString(self):
+ """Serializes the protocol message to a binary string.
+
+ This method is similar to SerializeToString but doesn't check if the
+ message is initialized.
+
+ Returns:
+ A string representation of the partial message.
+ """
+ raise NotImplementedError
+
+ # TODO(robinson): Decide whether we like these better
+ # than auto-generated has_foo() and clear_foo() methods
+ # on the instances themselves. This way is less consistent
+ # with C++, but it makes reflection-type access easier and
+ # reduces the number of magically autogenerated things.
+ #
+ # TODO(robinson): Be sure to document (and test) exactly
+ # which field names are accepted here. Are we case-sensitive?
+ # What do we do with fields that share names with Python keywords
+ # like 'lambda' and 'yield'?
+ #
+ # nnorwitz says:
+ # """
+ # Typically (in python), an underscore is appended to names that are
+ # keywords. So they would become lambda_ or yield_.
+ # """
+ def ListFields(self, field_name):
+ """Returns a list of (FieldDescriptor, value) tuples for all
+ fields in the message which are not empty. A singular field is non-empty
+ if HasField() would return true, and a repeated field is non-empty if
+ it contains at least one element. The fields are ordered by field
+ number"""
+ raise NotImplementedError
+
+ def HasField(self, field_name):
+ raise NotImplementedError
+
+ def ClearField(self, field_name):
+ raise NotImplementedError
+
+ def HasExtension(self, extension_handle):
+ raise NotImplementedError
+
+ def ClearExtension(self, extension_handle):
+ raise NotImplementedError
+
+ def ByteSize(self):
+ """Returns the serialized size of this message.
+ Recursively calls ByteSize() on all contained messages.
+ """
+ raise NotImplementedError
+
+ def _SetListener(self, message_listener):
+ """Internal method used by the protocol message implementation.
+ Clients should not call this directly.
+
+ Sets a listener that this message will call on certain state transitions.
+
+ The purpose of this method is to register back-edges from children to
+ parents at runtime, for the purpose of setting "has" bits and
+ byte-size-dirty bits in the parent and ancestor objects whenever a child or
+ descendant object is modified.
+
+ If the client wants to disconnect this Message from the object tree, she
+    explicitly sets message_listener to None.
+
+ If message_listener is None, unregisters any existing listener. Otherwise,
+ message_listener must implement the MessageListener interface in
+ internal/message_listener.py, and we discard any listener registered
+ via a previous _SetListener() call.
+ """
+ raise NotImplementedError
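+
+  # A minimal sketch of the listener shape this method expects, assuming the
+  # MessageListener interface in internal/message_listener.py consists of the
+  # two callbacks used by reflection.py:
+  #
+  #   class NoOpListener(object):
+  #     def TransitionToNonempty(self):
+  #       pass  # Called the first time the child becomes nonempty.
+  #     def ByteSizeDirty(self):
+  #       pass  # Called when the child's cached byte size is invalidated.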
diff --git a/froofle/protobuf/reflection.py b/froofle/protobuf/reflection.py
new file mode 100644
index 0000000..e2abff0
--- /dev/null
+++ b/froofle/protobuf/reflection.py
@@ -0,0 +1,1653 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This code is meant to work on Python 2.4 and above only.
+#
+# TODO(robinson): Helpers for verbose, common checks like seeing if a
+# descriptor's cpp_type is CPPTYPE_MESSAGE.
+
+"""Contains a metaclass and helper functions used to create
+protocol message classes from Descriptor objects at runtime.
+
+Recall that a metaclass is the "type" of a class.
+(A class is to a metaclass what an instance is to a class.)
+
+In this case, we use the GeneratedProtocolMessageType metaclass
+to inject all the useful functionality into the classes
+output by the protocol compiler at compile-time.
+
+The upshot of all this is that the real implementation
+details for ALL pure-Python protocol buffers are *here in
+this file*.
+"""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import heapq
+import threading
+import weakref
+# We use "as" to avoid name collisions with variables.
+from froofle.protobuf.internal import decoder
+from froofle.protobuf.internal import encoder
+from froofle.protobuf.internal import message_listener as message_listener_mod
+from froofle.protobuf.internal import type_checkers
+from froofle.protobuf.internal import wire_format
+from froofle.protobuf import descriptor as descriptor_mod
+from froofle.protobuf import message as message_mod
+
+_FieldDescriptor = descriptor_mod.FieldDescriptor
+
+
+class GeneratedProtocolMessageType(type):
+
+ """Metaclass for protocol message classes created at runtime from Descriptors.
+
+ We add implementations for all methods described in the Message class. We
+ also create properties to allow getting/setting all fields in the protocol
+ message. Finally, we create slots to prevent users from accidentally
+ "setting" nonexistent fields in the protocol message, which then wouldn't get
+ serialized / deserialized properly.
+
+ The protocol compiler currently uses this metaclass to create protocol
+ message classes at runtime. Clients can also manually create their own
+ classes at runtime, as in this example:
+
+ mydescriptor = Descriptor(.....)
+ class MyProtoClass(Message):
+ __metaclass__ = GeneratedProtocolMessageType
+ DESCRIPTOR = mydescriptor
+ myproto_instance = MyProtoClass()
+    myproto_instance.foo_field = 23
+ ...
+ """
+
+ # Must be consistent with the protocol-compiler code in
+ # proto2/compiler/internal/generator.*.
+ _DESCRIPTOR_KEY = 'DESCRIPTOR'
+
+ def __new__(cls, name, bases, dictionary):
+ """Custom allocation for runtime-generated class types.
+
+ We override __new__ because this is apparently the only place
+ where we can meaningfully set __slots__ on the class we're creating(?).
+    (The interplay between metaclasses and slots is not very well documented.)
+
+ Args:
+ name: Name of the class (ignored, but required by the
+ metaclass protocol).
+ bases: Base classes of the class we're constructing.
+ (Should be message.Message). We ignore this field, but
+        it's required by the metaclass protocol.
+ dictionary: The class dictionary of the class we're
+ constructing. dictionary[_DESCRIPTOR_KEY] must contain
+ a Descriptor object describing this protocol message
+ type.
+
+ Returns:
+ Newly-allocated class.
+ """
+ descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
+ _AddSlots(descriptor, dictionary)
+ _AddClassAttributesForNestedExtensions(descriptor, dictionary)
+ superclass = super(GeneratedProtocolMessageType, cls)
+ return superclass.__new__(cls, name, bases, dictionary)
+
+ def __init__(cls, name, bases, dictionary):
+ """Here we perform the majority of our work on the class.
+ We add enum getters, an __init__ method, implementations
+ of all Message methods, and properties for all fields
+ in the protocol type.
+
+ Args:
+ name: Name of the class (ignored, but required by the
+ metaclass protocol).
+ bases: Base classes of the class we're constructing.
+ (Should be message.Message). We ignore this field, but
+        it's required by the metaclass protocol.
+ dictionary: The class dictionary of the class we're
+ constructing. dictionary[_DESCRIPTOR_KEY] must contain
+ a Descriptor object describing this protocol message
+ type.
+ """
+ descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
+ # We act as a "friend" class of the descriptor, setting
+ # its _concrete_class attribute the first time we use a
+ # given descriptor to initialize a concrete protocol message
+ # class.
+ concrete_class_attr_name = '_concrete_class'
+ if not hasattr(descriptor, concrete_class_attr_name):
+ setattr(descriptor, concrete_class_attr_name, cls)
+ cls._known_extensions = []
+ _AddEnumValues(descriptor, cls)
+ _AddInitMethod(descriptor, cls)
+ _AddPropertiesForFields(descriptor, cls)
+ _AddStaticMethods(cls)
+ _AddMessageMethods(descriptor, cls)
+ _AddPrivateHelperMethods(cls)
+ superclass = super(GeneratedProtocolMessageType, cls)
+ superclass.__init__(cls, name, bases, dictionary)
+
+
+# Stateless helpers for GeneratedProtocolMessageType below.
+# Outside clients should not access these directly.
+#
+# I opted not to make any of these methods on the metaclass, to make it more
+# clear that I'm not really using any state there and to keep clients from
+# thinking that they have direct access to these construction helpers.
+
+
+def _PropertyName(proto_field_name):
+ """Returns the name of the public property attribute which
+ clients can use to get and (in some cases) set the value
+ of a protocol message field.
+
+ Args:
+ proto_field_name: The protocol message field name, exactly
+ as it appears (or would appear) in a .proto file.
+ """
+ # TODO(robinson): Escape Python keywords (e.g., yield), and test this support.
+ # nnorwitz makes my day by writing:
+ # """
+ # FYI. See the keyword module in the stdlib. This could be as simple as:
+ #
+ # if keyword.iskeyword(proto_field_name):
+ # return proto_field_name + "_"
+ # return proto_field_name
+ # """
+ return proto_field_name
+
+
+def _ValueFieldName(proto_field_name):
+ """Returns the name of the (internal) instance attribute which objects
+ should use to store the current value for a given protocol message field.
+
+ Args:
+ proto_field_name: The protocol message field name, exactly
+ as it appears (or would appear) in a .proto file.
+ """
+ return '_value_' + proto_field_name
+
+
+def _HasFieldName(proto_field_name):
+ """Returns the name of the (internal) instance attribute which
+ objects should use to store a boolean telling whether this field
+ is explicitly set or not.
+
+ Args:
+ proto_field_name: The protocol message field name, exactly
+ as it appears (or would appear) in a .proto file.
+ """
+ return '_has_' + proto_field_name
+
+
+def _AddSlots(message_descriptor, dictionary):
+ """Adds a __slots__ entry to dictionary, containing the names of all valid
+ attributes for this message type.
+
+ Args:
+ message_descriptor: A Descriptor instance describing this message type.
+ dictionary: Class dictionary to which we'll add a '__slots__' entry.
+ """
+ field_names = [_ValueFieldName(f.name) for f in message_descriptor.fields]
+ field_names.extend(_HasFieldName(f.name) for f in message_descriptor.fields
+ if f.label != _FieldDescriptor.LABEL_REPEATED)
+ field_names.extend(('Extensions',
+ '_cached_byte_size',
+ '_cached_byte_size_dirty',
+ '_called_transition_to_nonempty',
+ '_listener',
+ '_lock', '__weakref__'))
+ dictionary['__slots__'] = field_names
+
+
+def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
+ extension_dict = descriptor.extensions_by_name
+ for extension_name, extension_field in extension_dict.iteritems():
+ assert extension_name not in dictionary
+ dictionary[extension_name] = extension_field
+
+
+def _AddEnumValues(descriptor, cls):
+ """Sets class-level attributes for all enum fields defined in this message.
+
+ Args:
+ descriptor: Descriptor object for this message type.
+ cls: Class we're constructing for this message type.
+ """
+ for enum_type in descriptor.enum_types:
+ for enum_value in enum_type.values:
+ setattr(cls, enum_value.name, enum_value.number)
+
+
+def _DefaultValueForField(message, field):
+ """Returns a default value for a field.
+
+ Args:
+ message: Message instance containing this field, or a weakref proxy
+ of same.
+ field: FieldDescriptor object for this field.
+
+ Returns: A default value for this field. May refer back to |message|
+ via a weak reference.
+ """
+ # TODO(robinson): Only the repeated fields need a reference to 'message' (so
+ # that they can set the 'has' bit on the containing Message when someone
+ # append()s a value). We could special-case this, and avoid an extra
+ # function call on __init__() and Clear() for non-repeated fields.
+
+ # TODO(robinson): Find a better place for the default value assertion in this
+ # function. No need to repeat them every time the client calls Clear('foo').
+ # (We should probably just assert these things once and as early as possible,
+ # by tightening checking in the descriptor classes.)
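+  # In short: repeated fields get a fresh (empty) container, non-repeated
+  # message fields default to None (they are created lazily by their property
+  # getters), and non-repeated scalars fall back to the default declared in
+  # the .proto file.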
+ if field.label == _FieldDescriptor.LABEL_REPEATED:
+ if field.default_value != []:
+ raise ValueError('Repeated field default value not empty list: %s' % (
+ field.default_value))
+ listener = _Listener(message, None)
+ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+ # We can't look at _concrete_class yet since it might not have
+ # been set. (Depends on order in which we initialize the classes).
+ return _RepeatedCompositeFieldContainer(listener, field.message_type)
+ else:
+ return _RepeatedScalarFieldContainer(
+ listener, type_checkers.GetTypeChecker(field.cpp_type, field.type))
+
+ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+ assert field.default_value is None
+
+ return field.default_value
+
+
+def _AddInitMethod(message_descriptor, cls):
+ """Adds an __init__ method to cls."""
+ fields = message_descriptor.fields
+ def init(self):
+ self._cached_byte_size = 0
+ self._cached_byte_size_dirty = False
+ self._listener = message_listener_mod.NullMessageListener()
+ self._called_transition_to_nonempty = False
+ # TODO(robinson): We should only create a lock if we really need one
+ # in this class.
+ self._lock = threading.Lock()
+ for field in fields:
+ default_value = _DefaultValueForField(self, field)
+ python_field_name = _ValueFieldName(field.name)
+ setattr(self, python_field_name, default_value)
+ if field.label != _FieldDescriptor.LABEL_REPEATED:
+ setattr(self, _HasFieldName(field.name), False)
+ self.Extensions = _ExtensionDict(self, cls._known_extensions)
+
+ init.__module__ = None
+ init.__doc__ = None
+ cls.__init__ = init
+
+
+def _AddPropertiesForFields(descriptor, cls):
+ """Adds properties for all fields in this protocol message type."""
+ for field in descriptor.fields:
+ _AddPropertiesForField(field, cls)
+
+
+def _AddPropertiesForField(field, cls):
+ """Adds a public property for a protocol message field.
+ Clients can use this property to get and (in the case
+ of non-repeated scalar fields) directly set the value
+ of a protocol message field.
+
+ Args:
+ field: A FieldDescriptor for this field.
+ cls: The class we're constructing.
+ """
+ # Catch it if we add other types that we should
+ # handle specially here.
+ assert _FieldDescriptor.MAX_CPPTYPE == 10
+
+ if field.label == _FieldDescriptor.LABEL_REPEATED:
+ _AddPropertiesForRepeatedField(field, cls)
+ elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+ _AddPropertiesForNonRepeatedCompositeField(field, cls)
+ else:
+ _AddPropertiesForNonRepeatedScalarField(field, cls)
+
+
+def _AddPropertiesForRepeatedField(field, cls):
+ """Adds a public property for a "repeated" protocol message field. Clients
+ can use this property to get the value of the field, which will be either a
+ _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
+ below).
+
+ Note that when clients add values to these containers, we perform
+ type-checking in the case of repeated scalar fields, and we also set any
+ necessary "has" bits as a side-effect.
+
+ Args:
+ field: A FieldDescriptor for this field.
+ cls: The class we're constructing.
+ """
+ proto_field_name = field.name
+ python_field_name = _ValueFieldName(proto_field_name)
+ property_name = _PropertyName(proto_field_name)
+
+ def getter(self):
+ return getattr(self, python_field_name)
+ getter.__module__ = None
+ getter.__doc__ = 'Getter for %s.' % proto_field_name
+
+ # We define a setter just so we can throw an exception with a more
+ # helpful error message.
+ def setter(self, new_value):
+ raise AttributeError('Assignment not allowed to repeated field '
+ '"%s" in protocol message object.' % proto_field_name)
+
+ doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
+ setattr(cls, property_name, property(getter, setter, doc=doc))
+
+
+def _AddPropertiesForNonRepeatedScalarField(field, cls):
+ """Adds a public property for a nonrepeated, scalar protocol message field.
+ Clients can use this property to get and directly set the value of the field.
+ Note that when the client sets the value of a field by using this property,
+ all necessary "has" bits are set as a side-effect, and we also perform
+ type-checking.
+
+ Args:
+ field: A FieldDescriptor for this field.
+ cls: The class we're constructing.
+ """
+ proto_field_name = field.name
+ python_field_name = _ValueFieldName(proto_field_name)
+ has_field_name = _HasFieldName(proto_field_name)
+ property_name = _PropertyName(proto_field_name)
+ type_checker = type_checkers.GetTypeChecker(field.cpp_type, field.type)
+
+ def getter(self):
+ return getattr(self, python_field_name)
+ getter.__module__ = None
+ getter.__doc__ = 'Getter for %s.' % proto_field_name
+ def setter(self, new_value):
+ type_checker.CheckValue(new_value)
+ setattr(self, has_field_name, True)
+ self._MarkByteSizeDirty()
+ self._MaybeCallTransitionToNonemptyCallback()
+ setattr(self, python_field_name, new_value)
+ setter.__module__ = None
+ setter.__doc__ = 'Setter for %s.' % proto_field_name
+
+ # Add a property to encapsulate the getter/setter.
+ doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
+ setattr(cls, property_name, property(getter, setter, doc=doc))
+
+
+def _AddPropertiesForNonRepeatedCompositeField(field, cls):
+ """Adds a public property for a nonrepeated, composite protocol message field.
+ A composite field is a "group" or "message" field.
+
+ Clients can use this property to get the value of the field, but cannot
+ assign to the property directly.
+
+ Args:
+ field: A FieldDescriptor for this field.
+ cls: The class we're constructing.
+ """
+ # TODO(robinson): Remove duplication with similar method
+ # for non-repeated scalars.
+ proto_field_name = field.name
+ python_field_name = _ValueFieldName(proto_field_name)
+ has_field_name = _HasFieldName(proto_field_name)
+ property_name = _PropertyName(proto_field_name)
+ message_type = field.message_type
+
+ def getter(self):
+ # TODO(robinson): Appropriately scary note about double-checked locking.
+ field_value = getattr(self, python_field_name)
+ if field_value is None:
+ self._lock.acquire()
+ try:
+ field_value = getattr(self, python_field_name)
+ if field_value is None:
+ field_class = message_type._concrete_class
+ field_value = field_class()
+ field_value._SetListener(_Listener(self, has_field_name))
+ setattr(self, python_field_name, field_value)
+ finally:
+ self._lock.release()
+ return field_value
+ getter.__module__ = None
+ getter.__doc__ = 'Getter for %s.' % proto_field_name
+
+ # We define a setter just so we can throw an exception with a more
+ # helpful error message.
+ def setter(self, new_value):
+ raise AttributeError('Assignment not allowed to composite field '
+ '"%s" in protocol message object.' % proto_field_name)
+
+ # Add a property to encapsulate the getter.
+ doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
+ setattr(cls, property_name, property(getter, setter, doc=doc))
+
+
+def _AddStaticMethods(cls):
+ # TODO(robinson): This probably needs to be thread-safe(?)
+ def RegisterExtension(extension_handle):
+ extension_handle.containing_type = cls.DESCRIPTOR
+ cls._known_extensions.append(extension_handle)
+ cls.RegisterExtension = staticmethod(RegisterExtension)
+
+
+def _AddListFieldsMethod(message_descriptor, cls):
+ """Helper for _AddMessageMethods()."""
+
+ # Ensure that we always list in ascending field-number order.
+ # For non-extension fields, we can do the sort once, here, at import-time.
+ # For extensions, we sort on each ListFields() call, though
+ # we could do better if we have to.
+ fields = sorted(message_descriptor.fields, key=lambda f: f.number)
+ has_field_names = (_HasFieldName(f.name) for f in fields)
+ value_field_names = (_ValueFieldName(f.name) for f in fields)
+ triplets = zip(has_field_names, value_field_names, fields)
+
+ def ListFields(self):
+ # We need to list all extension and non-extension fields
+ # together, in sorted order by field number.
+
+ # Step 0: Get an iterator over all "set" non-extension fields,
+ # sorted by field number.
+ # This iterator yields (field_number, field_descriptor, value) tuples.
+ def SortedSetFieldsIter():
+ # Note that triplets is already sorted by field number.
+ for has_field_name, value_field_name, field_descriptor in triplets:
+ if field_descriptor.label == _FieldDescriptor.LABEL_REPEATED:
+ value = getattr(self, _ValueFieldName(field_descriptor.name))
+ if len(value) > 0:
+ yield (field_descriptor.number, field_descriptor, value)
+ elif getattr(self, _HasFieldName(field_descriptor.name)):
+ value = getattr(self, _ValueFieldName(field_descriptor.name))
+ yield (field_descriptor.number, field_descriptor, value)
+ sorted_fields = SortedSetFieldsIter()
+
+ # Step 1: Get an iterator over all "set" extension fields,
+ # sorted by field number.
+ # This iterator ALSO yields (field_number, field_descriptor, value) tuples.
+ # TODO(robinson): It's not necessary to repeat this with each
+ # serialization call. We can do better.
+ sorted_extension_fields = sorted(
+ [(f.number, f, v) for f, v in self.Extensions._ListSetExtensions()])
+
+ # Step 2: Create a composite iterator that merges the extension-
+ # and non-extension fields, and that still yields fields in
+ # sorted order.
+ all_set_fields = _ImergeSorted(sorted_fields, sorted_extension_fields)
+
+ # Step 3: Strip off the field numbers and return.
+ return [field[1:] for field in all_set_fields]
+
+ cls.ListFields = ListFields
+
+def _AddHasFieldMethod(cls):
+ """Helper for _AddMessageMethods()."""
+ def HasField(self, field_name):
+ try:
+ return getattr(self, _HasFieldName(field_name))
+ except AttributeError:
+ raise ValueError('Protocol message has no "%s" field.' % field_name)
+ cls.HasField = HasField
+
+
+def _AddClearFieldMethod(cls):
+ """Helper for _AddMessageMethods()."""
+ def ClearField(self, field_name):
+ try:
+ field = self.DESCRIPTOR.fields_by_name[field_name]
+ except KeyError:
+ raise ValueError('Protocol message has no "%s" field.' % field_name)
+ proto_field_name = field.name
+ python_field_name = _ValueFieldName(proto_field_name)
+ has_field_name = _HasFieldName(proto_field_name)
+ default_value = _DefaultValueForField(self, field)
+ if field.label == _FieldDescriptor.LABEL_REPEATED:
+ self._MarkByteSizeDirty()
+ else:
+ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+ old_field_value = getattr(self, python_field_name)
+ if old_field_value is not None:
+ # Snip the old object out of the object tree.
+ old_field_value._SetListener(None)
+ if getattr(self, has_field_name):
+ setattr(self, has_field_name, False)
+ # Set dirty bit on ourself and parents only if
+ # we're actually changing state.
+ self._MarkByteSizeDirty()
+ setattr(self, python_field_name, default_value)
+ cls.ClearField = ClearField
+
+
+def _AddClearExtensionMethod(cls):
+ """Helper for _AddMessageMethods()."""
+ def ClearExtension(self, extension_handle):
+ self.Extensions._ClearExtension(extension_handle)
+ cls.ClearExtension = ClearExtension
+
+
+def _AddClearMethod(cls):
+ """Helper for _AddMessageMethods()."""
+ def Clear(self):
+ # Clear fields.
+ fields = self.DESCRIPTOR.fields
+ for field in fields:
+ self.ClearField(field.name)
+ # Clear extensions.
+ extensions = self.Extensions._ListSetExtensions()
+ for extension in extensions:
+ self.ClearExtension(extension[0])
+ cls.Clear = Clear
+
+
+def _AddHasExtensionMethod(cls):
+ """Helper for _AddMessageMethods()."""
+ def HasExtension(self, extension_handle):
+ return self.Extensions._HasExtension(extension_handle)
+ cls.HasExtension = HasExtension
+
+
+def _AddEqualsMethod(message_descriptor, cls):
+ """Helper for _AddMessageMethods()."""
+ def __eq__(self, other):
+ if self is other:
+ return True
+
+ # Compare all fields contained directly in this message.
+ for field_descriptor in message_descriptor.fields:
+ label = field_descriptor.label
+ property_name = _PropertyName(field_descriptor.name)
+ # Non-repeated field equality requires matching "has" bits as well
+ # as having an equal value.
+ if label != _FieldDescriptor.LABEL_REPEATED:
+ self_has = self.HasField(property_name)
+ other_has = other.HasField(property_name)
+ if self_has != other_has:
+ return False
+ if not self_has:
+ # If the "has" bit for this field is False, we must stop here.
+ # Otherwise we will recurse forever on recursively-defined protos.
+ continue
+ if getattr(self, property_name) != getattr(other, property_name):
+ return False
+
+ # Compare the extensions present in both messages.
+ return self.Extensions == other.Extensions
+ cls.__eq__ = __eq__
+
+
+def _AddSetListenerMethod(cls):
+ """Helper for _AddMessageMethods()."""
+ def SetListener(self, listener):
+ if listener is None:
+ self._listener = message_listener_mod.NullMessageListener()
+ else:
+ self._listener = listener
+ cls._SetListener = SetListener
+
+
+def _BytesForNonRepeatedElement(value, field_number, field_type):
+ """Returns the number of bytes needed to serialize a non-repeated element.
+ The returned byte count includes space for tag information and any
+ other additional space associated with serializing value.
+
+ Args:
+ value: Value we're serializing.
+ field_number: Field number of this value. (Since the field number
+ is stored as part of a varint-encoded tag, this has an impact
+ on the total bytes required to serialize the value).
+ field_type: The type of the field. One of the TYPE_* constants
+ within FieldDescriptor.
+ """
+ try:
+ fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
+ return fn(field_number, value)
+ except KeyError:
+ raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
+
+
+def _AddByteSizeMethod(message_descriptor, cls):
+ """Helper for _AddMessageMethods()."""
+
+ def BytesForField(message, field, value):
+ """Returns the number of bytes required to serialize a single field
+ in message. The field may be repeated or not, composite or not.
+
+ Args:
+ message: The Message instance containing a field of the given type.
+ field: A FieldDescriptor describing the field of interest.
+ value: The value whose byte size we're interested in.
+
+ Returns: The number of bytes required to serialize the current value
+ of "field" in "message", including space for tags and any other
+ necessary information.
+ """
+
+ if _MessageSetField(field):
+ return wire_format.MessageSetItemByteSize(field.number, value)
+
+ field_number, field_type = field.number, field.type
+
+ # Repeated fields.
+ if field.label == _FieldDescriptor.LABEL_REPEATED:
+ elements = value
+ else:
+ elements = [value]
+
+ size = sum(_BytesForNonRepeatedElement(element, field_number, field_type)
+ for element in elements)
+ return size
+
+ fields = message_descriptor.fields
+ has_field_names = (_HasFieldName(f.name) for f in fields)
+ zipped = zip(has_field_names, fields)
+
+ def ByteSize(self):
+ if not self._cached_byte_size_dirty:
+ return self._cached_byte_size
+
+ size = 0
+ # Hardcoded fields first.
+ for has_field_name, field in zipped:
+ if (field.label == _FieldDescriptor.LABEL_REPEATED
+ or getattr(self, has_field_name)):
+ value = getattr(self, _ValueFieldName(field.name))
+ size += BytesForField(self, field, value)
+ # Extensions next.
+ for field, value in self.Extensions._ListSetExtensions():
+ size += BytesForField(self, field, value)
+
+ self._cached_byte_size = size
+ self._cached_byte_size_dirty = False
+ return size
+ cls.ByteSize = ByteSize
+
+
+def _MessageSetField(field_descriptor):
+ """Checks if a field should be serialized using the message set wire format.
+
+ Args:
+ field_descriptor: Descriptor of the field.
+
+ Returns:
+ True if the field should be serialized using the message set wire format,
+ false otherwise.
+ """
+ return (field_descriptor.is_extension and
+ field_descriptor.label != _FieldDescriptor.LABEL_REPEATED and
+ field_descriptor.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
+ field_descriptor.containing_type.GetOptions().message_set_wire_format)
+
+
+def _SerializeValueToEncoder(value, field_number, field_descriptor, encoder):
+ """Appends the serialization of a single value to encoder.
+
+ Args:
+ value: Value to serialize.
+ field_number: Field number of this value.
+ field_descriptor: Descriptor of the field to serialize.
+ encoder: encoder.Encoder object to which we should serialize this value.
+ """
+ if _MessageSetField(field_descriptor):
+ encoder.AppendMessageSetItem(field_number, value)
+ return
+
+ try:
+ method = type_checkers.TYPE_TO_SERIALIZE_METHOD[field_descriptor.type]
+ method(encoder, field_number, value)
+ except KeyError:
+ raise message_mod.EncodeError('Unrecognized field type: %d' %
+ field_descriptor.type)
+
+
+def _ImergeSorted(*streams):
+ """Merges N sorted iterators into a single sorted iterator.
+ Each element in streams must be an iterable that yields
+ its elements in sorted order, and the elements contained
+ in each stream must all be comparable.
+
+ There may be repeated elements in the component streams or
+ across the streams; the repeated elements will all be repeated
+ in the merged iterator as well.
+
+ I believe that the heapq module at HEAD in the Python
+ sources has a method like this, but for now we roll our own.
+ """
+ iters = [iter(stream) for stream in streams]
+ heap = []
+ for index, it in enumerate(iters):
+ try:
+ heap.append((it.next(), index))
+ except StopIteration:
+ pass
+ heapq.heapify(heap)
+
+ while heap:
+ smallest_value, idx = heap[0]
+ yield smallest_value
+ try:
+ next_element = iters[idx].next()
+ heapq.heapreplace(heap, (next_element, idx))
+ except StopIteration:
+ heapq.heappop(heap)
+
+
+def _AddSerializeToStringMethod(message_descriptor, cls):
+ """Helper for _AddMessageMethods()."""
+
+ def SerializeToString(self):
+ # Check if the message has all of its required fields set.
+ errors = []
+ if not _InternalIsInitialized(self, errors):
+ raise message_mod.EncodeError('\n'.join(errors))
+ return self.SerializePartialToString()
+ cls.SerializeToString = SerializeToString
+
+
+def _AddSerializePartialToStringMethod(message_descriptor, cls):
+ """Helper for _AddMessageMethods()."""
+ Encoder = encoder.Encoder
+
+ def SerializePartialToString(self):
+ encoder = Encoder()
+ # We need to serialize all extension and non-extension fields
+ # together, in sorted order by field number.
+ for field_descriptor, field_value in self.ListFields():
+ if field_descriptor.label == _FieldDescriptor.LABEL_REPEATED:
+ repeated_value = field_value
+ else:
+ repeated_value = [field_value]
+ for element in repeated_value:
+ _SerializeValueToEncoder(element, field_descriptor.number,
+ field_descriptor, encoder)
+ return encoder.ToString()
+ cls.SerializePartialToString = SerializePartialToString
+
+
+def _WireTypeForFieldType(field_type):
+ """Given a field type, returns the expected wire type."""
+ try:
+ return type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_type]
+ except KeyError:
+ raise message_mod.DecodeError('Unknown field type: %d' % field_type)
+
+
+def _RecursivelyMerge(field_number, field_type, decoder, message):
+ """Decodes a message from decoder into message.
+ message is either a group or a nested message within some containing
+ protocol message. If it's a group, we use the group protocol to
+ deserialize, and if it's a nested message, we use the nested-message
+ protocol.
+
+ Args:
+ field_number: The field number of message in its enclosing protocol buffer.
+ field_type: The field type of message. Must be either TYPE_MESSAGE
+ or TYPE_GROUP.
+ decoder: Decoder to read from.
+ message: Message to deserialize into.
+ """
+ if field_type == _FieldDescriptor.TYPE_MESSAGE:
+ decoder.ReadMessageInto(message)
+ elif field_type == _FieldDescriptor.TYPE_GROUP:
+ decoder.ReadGroupInto(field_number, message)
+ else:
+ raise message_mod.DecodeError('Unexpected field type: %d' % field_type)
+
+
+def _DeserializeScalarFromDecoder(field_type, decoder):
+ """Deserializes a scalar of the requested type from decoder. field_type must
+ be a scalar (non-group, non-message) FieldDescriptor.FIELD_* constant.
+ """
+ try:
+ method = type_checkers.TYPE_TO_DESERIALIZE_METHOD[field_type]
+ return method(decoder)
+ except KeyError:
+ raise message_mod.DecodeError('Unrecognized field type: %d' % field_type)
+
+
+def _SkipField(field_number, wire_type, decoder):
+ """Skips a field with the specified wire type.
+
+ Args:
+ field_number: Tag number of the field to skip.
+ wire_type: Wire type of the field to skip.
+    decoder: Decoder used to deserialize the message. It must be positioned
+      just after reading the tag and wire type of the field.
+ """
+ if wire_type == wire_format.WIRETYPE_VARINT:
+ decoder.ReadUInt64()
+ elif wire_type == wire_format.WIRETYPE_FIXED64:
+ decoder.ReadFixed64()
+ elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED:
+ decoder.SkipBytes(decoder.ReadInt32())
+ elif wire_type == wire_format.WIRETYPE_START_GROUP:
+ _SkipGroup(field_number, decoder)
+ elif wire_type == wire_format.WIRETYPE_END_GROUP:
+ pass
+ elif wire_type == wire_format.WIRETYPE_FIXED32:
+ decoder.ReadFixed32()
+ else:
+ raise message_mod.DecodeError('Unexpected wire type: %d' % wire_type)
+
+
+def _SkipGroup(group_number, decoder):
+ """Skips a nested group from the decoder.
+
+ Args:
+ group_number: Tag number of the group to skip.
+ decoder: Decoder used to deserialize the message. It must be positioned
+ exactly at the beginning of the message that should be skipped.
+ """
+ while True:
+ field_number, wire_type = decoder.ReadFieldNumberAndWireType()
+ if (wire_type == wire_format.WIRETYPE_END_GROUP and
+ field_number == group_number):
+ return
+ _SkipField(field_number, wire_type, decoder)
+
+
+def _DeserializeMessageSetItem(message, decoder):
+ """Deserializes a message using the message set wire format.
+
+ Args:
+    message: Message to parse the data into.
+ decoder: The decoder to be used to deserialize encoded data. Note that the
+ decoder should be positioned just after reading the START_GROUP tag that
+ began the messageset item.
+ """
+ field_number, wire_type = decoder.ReadFieldNumberAndWireType()
+ if wire_type != wire_format.WIRETYPE_VARINT or field_number != 2:
+ raise message_mod.DecodeError(
+ 'Incorrect message set wire format. '
+ 'wire_type: %d, field_number: %d' % (wire_type, field_number))
+
+ type_id = decoder.ReadInt32()
+ field_number, wire_type = decoder.ReadFieldNumberAndWireType()
+ if wire_type != wire_format.WIRETYPE_LENGTH_DELIMITED or field_number != 3:
+ raise message_mod.DecodeError(
+ 'Incorrect message set wire format. '
+ 'wire_type: %d, field_number: %d' % (wire_type, field_number))
+
+ extension_dict = message.Extensions
+ extensions_by_number = extension_dict._AllExtensionsByNumber()
+ if type_id not in extensions_by_number:
+ _SkipField(field_number, wire_type, decoder)
+ return
+
+ field_descriptor = extensions_by_number[type_id]
+ value = extension_dict[field_descriptor]
+ decoder.ReadMessageInto(value)
+ # Read the END_GROUP tag.
+ field_number, wire_type = decoder.ReadFieldNumberAndWireType()
+ if wire_type != wire_format.WIRETYPE_END_GROUP or field_number != 1:
+ raise message_mod.DecodeError(
+ 'Incorrect message set wire format. '
+ 'wire_type: %d, field_number: %d' % (wire_type, field_number))
+
+
+def _DeserializeOneEntity(message_descriptor, message, decoder):
+ """Deserializes the next wire entity from decoder into message.
+ The next wire entity is either a scalar or a nested message,
+ and may also be an element in a repeated field (the wire encoding
+ is the same).
+
+ Args:
+ message_descriptor: A Descriptor instance describing all fields
+ in message.
+ message: The Message instance into which we're decoding our fields.
+ decoder: The Decoder we're using to deserialize encoded data.
+
+ Returns: The number of bytes read from decoder during this method.
+ """
+ initial_position = decoder.Position()
+ field_number, wire_type = decoder.ReadFieldNumberAndWireType()
+ extension_dict = message.Extensions
+ extensions_by_number = extension_dict._AllExtensionsByNumber()
+ if field_number in message_descriptor.fields_by_number:
+ # Non-extension field.
+ field_descriptor = message_descriptor.fields_by_number[field_number]
+ value = getattr(message, _PropertyName(field_descriptor.name))
+ def nonextension_setter_fn(scalar):
+ setattr(message, _PropertyName(field_descriptor.name), scalar)
+ scalar_setter_fn = nonextension_setter_fn
+ elif field_number in extensions_by_number:
+ # Extension field.
+ field_descriptor = extensions_by_number[field_number]
+ value = extension_dict[field_descriptor]
+ def extension_setter_fn(scalar):
+ extension_dict[field_descriptor] = scalar
+ scalar_setter_fn = extension_setter_fn
+ elif wire_type == wire_format.WIRETYPE_END_GROUP:
+ # We assume we're being parsed as the group that's ended.
+ return 0
+ elif (wire_type == wire_format.WIRETYPE_START_GROUP and
+ field_number == 1 and
+ message_descriptor.GetOptions().message_set_wire_format):
+ # A Message Set item.
+ _DeserializeMessageSetItem(message, decoder)
+ return decoder.Position() - initial_position
+ else:
+ _SkipField(field_number, wire_type, decoder)
+ return decoder.Position() - initial_position
+
+ # If we reach this point, we've identified the field as either
+ # hardcoded or extension, and set |field_descriptor|, |scalar_setter_fn|,
+ # and |value| appropriately. Now actually deserialize the thing.
+ #
+ # field_descriptor: Describes the field we're deserializing.
+ # value: The value currently stored in the field to deserialize.
+ # Used only if the field is composite and/or repeated.
+ # scalar_setter_fn: A function F such that F(scalar) will
+ # set a nonrepeated scalar value for this field. Used only
+ # if this field is a nonrepeated scalar.
+
+ field_number = field_descriptor.number
+ field_type = field_descriptor.type
+ expected_wire_type = _WireTypeForFieldType(field_type)
+ if wire_type != expected_wire_type:
+ # Need to fill in uninterpreted_bytes. Work for the next CL.
+ raise RuntimeError('TODO(robinson): Wiretype mismatches not handled.')
+
+ property_name = _PropertyName(field_descriptor.name)
+ label = field_descriptor.label
+ cpp_type = field_descriptor.cpp_type
+
+ # Nonrepeated scalar. Just set the field directly.
+ if (label != _FieldDescriptor.LABEL_REPEATED
+ and cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE):
+ scalar_setter_fn(_DeserializeScalarFromDecoder(field_type, decoder))
+ return decoder.Position() - initial_position
+
+ # Nonrepeated composite. Recursively deserialize.
+ if label != _FieldDescriptor.LABEL_REPEATED:
+ composite = value
+ _RecursivelyMerge(field_number, field_type, decoder, composite)
+ return decoder.Position() - initial_position
+
+ # Now we know we're dealing with a repeated field of some kind.
+ element_list = value
+
+ if cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE:
+ # Repeated scalar.
+ element_list.append(_DeserializeScalarFromDecoder(field_type, decoder))
+ return decoder.Position() - initial_position
+ else:
+ # Repeated composite.
+ composite = element_list.add()
+ _RecursivelyMerge(field_number, field_type, decoder, composite)
+ return decoder.Position() - initial_position
+
+
+def _FieldOrExtensionValues(message, field_or_extension):
+ """Retrieves the list of values for the specified field or extension.
+
+ The target field or extension can be optional, required or repeated, but it
+ must have value(s) set. The assumption is that the target field or extension
+  is set (i.e. _HasFieldOrExtension holds true).
+
+ Args:
+ message: Message which contains the target field or extension.
+ field_or_extension: Field or extension for which the list of values is
+ required. Must be an instance of FieldDescriptor.
+
+ Returns:
+ A list of values for the specified field or extension. This list will only
+ contain a single element if the field is non-repeated.
+ """
+ if field_or_extension.is_extension:
+ value = message.Extensions[field_or_extension]
+ else:
+ value = getattr(message, _ValueFieldName(field_or_extension.name))
+ if field_or_extension.label != _FieldDescriptor.LABEL_REPEATED:
+ return [value]
+ else:
+    # In this case value is a list of repeated values.
+ return value
+
+
+def _HasFieldOrExtension(message, field_or_extension):
+ """Checks if a message has the specified field or extension set.
+
+ The field or extension specified can be optional, required or repeated. If
+ it is repeated, this function returns True. Otherwise it checks the has bit
+ of the field or extension.
+
+ Args:
+ message: Message which contains the target field or extension.
+ field_or_extension: Field or extension to check. This must be a
+ FieldDescriptor instance.
+
+ Returns:
+ True if the message has a value set for the specified field or extension,
+ or if the field or extension is repeated.
+ """
+ if field_or_extension.label == _FieldDescriptor.LABEL_REPEATED:
+ return True
+ if field_or_extension.is_extension:
+ return message.HasExtension(field_or_extension)
+ else:
+ return message.HasField(field_or_extension.name)
+
+
+def _IsFieldOrExtensionInitialized(message, field, errors=None):
+ """Checks if a message field or extension is initialized.
+
+ Args:
+ message: The message which contains the field or extension.
+ field: Field or extension to check. This must be a FieldDescriptor instance.
+    errors: If not None, initialization errors will be appended to it.
+
+ Returns:
+ True if the field/extension can be considered initialized.
+ """
+ # If the field is required and is not set, it isn't initialized.
+ if field.label == _FieldDescriptor.LABEL_REQUIRED:
+ if not _HasFieldOrExtension(message, field):
+ if errors is not None:
+ errors.append('Required field %s is not set.' % field.full_name)
+ return False
+
+ # If the field is optional and is not set, or if it
+ # isn't a submessage then the field is initialized.
+ if field.label == _FieldDescriptor.LABEL_OPTIONAL:
+ if not _HasFieldOrExtension(message, field):
+ return True
+ if field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE:
+ return True
+
+ # The field is set and is either a single or a repeated submessage.
+ messages = _FieldOrExtensionValues(message, field)
+ # If all submessages in this field are initialized, the field is
+ # considered initialized.
+ for message in messages:
+ if not _InternalIsInitialized(message, errors):
+ return False
+ return True
+
+
+def _InternalIsInitialized(message, errors=None):
+ """Checks if all required fields of a message are set.
+
+ Args:
+ message: The message to check.
+ errors: If set, initialization errors will be appended to it.
+
+ Returns:
+ True iff the specified message has all required fields set.
+ """
+ fields_and_extensions = []
+ fields_and_extensions.extend(message.DESCRIPTOR.fields)
+ fields_and_extensions.extend(
+ [extension[0] for extension in message.Extensions._ListSetExtensions()])
+ for field_or_extension in fields_and_extensions:
+ if not _IsFieldOrExtensionInitialized(message, field_or_extension, errors):
+ return False
+ return True
+
+
+def _AddMergeFromStringMethod(message_descriptor, cls):
+ """Helper for _AddMessageMethods()."""
+ Decoder = decoder.Decoder
+ def MergeFromString(self, serialized):
+ decoder = Decoder(serialized)
+ byte_count = 0
+ while not decoder.EndOfStream():
+ bytes_read = _DeserializeOneEntity(message_descriptor, self, decoder)
+ if not bytes_read:
+ break
+ byte_count += bytes_read
+ return byte_count
+ cls.MergeFromString = MergeFromString
+
+
+def _AddIsInitializedMethod(cls):
+ """Adds the IsInitialized method to the protocol message class."""
+ cls.IsInitialized = _InternalIsInitialized
+
+
+def _MergeFieldOrExtension(destination_msg, field, value):
+ """Merges a specified message field into another message."""
+ property_name = _PropertyName(field.name)
+ is_extension = field.is_extension
+
+ if not is_extension:
+ destination = getattr(destination_msg, property_name)
+ elif (field.label == _FieldDescriptor.LABEL_REPEATED or
+ field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
+ destination = destination_msg.Extensions[field]
+
+ # Case 1 - a composite field.
+ if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+ if field.label == _FieldDescriptor.LABEL_REPEATED:
+ for v in value:
+ destination.add().MergeFrom(v)
+ else:
+ destination.MergeFrom(value)
+ return
+
+ # Case 2 - a repeated field.
+ if field.label == _FieldDescriptor.LABEL_REPEATED:
+ for v in value:
+ destination.append(v)
+ return
+
+ # Case 3 - a singular field.
+ if is_extension:
+ destination_msg.Extensions[field] = value
+ else:
+ setattr(destination_msg, property_name, value)
+
+
+def _AddMergeFromMethod(cls):
+ def MergeFrom(self, msg):
+ assert msg is not self
+ for field in msg.ListFields():
+ _MergeFieldOrExtension(self, field[0], field[1])
+ cls.MergeFrom = MergeFrom
+
+
+def _AddMessageMethods(message_descriptor, cls):
+ """Adds implementations of all Message methods to cls."""
+ _AddListFieldsMethod(message_descriptor, cls)
+ _AddHasFieldMethod(cls)
+ _AddClearFieldMethod(cls)
+ _AddClearExtensionMethod(cls)
+ _AddClearMethod(cls)
+ _AddHasExtensionMethod(cls)
+ _AddEqualsMethod(message_descriptor, cls)
+ _AddSetListenerMethod(cls)
+ _AddByteSizeMethod(message_descriptor, cls)
+ _AddSerializeToStringMethod(message_descriptor, cls)
+ _AddSerializePartialToStringMethod(message_descriptor, cls)
+ _AddMergeFromStringMethod(message_descriptor, cls)
+ _AddIsInitializedMethod(cls)
+ _AddMergeFromMethod(cls)
+
+
+def _AddPrivateHelperMethods(cls):
+ """Adds implementation of private helper methods to cls."""
+
+ def MaybeCallTransitionToNonemptyCallback(self):
+ """Calls self._listener.TransitionToNonempty() the first time this
+ method is called. On all subsequent calls, this is a no-op.
+ """
+ if not self._called_transition_to_nonempty:
+ self._listener.TransitionToNonempty()
+ self._called_transition_to_nonempty = True
+ cls._MaybeCallTransitionToNonemptyCallback = (
+ MaybeCallTransitionToNonemptyCallback)
+
+ def MarkByteSizeDirty(self):
+ """Sets the _cached_byte_size_dirty bit to true,
+ and propagates this to our listener iff this was a state change.
+ """
+ if not self._cached_byte_size_dirty:
+ self._cached_byte_size_dirty = True
+ self._listener.ByteSizeDirty()
+ cls._MarkByteSizeDirty = MarkByteSizeDirty
+
+
+class _Listener(object):
+
+ """MessageListener implementation that a parent message registers with its
+ child message.
+
+ In order to support semantics like:
+
+ foo.bar.baz = 23
+ assert foo.HasField('bar')
+
+ ...child objects must have back references to their parents.
+ This helper class is at the heart of this support.
+ """
+
+ def __init__(self, parent_message, has_field_name):
+ """Args:
+ parent_message: The message whose _MaybeCallTransitionToNonemptyCallback()
+ and _MarkByteSizeDirty() methods we should call when we receive
+ TransitionToNonempty() and ByteSizeDirty() messages.
+ has_field_name: The name of the "has" field that we should set in
+ the parent message when we receive a TransitionToNonempty message,
+ or None if there's no "has" field to set. (This will be the case
+ for child objects in "repeated" fields).
+ """
+ # This listener establishes a back reference from a child (contained) object
+ # to its parent (containing) object. We make this a weak reference to avoid
+ # creating cyclic garbage when the client finishes with the 'parent' object
+ # in the tree.
+ if isinstance(parent_message, weakref.ProxyType):
+ self._parent_message_weakref = parent_message
+ else:
+ self._parent_message_weakref = weakref.proxy(parent_message)
+ self._has_field_name = has_field_name
+
+ def TransitionToNonempty(self):
+ try:
+ if self._has_field_name is not None:
+ setattr(self._parent_message_weakref, self._has_field_name, True)
+ # Propagate the signal to our parents iff this is the first field set.
+ self._parent_message_weakref._MaybeCallTransitionToNonemptyCallback()
+ except ReferenceError:
+ # We can get here if a client has kept a reference to a child object,
+ # and is now setting a field on it, but the child's parent has been
+ # garbage-collected. This is not an error.
+ pass
+
+ def ByteSizeDirty(self):
+ try:
+ self._parent_message_weakref._MarkByteSizeDirty()
+ except ReferenceError:
+ # Same as above.
+ pass
+
+
+# TODO(robinson): Move elsewhere?
+# TODO(robinson): Provide a clear() method here in addition to ClearField()?
+class _RepeatedScalarFieldContainer(object):
+
+ """Simple, type-checked, list-like container for holding repeated scalars."""
+
+ # Minimizes memory usage and disallows assignment to other attributes.
+ __slots__ = ['_message_listener', '_type_checker', '_values']
+
+ def __init__(self, message_listener, type_checker):
+ """
+ Args:
+ message_listener: A MessageListener implementation.
+        The _RepeatedScalarFieldContainer will call this object's
+ TransitionToNonempty() method when it transitions from being empty to
+ being nonempty.
+ type_checker: A _ValueChecker instance to run on elements inserted
+ into this container.
+ """
+ self._message_listener = message_listener
+ self._type_checker = type_checker
+ self._values = []
+
+ def append(self, elem):
+ self._type_checker.CheckValue(elem)
+ self._values.append(elem)
+ self._message_listener.ByteSizeDirty()
+ if len(self._values) == 1:
+ self._message_listener.TransitionToNonempty()
+
+ def remove(self, elem):
+ self._values.remove(elem)
+ self._message_listener.ByteSizeDirty()
+
+ # List-like __getitem__() support also makes us iterable (via "iter(foo)"
+ # or implicitly via "for i in mylist:") for free.
+ def __getitem__(self, key):
+ return self._values[key]
+
+ def __setitem__(self, key, value):
+ # No need to call TransitionToNonempty(), since if we're able to
+ # set the element at this index, we were already nonempty before
+ # this method was called.
+ self._message_listener.ByteSizeDirty()
+ self._type_checker.CheckValue(value)
+ self._values[key] = value
+
+ def __len__(self):
+ return len(self._values)
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ # Special case for the same type which should be common and fast.
+ if isinstance(other, self.__class__):
+ return other._values == self._values
+ # We are presumably comparing against some other sequence type.
+ return other == self._values
+
+ def __ne__(self, other):
+ # Can't use != here since it would infinitely recurse.
+ return not self == other
+
+
+# TODO(robinson): Move elsewhere?
+# TODO(robinson): Provide a clear() method here in addition to ClearField()?
+# TODO(robinson): Unify common functionality with
+# _RepeatedScalarFieldContainer?
+class _RepeatedCompositeFieldContainer(object):
+
+ """Simple, list-like container for holding repeated composite fields."""
+
+ # Minimizes memory usage and disallows assignment to other attributes.
+ __slots__ = ['_values', '_message_descriptor', '_message_listener']
+
+ def __init__(self, message_listener, message_descriptor):
+ """Note that we pass in a descriptor instead of the generated directly,
+ since at the time we construct a _RepeatedCompositeFieldContainer we
+ haven't yet necessarily initialized the type that will be contained in the
+ container.
+
+ Args:
+ message_listener: A MessageListener implementation.
+ The _RepeatedCompositeFieldContainer will call this object's
+ TransitionToNonempty() method when it transitions from being empty to
+ being nonempty.
+ message_descriptor: A Descriptor instance describing the protocol type
+ that should be present in this container. We'll use the
+ _concrete_class field of this descriptor when the client calls add().
+ """
+ self._message_listener = message_listener
+ self._message_descriptor = message_descriptor
+ self._values = []
+
+ def add(self):
+ new_element = self._message_descriptor._concrete_class()
+ new_element._SetListener(self._message_listener)
+ self._values.append(new_element)
+ self._message_listener.ByteSizeDirty()
+ self._message_listener.TransitionToNonempty()
+ return new_element
+
+ def __delitem__(self, key):
+ self._message_listener.ByteSizeDirty()
+ del self._values[key]
+
+ # List-like __getitem__() support also makes us iterable (via "iter(foo)"
+ # or implicitly via "for i in mylist:") for free.
+ def __getitem__(self, key):
+ return self._values[key]
+
+ def __len__(self):
+ return len(self._values)
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if not isinstance(other, self.__class__):
+ raise TypeError('Can only compare repeated composite fields against '
+ 'other repeated composite fields.')
+ return self._values == other._values
+
+ def __ne__(self, other):
+ # Can't use != here since it would infinitely recurse.
+ return not self == other
+
+ # TODO(robinson): Implement, document, and test slicing support.
+
+
+# TODO(robinson): Move elsewhere? This file is getting pretty ridiculous...
+# TODO(robinson): Unify error handling of "unknown extension" crap.
+# TODO(robinson): There's so much similarity between the way that
+# extensions behave and the way that normal fields behave that it would
+# be really nice to unify more code. It's not immediately obvious
+# how to do this, though, and I'd rather get the full functionality
+# implemented (and, crucially, get all the tests and specs fleshed out
+# and passing), and then come back to this thorny unification problem.
+# TODO(robinson): Support iteritems()-style iteration over all
+# extensions with the "has" bits turned on?
+class _ExtensionDict(object):
+
+ """Dict-like container for supporting an indexable "Extensions"
+ field on proto instances.
+
+ Note that in all cases we expect extension handles to be
+ FieldDescriptors.
+ """
+
+ class _ExtensionListener(object):
+
+ """Adapts an _ExtensionDict to behave as a MessageListener."""
+
+ def __init__(self, extension_dict, handle_id):
+ self._extension_dict = extension_dict
+ self._handle_id = handle_id
+
+ def TransitionToNonempty(self):
+ self._extension_dict._SubmessageTransitionedToNonempty(self._handle_id)
+
+ def ByteSizeDirty(self):
+ self._extension_dict._SubmessageByteSizeBecameDirty()
+
+ # TODO(robinson): Somewhere, we need to blow up if people
+ # try to register two extensions with the same field number.
+ # (And we need a test for this of course).
+
+ def __init__(self, extended_message, known_extensions):
+ """extended_message: Message instance for which we are the Extensions dict.
+ known_extensions: Iterable of known extension handles.
+ These must be FieldDescriptors.
+ """
+ # We keep a weak reference to extended_message, since
+ # it has a reference to this instance in turn.
+ self._extended_message = weakref.proxy(extended_message)
+ # We make a deep copy of known_extensions to avoid any
+ # thread-safety concerns, since the argument passed in
+ # is the global (class-level) dict of known extensions for
+ # this type of message, which could be modified at any time
+ # via a RegisterExtension() call.
+ #
+ # This dict maps from handle id to handle (a FieldDescriptor).
+ #
+ # XXX
+ # TODO(robinson): This isn't good enough. The client could
+ # instantiate an object in module A, then afterward import
+ # module B and pass the instance to B.Foo(). If B imports
+ # an extender of this proto and then tries to use it, B
+ # will get a KeyError, even though the extension *is* registered
+ # at the time of use.
+ # XXX
+ self._known_extensions = dict((id(e), e) for e in known_extensions)
+ # Read lock around self._values, which may be modified by multiple
+ # concurrent readers in the conceptually "const" __getitem__ method.
+ # So, we grab this lock in every "read-only" method to ensure
+ # that concurrent read access is safe without external locking.
+ self._lock = threading.Lock()
+ # Maps from extension handle ID to current value of that extension.
+ self._values = {}
+ # Maps from extension handle ID to a boolean "has" bit, but only
+ # for non-repeated extension fields.
+ keys = (id for id, extension in self._known_extensions.iteritems()
+ if extension.label != _FieldDescriptor.LABEL_REPEATED)
+ self._has_bits = dict.fromkeys(keys, False)
+
+ def __getitem__(self, extension_handle):
+ """Returns the current value of the given extension handle."""
+ # We don't care as much about keeping critical sections short in the
+ # extension support, since it's presumably much less of a common case.
+ self._lock.acquire()
+ try:
+ handle_id = id(extension_handle)
+ if handle_id not in self._known_extensions:
+ raise KeyError('Extension not known to this class')
+ if handle_id not in self._values:
+ self._AddMissingHandle(extension_handle, handle_id)
+ return self._values[handle_id]
+ finally:
+ self._lock.release()
+
+ def __eq__(self, other):
+ # We have to grab read locks since we're accessing _values
+ # in a "const" method. See the comment in the constructor.
+ if self is other:
+ return True
+ self._lock.acquire()
+ try:
+ other._lock.acquire()
+ try:
+ if self._has_bits != other._has_bits:
+ return False
+ # If there's a "has" bit, then only compare values where it is true.
+ for k, v in self._values.iteritems():
+ if self._has_bits.get(k, False) and v != other._values[k]:
+ return False
+ return True
+ finally:
+ other._lock.release()
+ finally:
+ self._lock.release()
+
+ def __ne__(self, other):
+ return not self == other
+
+ # Note that this is only meaningful for non-repeated, scalar extension
+ # fields. Note also that we may have to call
+ # MaybeCallTransitionToNonemptyCallback() when we do successfully set a field
+# this way, to set any necessary "has" bits in the ancestors of the extended
+ # message.
+ def __setitem__(self, extension_handle, value):
+ """If extension_handle specifies a non-repeated, scalar extension
+ field, sets the value of that field.
+ """
+ handle_id = id(extension_handle)
+ if handle_id not in self._known_extensions:
+ raise KeyError('Extension not known to this class')
+ field = extension_handle # Just shorten the name.
+ if (field.label == _FieldDescriptor.LABEL_OPTIONAL
+ and field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE):
+ # It's slightly wasteful to lookup the type checker each time,
+ # but we expect this to be a vanishingly uncommon case anyway.
+ type_checker = type_checkers.GetTypeChecker(field.cpp_type, field.type)
+ type_checker.CheckValue(value)
+ self._values[handle_id] = value
+ self._has_bits[handle_id] = True
+ self._extended_message._MarkByteSizeDirty()
+ self._extended_message._MaybeCallTransitionToNonemptyCallback()
+ else:
+ raise TypeError('Extension is repeated and/or a composite type.')
+
+ def _AddMissingHandle(self, extension_handle, handle_id):
+ """Helper internal to ExtensionDict."""
+ # Special handling for non-repeated message extensions, which (like
+ # normal fields of this kind) are initialized lazily.
+ # REQUIRES: _lock already held.
+ cpp_type = extension_handle.cpp_type
+ label = extension_handle.label
+ if (cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE
+ and label != _FieldDescriptor.LABEL_REPEATED):
+ self._AddMissingNonRepeatedCompositeHandle(extension_handle, handle_id)
+ else:
+ self._values[handle_id] = _DefaultValueForField(
+ self._extended_message, extension_handle)
+
+ def _AddMissingNonRepeatedCompositeHandle(self, extension_handle, handle_id):
+ """Helper internal to ExtensionDict."""
+ # REQUIRES: _lock already held.
+ value = extension_handle.message_type._concrete_class()
+ value._SetListener(_ExtensionDict._ExtensionListener(self, handle_id))
+ self._values[handle_id] = value
+
+ def _SubmessageTransitionedToNonempty(self, handle_id):
+ """Called when a submessage with a given handle id first transitions to
+ being nonempty. Called by _ExtensionListener.
+ """
+ assert handle_id in self._has_bits
+ self._has_bits[handle_id] = True
+ self._extended_message._MaybeCallTransitionToNonemptyCallback()
+
+ def _SubmessageByteSizeBecameDirty(self):
+ """Called whenever a submessage's cached byte size becomes invalid
+ (goes from being "clean" to being "dirty"). Called by _ExtensionListener.
+ """
+ self._extended_message._MarkByteSizeDirty()
+
+ # We may wish to widen the public interface of Message.Extensions
+ # to expose some of this private functionality in the future.
+ # For now, we make all this functionality module-private and just
+ # implement what we need for serialization/deserialization,
+ # HasField()/ClearField(), etc.
+
+ def _HasExtension(self, extension_handle):
+ """Method for internal use by this module.
+ Returns true iff we "have" this extension in the sense of the
+ "has" bit being set.
+ """
+ handle_id = id(extension_handle)
+ # Note that this is different from the other checks.
+ if handle_id not in self._has_bits:
+ raise KeyError('Extension not known to this class, or is a repeated field.')
+ return self._has_bits[handle_id]
+
+ # Intentionally pretty similar to ClearField() above.
+ def _ClearExtension(self, extension_handle):
+ """Method for internal use by this module.
+ Clears the specified extension, unsetting its "has" bit.
+ """
+ handle_id = id(extension_handle)
+ if handle_id not in self._known_extensions:
+ raise KeyError('Extension not known to this class')
+ default_value = _DefaultValueForField(self._extended_message,
+ extension_handle)
+ if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
+ self._extended_message._MarkByteSizeDirty()
+ else:
+ cpp_type = extension_handle.cpp_type
+ if cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+ if handle_id in self._values:
+ # Future modifications to this object shouldn't set any
+ # "has" bits here.
+ self._values[handle_id]._SetListener(None)
+ if self._has_bits[handle_id]:
+ self._has_bits[handle_id] = False
+ self._extended_message._MarkByteSizeDirty()
+ if handle_id in self._values:
+ del self._values[handle_id]
+
+ def _ListSetExtensions(self):
+ """Method for internal use by this module.
+
+ Returns a sequence of all extensions that are currently "set"
+ in this extension dict. A "set" extension is a repeated extension,
+ or a non-repeated extension with its "has" bit set.
+
+ The returned sequence contains (field_descriptor, value) pairs,
+ where value is the current value of the extension with the given
+ field descriptor.
+
+ The sequence values are in arbitrary order.
+ """
+ self._lock.acquire() # Read-only methods must lock around self._values.
+ try:
+ set_extensions = []
+ for handle_id, value in self._values.iteritems():
+ handle = self._known_extensions[handle_id]
+ if (handle.label == _FieldDescriptor.LABEL_REPEATED
+ or self._has_bits[handle_id]):
+ set_extensions.append((handle, value))
+ return set_extensions
+ finally:
+ self._lock.release()
+
+ def _AllExtensionsByNumber(self):
+ """Method for internal use by this module.
+
+ Returns: A dict mapping field_number to field_descriptor, for *all*
+ registered extensions for this dict.
+ """
+ # TODO(robinson): Precompute and store this away. Note that we'll have to
+ # be careful when we move away from having _known_extensions as a
+ # deep-copied member of this object.
+ return dict((f.number, f) for f in self._known_extensions.itervalues())
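+
+# A minimal illustrative sketch of how this extension machinery is reached
+# from client code, assuming a hypothetical extended message class MyMessage
+# and a hypothetical extension handle my_ext (a FieldDescriptor registered
+# via RegisterExtension()); neither name is defined in this module.
+#
+#   msg = MyMessage()
+#   msg.Extensions[my_ext] = 42           # __setitem__: optional scalars only
+#   print msg.Extensions[my_ext]          # __getitem__ fills in defaults lazily
+#   # Repeated or message-typed extensions are read via __getitem__ and then
+#   # mutated in place; assigning to them raises TypeError (see __setitem__).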
diff --git a/froofle/protobuf/service.py b/froofle/protobuf/service.py
new file mode 100644
index 0000000..3989216
--- /dev/null
+++ b/froofle/protobuf/service.py
@@ -0,0 +1,208 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Declares the RPC service interfaces.
+
+This module declares the abstract interfaces underlying proto2 RPC
+ services. These are intended to be independent of any particular RPC
+implementation, so that proto2 services can be used on top of a variety
+of implementations.
+"""
+
+__author__ = 'petar@google.com (Petar Petrov)'
+
+
+class Service(object):
+
+ """Abstract base interface for protocol-buffer-based RPC services.
+
+ Services themselves are abstract classes (implemented either by servers or as
+ stubs), but they subclass this base interface. The methods of this
+ interface can be used to call the methods of the service without knowing
+ its exact type at compile time (analogous to the Message interface).
+ """
+
+ def GetDescriptor(self):
+ """Retrieves this service's descriptor."""
+ raise NotImplementedError
+
+ def CallMethod(self, method_descriptor, rpc_controller,
+ request, done):
+ """Calls a method of the service specified by method_descriptor.
+
+ Preconditions:
+ * method_descriptor.service == GetDescriptor()
+ * request is of the exact same class as returned by
+ GetRequestClass(method).
+ * After the call has started, the request must not be modified.
+ * "rpc_controller" is of the correct type for the RPC implementation being
+ used by this Service. For stubs, the "correct type" depends on the
+ RpcChannel which the stub is using.
+
+ Postconditions:
+ * "done" will be called when the method is complete. This may be
+ before CallMethod() returns or it may be at some point in the future.
+ """
+ raise NotImplementedError
+
+ def GetRequestClass(self, method_descriptor):
+ """Returns the class of the request message for the specified method.
+
+ CallMethod() requires that the request is of a particular subclass of
+ Message. GetRequestClass() returns that required class.
+
+ Example:
+ method = service.GetDescriptor().FindMethodByName("Foo")
+ request = stub.GetRequestClass(method)()
+ request.ParseFromString(input)
+ service.CallMethod(method, rpc_controller, request, callback)
+ """
+ raise NotImplementedError
+
+ def GetResponseClass(self, method_descriptor):
+ """Returns the class of the response message for the specified method.
+
+ This method isn't really needed, as the RpcChannel's CallMethod constructs
+ the response protocol message. It's provided anyway in case it is useful
+ for the caller to know the response type in advance.
+ """
+ raise NotImplementedError
+
+
+class RpcController(object):
+
+ """An RpcController mediates a single method call.
+
+ The primary purpose of the controller is to provide a way to manipulate
+ settings specific to the RPC implementation and to find out about RPC-level
+ errors. The methods provided by the RpcController interface are intended
+ to be a "least common denominator" set of features which we expect all
+ implementations to support. Specific implementations may provide more
+ advanced features (e.g. deadline propagation).
+ """
+
+ # Client-side methods below
+
+ def Reset(self):
+ """Resets the RpcController to its initial state.
+
+ After the RpcController has been reset, it may be reused in
+ a new call. Must not be called while an RPC is in progress.
+ """
+ raise NotImplementedError
+
+ def Failed(self):
+ """Returns true if the call failed.
+
+ After a call has finished, returns true if the call failed. The possible
+ reasons for failure depend on the RPC implementation. Failed() must not
+ be called before a call has finished. If Failed() returns true, the
+ contents of the response message are undefined.
+ """
+ raise NotImplementedError
+
+ def ErrorText(self):
+ """If Failed is true, returns a human-readable description of the error."""
+ raise NotImplementedError
+
+ def StartCancel(self):
+ """Initiate cancellation.
+
+ Advises the RPC system that the caller desires that the RPC call be
+ canceled. The RPC system may cancel it immediately, may wait awhile and
+ then cancel it, or may not even cancel the call at all. If the call is
+ canceled, the "done" callback will still be called and the RpcController
+ will indicate that the call failed at that time.
+ """
+ raise NotImplementedError
+
+ # Server-side methods below
+
+ def SetFailed(self, reason):
+ """Sets a failure reason.
+
+ Causes Failed() to return true on the client side. "reason" will be
+ incorporated into the message returned by ErrorText(). If you find
+ you need to return machine-readable information about failures, you
+ should incorporate it into your response protocol buffer and should
+ NOT call SetFailed().
+ """
+ raise NotImplementedError
+
+ def IsCanceled(self):
+ """Checks if the client cancelled the RPC.
+
+ If true, indicates that the client canceled the RPC, so the server may
+ as well give up on replying to it. The server should still call the
+ final "done" callback.
+ """
+ raise NotImplementedError
+
+ def NotifyOnCancel(self, callback):
+ """Sets a callback to invoke on cancel.
+
+ Asks that the given callback be called when the RPC is canceled. The
+ callback will always be called exactly once. If the RPC completes without
+ being canceled, the callback will be called after completion. If the RPC
+ has already been canceled when NotifyOnCancel() is called, the callback
+ will be called immediately.
+
+ NotifyOnCancel() must be called no more than once per request.
+ """
+ raise NotImplementedError
+
+
+class RpcChannel(object):
+
+ """Abstract interface for an RPC channel.
+
+ An RpcChannel represents a communication line to a service which can be used
+ to call that service's methods. The service may be running on another
+ machine. Normally, you should not use an RpcChannel directly, but instead
+ construct a stub Service wrapping it. Example:
+
+ channel = rpcImpl.Channel("remotehost.example.com:1234")
+ controller = rpcImpl.Controller()
+ service = MyService_Stub(channel)
+ service.MyMethod(controller, request, callback)
+ """
+
+ def CallMethod(self, method_descriptor, rpc_controller,
+ request, response_class, done):
+ """Calls the method identified by the descriptor.
+
+ Call the given method of the remote service. The signature of this
+ procedure looks the same as Service.CallMethod(), but the requirements
+ are less strict in one important way: the request object doesn't have to
+ be of any specific class as long as its descriptor is method.input_type.
+ """
+ raise NotImplementedError
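+
+# A minimal usage sketch, assuming a hypothetical RPC implementation "myrpc"
+# that supplies concrete RpcChannel/RpcController subclasses, plus a
+# hypothetical generated stub SearchService_Stub with a Search() method and a
+# generated request class SearchRequest; none of these names are provided by
+# this module.
+#
+#   channel = myrpc.Channel('remotehost.example.com:1234')
+#   controller = myrpc.Controller()
+#   stub = SearchService_Stub(channel)
+#   request = SearchRequest()
+#   def done(response):
+#     if controller.Failed():
+#       print controller.ErrorText()
+#   stub.Search(controller, request, done)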
diff --git a/froofle/protobuf/service_reflection.py b/froofle/protobuf/service_reflection.py
new file mode 100644
index 0000000..bdd6bad
--- /dev/null
+++ b/froofle/protobuf/service_reflection.py
@@ -0,0 +1,289 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Contains metaclasses used to create protocol service and service stub
+classes from ServiceDescriptor objects at runtime.
+
+The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to
+inject all useful functionality into the classes output by the protocol
+compiler at compile-time.
+"""
+
+__author__ = 'petar@google.com (Petar Petrov)'
+
+
+class GeneratedServiceType(type):
+
+ """Metaclass for service classes created at runtime from ServiceDescriptors.
+
+ Implementations for all methods described in the Service class are added here
+ by this class.
+
+ The protocol compiler currently uses this metaclass to create protocol service
+ classes at runtime. Clients can also manually create their own classes at
+ runtime, as in this example:
+
+ mydescriptor = ServiceDescriptor(.....)
+ class MyProtoService(service.Service):
+ __metaclass__ = GeneratedServiceType
+ DESCRIPTOR = mydescriptor
+ myservice_instance = MyProtoService()
+ ...
+ """
+
+ _DESCRIPTOR_KEY = 'DESCRIPTOR'
+
+ def __init__(cls, name, bases, dictionary):
+ """Creates a message service class.
+
+ Args:
+ name: Name of the class (ignored, but required by the metaclass
+ protocol).
+ bases: Base classes of the class being constructed.
+ dictionary: The class dictionary of the class being constructed.
+ dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
+ describing this protocol service type.
+ """
+ # Don't do anything if this class doesn't have a descriptor. This happens
+ # when a service class is subclassed.
+ if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary:
+ return
+ descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY]
+ service_builder = _ServiceBuilder(descriptor)
+ service_builder.BuildService(cls)
+
+
+class GeneratedServiceStubType(GeneratedServiceType):
+
+ """Metaclass for service stubs created at runtime from ServiceDescriptors.
+
+ This class has responsibilities similar to GeneratedServiceType, except that
+ it creates the service stub classes.
+ """
+
+ _DESCRIPTOR_KEY = 'DESCRIPTOR'
+
+ def __init__(cls, name, bases, dictionary):
+ """Creates a message service stub class.
+
+ Args:
+ name: Name of the class (ignored here).
+ bases: Base classes of the class being constructed.
+ dictionary: The class dictionary of the class being constructed.
+ dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
+ describing this protocol service type.
+ """
+ super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary)
+ # Don't do anything if this class doesn't have a descriptor. This happens
+ # when a service stub is subclassed.
+ if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary:
+ return
+ descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY]
+ service_stub_builder = _ServiceStubBuilder(descriptor)
+ service_stub_builder.BuildServiceStub(cls)
+
+
+class _ServiceBuilder(object):
+
+ """This class constructs a protocol service class using a service descriptor.
+
+ Given a service descriptor, this class constructs a class that represents
+ the specified service descriptor. One service builder instance constructs
+ exactly one service class. That means all instances of that class share the
+ same builder.
+ """
+
+ def __init__(self, service_descriptor):
+ """Initializes an instance of the service class builder.
+
+ Args:
+ service_descriptor: ServiceDescriptor to use when constructing the
+ service class.
+ """
+ self.descriptor = service_descriptor
+
+ def BuildService(self, cls):
+ """Constructs the service class.
+
+ Args:
+ cls: The class that will be constructed.
+ """
+
+ # CallMethod needs to operate with an instance of the Service class. This
+ # internal wrapper function exists only to be able to pass the service
+ # instance to the method that does the real CallMethod work.
+ def _WrapCallMethod(srvc, method_descriptor,
+ rpc_controller, request, callback):
+ self._CallMethod(srvc, method_descriptor,
+ rpc_controller, request, callback)
+ self.cls = cls
+ cls.CallMethod = _WrapCallMethod
+ cls.GetDescriptor = self._GetDescriptor
+ cls.GetRequestClass = self._GetRequestClass
+ cls.GetResponseClass = self._GetResponseClass
+ for method in self.descriptor.methods:
+ setattr(cls, method.name, self._GenerateNonImplementedMethod(method))
+
+ def _GetDescriptor(self):
+ """Retrieves the service descriptor.
+
+ Returns:
+ The descriptor of the service (of type ServiceDescriptor).
+ """
+ return self.descriptor
+
+ def _CallMethod(self, srvc, method_descriptor,
+ rpc_controller, request, callback):
+ """Calls the method described by a given method descriptor.
+
+ Args:
+ srvc: Instance of the service for which this method is called.
+ method_descriptor: Descriptor that represents the method to call.
+ rpc_controller: RPC controller to use for this method's execution.
+ request: Request protocol message.
+ callback: A callback to invoke after the method has completed.
+ """
+ if method_descriptor.containing_service != self.descriptor:
+ raise RuntimeError(
+ 'CallMethod() given method descriptor for wrong service type.')
+ method = getattr(srvc, method_descriptor.name)
+ method(rpc_controller, request, callback)
+
+ def _GetRequestClass(self, method_descriptor):
+ """Returns the class of the request protocol message.
+
+ Args:
+ method_descriptor: Descriptor of the method for which to return the
+ request protocol message class.
+
+ Returns:
+ A class that represents the input protocol message of the specified
+ method.
+ """
+ if method_descriptor.containing_service != self.descriptor:
+ raise RuntimeError(
+ 'GetRequestClass() given method descriptor for wrong service type.')
+ return method_descriptor.input_type._concrete_class
+
+ def _GetResponseClass(self, method_descriptor):
+ """Returns the class of the response protocol message.
+
+ Args:
+ method_descriptor: Descriptor of the method for which to return the
+ response protocol message class.
+
+ Returns:
+ A class that represents the output protocol message of the specified
+ method.
+ """
+ if method_descriptor.containing_service != self.descriptor:
+ raise RuntimeError(
+ 'GetResponseClass() given method descriptor for wrong service type.')
+ return method_descriptor.output_type._concrete_class
+
+ def _GenerateNonImplementedMethod(self, method):
+ """Generates and returns a method that can be set for a service methods.
+
+ Args:
+ method: Descriptor of the service method for which a method is to be
+ generated.
+
+ Returns:
+ A method that can be added to the service class.
+ """
+ return lambda inst, rpc_controller, request, callback: (
+ self._NonImplementedMethod(method.name, rpc_controller, callback))
+
+ def _NonImplementedMethod(self, method_name, rpc_controller, callback):
+ """The body of all methods in the generated service class.
+
+ Args:
+ method_name: Name of the method being executed.
+ rpc_controller: RPC controller used to execute this method.
+ callback: A callback which will be invoked when the method finishes.
+ """
+ rpc_controller.SetFailed('Method %s not implemented.' % method_name)
+ callback(None)
+
+
+class _ServiceStubBuilder(object):
+
+ """Constructs a protocol service stub class using a service descriptor.
+
+ Given a service descriptor, this class constructs a suitable stub class.
+ A stub is just a type-safe wrapper around an RpcChannel which emulates a
+ local implementation of the service.
+
+ One service stub builder instance constructs exactly one class. That means
+ all instances of that class share the same service stub builder.
+ """
+
+ def __init__(self, service_descriptor):
+ """Initializes an instance of the service stub class builder.
+
+ Args:
+ service_descriptor: ServiceDescriptor to use when constructing the
+ stub class.
+ """
+ self.descriptor = service_descriptor
+
+ def BuildServiceStub(self, cls):
+ """Constructs the stub class.
+
+ Args:
+ cls: The class that will be constructed.
+ """
+
+ def _ServiceStubInit(stub, rpc_channel):
+ stub.rpc_channel = rpc_channel
+ self.cls = cls
+ cls.__init__ = _ServiceStubInit
+ for method in self.descriptor.methods:
+ setattr(cls, method.name, self._GenerateStubMethod(method))
+
+ def _GenerateStubMethod(self, method):
+ return lambda inst, rpc_controller, request, callback: self._StubMethod(
+ inst, method, rpc_controller, request, callback)
+
+ def _StubMethod(self, stub, method_descriptor,
+ rpc_controller, request, callback):
+ """The body of all service methods in the generated stub class.
+
+ Args:
+ stub: Stub instance.
+ method_descriptor: Descriptor of the invoked method.
+ rpc_controller: RPC controller used to execute the method.
+ request: Request protocol message.
+ callback: A callback to execute when the method finishes.
+ """
+ stub.rpc_channel.CallMethod(
+ method_descriptor, rpc_controller, request,
+ method_descriptor.output_type._concrete_class, callback)
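+
+# An illustrative sketch of how these metaclasses are expected to be used by
+# the protocol compiler's output (or by a client doing the same by hand);
+# "my_service_descriptor" below is a hypothetical ServiceDescriptor instance.
+#
+#   from froofle.protobuf import service
+#
+#   class MyService(service.Service):
+#     __metaclass__ = GeneratedServiceType
+#     DESCRIPTOR = my_service_descriptor
+#
+#   class MyService_Stub(MyService):
+#     __metaclass__ = GeneratedServiceStubType
+#     DESCRIPTOR = my_service_descriptor
+#
+# _ServiceBuilder attaches CallMethod/GetRequestClass/GetResponseClass and a
+# SetFailed()-based placeholder for each method to MyService, while
+# _ServiceStubBuilder gives MyService_Stub an __init__ taking an RpcChannel
+# and one stub method per method descriptor that forwards to that channel.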
diff --git a/froofle/protobuf/text_format.py b/froofle/protobuf/text_format.py
new file mode 100644
index 0000000..1c4cadf
--- /dev/null
+++ b/froofle/protobuf/text_format.py
@@ -0,0 +1,125 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# http://code.google.com/p/protobuf/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Contains routines for printing protocol messages in text format."""
+
+__author__ = 'kenton@google.com (Kenton Varda)'
+
+import cStringIO
+
+from froofle.protobuf import descriptor
+
+__all__ = [ 'MessageToString', 'PrintMessage', 'PrintField', 'PrintFieldValue' ]
+
+def MessageToString(message):
+ out = cStringIO.StringIO()
+ PrintMessage(message, out)
+ result = out.getvalue()
+ out.close()
+ return result
+
+def PrintMessage(message, out, indent = 0):
+ for field, value in message.ListFields():
+ if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
+ for element in value:
+ PrintField(field, element, out, indent)
+ else:
+ PrintField(field, value, out, indent)
+
+def PrintField(field, value, out, indent = 0):
+ """Print a single field name/value pair. For repeated fields, the value
+ should be a single element."""
+
+ out.write(' ' * indent)
+ if field.is_extension:
+ out.write('[')
+ if (field.containing_type.GetOptions().message_set_wire_format and
+ field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
+ field.message_type == field.extension_scope and
+ field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
+ out.write(field.message_type.full_name)
+ else:
+ out.write(field.full_name)
+ out.write(']')
+ elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
+ # For groups, use the capitalized name.
+ out.write(field.message_type.name)
+ else:
+ out.write(field.name)
+
+ if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
+ # The colon is optional in this case, but our cross-language golden files
+ # don't include it.
+ out.write(': ')
+
+ PrintFieldValue(field, value, out, indent)
+ out.write('\n')
+
+def PrintFieldValue(field, value, out, indent = 0):
+ """Print a single field value (not including name). For repeated fields,
+ the value should be a single element."""
+
+ if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
+ out.write(' {\n')
+ PrintMessage(value, out, indent + 2)
+ out.write(' ' * indent + '}')
+ elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
+ out.write(field.enum_type.values_by_number[value].name)
+ elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
+ out.write('\"')
+ out.write(_CEscape(value))
+ out.write('\"')
+ elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
+ if value:
+ out.write("true")
+ else:
+ out.write("false")
+ else:
+ out.write(str(value))
+
+# text.encode('string_escape') does not seem to satisfy our needs as it
+# encodes unprintable characters using two-digit hex escapes whereas our
+# C++ unescaping function allows hex escapes to be any length. So,
+# "\0011".encode('string_escape') ends up being "\\x011", which will be
+# decoded in C++ as a single-character string with char code 0x11.
+def _CEscape(text):
+ def escape(c):
+ o = ord(c)
+ if o == 10: return r"\n" # optional escape
+ if o == 13: return r"\r" # optional escape
+ if o == 9: return r"\t" # optional escape
+ if o == 39: return r"\'" # optional escape
+
+ if o == 34: return r'\"' # necessary escape
+ if o == 92: return r"\\" # necessary escape
+
+ if o >= 127 or o < 32: return "\\%03o" % o # necessary escapes
+ return c
+ return "".join([escape(c) for c in text])