VOL-1967 move api-server to separate repository

Change-Id: I21b85be74205805be15f8a85e53a903d16785671
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/LICENSE b/vendor/gopkg.in/jcmturner/rpc.v1/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/claims.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/claims.go
new file mode 100644
index 0000000..2b375da
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/claims.go
@@ -0,0 +1,134 @@
+package mstypes
+
+import (
+	"bytes"
+	"errors"
+
+	"gopkg.in/jcmturner/rpc.v1/ndr"
+)
+
+// Compression format assigned numbers.
+const (
+	CompressionFormatNone       uint16 = 0
+	CompressionFormatLZNT1      uint16 = 2
+	CompressionFormatXPress     uint16 = 3
+	CompressionFormatXPressHuff uint16 = 4
+)
+
+// ClaimsSourceTypeAD https://msdn.microsoft.com/en-us/library/hh553809.aspx
+const ClaimsSourceTypeAD uint16 = 1
+
+// Claim Type assigned numbers
+const (
+	ClaimTypeIDInt64    uint16 = 1
+	ClaimTypeIDUInt64   uint16 = 2
+	ClaimTypeIDString   uint16 = 3
+	ClaimsTypeIDBoolean uint16 = 6
+)
+
+// ClaimsBlob implements https://msdn.microsoft.com/en-us/library/hh554119.aspx
+type ClaimsBlob struct {
+	Size        uint32
+	EncodedBlob EncodedBlob
+}
+
+// EncodedBlob is the raw bytes of the encoded Claims
+type EncodedBlob []byte
+
+// Size returns the size of the bytes of the encoded Claims
+func (b EncodedBlob) Size(c interface{}) int {
+	cb := c.(ClaimsBlob)
+	return int(cb.Size)
+}
+
+// ClaimsSetMetadata implements https://msdn.microsoft.com/en-us/library/hh554073.aspx
+type ClaimsSetMetadata struct {
+	ClaimsSetSize             uint32
+	ClaimsSetBytes            []byte `ndr:"pointer,conformant"`
+	CompressionFormat         uint16 // Enum see constants for options
+	UncompressedClaimsSetSize uint32
+	ReservedType              uint16
+	ReservedFieldSize         uint32
+	ReservedField             []byte `ndr:"pointer,conformant"`
+}
+
+// ClaimsSet reads the ClaimsSet type from the NDR encoded ClaimsSetBytes in the ClaimsSetMetadata
+func (m *ClaimsSetMetadata) ClaimsSet() (c ClaimsSet, err error) {
+	if len(m.ClaimsSetBytes) < 1 {
+		err = errors.New("no bytes available for ClaimsSet")
+		return
+	}
+	// TODO switch statement to decompress ClaimsSetBytes
+	if m.CompressionFormat != CompressionFormatNone {
+		err = errors.New("compressed ClaimsSet not currently supported")
+		return
+	}
+	dec := ndr.NewDecoder(bytes.NewReader(m.ClaimsSetBytes))
+	err = dec.Decode(&c)
+	return
+}
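+
+// A minimal usage sketch (assuming blob is a ClaimsBlob whose EncodedBlob holds
+// an NDR encoded ClaimsSetMetadata):
+//
+//	var m ClaimsSetMetadata
+//	if err := ndr.NewDecoder(bytes.NewReader(blob.EncodedBlob)).Decode(&m); err == nil {
+//		cs, err := m.ClaimsSet()
+//		_, _ = cs, err
+//	}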
+
+// ClaimsSet implements https://msdn.microsoft.com/en-us/library/hh554122.aspx
+type ClaimsSet struct {
+	ClaimsArrayCount  uint32
+	ClaimsArrays      []ClaimsArray `ndr:"pointer,conformant"`
+	ReservedType      uint16
+	ReservedFieldSize uint32
+	ReservedField     []byte `ndr:"pointer,conformant"`
+}
+
+// ClaimsArray implements https://msdn.microsoft.com/en-us/library/hh536458.aspx
+type ClaimsArray struct {
+	ClaimsSourceType uint16
+	ClaimsCount      uint32
+	ClaimEntries     []ClaimEntry `ndr:"pointer,conformant"`
+}
+
+// ClaimEntry is an NDR union that implements https://msdn.microsoft.com/en-us/library/hh536374.aspx
+type ClaimEntry struct {
+	ID         string           `ndr:"pointer,conformant,varying"`
+	Type       uint16           `ndr:"unionTag"`
+	TypeInt64  ClaimTypeInt64   `ndr:"unionField"`
+	TypeUInt64 ClaimTypeUInt64  `ndr:"unionField"`
+	TypeString ClaimTypeString  `ndr:"unionField"`
+	TypeBool   ClaimTypeBoolean `ndr:"unionField"`
+}
+
+// SwitchFunc is the ClaimEntry union field selection function
+func (u ClaimEntry) SwitchFunc(_ interface{}) string {
+	switch u.Type {
+	case ClaimTypeIDInt64:
+		return "TypeInt64"
+	case ClaimTypeIDUInt64:
+		return "TypeUInt64"
+	case ClaimTypeIDString:
+		return "TypeString"
+	case ClaimsTypeIDBoolean:
+		return "TypeBool"
+	}
+	return ""
+}
+
+// ClaimTypeInt64 is a claim of type int64
+type ClaimTypeInt64 struct {
+	ValueCount uint32
+	Value      []int64 `ndr:"pointer,conformant"`
+}
+
+// ClaimTypeUInt64 is a claim of type uint64
+type ClaimTypeUInt64 struct {
+	ValueCount uint32
+	Value      []uint64 `ndr:"pointer,conformant"`
+}
+
+// ClaimTypeString is a claim of type string
+type ClaimTypeString struct {
+	ValueCount uint32
+	Value      []LPWSTR `ndr:"pointer,conformant"`
+}
+
+// ClaimTypeBoolean is a claim of type bool
+type ClaimTypeBoolean struct {
+	ValueCount uint32
+	Value      []bool `ndr:"pointer,conformant"`
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/common.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/common.go
new file mode 100644
index 0000000..62cac28
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/common.go
@@ -0,0 +1,10 @@
+package mstypes
+
+// LPWSTR implements https://msdn.microsoft.com/en-us/library/cc230355.aspx
+type LPWSTR struct {
+	Value string `ndr:"pointer,conformant,varying"`
+}
+
+// String returns the string value of the LPWSTR.
+func (s *LPWSTR) String() string {
+	return s.Value
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/filetime.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/filetime.go
new file mode 100644
index 0000000..5cc952f
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/filetime.go
@@ -0,0 +1,52 @@
+// Package mstypes implements representations of Microsoft types
+package mstypes
+
+import (
+	"time"
+)
+
+/*
+FILETIME is a windows data structure.
+Ref: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724284%28v=vs.85%29.aspx
+It contains two parts that are 32bit integers:
+	dwLowDateTime
+	dwHighDateTime
+We need to combine these two into one 64bit integer.
+This gives the number of 100-nanosecond periods elapsed since January 1, 1601, Coordinated Universal Time (UTC).
+*/
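+
+// As a worked example using the unixEpochDiff constant below: Unix time 0
+// (1970-01-01T00:00:00Z) corresponds to an MS epoch of 116444736000000000, and a
+// Unix timestamp of t seconds corresponds to t*10000000 + 116444736000000000.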
+
+const unixEpochDiff = 116444736000000000
+
+// FileTime implements the Microsoft FILETIME type https://msdn.microsoft.com/en-us/library/cc230324.aspx
+type FileTime struct {
+	LowDateTime  uint32
+	HighDateTime uint32
+}
+
+// Time returns a Go time.Time from the FileTime.
+func (ft FileTime) Time() time.Time {
+	ns := (ft.MSEpoch() - unixEpochDiff) * 100
+	return time.Unix(0, int64(ns)).UTC()
+}
+
+// MSEpoch returns the FileTime as a Microsoft epoch, the number of 100-nanosecond periods elapsed since January 1, 1601 UTC.
+func (ft FileTime) MSEpoch() int64 {
+	return (int64(ft.HighDateTime) << 32) + int64(ft.LowDateTime)
+}
+
+// Unix returns the FileTime as a Unix time, the number of seconds elapsed since January 1, 1970 UTC.
+func (ft FileTime) Unix() int64 {
+	return (ft.MSEpoch() - unixEpochDiff) / 10000000
+}
+
+// GetFileTime returns a FileTime type from the provided Go time.Time.
+func GetFileTime(t time.Time) FileTime {
+	ns := t.UnixNano()
+	fp := (ns / 100) + unixEpochDiff
+	hd := fp >> 32
+	ld := fp - (hd << 32)
+	return FileTime{
+		LowDateTime:  uint32(ld),
+		HighDateTime: uint32(hd),
+	}
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/group_membership.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/group_membership.go
new file mode 100644
index 0000000..7915137
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/group_membership.go
@@ -0,0 +1,19 @@
+package mstypes
+
+// GroupMembership implements https://msdn.microsoft.com/en-us/library/cc237945.aspx
+// RelativeID: A 32-bit unsigned integer that contains the RID of a particular group.
+// The possible values for the Attributes flags are identical to those specified in KERB_SID_AND_ATTRIBUTES
+type GroupMembership struct {
+	RelativeID uint32
+	Attributes uint32
+}
+
+// DomainGroupMembership implements https://msdn.microsoft.com/en-us/library/hh536344.aspx
+// DomainId: A SID structure that contains the SID for the domain. This member is used in conjunction with the GroupIds members to create group SIDs for the device.
+// GroupCount: A 32-bit unsigned integer that contains the number of groups within the domain to which the account belongs.
+// GroupIds: A pointer to a list of GROUP_MEMBERSHIP structures that contain the groups to which the account belongs in the domain. The number of groups in this list MUST be equal to GroupCount.
+type DomainGroupMembership struct {
+	DomainID   RPCSID `ndr:"pointer"`
+	GroupCount uint32
+	GroupIDs   []GroupMembership `ndr:"pointer,conformant"` // Size is value of GroupCount
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/kerb_sid_and_attributes.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/kerb_sid_and_attributes.go
new file mode 100644
index 0000000..61ac39b
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/kerb_sid_and_attributes.go
@@ -0,0 +1,23 @@
+package mstypes
+
+// Attributes of a security group membership; they can be combined by using the bitwise OR operation.
+// They are used by an access check mechanism to specify whether the membership is to be used in an access check decision.
+const (
+	SEGroupMandatory        = 31
+	SEGroupEnabledByDefault = 30
+	SEGroupEnabled          = 29
+	SEGroupOwner            = 28
+	SEGroupResource         = 2
+	// All other bits MUST be set to zero and MUST be ignored on receipt.
+)
+
+// KerbSidAndAttributes implements https://msdn.microsoft.com/en-us/library/cc237947.aspx
+type KerbSidAndAttributes struct {
+	SID        RPCSID `ndr:"pointer"` // A pointer to an RPC_SID structure.
+	Attributes uint32
+}
+
+// SetFlag sets a flag in a uint32 attribute value.
+func SetFlag(a *uint32, i uint) {
+	*a = *a | (1 << (31 - i))
+}
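+
+// For example, SetFlag(&a, SEGroupMandatory) ORs in 1 << (31 - 31) = 0x00000001,
+// and SetFlag(&a, SEGroupResource) ORs in 1 << (31 - 2) = 0x20000000; the
+// constants above number bits from the most significant (0) down to the least
+// significant (31) of the 32-bit Attributes value.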
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/reader.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/reader.go
new file mode 100644
index 0000000..24495bc
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/reader.go
@@ -0,0 +1,109 @@
+package mstypes
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+// Byte sizes of primitive types
+const (
+	SizeBool   = 1
+	SizeChar   = 1
+	SizeUint8  = 1
+	SizeUint16 = 2
+	SizeUint32 = 4
+	SizeUint64 = 8
+	SizeEnum   = 2
+	SizeSingle = 4
+	SizeDouble = 8
+	SizePtr    = 4
+)
+
+// Reader reads simple byte stream data into Go representations.
+type Reader struct {
+	r *bufio.Reader // source of the data
+}
+
+// NewReader creates a new instance of a simple Reader.
+func NewReader(r io.Reader) *Reader {
+	reader := new(Reader)
+	reader.r = bufio.NewReader(r)
+	return reader
+}
+
+// Read implements io.Reader, reading directly from the underlying buffered reader.
+func (r *Reader) Read(p []byte) (n int, err error) {
+	return r.r.Read(p)
+}
+
+// Uint8 reads one byte and returns it as a uint8.
+func (r *Reader) Uint8() (uint8, error) {
+	b, err := r.r.ReadByte()
+	if err != nil {
+		return uint8(0), err
+	}
+	return uint8(b), nil
+}
+
+// Uint16 reads two bytes and returns them as a little endian uint16.
+func (r *Reader) Uint16() (uint16, error) {
+	b, err := r.ReadBytes(SizeUint16)
+	if err != nil {
+		return uint16(0), err
+	}
+	return binary.LittleEndian.Uint16(b), nil
+}
+
+// Uint32 reads four bytes and returns them as a little endian uint32.
+func (r *Reader) Uint32() (uint32, error) {
+	b, err := r.ReadBytes(SizeUint32)
+	if err != nil {
+		return uint32(0), err
+	}
+	return binary.LittleEndian.Uint32(b), nil
+}
+
+// Uint64 reads eight bytes and returns them as a little endian uint64.
+func (r *Reader) Uint64() (uint64, error) {
+	b, err := r.ReadBytes(SizeUint64)
+	if err != nil {
+		return uint64(0), err
+	}
+	return binary.LittleEndian.Uint64(b), nil
+}
+
+// FileTime reads a FileTime from the stream.
+func (r *Reader) FileTime() (f FileTime, err error) {
+	f.LowDateTime, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	f.HighDateTime, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	return
+}
+
+// UTF16String returns a string that is UTF16 encoded in a byte slice. n is the number of bytes representing the string
+func (r *Reader) UTF16String(n int) (str string, err error) {
+	// Length divided by 2 as each rune is 16 bits = 2 bytes
+	s := make([]rune, n/2, n/2)
+	for i := 0; i < len(s); i++ {
+		var u uint16
+		u, err = r.Uint16()
+		if err != nil {
+			return
+		}
+		s[i] = rune(u)
+	}
+	str = string(s)
+	return
+}
+
+// ReadBytes returns n bytes from the stream.
+func (r *Reader) ReadBytes(n int) ([]byte, error) {
+	//TODO make this take an int64 as input to allow for larger values on all systems?
+	b := make([]byte, n, n)
+	m, err := r.r.Read(b)
+	if err != nil || m != n {
+		return b, fmt.Errorf("error reading bytes from stream: %v", err)
+	}
+	return b, nil
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/rpc_unicode_string.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/rpc_unicode_string.go
new file mode 100644
index 0000000..4bf02e0
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/rpc_unicode_string.go
@@ -0,0 +1,13 @@
+package mstypes
+
+// RPCUnicodeString implements https://msdn.microsoft.com/en-us/library/cc230365.aspx
+type RPCUnicodeString struct {
+	Length        uint16 // The length, in bytes, of the string pointed to by the Buffer member, not including the terminating null character if any. The length MUST be a multiple of 2. The length SHOULD equal the entire size of the Buffer, in which case there is no terminating null character. Any method that accesses this structure MUST use the Length specified instead of relying on the presence or absence of a null character.
+	MaximumLength uint16 // The maximum size, in bytes, of the string pointed to by Buffer. The size MUST be a multiple of 2. If not, the size MUST be decremented by 1 prior to use. This value MUST not be less than Length.
+	Value         string `ndr:"pointer,conformant,varying"`
+}
+
+// String returns the RPCUnicodeString string value
+func (r *RPCUnicodeString) String() string {
+	return r.Value
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/sid.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/sid.go
new file mode 100644
index 0000000..98a9c5a
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/sid.go
@@ -0,0 +1,32 @@
+package mstypes
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+)
+
+// RPCSID implements https://msdn.microsoft.com/en-us/library/cc230364.aspx
+type RPCSID struct {
+	Revision            uint8    // An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01.
+	SubAuthorityCount   uint8    // An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15.
+	IdentifierAuthority [6]byte  // An RPC_SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority.
+	SubAuthority        []uint32 `ndr:"conformant"` // A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount.
+}
+
+// String returns the string representation of the RPC_SID.
+func (s *RPCSID) String() string {
+	var str string
+	b := append(make([]byte, 2, 2), s.IdentifierAuthority[:]...)
+	// For a strange reason this is read big endian: https://msdn.microsoft.com/en-us/library/dd302645.aspx
+	i := binary.BigEndian.Uint64(b)
+	if i >= 4294967296 {
+		str = fmt.Sprintf("S-1-0x%s", hex.EncodeToString(s.IdentifierAuthority[:]))
+	} else {
+		str = fmt.Sprintf("S-1-%d", i)
+	}
+	for _, sub := range s.SubAuthority {
+		str = fmt.Sprintf("%s-%d", str, sub)
+	}
+	return str
+}
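+
+// For example (illustrative values): an RPCSID with IdentifierAuthority
+// {0,0,0,0,0,5} and SubAuthority [21, 1004336348, 1177238915, 682003330, 500]
+// renders as "S-1-5-21-1004336348-1177238915-682003330-500".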
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go
new file mode 100644
index 0000000..fcf0a5d
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go
@@ -0,0 +1,11 @@
+package mstypes
+
+// CypherBlock implements https://msdn.microsoft.com/en-us/library/cc237040.aspx
+type CypherBlock struct {
+	Data [8]byte // size = 8
+}
+
+// UserSessionKey implements https://msdn.microsoft.com/en-us/library/cc237080.aspx
+type UserSessionKey struct {
+	CypherBlock [2]CypherBlock // size = 2
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go
new file mode 100644
index 0000000..5e2def2
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go
@@ -0,0 +1,413 @@
+package ndr
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// intFromTag returns an int that is a value in a struct tag key/value pair
+func intFromTag(tag reflect.StructTag, key string) (int, error) {
+	ndrTag := parseTags(tag)
+	d := 1
+	if n, ok := ndrTag.Map[key]; ok {
+		i, err := strconv.Atoi(n)
+		if err != nil {
+			return d, fmt.Errorf("invalid dimensions tag [%s]: %v", n, err)
+		}
+		d = i
+	}
+	return d, nil
+}
+
+// parseDimensions returns a slice of the sizes of each dimension and the type of the element at the deepest level.
+func parseDimensions(v reflect.Value) (l []int, tb reflect.Type) {
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+	t := v.Type()
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	if t.Kind() != reflect.Array && t.Kind() != reflect.Slice {
+		return
+	}
+	l = append(l, v.Len())
+	if t.Elem().Kind() == reflect.Array || t.Elem().Kind() == reflect.Slice {
+		// contains array or slice
+		var m []int
+		m, tb = parseDimensions(v.Index(0))
+		l = append(l, m...)
+	} else {
+		tb = t.Elem()
+	}
+	return
+}
+
+// sliceDimensions returns the count of dimensions a slice has.
+func sliceDimensions(t reflect.Type) (d int, tb reflect.Type) {
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Slice {
+		d++
+		var n int
+		n, tb = sliceDimensions(t.Elem())
+		d += n
+	} else {
+		tb = t
+	}
+	return
+}
+
+// makeSubSlices is a deep recursive creation/initialisation of multi-dimensional slices.
+// Takes the reflect.Value of the 1st dimension and a slice of the lengths of the sub dimensions
+func makeSubSlices(v reflect.Value, l []int) {
+	ty := v.Type().Elem()
+	if ty.Kind() != reflect.Slice {
+		return
+	}
+	for i := 0; i < v.Len(); i++ {
+		s := reflect.MakeSlice(ty, l[0], l[0])
+		v.Index(i).Set(s)
+		// Are there more sub dimensions?
+		if len(l) > 1 {
+			makeSubSlices(v.Index(i), l[1:])
+		}
+	}
+	return
+}
+
+// multiDimensionalIndexPermutations returns all the permutations of the indexes of a multi-dimensional slice.
+// The input is a slice of integers that indicates the max size/length of each dimension
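+// For example, l = []int{2, 2} produces the permutations [0 0], [0 1], [1 0] and [1 1].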
+func multiDimensionalIndexPermutations(l []int) (ps [][]int) {
+	z := make([]int, len(l), len(l)) // The zeros permutation
+	ps = append(ps, z)
+	// for each dimension, in reverse
+	for i := len(l) - 1; i >= 0; i-- {
+		ws := make([][]int, len(ps))
+		copy(ws, ps)
+		//create a permutation for each of the iterations of the current dimension
+		for j := 1; j <= l[i]-1; j++ {
+			// For each existing permutation
+			for _, p := range ws {
+				np := make([]int, len(p), len(p))
+				copy(np, p)
+				np[i] = j
+				ps = append(ps, np)
+			}
+		}
+	}
+	return
+}
+
+// precedingMax reads off the next conformant max value
+func (dec *Decoder) precedingMax() uint32 {
+	m := dec.conformantMax[0]
+	dec.conformantMax = dec.conformantMax[1:]
+	return m
+}
+
+// fillFixedArray establishes if the fixed array is uni or multi dimensional and then fills it.
+func (dec *Decoder) fillFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	l, t := parseDimensions(v)
+	if t.Kind() == reflect.String {
+		tag = reflect.StructTag(subStringArrayTag)
+	}
+	if len(l) < 1 {
+		return errors.New("could not establish dimensions of fixed array")
+	}
+	if len(l) == 1 {
+		err := dec.fillUniDimensionalFixedArray(v, tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill uni-dimensional fixed array: %v", err)
+		}
+		return nil
+	}
+	// Fixed array is multidimensional
+	ps := multiDimensionalIndexPermutations(l[:len(l)-1])
+	for _, p := range ps {
+		// Get current multi-dimensional index to fill
+		a := v
+		for _, i := range p {
+			a = a.Index(i)
+		}
+		// fill with the last dimension array
+		err := dec.fillUniDimensionalFixedArray(a, tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill dimension %v of multi-dimensional fixed array: %v", p, err)
+		}
+	}
+	return nil
+}
+
+// fillUniDimensionalFixedArray fills an array (not a slice) from the byte stream.
+func (dec *Decoder) fillUniDimensionalFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	for i := 0; i < v.Len(); i++ {
+		err := dec.fill(v.Index(i), tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %d of fixed array: %v", i, err)
+		}
+	}
+	return nil
+}
+
+// fillConformantArray establishes if the conformant array is uni or multi dimensional and then fills the slice.
+func (dec *Decoder) fillConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	d, _ := sliceDimensions(v.Type())
+	if d > 1 {
+		err := dec.fillMultiDimensionalConformantArray(v, d, tag, def)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := dec.fillUniDimensionalConformantArray(v, tag, def)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// fillUniDimensionalConformantArray fills the uni-dimensional slice value.
+func (dec *Decoder) fillUniDimensionalConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	m := dec.precedingMax()
+	n := int(m)
+	a := reflect.MakeSlice(v.Type(), n, n)
+	for i := 0; i < n; i++ {
+		err := dec.fill(a.Index(i), tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %d of uni-dimensional conformant array: %v", i, err)
+		}
+	}
+	v.Set(a)
+	return nil
+}
+
+// fillMultiDimensionalConformantArray fills the multi-dimensional slice value provided from conformant array data.
+// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this
+// method not to panic.
+func (dec *Decoder) fillMultiDimensionalConformantArray(v reflect.Value, d int, tag reflect.StructTag, def *[]deferedPtr) error {
+	// Read the max size of each dimensions from the ndr stream
+	l := make([]int, d, d)
+	for i := range l {
+		l[i] = int(dec.precedingMax())
+	}
+	// Initialise size of slices
+	//   Initialise the size of the 1st dimension
+	ty := v.Type()
+	v.Set(reflect.MakeSlice(ty, l[0], l[0]))
+	// Initialise the size of the other dimensions recursively
+	makeSubSlices(v, l[1:])
+
+	// Get all permutations of the indexes and go through each and fill
+	ps := multiDimensionalIndexPermutations(l)
+	for _, p := range ps {
+		// Get current multi-dimensional index to fill
+		a := v
+		for _, i := range p {
+			a = a.Index(i)
+		}
+		err := dec.fill(a, tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %v of slice: %v", p, err)
+		}
+	}
+	return nil
+}
+
+// fillVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice.
+func (dec *Decoder) fillVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	d, t := sliceDimensions(v.Type())
+	if d > 1 {
+		err := dec.fillMultiDimensionalVaryingArray(v, t, d, tag, def)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := dec.fillUniDimensionalVaryingArray(v, tag, def)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// fillUniDimensionalVaryingArray fills the uni-dimensional slice value.
+func (dec *Decoder) fillUniDimensionalVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	o, err := dec.readUint32()
+	if err != nil {
+		return fmt.Errorf("could not read offset of uni-dimensional varying array: %v", err)
+	}
+	s, err := dec.readUint32()
+	if err != nil {
+		return fmt.Errorf("could not establish actual count of uni-dimensional varying array: %v", err)
+	}
+	t := v.Type()
+	// Total size of the array is the offset in the index being passed plus the actual count of elements being passed.
+	n := int(s + o)
+	a := reflect.MakeSlice(t, n, n)
+	// Populate the array starting at the offset specified
+	for i := int(o); i < n; i++ {
+		err := dec.fill(a.Index(i), tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %d of uni-dimensional varying array: %v", i, err)
+		}
+	}
+	v.Set(a)
+	return nil
+}
+
+// fillMultiDimensionalVaryingArray fills the multi-dimensional slice value provided from varying array data.
+// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this
+// method not to panic.
+func (dec *Decoder) fillMultiDimensionalVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error {
+	// Read the offset and actual count of each dimensions from the ndr stream
+	o := make([]int, d, d)
+	l := make([]int, d, d)
+	for i := range l {
+		off, err := dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err)
+		}
+		o[i] = int(off)
+		s, err := dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not read size of dimension %d: %v", i+1, err)
+		}
+		l[i] = int(s) + int(off)
+	}
+	// Initialise size of slices
+	//   Initialise the size of the 1st dimension
+	ty := v.Type()
+	v.Set(reflect.MakeSlice(ty, l[0], l[0]))
+	// Initialise the size of the other dimensions recursively
+	makeSubSlices(v, l[1:])
+
+	// Get all permutations of the indexes and go through each and fill
+	ps := multiDimensionalIndexPermutations(l)
+	for _, p := range ps {
+		// Get current multi-dimensional index to fill
+		a := v
+		var os bool // should this permutation be skipped due to the offset of any of the dimensions?
+		for i, j := range p {
+			if j < o[i] {
+				os = true
+				break
+			}
+			a = a.Index(j)
+		}
+		if os {
+			// This permutation should be skipped as it is less than the offset for one of the dimensions.
+			continue
+		}
+		err := dec.fill(a, tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %v of slice: %v", p, err)
+		}
+	}
+	return nil
+}
+
+// fillConformantVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice.
+func (dec *Decoder) fillConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	d, t := sliceDimensions(v.Type())
+	if d > 1 {
+		err := dec.fillMultiDimensionalConformantVaryingArray(v, t, d, tag, def)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := dec.fillUniDimensionalConformantVaryingArray(v, tag, def)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// fillUniDimensionalConformantVaryingArray fills the uni-dimensional slice value.
+func (dec *Decoder) fillUniDimensionalConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	m := dec.precedingMax()
+	o, err := dec.readUint32()
+	if err != nil {
+		return fmt.Errorf("could not read offset of uni-dimensional conformant varying array: %v", err)
+	}
+	s, err := dec.readUint32()
+	if err != nil {
+		return fmt.Errorf("could not establish actual count of uni-dimensional conformant varying array: %v", err)
+	}
+	if m < o+s {
+		return errors.New("max count is less than the offset plus actual count")
+	}
+	t := v.Type()
+	n := int(s)
+	a := reflect.MakeSlice(t, n, n)
+	for i := int(o); i < n; i++ {
+		err := dec.fill(a.Index(i), tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %d of uni-dimensional conformant varying array: %v", i, err)
+		}
+	}
+	v.Set(a)
+	return nil
+}
+
+// fillMultiDimensionalConformantVaryingArray fills the multi-dimensional slice value provided from conformant varying array data.
+// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this
+// method not to panic.
+func (dec *Decoder) fillMultiDimensionalConformantVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error {
+	// Read the offset and actual count of each dimensions from the ndr stream
+	m := make([]int, d, d)
+	for i := range m {
+		m[i] = int(dec.precedingMax())
+	}
+	o := make([]int, d, d)
+	l := make([]int, d, d)
+	for i := range l {
+		off, err := dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err)
+		}
+		o[i] = int(off)
+		s, err := dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not read actual count of dimension %d: %v", i+1, err)
+		}
+		if m[i] < int(s)+int(off) {
+			m[i] = int(s) + int(off)
+		}
+		l[i] = int(s)
+	}
+	// Initialise size of slices
+	//   Initialise the size of the 1st dimension
+	ty := v.Type()
+	v.Set(reflect.MakeSlice(ty, m[0], m[0]))
+	// Initialise the size of the other dimensions recursively
+	makeSubSlices(v, m[1:])
+
+	// Get all permutations of the indexes and go through each and fill
+	ps := multiDimensionalIndexPermutations(m)
+	for _, p := range ps {
+		// Get current multi-dimensional index to fill
+		a := v
+		var os bool // should this permutation be skipped because an index is below the offset or beyond the actual count for its dimension?
+		for i, j := range p {
+			if j < o[i] || j >= l[i] {
+				os = true
+				break
+			}
+			a = a.Index(j)
+		}
+		if os {
+			// Skip this permutation: an index is outside the transmitted range for one of the dimensions.
+			continue
+		}
+		err := dec.fill(a, tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %v of slice: %v", p, err)
+		}
+	}
+	return nil
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go
new file mode 100644
index 0000000..6157b4e
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go
@@ -0,0 +1,393 @@
+// Package ndr provides the ability to unmarshal NDR encoded byte streams into Go data structures
+package ndr
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+)
+
+// Struct tag values
+const (
+	TagConformant = "conformant"
+	TagVarying    = "varying"
+	TagPointer    = "pointer"
+	TagPipe       = "pipe"
+)
+
+// Decoder unmarshals NDR byte stream data into a Go struct representation
+type Decoder struct {
+	r             *bufio.Reader // source of the data
+	size          int           // initial size of bytes in buffer
+	ch            CommonHeader  // NDR common header
+	ph            PrivateHeader // NDR private header
+	conformantMax []uint32      // conformant max values that were moved to the beginning of the structure
+	s             interface{}   // pointer to the structure being populated
+	current       []string      // keeps track of the current field being populated
+}
+
+type deferedPtr struct {
+	v   reflect.Value
+	tag reflect.StructTag
+}
+
+// NewDecoder creates a new instance of an NDR Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	dec := new(Decoder)
+	dec.r = bufio.NewReader(r)
+	dec.r.Peek(int(commonHeaderBytes)) // For some reason an operation is needed on the buffer to initialise it so Buffered() != 0
+	dec.size = dec.r.Buffered()
+	return dec
+}
+
+// Decode unmarshals the NDR encoded bytes into the pointer of a struct provided.
+func (dec *Decoder) Decode(s interface{}) error {
+	dec.s = s
+	err := dec.readCommonHeader()
+	if err != nil {
+		return err
+	}
+	err = dec.readPrivateHeader()
+	if err != nil {
+		return err
+	}
+	_, err = dec.r.Discard(4) //The next 4 bytes are an RPC unique pointer referent. We just skip these.
+	if err != nil {
+		return Errorf("unable to process byte stream: %v", err)
+	}
+
+	return dec.process(s, reflect.StructTag(""))
+}
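+
+// A minimal usage sketch (b holds the NDR encoded bytes and target is a pointer
+// to a struct whose ndr tags drive the decoding, as in the mstypes package):
+//
+//	dec := NewDecoder(bytes.NewReader(b))
+//	err := dec.Decode(target)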
+
+func (dec *Decoder) process(s interface{}, tag reflect.StructTag) error {
+	// Scan for conformant fields as their max counts are moved to the beginning
+	// http://pubs.opengroup.org/onlinepubs/9629399/chap14.htm#tagfcjh_37
+	err := dec.scanConformantArrays(s, tag)
+	if err != nil {
+		return err
+	}
+	// Recursively fill the struct fields
+	var localDef []deferedPtr
+	err = dec.fill(s, tag, &localDef)
+	if err != nil {
+		return Errorf("could not decode: %v", err)
+	}
+	// Read any deferred referents associated with pointers
+	for _, p := range localDef {
+		err = dec.process(p.v, p.tag)
+		if err != nil {
+			return fmt.Errorf("could not decode deferred referent: %v", err)
+		}
+	}
+	return nil
+}
+
+// scanConformantArrays scans the structure for embedded conformant fields and captures the maximum element counts for
+// dimensions of the array that are moved to the beginning of the structure.
+func (dec *Decoder) scanConformantArrays(s interface{}, tag reflect.StructTag) error {
+	err := dec.conformantScan(s, tag)
+	if err != nil {
+		return fmt.Errorf("failed to scan for embedded conformant arrays: %v", err)
+	}
+	for i := range dec.conformantMax {
+		dec.conformantMax[i], err = dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not read preceding conformant max count index %d: %v", i, err)
+		}
+	}
+	return nil
+}
+
+// conformantScan inspects the structure's fields for whether they are conformant.
+func (dec *Decoder) conformantScan(s interface{}, tag reflect.StructTag) error {
+	ndrTag := parseTags(tag)
+	if ndrTag.HasValue(TagPointer) {
+		return nil
+	}
+	v := getReflectValue(s)
+	switch v.Kind() {
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			err := dec.conformantScan(v.Field(i), v.Type().Field(i).Tag)
+			if err != nil {
+				return err
+			}
+		}
+	case reflect.String:
+		if !ndrTag.HasValue(TagConformant) {
+			break
+		}
+		dec.conformantMax = append(dec.conformantMax, uint32(0))
+	case reflect.Slice:
+		if !ndrTag.HasValue(TagConformant) {
+			break
+		}
+		d, t := sliceDimensions(v.Type())
+		for i := 0; i < d; i++ {
+			dec.conformantMax = append(dec.conformantMax, uint32(0))
+		}
+		// For string arrays there is a common max for the strings within the array.
+		if t.Kind() == reflect.String {
+			dec.conformantMax = append(dec.conformantMax, uint32(0))
+		}
+	}
+	return nil
+}
+
+func (dec *Decoder) isPointer(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) (bool, error) {
+	// Pointer so defer filling the referent
+	ndrTag := parseTags(tag)
+	if ndrTag.HasValue(TagPointer) {
+		p, err := dec.readUint32()
+		if err != nil {
+			return true, fmt.Errorf("could not read pointer: %v", err)
+		}
+		ndrTag.delete(TagPointer)
+		if p != 0 {
+			// if pointer is not zero add to the deferred items at end of stream
+			*def = append(*def, deferedPtr{v, ndrTag.StructTag()})
+		}
+		return true, nil
+	}
+	return false, nil
+}
+
+func getReflectValue(s interface{}) (v reflect.Value) {
+	if r, ok := s.(reflect.Value); ok {
+		v = r
+	} else {
+		if reflect.ValueOf(s).Kind() == reflect.Ptr {
+			v = reflect.ValueOf(s).Elem()
+		}
+	}
+	return
+}
+
+// fill populates fields with values from the NDR byte stream.
+func (dec *Decoder) fill(s interface{}, tag reflect.StructTag, localDef *[]deferedPtr) error {
+	v := getReflectValue(s)
+
+	// Pointer so defer filling the referent
+	ptr, err := dec.isPointer(v, tag, localDef)
+	if err != nil {
+		return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err)
+	}
+	if ptr {
+		return nil
+	}
+
+	// Populate the value from the byte stream
+	switch v.Kind() {
+	case reflect.Struct:
+		dec.current = append(dec.current, v.Type().Name()) //Track the current field being filled
+		// in case struct is a union, track this and the selected union field for efficiency
+		var unionTag reflect.Value
+		var unionField string // field to fill if struct is a union
+		// Go through each field in the struct and recursively fill
+		for i := 0; i < v.NumField(); i++ {
+			fieldName := v.Type().Field(i).Name
+			dec.current = append(dec.current, fieldName) //Track the current field being filled
+			//fmt.Fprintf(os.Stderr, "DEBUG Decoding: %s\n", strings.Join(dec.current, "/"))
+			structTag := v.Type().Field(i).Tag
+			ndrTag := parseTags(structTag)
+
+			// Union handling
+			if !unionTag.IsValid() {
+				// Is this field a union tag?
+				unionTag = dec.isUnion(v.Field(i), structTag)
+			} else {
+				// Determine the selected field of the union if we don't already know it
+				if unionField == "" {
+					unionField, err = unionSelectedField(v, unionTag)
+					if err != nil {
+						return fmt.Errorf("could not determine selected union value field for %s with discriminant"+
+							" tag %s: %v", v.Type().Name(), unionTag, err)
+					}
+				}
+				if ndrTag.HasValue(TagUnionField) && fieldName != unionField {
+					// this is a union and the field has not been selected, so skip it.
+					dec.current = dec.current[:len(dec.current)-1] //This field has been skipped so remove it from the current field tracker
+					continue
+				}
+			}
+
+			// Check if the field is a RawBytes ([]uint8) field
+			if v.Field(i).Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) &&
+				v.Field(i).Type().Kind() == reflect.Slice && v.Field(i).Type().Elem().Kind() == reflect.Uint8 {
+				//field is for rawbytes
+				structTag, err = addSizeToTag(v, v.Field(i), structTag)
+				if err != nil {
+					return fmt.Errorf("could not get rawbytes field(%s) size: %v", strings.Join(dec.current, "/"), err)
+				}
+				ptr, err := dec.isPointer(v.Field(i), structTag, localDef)
+				if err != nil {
+					return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err)
+				}
+				if !ptr {
+					err := dec.readRawBytes(v.Field(i), structTag)
+					if err != nil {
+						return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err)
+					}
+				}
+			} else {
+				err := dec.fill(v.Field(i), structTag, localDef)
+				if err != nil {
+					return fmt.Errorf("could not fill struct field(%s): %v", strings.Join(dec.current, "/"), err)
+				}
+			}
+			dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker
+		}
+		dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker
+	case reflect.Bool:
+		i, err := dec.readBool()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Uint8:
+		i, err := dec.readUint8()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Uint16:
+		i, err := dec.readUint16()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Uint32:
+		i, err := dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Uint64:
+		i, err := dec.readUint64()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Int8:
+		i, err := dec.readInt8()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Int16:
+		i, err := dec.readInt16()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Int32:
+		i, err := dec.readInt32()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Int64:
+		i, err := dec.readInt64()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.String:
+		ndrTag := parseTags(tag)
+		conformant := ndrTag.HasValue(TagConformant)
+		// strings are always varying so this is assumed without an explicit tag
+		var s string
+		var err error
+		if conformant {
+			s, err = dec.readConformantVaryingString(localDef)
+			if err != nil {
+				return fmt.Errorf("could not fill with conformant varying string: %v", err)
+			}
+		} else {
+			s, err = dec.readVaryingString(localDef)
+			if err != nil {
+				return fmt.Errorf("could not fill with varying string: %v", err)
+			}
+		}
+		v.Set(reflect.ValueOf(s))
+	case reflect.Float32:
+		i, err := dec.readFloat32()
+		if err != nil {
+			return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Float64:
+		i, err := dec.readFloat64()
+		if err != nil {
+			return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Array:
+		err := dec.fillFixedArray(v, tag, localDef)
+		if err != nil {
+			return err
+		}
+	case reflect.Slice:
+		if v.Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) && v.Type().Elem().Kind() == reflect.Uint8 {
+			//field is for rawbytes
+			err := dec.readRawBytes(v, tag)
+			if err != nil {
+				return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err)
+			}
+			break
+		}
+		ndrTag := parseTags(tag)
+		conformant := ndrTag.HasValue(TagConformant)
+		varying := ndrTag.HasValue(TagVarying)
+		if ndrTag.HasValue(TagPipe) {
+			err := dec.fillPipe(v, tag)
+			if err != nil {
+				return err
+			}
+			break
+		}
+		_, t := sliceDimensions(v.Type())
+		if t.Kind() == reflect.String && !ndrTag.HasValue(subStringArrayValue) {
+			// String array
+			err := dec.readStringsArray(v, tag, localDef)
+			if err != nil {
+				return err
+			}
+			break
+		}
+		// varying is assumed as fixed arrays use the Go array type rather than slice
+		if conformant && varying {
+			err := dec.fillConformantVaryingArray(v, tag, localDef)
+			if err != nil {
+				return err
+			}
+		} else if !conformant && varying {
+			err := dec.fillVaryingArray(v, tag, localDef)
+			if err != nil {
+				return err
+			}
+		} else {
+			//default to conformant and not varying
+			err := dec.fillConformantArray(v, tag, localDef)
+			if err != nil {
+				return err
+			}
+		}
+	default:
+		return fmt.Errorf("unsupported type")
+	}
+	return nil
+}
+
+// readBytes returns a number of bytes from the NDR byte stream.
+func (dec *Decoder) readBytes(n int) ([]byte, error) {
+	//TODO make this take an int64 as input to allow for larger values on all systems?
+	b := make([]byte, n, n)
+	m, err := dec.r.Read(b)
+	if err != nil || m != n {
+		return b, fmt.Errorf("error reading bytes from stream: %v", err)
+	}
+	return b, nil
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go
new file mode 100644
index 0000000..9971194
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go
@@ -0,0 +1,18 @@
+package ndr
+
+import "fmt"
+
+// Malformed implements the error interface for malformed NDR encoding errors.
+type Malformed struct {
+	EText string
+}
+
+// Error implements the error interface on the Malformed struct.
+func (e Malformed) Error() string {
+	return fmt.Sprintf("malformed NDR stream: %s", e.EText)
+}
+
+// Errorf formats an error message into a malformed NDR error.
+func Errorf(format string, a ...interface{}) Malformed {
+	return Malformed{EText: fmt.Sprintf(format, a...)}
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go
new file mode 100644
index 0000000..1970ddb
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go
@@ -0,0 +1,116 @@
+package ndr
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+/*
+Serialization Version 1
+https://msdn.microsoft.com/en-us/library/cc243563.aspx
+
+Common Header - https://msdn.microsoft.com/en-us/library/cc243890.aspx
+8 bytes in total:
+- First byte - Version: Must equal 1
+- Second byte - 1st 4 bits: Endianness (0=Big; 1=Little); 2nd 4 bits: Character Encoding (0=ASCII; 1=EBCDIC)
+- 3rd - Floating point representation (This does not seem to be the case in examples for Microsoft test sources)
+- 4th - Common Header Length: Must equal 8
+- 5th - 8th - Filler: MUST be set to 0xcccccccc on marshaling, and SHOULD be ignored during unmarshaling.
+
+Private Header - https://msdn.microsoft.com/en-us/library/cc243919.aspx
+8 bytes in total:
+- First 4 bytes - Indicates the length of a serialized top-level type in the octet stream. It MUST include the padding length and exclude the header itself.
+- Second 4 bytes - Filler: MUST be set to 0 (zero) during marshaling, and SHOULD be ignored during unmarshaling.
+*/
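+
+// As an illustration, a typical little endian, ASCII encoded stream therefore
+// begins with the common header bytes 01 10 08 00 cc cc cc cc, followed by the
+// 8 byte private header (a 4 byte object buffer length then 4 zero filler bytes).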
+
+const (
+	protocolVersion   uint8  = 1
+	commonHeaderBytes uint16 = 8
+	bigEndian                = 0
+	littleEndian             = 1
+	ascii             uint8  = 0
+	ebcdic            uint8  = 1
+	ieee              uint8  = 0
+	vax               uint8  = 1
+	cray              uint8  = 2
+	ibm               uint8  = 3
+)
+
+// CommonHeader implements the NDR common header: https://msdn.microsoft.com/en-us/library/cc243889.aspx
+type CommonHeader struct {
+	Version             uint8
+	Endianness          binary.ByteOrder
+	CharacterEncoding   uint8
+	FloatRepresentation uint8
+	HeaderLength        uint16
+	Filler              []byte
+}
+
+// PrivateHeader implements the NDR private header: https://msdn.microsoft.com/en-us/library/cc243919.aspx
+type PrivateHeader struct {
+	ObjectBufferLength uint32
+	Filler             []byte
+}
+
+func (dec *Decoder) readCommonHeader() error {
+	// Version
+	vb, err := dec.r.ReadByte()
+	if err != nil {
+		return Malformed{EText: "could not read first byte of common header for version"}
+	}
+	dec.ch.Version = uint8(vb)
+	if dec.ch.Version != protocolVersion {
+		return Malformed{EText: fmt.Sprintf("byte stream does not indicate a RPC Type serialization of version %v", protocolVersion)}
+	}
+	// Read Endianness & Character Encoding
+	eb, err := dec.r.ReadByte()
+	if err != nil {
+		return Malformed{EText: "could not read second byte of common header for endianness"}
+	}
+	endian := int(eb >> 4 & 0xF)
+	if endian != 0 && endian != 1 {
+		return Malformed{EText: "common header does not indicate a valid endianness"}
+	}
+	dec.ch.CharacterEncoding = uint8(eb & 0xF)
+	if dec.ch.CharacterEncoding != 0 && dec.ch.CharacterEncoding != 1 {
+		return Malformed{EText: "common header does not indicate a valid character encoding"}
+	}
+	switch endian {
+	case littleEndian:
+		dec.ch.Endianness = binary.LittleEndian
+	case bigEndian:
+		dec.ch.Endianness = binary.BigEndian
+	}
+	// Common header length
+	lb, err := dec.readBytes(2)
+	if err != nil {
+		return Malformed{EText: fmt.Sprintf("could not read common header length: %v", err)}
+	}
+	dec.ch.HeaderLength = dec.ch.Endianness.Uint16(lb)
+	if dec.ch.HeaderLength != commonHeaderBytes {
+		return Malformed{EText: "common header does not indicate a valid length"}
+	}
+	// Filler bytes
+	dec.ch.Filler, err = dec.readBytes(4)
+	if err != nil {
+		return Malformed{EText: fmt.Sprintf("could not read common header filler: %v", err)}
+	}
+	return nil
+}
+
+func (dec *Decoder) readPrivateHeader() error {
+	// The next 8 bytes after the common header comprise the RPC type marshalling private header for constructed types.
+	err := binary.Read(dec.r, dec.ch.Endianness, &dec.ph.ObjectBufferLength)
+	if err != nil {
+		return Malformed{EText: "could not read private header object buffer length"}
+	}
+	if dec.ph.ObjectBufferLength%8 != 0 {
+		return Malformed{EText: "object buffer length not a multiple of 8"}
+	}
+	// Filler bytes
+	dec.ph.Filler, err = dec.readBytes(4)
+	if err != nil {
+		return Malformed{EText: fmt.Sprintf("could not read private header filler: %v", err)}
+	}
+	return nil
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go
new file mode 100644
index 0000000..5fd27da
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go
@@ -0,0 +1,31 @@
+package ndr
+
+import (
+	"fmt"
+	"reflect"
+)
+
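+// fillPipe reads an NDR pipe into the slice value v. On the wire a pipe is a
+// sequence of chunks, each preceded by a uint32 element count, and is
+// terminated by a chunk with an element count of zero; the chunk contents are
+// concatenated into v.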
+func (dec *Decoder) fillPipe(v reflect.Value, tag reflect.StructTag) error {
+	s, err := dec.readUint32() // read element count of first chunk
+	if err != nil {
+		return err
+	}
+	a := reflect.MakeSlice(v.Type(), 0, 0)
+	c := reflect.MakeSlice(v.Type(), int(s), int(s))
+	for s != 0 {
+		for i := 0; i < int(s); i++ {
+			err := dec.fill(c.Index(i), tag, &[]deferedPtr{})
+			if err != nil {
+				return fmt.Errorf("could not fill element %d of pipe: %v", i, err)
+			}
+		}
+		s, err = dec.readUint32() // read element count of the next chunk
+		if err != nil {
+			return err
+		}
+		a = reflect.AppendSlice(a, c)
+		c = reflect.MakeSlice(v.Type(), int(s), int(s))
+	}
+	v.Set(a)
+	return nil
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go
new file mode 100644
index 0000000..7eb1d1a
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go
@@ -0,0 +1,211 @@
+package ndr
+
+import (
+	"bytes"
+	"encoding/binary"
+	"math"
+)
+
+// Byte sizes of primitive types
+const (
+	SizeBool   = 1
+	SizeChar   = 1
+	SizeUint8  = 1
+	SizeUint16 = 2
+	SizeUint32 = 4
+	SizeUint64 = 8
+	SizeEnum   = 2
+	SizeSingle = 4
+	SizeDouble = 8
+	SizePtr    = 4
+)
+
+// Bool is an NDR Boolean which is a logical quantity that assumes one of two values: TRUE or FALSE.
+// NDR represents a Boolean as one octet.
+// It represents a value of FALSE as a zero octet, an octet in which every bit is reset.
+// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set.
+
+// Char is an NDR character.
+// NDR represents a character as one octet.
+// Characters have two representation formats: ASCII and EBCDIC.
+
+// USmall is an unsigned 8 bit integer
+
+// UShort is an unsigned 16 bit integer
+
+// ULong is an unsigned 32 bit integer
+
+// UHyper is an unsigned 64 bit integer
+
+// Small is a signed 8 bit integer
+
+// Short is a signed 16 bit integer
+
+// Long is a signed 32 bit integer
+
+// Hyper is a signed 64 bit integer
+
+// Enum is the NDR representation of enumerated types as signed short integers (2 octets)
+
+// Single is an NDR defined single-precision floating-point data type
+
+// Double is an NDR defined double-precision floating-point data type
+
+// readBool reads a byte representing a boolean.
+// NDR represents a Boolean as one octet.
+// It represents a value of FALSE as a zero octet, an octet in which every bit is reset.
+// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set.
+func (dec *Decoder) readBool() (bool, error) {
+	i, err := dec.readUint8()
+	if err != nil {
+		return false, err
+	}
+	if i != 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
+// readChar reads a byte representing an 8bit ASCII character and casts it to a rune.
+func (dec *Decoder) readChar() (rune, error) {
+	var r rune
+	a, err := dec.readUint8()
+	if err != nil {
+		return r, err
+	}
+	return rune(a), nil
+}
+
+// readUint8 reads a byte representing an 8bit unsigned integer.
+func (dec *Decoder) readUint8() (uint8, error) {
+	b, err := dec.r.ReadByte()
+	if err != nil {
+		return uint8(0), err
+	}
+	return uint8(b), nil
+}
+
+// readUint16 reads bytes representing a 16bit unsigned integer.
+func (dec *Decoder) readUint16() (uint16, error) {
+	dec.ensureAlignment(SizeUint16)
+	b, err := dec.readBytes(SizeUint16)
+	if err != nil {
+		return uint16(0), err
+	}
+	return dec.ch.Endianness.Uint16(b), nil
+}
+
+// readUint32 reads bytes representing a 32bit unsigned integer.
+func (dec *Decoder) readUint32() (uint32, error) {
+	dec.ensureAlignment(SizeUint32)
+	b, err := dec.readBytes(SizeUint32)
+	if err != nil {
+		return uint32(0), err
+	}
+	return dec.ch.Endianness.Uint32(b), nil
+}
+
+// readUint64 reads bytes representing a 64bit unsigned integer.
+func (dec *Decoder) readUint64() (uint64, error) {
+	dec.ensureAlignment(SizeUint64)
+	b, err := dec.readBytes(SizeUint64)
+	if err != nil {
+		return uint64(0), err
+	}
+	return dec.ch.Endianness.Uint64(b), nil
+}
+
+func (dec *Decoder) readInt8() (int8, error) {
+	dec.ensureAlignment(SizeUint8)
+	b, err := dec.readBytes(SizeUint8)
+	if err != nil {
+		return 0, err
+	}
+	var i int8
+	buf := bytes.NewReader(b)
+	err = binary.Read(buf, dec.ch.Endianness, &i)
+	if err != nil {
+		return 0, err
+	}
+	return i, nil
+}
+
+func (dec *Decoder) readInt16() (int16, error) {
+	dec.ensureAlignment(SizeUint16)
+	b, err := dec.readBytes(SizeUint16)
+	if err != nil {
+		return 0, err
+	}
+	var i int16
+	buf := bytes.NewReader(b)
+	err = binary.Read(buf, dec.ch.Endianness, &i)
+	if err != nil {
+		return 0, err
+	}
+	return i, nil
+}
+
+func (dec *Decoder) readInt32() (int32, error) {
+	dec.ensureAlignment(SizeUint32)
+	b, err := dec.readBytes(SizeUint32)
+	if err != nil {
+		return 0, err
+	}
+	var i int32
+	buf := bytes.NewReader(b)
+	err = binary.Read(buf, dec.ch.Endianness, &i)
+	if err != nil {
+		return 0, err
+	}
+	return i, nil
+}
+
+func (dec *Decoder) readInt64() (int64, error) {
+	dec.ensureAlignment(SizeUint64)
+	b, err := dec.readBytes(SizeUint64)
+	if err != nil {
+		return 0, err
+	}
+	var i int64
+	buf := bytes.NewReader(b)
+	err = binary.Read(buf, dec.ch.Endianness, &i)
+	if err != nil {
+		return 0, err
+	}
+	return i, nil
+}
+
+// https://en.wikipedia.org/wiki/IEEE_754-1985
+func (dec *Decoder) readFloat32() (f float32, err error) {
+	dec.ensureAlignment(SizeSingle)
+	b, err := dec.readBytes(SizeSingle)
+	if err != nil {
+		return
+	}
+	bits := dec.ch.Endianness.Uint32(b)
+	f = math.Float32frombits(bits)
+	return
+}
+
+func (dec *Decoder) readFloat64() (f float64, err error) {
+	dec.ensureAlignment(SizeDouble)
+	b, err := dec.readBytes(SizeDouble)
+	if err != nil {
+		return
+	}
+	bits := dec.ch.Endianness.Uint64(b)
+	f = math.Float64frombits(bits)
+	return
+}
+
+// NDR enforces alignment of primitive data; that is, any primitive of size n octets is aligned at an octet stream
+// index that is a multiple of n. (In this version of NDR, n is one of {1, 2, 4, 8}.) An octet stream index indicates
+// the number of an octet in an octet stream when octets are numbered, beginning with 0, from the first octet in the
+// stream. Where necessary, an alignment gap, consisting of octets of unspecified value, precedes the representation
+// of a primitive. The gap is of the smallest size sufficient to align the primitive.
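+//
+// For example, if 5 octets of the stream have been consumed, reading a uint32
+// first discards 3 padding octets so that the integer begins at octet index 8,
+// a multiple of its 4 octet size.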
+func (dec *Decoder) ensureAlignment(n int) {
+	p := dec.size - dec.r.Buffered()
+	if s := p % n; s != 0 {
+		dec.r.Discard(n - s)
+	}
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go
new file mode 100644
index 0000000..9ee59fb
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go
@@ -0,0 +1,61 @@
+package ndr
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// A type implementing the RawBytes interface is read from the stream as a raw
+// sequence of bytes; see the illustrative sketch below the interface definition.
+
+const (
+	sizeMethod = "Size"
+)
+
+// RawBytes is the interface to implement for fields that should be read as a raw sequence of bytes from the NDR stream; Size returns the number of bytes to read.
+type RawBytes interface {
+	Size(interface{}) int
+}
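+
+// exampleRawBytes is an illustrative sketch (a hypothetical type, not used by
+// the decoder) of a RawBytes implementation that always occupies a fixed 16
+// bytes in the stream, regardless of the parent value passed to Size.
+type exampleRawBytes []byte
+
+// Size implements the RawBytes interface for the sketch above.
+func (b exampleRawBytes) Size(parent interface{}) int {
+	return 16
+}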
+
+func rawBytesSize(parent reflect.Value, v reflect.Value) (int, error) {
+	sf := v.MethodByName(sizeMethod)
+	if !sf.IsValid() {
+		return 0, fmt.Errorf("could not find a method called %s on the implementation of RawBytes", sizeMethod)
+	}
+	in := []reflect.Value{parent}
+	f := sf.Call(in)
+	if f[0].Kind() != reflect.Int {
+		return 0, errors.New("the RawBytes size function did not return an integer")
+	}
+	return int(f[0].Int()), nil
+}
+
+func addSizeToTag(parent reflect.Value, v reflect.Value, tag reflect.StructTag) (reflect.StructTag, error) {
+	size, err := rawBytesSize(parent, v)
+	if err != nil {
+		return tag, err
+	}
+	ndrTag := parseTags(tag)
+	ndrTag.Map["size"] = strconv.Itoa(size)
+	return ndrTag.StructTag(), nil
+}
+
+func (dec *Decoder) readRawBytes(v reflect.Value, tag reflect.StructTag) error {
+	ndrTag := parseTags(tag)
+	sizeStr, ok := ndrTag.Map["size"]
+	if !ok {
+		return errors.New("size tag not available")
+	}
+	size, err := strconv.Atoi(sizeStr)
+	if err != nil {
+		return fmt.Errorf("size not valid: %v", err)
+	}
+	b, err := dec.readBytes(size)
+	if err != nil {
+		return err
+	}
+	v.Set(reflect.ValueOf(b).Convert(v.Type()))
+	return nil
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go
new file mode 100644
index 0000000..b7a910b
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go
@@ -0,0 +1,70 @@
+package ndr
+
+import (
+	"fmt"
+	"reflect"
+)
+
+const (
+	subStringArrayTag   = `ndr:"varying,X-subStringArray"`
+	subStringArrayValue = "X-subStringArray"
+)
+
+func uint16SliceToString(a []uint16) string {
+	s := make([]rune, len(a), len(a))
+	for i := range s {
+		s[i] = rune(a[i])
+	}
+	if len(s) > 0 {
+		// Remove any null terminator
+		if s[len(s)-1] == rune(0) {
+			s = s[:len(s)-1]
+		}
+	}
+	return string(s)
+}
+
+func (dec *Decoder) readVaryingString(def *[]deferedPtr) (string, error) {
+	a := new([]uint16)
+	v := reflect.ValueOf(a)
+	var t reflect.StructTag
+	err := dec.fillUniDimensionalVaryingArray(v.Elem(), t, def)
+	if err != nil {
+		return "", err
+	}
+	s := uint16SliceToString(*a)
+	return s, nil
+}
+
+func (dec *Decoder) readConformantVaryingString(def *[]deferedPtr) (string, error) {
+	a := new([]uint16)
+	v := reflect.ValueOf(a)
+	var t reflect.StructTag
+	err := dec.fillUniDimensionalConformantVaryingArray(v.Elem(), t, def)
+	if err != nil {
+		return "", err
+	}
+	s := uint16SliceToString(*a)
+	return s, nil
+}
+
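+// readStringsArray reads an array of strings into the slice value v. For
+// conformant arrays the preceding max count of each dimension, plus the common
+// maximum string size, are consumed from the stream but not otherwise used.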
+func (dec *Decoder) readStringsArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	d, _ := sliceDimensions(v.Type())
+	ndrTag := parseTags(tag)
+	if ndrTag.HasValue(TagConformant) {
+		// Consume the preceding max count of each dimension and the common
+		// maximum string size; they are not needed to fill the array.
+		for i := 0; i < d; i++ {
+			_ = dec.precedingMax()
+		}
+		_ = dec.precedingMax()
+	}
+	tag = reflect.StructTag(subStringArrayTag)
+	err := dec.fillVaryingArray(v, tag, def)
+	if err != nil {
+		return fmt.Errorf("could not read string array: %v", err)
+	}
+	return nil
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go
new file mode 100644
index 0000000..01657e0
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go
@@ -0,0 +1,69 @@
+package ndr
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+const ndrNameSpace = "ndr"
+
+type tags struct {
+	Values []string
+	Map    map[string]string
+}
+
+// parseTags parses a struct field tag and extracts the ndr related entries.
+// Tag format: ndr:"value1,value2,key1:val1" - bare entries become Values and
+// colon separated entries become Map entries.
+func parseTags(st reflect.StructTag) tags {
+	s := st.Get(ndrNameSpace)
+	t := tags{
+		Values: []string{},
+		Map:    make(map[string]string),
+	}
+	if s != "" {
+		ndrTags := strings.Trim(s, `"`)
+		for _, tag := range strings.Split(ndrTags, ",") {
+			if strings.Contains(tag, ":") {
+				m := strings.SplitN(tag, ":", 2)
+				t.Map[m[0]] = m[1]
+			} else {
+				t.Values = append(t.Values, tag)
+			}
+		}
+	}
+	return t
+}
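+
+// exampleParseTags is an illustrative sketch (not called by the library) of
+// how the tag format above is split between positional values and key:value
+// pairs.
+func exampleParseTags() tags {
+	// Yields Values ["conformant", "varying"] and Map {"size": "8"}.
+	return parseTags(reflect.StructTag(`ndr:"conformant,varying,size:8"`))
+}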
+
+func appendTag(t reflect.StructTag, s string) reflect.StructTag {
+	ts := t.Get(ndrNameSpace)
+	ts = fmt.Sprintf(`%s:"%s,%s"`, ndrNameSpace, ts, s)
+	return reflect.StructTag(ts)
+}
+
+func (t *tags) StructTag() reflect.StructTag {
+	mv := t.Values
+	for key, val := range t.Map {
+		mv = append(mv, key+":"+val)
+	}
+	s := ndrNameSpace + ":" + `"` + strings.Join(mv, ",") + `"`
+	return reflect.StructTag(s)
+}
+
+func (t *tags) delete(s string) {
+	for i, x := range t.Values {
+		if x == s {
+			t.Values = append(t.Values[:i], t.Values[i+1:]...)
+		}
+	}
+	delete(t.Map, s)
+}
+
+func (t *tags) HasValue(s string) bool {
+	for _, v := range t.Values {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go
new file mode 100644
index 0000000..6a657fa
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go
@@ -0,0 +1,57 @@
+package ndr
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// Union interface must be implemented by structs that will be unmarshaled into from the NDR byte stream union representation.
+// The union's discriminating tag will be passed to the SwitchFunc method.
+// The discriminating tag field must have the struct tag: `ndr:"unionTag"`
+// If the union is encapsulated the discriminating tag field must have the struct tag: `ndr:"encapsulated"`
+// The possible value fields that can be selected from must have the struct tag: `ndr:"unionField"`
+type Union interface {
+	SwitchFunc(t interface{}) string
+}
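+
+// exampleUnion is an illustrative sketch (a hypothetical type, not used by the
+// decoder) showing the struct tags a union is expected to carry: the
+// discriminating tag field is marked unionTag and each selectable value field
+// is marked unionField.
+type exampleUnion struct {
+	Tag uint16 `ndr:"unionTag"`
+	A   uint32 `ndr:"unionField"`
+	B   uint64 `ndr:"unionField"`
+}
+
+// SwitchFunc implements the Union interface for the sketch above, mapping the
+// discriminating tag value to the name of the field to fill.
+func (u exampleUnion) SwitchFunc(t interface{}) string {
+	if tag, ok := t.(uint16); ok {
+		switch tag {
+		case 1:
+			return "A"
+		case 2:
+			return "B"
+		}
+	}
+	return ""
+}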
+
+// Union related constants such as struct tag values
+const (
+	unionSelectionFuncName = "SwitchFunc"
+	TagEncapsulated        = "encapsulated"
+	TagUnionTag            = "unionTag"
+	TagUnionField          = "unionField"
+)
+
+func (dec *Decoder) isUnion(field reflect.Value, tag reflect.StructTag) (r reflect.Value) {
+	ndrTag := parseTags(tag)
+	if !ndrTag.HasValue(TagUnionTag) {
+		return
+	}
+	r = field
+	// For a non-encapsulated union, the discriminant is marshalled into the transmitted data stream twice: once as the
+	// field or parameter, which is referenced by the switch_is construct, in the procedure argument list; and once as
+	// the first part of the union representation.
+	if !ndrTag.HasValue(TagEncapsulated) {
+		dec.r.Discard(int(r.Type().Size()))
+	}
+	return
+}
+
+// unionSelectedField returns the field name of which of the union values to fill
+func unionSelectedField(union, discriminant reflect.Value) (string, error) {
+	if !union.Type().Implements(reflect.TypeOf(new(Union)).Elem()) {
+		return "", errors.New("struct does not implement union interface")
+	}
+	args := []reflect.Value{discriminant}
+	// Call the SwitchFunc of the union struct to find the name of the field to fill with the value selected.
+	sf := union.MethodByName(unionSelectionFuncName)
+	if !sf.IsValid() {
+		return "", fmt.Errorf("could not find a selection function called %s in the union's struct representation", unionSelectionFuncName)
+	}
+	f := sf.Call(args)
+	if f[0].Kind() != reflect.String || f[0].String() == "" {
+		return "", fmt.Errorf("the union select function did not return a string for the name of the field to fill")
+	}
+	return f[0].String(), nil
+}