khenaidoo | ab1f7bd | 2019-11-14 14:00:27 -0500 | [diff] [blame] | 1 | // Go support for Protocol Buffers - Google's data interchange format |
| 2 | // |
| 3 | // Copyright 2017 The Go Authors. All rights reserved. |
| 4 | // https://github.com/golang/protobuf |
| 5 | // |
| 6 | // Redistribution and use in source and binary forms, with or without |
| 7 | // modification, are permitted provided that the following conditions are |
| 8 | // met: |
| 9 | // |
| 10 | // * Redistributions of source code must retain the above copyright |
| 11 | // notice, this list of conditions and the following disclaimer. |
| 12 | // * Redistributions in binary form must reproduce the above |
| 13 | // copyright notice, this list of conditions and the following disclaimer |
| 14 | // in the documentation and/or other materials provided with the |
| 15 | // distribution. |
| 16 | // * Neither the name of Google Inc. nor the names of its |
| 17 | // contributors may be used to endorse or promote products derived from |
| 18 | // this software without specific prior written permission. |
| 19 | // |
| 20 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 23 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 24 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 25 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 26 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 27 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 28 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 29 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 30 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 31 | |
| 32 | /* |
| 33 | Package remap handles tracking the locations of Go tokens in a source text |
| 34 | across a rewrite by the Go formatter. |
| 35 | */ |
| 36 | package remap |
| 37 | |
| 38 | import ( |
| 39 | "fmt" |
| 40 | "go/scanner" |
| 41 | "go/token" |
| 42 | ) |
| 43 | |
// A Location represents a span of byte offsets in the source text.
// The span is half-open: Pos is the offset of the first byte and End is
// the offset one past the last byte, i.e. [Pos, End).
type Location struct {
	Pos, End int // End is exclusive
}
| 48 | |
// A Map represents a mapping between token locations in an input source text
// and locations in the corresponding output text.
type Map map[Location]Location
| 52 | |
| 53 | // Find reports whether the specified span is recorded by m, and if so returns |
| 54 | // the new location it was mapped to. If the input span was not found, the |
| 55 | // returned location is the same as the input. |
| 56 | func (m Map) Find(pos, end int) (Location, bool) { |
| 57 | key := Location{ |
| 58 | Pos: pos, |
| 59 | End: end, |
| 60 | } |
| 61 | if loc, ok := m[key]; ok { |
| 62 | return loc, true |
| 63 | } |
| 64 | return key, false |
| 65 | } |
| 66 | |
| 67 | func (m Map) add(opos, oend, npos, nend int) { |
| 68 | m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend} |
| 69 | } |
| 70 | |
| 71 | // Compute constructs a location mapping from input to output. An error is |
| 72 | // reported if any of the tokens of output cannot be mapped. |
| 73 | func Compute(input, output []byte) (Map, error) { |
| 74 | itok := tokenize(input) |
| 75 | otok := tokenize(output) |
| 76 | if len(itok) != len(otok) { |
| 77 | return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok)) |
| 78 | } |
| 79 | m := make(Map) |
| 80 | for i, ti := range itok { |
| 81 | to := otok[i] |
| 82 | if ti.Token != to.Token { |
| 83 | return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to) |
| 84 | } |
| 85 | m.add(ti.pos, ti.end, to.pos, to.end) |
| 86 | } |
| 87 | return m, nil |
| 88 | } |
| 89 | |
// tokinfo records the span and type of a source token.
// pos and end are 0-based byte offsets into the scanned text (end exclusive);
// the embedded token.Token identifies the token's kind and supplies String().
type tokinfo struct {
	pos, end int
	token.Token
}
| 95 | |
| 96 | func tokenize(src []byte) []tokinfo { |
| 97 | fs := token.NewFileSet() |
| 98 | var s scanner.Scanner |
| 99 | s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) |
| 100 | var info []tokinfo |
| 101 | for { |
| 102 | pos, next, lit := s.Scan() |
| 103 | switch next { |
| 104 | case token.SEMICOLON: |
| 105 | continue |
| 106 | } |
| 107 | info = append(info, tokinfo{ |
| 108 | pos: int(pos - 1), |
| 109 | end: int(pos + token.Pos(len(lit)) - 1), |
| 110 | Token: next, |
| 111 | }) |
| 112 | if next == token.EOF { |
| 113 | break |
| 114 | } |
| 115 | } |
| 116 | return info |
| 117 | } |