diff options
Diffstat (limited to 'vendor/github.com/hashicorp/hcl')
21 files changed, 4338 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/LICENSE | |||
@@ -0,0 +1,354 @@ | |||
1 | Mozilla Public License, version 2.0 | ||
2 | |||
3 | 1. Definitions | ||
4 | |||
5 | 1.1. “Contributor” | ||
6 | |||
7 | means each individual or legal entity that creates, contributes to the | ||
8 | creation of, or owns Covered Software. | ||
9 | |||
10 | 1.2. “Contributor Version” | ||
11 | |||
12 | means the combination of the Contributions of others (if any) used by a | ||
13 | Contributor and that particular Contributor’s Contribution. | ||
14 | |||
15 | 1.3. “Contribution” | ||
16 | |||
17 | means Covered Software of a particular Contributor. | ||
18 | |||
19 | 1.4. “Covered Software” | ||
20 | |||
21 | means Source Code Form to which the initial Contributor has attached the | ||
22 | notice in Exhibit A, the Executable Form of such Source Code Form, and | ||
23 | Modifications of such Source Code Form, in each case including portions | ||
24 | thereof. | ||
25 | |||
26 | 1.5. “Incompatible With Secondary Licenses” | ||
27 | means | ||
28 | |||
29 | a. that the initial Contributor has attached the notice described in | ||
30 | Exhibit B to the Covered Software; or | ||
31 | |||
32 | b. that the Covered Software was made available under the terms of version | ||
33 | 1.1 or earlier of the License, but not also under the terms of a | ||
34 | Secondary License. | ||
35 | |||
36 | 1.6. “Executable Form” | ||
37 | |||
38 | means any form of the work other than Source Code Form. | ||
39 | |||
40 | 1.7. “Larger Work” | ||
41 | |||
42 | means a work that combines Covered Software with other material, in a separate | ||
43 | file or files, that is not Covered Software. | ||
44 | |||
45 | 1.8. “License” | ||
46 | |||
47 | means this document. | ||
48 | |||
49 | 1.9. “Licensable” | ||
50 | |||
51 | means having the right to grant, to the maximum extent possible, whether at the | ||
52 | time of the initial grant or subsequently, any and all of the rights conveyed by | ||
53 | this License. | ||
54 | |||
55 | 1.10. “Modifications” | ||
56 | |||
57 | means any of the following: | ||
58 | |||
59 | a. any file in Source Code Form that results from an addition to, deletion | ||
60 | from, or modification of the contents of Covered Software; or | ||
61 | |||
62 | b. any new file in Source Code Form that contains any Covered Software. | ||
63 | |||
64 | 1.11. “Patent Claims” of a Contributor | ||
65 | |||
66 | means any patent claim(s), including without limitation, method, process, | ||
67 | and apparatus claims, in any patent Licensable by such Contributor that | ||
68 | would be infringed, but for the grant of the License, by the making, | ||
69 | using, selling, offering for sale, having made, import, or transfer of | ||
70 | either its Contributions or its Contributor Version. | ||
71 | |||
72 | 1.12. “Secondary License” | ||
73 | |||
74 | means either the GNU General Public License, Version 2.0, the GNU Lesser | ||
75 | General Public License, Version 2.1, the GNU Affero General Public | ||
76 | License, Version 3.0, or any later versions of those licenses. | ||
77 | |||
78 | 1.13. “Source Code Form” | ||
79 | |||
80 | means the form of the work preferred for making modifications. | ||
81 | |||
82 | 1.14. “You” (or “Your”) | ||
83 | |||
84 | means an individual or a legal entity exercising rights under this | ||
85 | License. For legal entities, “You” includes any entity that controls, is | ||
86 | controlled by, or is under common control with You. For purposes of this | ||
87 | definition, “control” means (a) the power, direct or indirect, to cause | ||
88 | the direction or management of such entity, whether by contract or | ||
89 | otherwise, or (b) ownership of more than fifty percent (50%) of the | ||
90 | outstanding shares or beneficial ownership of such entity. | ||
91 | |||
92 | |||
93 | 2. License Grants and Conditions | ||
94 | |||
95 | 2.1. Grants | ||
96 | |||
97 | Each Contributor hereby grants You a world-wide, royalty-free, | ||
98 | non-exclusive license: | ||
99 | |||
100 | a. under intellectual property rights (other than patent or trademark) | ||
101 | Licensable by such Contributor to use, reproduce, make available, | ||
102 | modify, display, perform, distribute, and otherwise exploit its | ||
103 | Contributions, either on an unmodified basis, with Modifications, or as | ||
104 | part of a Larger Work; and | ||
105 | |||
106 | b. under Patent Claims of such Contributor to make, use, sell, offer for | ||
107 | sale, have made, import, and otherwise transfer either its Contributions | ||
108 | or its Contributor Version. | ||
109 | |||
110 | 2.2. Effective Date | ||
111 | |||
112 | The licenses granted in Section 2.1 with respect to any Contribution become | ||
113 | effective for each Contribution on the date the Contributor first distributes | ||
114 | such Contribution. | ||
115 | |||
116 | 2.3. Limitations on Grant Scope | ||
117 | |||
118 | The licenses granted in this Section 2 are the only rights granted under this | ||
119 | License. No additional rights or licenses will be implied from the distribution | ||
120 | or licensing of Covered Software under this License. Notwithstanding Section | ||
121 | 2.1(b) above, no patent license is granted by a Contributor: | ||
122 | |||
123 | a. for any code that a Contributor has removed from Covered Software; or | ||
124 | |||
125 | b. for infringements caused by: (i) Your and any other third party’s | ||
126 | modifications of Covered Software, or (ii) the combination of its | ||
127 | Contributions with other software (except as part of its Contributor | ||
128 | Version); or | ||
129 | |||
130 | c. under Patent Claims infringed by Covered Software in the absence of its | ||
131 | Contributions. | ||
132 | |||
133 | This License does not grant any rights in the trademarks, service marks, or | ||
134 | logos of any Contributor (except as may be necessary to comply with the | ||
135 | notice requirements in Section 3.4). | ||
136 | |||
137 | 2.4. Subsequent Licenses | ||
138 | |||
139 | No Contributor makes additional grants as a result of Your choice to | ||
140 | distribute the Covered Software under a subsequent version of this License | ||
141 | (see Section 10.2) or under the terms of a Secondary License (if permitted | ||
142 | under the terms of Section 3.3). | ||
143 | |||
144 | 2.5. Representation | ||
145 | |||
146 | Each Contributor represents that the Contributor believes its Contributions | ||
147 | are its original creation(s) or it has sufficient rights to grant the | ||
148 | rights to its Contributions conveyed by this License. | ||
149 | |||
150 | 2.6. Fair Use | ||
151 | |||
152 | This License is not intended to limit any rights You have under applicable | ||
153 | copyright doctrines of fair use, fair dealing, or other equivalents. | ||
154 | |||
155 | 2.7. Conditions | ||
156 | |||
157 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in | ||
158 | Section 2.1. | ||
159 | |||
160 | |||
161 | 3. Responsibilities | ||
162 | |||
163 | 3.1. Distribution of Source Form | ||
164 | |||
165 | All distribution of Covered Software in Source Code Form, including any | ||
166 | Modifications that You create or to which You contribute, must be under the | ||
167 | terms of this License. You must inform recipients that the Source Code Form | ||
168 | of the Covered Software is governed by the terms of this License, and how | ||
169 | they can obtain a copy of this License. You may not attempt to alter or | ||
170 | restrict the recipients’ rights in the Source Code Form. | ||
171 | |||
172 | 3.2. Distribution of Executable Form | ||
173 | |||
174 | If You distribute Covered Software in Executable Form then: | ||
175 | |||
176 | a. such Covered Software must also be made available in Source Code Form, | ||
177 | as described in Section 3.1, and You must inform recipients of the | ||
178 | Executable Form how they can obtain a copy of such Source Code Form by | ||
179 | reasonable means in a timely manner, at a charge no more than the cost | ||
180 | of distribution to the recipient; and | ||
181 | |||
182 | b. You may distribute such Executable Form under the terms of this License, | ||
183 | or sublicense it under different terms, provided that the license for | ||
184 | the Executable Form does not attempt to limit or alter the recipients’ | ||
185 | rights in the Source Code Form under this License. | ||
186 | |||
187 | 3.3. Distribution of a Larger Work | ||
188 | |||
189 | You may create and distribute a Larger Work under terms of Your choice, | ||
190 | provided that You also comply with the requirements of this License for the | ||
191 | Covered Software. If the Larger Work is a combination of Covered Software | ||
192 | with a work governed by one or more Secondary Licenses, and the Covered | ||
193 | Software is not Incompatible With Secondary Licenses, this License permits | ||
194 | You to additionally distribute such Covered Software under the terms of | ||
195 | such Secondary License(s), so that the recipient of the Larger Work may, at | ||
196 | their option, further distribute the Covered Software under the terms of | ||
197 | either this License or such Secondary License(s). | ||
198 | |||
199 | 3.4. Notices | ||
200 | |||
201 | You may not remove or alter the substance of any license notices (including | ||
202 | copyright notices, patent notices, disclaimers of warranty, or limitations | ||
203 | of liability) contained within the Source Code Form of the Covered | ||
204 | Software, except that You may alter any license notices to the extent | ||
205 | required to remedy known factual inaccuracies. | ||
206 | |||
207 | 3.5. Application of Additional Terms | ||
208 | |||
209 | You may choose to offer, and to charge a fee for, warranty, support, | ||
210 | indemnity or liability obligations to one or more recipients of Covered | ||
211 | Software. However, You may do so only on Your own behalf, and not on behalf | ||
212 | of any Contributor. You must make it absolutely clear that any such | ||
213 | warranty, support, indemnity, or liability obligation is offered by You | ||
214 | alone, and You hereby agree to indemnify every Contributor for any | ||
215 | liability incurred by such Contributor as a result of warranty, support, | ||
216 | indemnity or liability terms You offer. You may include additional | ||
217 | disclaimers of warranty and limitations of liability specific to any | ||
218 | jurisdiction. | ||
219 | |||
220 | 4. Inability to Comply Due to Statute or Regulation | ||
221 | |||
222 | If it is impossible for You to comply with any of the terms of this License | ||
223 | with respect to some or all of the Covered Software due to statute, judicial | ||
224 | order, or regulation then You must: (a) comply with the terms of this License | ||
225 | to the maximum extent possible; and (b) describe the limitations and the code | ||
226 | they affect. Such description must be placed in a text file included with all | ||
227 | distributions of the Covered Software under this License. Except to the | ||
228 | extent prohibited by statute or regulation, such description must be | ||
229 | sufficiently detailed for a recipient of ordinary skill to be able to | ||
230 | understand it. | ||
231 | |||
232 | 5. Termination | ||
233 | |||
234 | 5.1. The rights granted under this License will terminate automatically if You | ||
235 | fail to comply with any of its terms. However, if You become compliant, | ||
236 | then the rights granted under this License from a particular Contributor | ||
237 | are reinstated (a) provisionally, unless and until such Contributor | ||
238 | explicitly and finally terminates Your grants, and (b) on an ongoing basis, | ||
239 | if such Contributor fails to notify You of the non-compliance by some | ||
240 | reasonable means prior to 60 days after You have come back into compliance. | ||
241 | Moreover, Your grants from a particular Contributor are reinstated on an | ||
242 | ongoing basis if such Contributor notifies You of the non-compliance by | ||
243 | some reasonable means, this is the first time You have received notice of | ||
244 | non-compliance with this License from such Contributor, and You become | ||
245 | compliant prior to 30 days after Your receipt of the notice. | ||
246 | |||
247 | 5.2. If You initiate litigation against any entity by asserting a patent | ||
248 | infringement claim (excluding declaratory judgment actions, counter-claims, | ||
249 | and cross-claims) alleging that a Contributor Version directly or | ||
250 | indirectly infringes any patent, then the rights granted to You by any and | ||
251 | all Contributors for the Covered Software under Section 2.1 of this License | ||
252 | shall terminate. | ||
253 | |||
254 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user | ||
255 | license agreements (excluding distributors and resellers) which have been | ||
256 | validly granted by You or Your distributors under this License prior to | ||
257 | termination shall survive termination. | ||
258 | |||
259 | 6. Disclaimer of Warranty | ||
260 | |||
261 | Covered Software is provided under this License on an “as is” basis, without | ||
262 | warranty of any kind, either expressed, implied, or statutory, including, | ||
263 | without limitation, warranties that the Covered Software is free of defects, | ||
264 | merchantable, fit for a particular purpose or non-infringing. The entire | ||
265 | risk as to the quality and performance of the Covered Software is with You. | ||
266 | Should any Covered Software prove defective in any respect, You (not any | ||
267 | Contributor) assume the cost of any necessary servicing, repair, or | ||
268 | correction. This disclaimer of warranty constitutes an essential part of this | ||
269 | License. No use of any Covered Software is authorized under this License | ||
270 | except under this disclaimer. | ||
271 | |||
272 | 7. Limitation of Liability | ||
273 | |||
274 | Under no circumstances and under no legal theory, whether tort (including | ||
275 | negligence), contract, or otherwise, shall any Contributor, or anyone who | ||
276 | distributes Covered Software as permitted above, be liable to You for any | ||
277 | direct, indirect, special, incidental, or consequential damages of any | ||
278 | character including, without limitation, damages for lost profits, loss of | ||
279 | goodwill, work stoppage, computer failure or malfunction, or any and all | ||
280 | other commercial damages or losses, even if such party shall have been | ||
281 | informed of the possibility of such damages. This limitation of liability | ||
282 | shall not apply to liability for death or personal injury resulting from such | ||
283 | party’s negligence to the extent applicable law prohibits such limitation. | ||
284 | Some jurisdictions do not allow the exclusion or limitation of incidental or | ||
285 | consequential damages, so this exclusion and limitation may not apply to You. | ||
286 | |||
287 | 8. Litigation | ||
288 | |||
289 | Any litigation relating to this License may be brought only in the courts of | ||
290 | a jurisdiction where the defendant maintains its principal place of business | ||
291 | and such litigation shall be governed by laws of that jurisdiction, without | ||
292 | reference to its conflict-of-law provisions. Nothing in this Section shall | ||
293 | prevent a party’s ability to bring cross-claims or counter-claims. | ||
294 | |||
295 | 9. Miscellaneous | ||
296 | |||
297 | This License represents the complete agreement concerning the subject matter | ||
298 | hereof. If any provision of this License is held to be unenforceable, such | ||
299 | provision shall be reformed only to the extent necessary to make it | ||
300 | enforceable. Any law or regulation which provides that the language of a | ||
301 | contract shall be construed against the drafter shall not be used to construe | ||
302 | this License against a Contributor. | ||
303 | |||
304 | |||
305 | 10. Versions of the License | ||
306 | |||
307 | 10.1. New Versions | ||
308 | |||
309 | Mozilla Foundation is the license steward. Except as provided in Section | ||
310 | 10.3, no one other than the license steward has the right to modify or | ||
311 | publish new versions of this License. Each version will be given a | ||
312 | distinguishing version number. | ||
313 | |||
314 | 10.2. Effect of New Versions | ||
315 | |||
316 | You may distribute the Covered Software under the terms of the version of | ||
317 | the License under which You originally received the Covered Software, or | ||
318 | under the terms of any subsequent version published by the license | ||
319 | steward. | ||
320 | |||
321 | 10.3. Modified Versions | ||
322 | |||
323 | If you create software not governed by this License, and you want to | ||
324 | create a new license for such software, you may create and use a modified | ||
325 | version of this License if you rename the license and remove any | ||
326 | references to the name of the license steward (except to note that such | ||
327 | modified license differs from this License). | ||
328 | |||
329 | 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses | ||
330 | If You choose to distribute Source Code Form that is Incompatible With | ||
331 | Secondary Licenses under the terms of this version of the License, the | ||
332 | notice described in Exhibit B of this License must be attached. | ||
333 | |||
334 | Exhibit A - Source Code Form License Notice | ||
335 | |||
336 | This Source Code Form is subject to the | ||
337 | terms of the Mozilla Public License, v. | ||
338 | 2.0. If a copy of the MPL was not | ||
339 | distributed with this file, You can | ||
340 | obtain one at | ||
341 | http://mozilla.org/MPL/2.0/. | ||
342 | |||
343 | If it is not possible or desirable to put the notice in a particular file, then | ||
344 | You may include the notice in a location (such as a LICENSE file in a relevant | ||
345 | directory) where a recipient would be likely to look for such a notice. | ||
346 | |||
347 | You may add additional accurate notices of copyright ownership. | ||
348 | |||
349 | Exhibit B - “Incompatible With Secondary Licenses” Notice | ||
350 | |||
351 | This Source Code Form is “Incompatible | ||
352 | With Secondary Licenses”, as defined by | ||
353 | the Mozilla Public License, v. 2.0. | ||
354 | |||
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile new file mode 100644 index 0000000..84fd743 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/Makefile | |||
@@ -0,0 +1,18 @@ | |||
1 | TEST?=./... | ||
2 | |||
3 | default: test | ||
4 | |||
5 | fmt: generate | ||
6 | go fmt ./... | ||
7 | |||
8 | test: generate | ||
9 | go get -t ./... | ||
10 | go test $(TEST) $(TESTARGS) | ||
11 | |||
12 | generate: | ||
13 | go generate ./... | ||
14 | |||
15 | updatedeps: | ||
16 | go get -u golang.org/x/tools/cmd/stringer | ||
17 | |||
18 | .PHONY: default generate test updatedeps | ||
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md new file mode 100644 index 0000000..c822332 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/README.md | |||
@@ -0,0 +1,125 @@ | |||
1 | # HCL | ||
2 | |||
3 | [![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) | ||
4 | |||
5 | HCL (HashiCorp Configuration Language) is a configuration language built | ||
6 | by HashiCorp. The goal of HCL is to build a structured configuration language | ||
7 | that is both human and machine friendly for use with command-line tools, but | ||
8 | specifically targeted towards DevOps tools, servers, etc. | ||
9 | |||
10 | HCL is also fully JSON compatible. That is, JSON can be used as completely | ||
11 | valid input to a system expecting HCL. This helps makes systems | ||
12 | interoperable with other systems. | ||
13 | |||
14 | HCL is heavily inspired by | ||
15 | [libucl](https://github.com/vstakhov/libucl), | ||
16 | nginx configuration, and others similar. | ||
17 | |||
18 | ## Why? | ||
19 | |||
20 | A common question when viewing HCL is to ask the question: why not | ||
21 | JSON, YAML, etc.? | ||
22 | |||
23 | Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) | ||
24 | used a variety of configuration languages from full programming languages | ||
25 | such as Ruby to complete data structure languages such as JSON. What we | ||
26 | learned is that some people wanted human-friendly configuration languages | ||
27 | and some people wanted machine-friendly languages. | ||
28 | |||
29 | JSON fits a nice balance in this, but is fairly verbose and most | ||
30 | importantly doesn't support comments. With YAML, we found that beginners | ||
31 | had a really hard time determining what the actual structure was, and | ||
32 | ended up guessing more often than not whether to use a hyphen, colon, etc. | ||
33 | in order to represent some configuration key. | ||
34 | |||
35 | Full programming languages such as Ruby enable complex behavior | ||
36 | a configuration language shouldn't usually allow, and also forces | ||
37 | people to learn some set of Ruby. | ||
38 | |||
39 | Because of this, we decided to create our own configuration language | ||
40 | that is JSON-compatible. Our configuration language (HCL) is designed | ||
41 | to be written and modified by humans. The API for HCL allows JSON | ||
42 | as an input so that it is also machine-friendly (machines can generate | ||
43 | JSON instead of trying to generate HCL). | ||
44 | |||
45 | Our goal with HCL is not to alienate other configuration languages. | ||
46 | It is instead to provide HCL as a specialized language for our tools, | ||
47 | and JSON as the interoperability layer. | ||
48 | |||
49 | ## Syntax | ||
50 | |||
51 | For a complete grammar, please see the parser itself. A high-level overview | ||
52 | of the syntax and grammar is listed here. | ||
53 | |||
54 | * Single line comments start with `#` or `//` | ||
55 | |||
56 | * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments | ||
57 | are not allowed. A multi-line comment (also known as a block comment) | ||
58 | terminates at the first `*/` found. | ||
59 | |||
60 | * Values are assigned with the syntax `key = value` (whitespace doesn't | ||
61 | matter). The value can be any primitive: a string, number, boolean, | ||
62 | object, or list. | ||
63 | |||
64 | * Strings are double-quoted and can contain any UTF-8 characters. | ||
65 | Example: `"Hello, World"` | ||
66 | |||
67 | * Multi-line strings start with `<<EOF` at the end of a line, and end | ||
68 | with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)). | ||
69 | Any text may be used in place of `EOF`. Example: | ||
70 | ``` | ||
71 | <<FOO | ||
72 | hello | ||
73 | world | ||
74 | FOO | ||
75 | ``` | ||
76 | |||
77 | * Numbers are assumed to be base 10. If you prefix a number with 0x, | ||
78 | it is treated as a hexadecimal. If it is prefixed with 0, it is | ||
79 | treated as an octal. Numbers can be in scientific notation: "1e10". | ||
80 | |||
81 | * Boolean values: `true`, `false` | ||
82 | |||
83 | * Arrays can be made by wrapping it in `[]`. Example: | ||
84 | `["foo", "bar", 42]`. Arrays can contain primitives, | ||
85 | other arrays, and objects. As an alternative, lists | ||
86 | of objects can be created with repeated blocks, using | ||
87 | this structure: | ||
88 | |||
89 | ```hcl | ||
90 | service { | ||
91 | key = "value" | ||
92 | } | ||
93 | |||
94 | service { | ||
95 | key = "value" | ||
96 | } | ||
97 | ``` | ||
98 | |||
99 | Objects and nested objects are created using the structure shown below: | ||
100 | |||
101 | ``` | ||
102 | variable "ami" { | ||
103 | description = "the AMI to use" | ||
104 | } | ||
105 | ``` | ||
106 | This would be equivalent to the following json: | ||
107 | ``` json | ||
108 | { | ||
109 | "variable": { | ||
110 | "ami": { | ||
111 | "description": "the AMI to use" | ||
112 | } | ||
113 | } | ||
114 | } | ||
115 | ``` | ||
116 | |||
117 | ## Thanks | ||
118 | |||
119 | Thanks to: | ||
120 | |||
121 | * [@vstakhov](https://github.com/vstakhov) - The original libucl parser | ||
122 | and syntax that HCL was based off of. | ||
123 | |||
124 | * [@fatih](https://github.com/fatih) - The rewritten HCL parser | ||
125 | in pure Go (no goyacc) and support for a printer. | ||
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml new file mode 100644 index 0000000..4db0b71 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/appveyor.yml | |||
@@ -0,0 +1,19 @@ | |||
1 | version: "build-{branch}-{build}" | ||
2 | image: Visual Studio 2015 | ||
3 | clone_folder: c:\gopath\src\github.com\hashicorp\hcl | ||
4 | environment: | ||
5 | GOPATH: c:\gopath | ||
6 | init: | ||
7 | - git config --global core.autocrlf false | ||
8 | install: | ||
9 | - cmd: >- | ||
10 | echo %Path% | ||
11 | |||
12 | go version | ||
13 | |||
14 | go env | ||
15 | |||
16 | go get -t ./... | ||
17 | |||
18 | build_script: | ||
19 | - cmd: go test -v ./... | ||
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go new file mode 100644 index 0000000..0b39c1b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/decoder.go | |||
@@ -0,0 +1,724 @@ | |||
1 | package hcl | ||
2 | |||
3 | import ( | ||
4 | "errors" | ||
5 | "fmt" | ||
6 | "reflect" | ||
7 | "sort" | ||
8 | "strconv" | ||
9 | "strings" | ||
10 | |||
11 | "github.com/hashicorp/hcl/hcl/ast" | ||
12 | "github.com/hashicorp/hcl/hcl/parser" | ||
13 | "github.com/hashicorp/hcl/hcl/token" | ||
14 | ) | ||
15 | |||
16 | // This is the tag to use with structures to have settings for HCL | ||
17 | const tagName = "hcl" | ||
18 | |||
19 | var ( | ||
20 | // nodeType holds a reference to the type of ast.Node | ||
21 | nodeType reflect.Type = findNodeType() | ||
22 | ) | ||
23 | |||
24 | // Unmarshal accepts a byte slice as input and writes the | ||
25 | // data to the value pointed to by v. | ||
26 | func Unmarshal(bs []byte, v interface{}) error { | ||
27 | root, err := parse(bs) | ||
28 | if err != nil { | ||
29 | return err | ||
30 | } | ||
31 | |||
32 | return DecodeObject(v, root) | ||
33 | } | ||
34 | |||
35 | // Decode reads the given input and decodes it into the structure | ||
36 | // given by `out`. | ||
37 | func Decode(out interface{}, in string) error { | ||
38 | obj, err := Parse(in) | ||
39 | if err != nil { | ||
40 | return err | ||
41 | } | ||
42 | |||
43 | return DecodeObject(out, obj) | ||
44 | } | ||
45 | |||
46 | // DecodeObject is a lower-level version of Decode. It decodes a | ||
47 | // raw Object into the given output. | ||
48 | func DecodeObject(out interface{}, n ast.Node) error { | ||
49 | val := reflect.ValueOf(out) | ||
50 | if val.Kind() != reflect.Ptr { | ||
51 | return errors.New("result must be a pointer") | ||
52 | } | ||
53 | |||
54 | // If we have the file, we really decode the root node | ||
55 | if f, ok := n.(*ast.File); ok { | ||
56 | n = f.Node | ||
57 | } | ||
58 | |||
59 | var d decoder | ||
60 | return d.decode("root", n, val.Elem()) | ||
61 | } | ||
62 | |||
63 | type decoder struct { | ||
64 | stack []reflect.Kind | ||
65 | } | ||
66 | |||
67 | func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { | ||
68 | k := result | ||
69 | |||
70 | // If we have an interface with a valid value, we use that | ||
71 | // for the check. | ||
72 | if result.Kind() == reflect.Interface { | ||
73 | elem := result.Elem() | ||
74 | if elem.IsValid() { | ||
75 | k = elem | ||
76 | } | ||
77 | } | ||
78 | |||
79 | // Push current onto stack unless it is an interface. | ||
80 | if k.Kind() != reflect.Interface { | ||
81 | d.stack = append(d.stack, k.Kind()) | ||
82 | |||
83 | // Schedule a pop | ||
84 | defer func() { | ||
85 | d.stack = d.stack[:len(d.stack)-1] | ||
86 | }() | ||
87 | } | ||
88 | |||
89 | switch k.Kind() { | ||
90 | case reflect.Bool: | ||
91 | return d.decodeBool(name, node, result) | ||
92 | case reflect.Float64: | ||
93 | return d.decodeFloat(name, node, result) | ||
94 | case reflect.Int, reflect.Int32, reflect.Int64: | ||
95 | return d.decodeInt(name, node, result) | ||
96 | case reflect.Interface: | ||
97 | // When we see an interface, we make our own thing | ||
98 | return d.decodeInterface(name, node, result) | ||
99 | case reflect.Map: | ||
100 | return d.decodeMap(name, node, result) | ||
101 | case reflect.Ptr: | ||
102 | return d.decodePtr(name, node, result) | ||
103 | case reflect.Slice: | ||
104 | return d.decodeSlice(name, node, result) | ||
105 | case reflect.String: | ||
106 | return d.decodeString(name, node, result) | ||
107 | case reflect.Struct: | ||
108 | return d.decodeStruct(name, node, result) | ||
109 | default: | ||
110 | return &parser.PosError{ | ||
111 | Pos: node.Pos(), | ||
112 | Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), | ||
113 | } | ||
114 | } | ||
115 | } | ||
116 | |||
117 | func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { | ||
118 | switch n := node.(type) { | ||
119 | case *ast.LiteralType: | ||
120 | if n.Token.Type == token.BOOL { | ||
121 | v, err := strconv.ParseBool(n.Token.Text) | ||
122 | if err != nil { | ||
123 | return err | ||
124 | } | ||
125 | |||
126 | result.Set(reflect.ValueOf(v)) | ||
127 | return nil | ||
128 | } | ||
129 | } | ||
130 | |||
131 | return &parser.PosError{ | ||
132 | Pos: node.Pos(), | ||
133 | Err: fmt.Errorf("%s: unknown type %T", name, node), | ||
134 | } | ||
135 | } | ||
136 | |||
137 | func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { | ||
138 | switch n := node.(type) { | ||
139 | case *ast.LiteralType: | ||
140 | if n.Token.Type == token.FLOAT { | ||
141 | v, err := strconv.ParseFloat(n.Token.Text, 64) | ||
142 | if err != nil { | ||
143 | return err | ||
144 | } | ||
145 | |||
146 | result.Set(reflect.ValueOf(v)) | ||
147 | return nil | ||
148 | } | ||
149 | } | ||
150 | |||
151 | return &parser.PosError{ | ||
152 | Pos: node.Pos(), | ||
153 | Err: fmt.Errorf("%s: unknown type %T", name, node), | ||
154 | } | ||
155 | } | ||
156 | |||
157 | func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { | ||
158 | switch n := node.(type) { | ||
159 | case *ast.LiteralType: | ||
160 | switch n.Token.Type { | ||
161 | case token.NUMBER: | ||
162 | v, err := strconv.ParseInt(n.Token.Text, 0, 0) | ||
163 | if err != nil { | ||
164 | return err | ||
165 | } | ||
166 | |||
167 | if result.Kind() == reflect.Interface { | ||
168 | result.Set(reflect.ValueOf(int(v))) | ||
169 | } else { | ||
170 | result.SetInt(v) | ||
171 | } | ||
172 | return nil | ||
173 | case token.STRING: | ||
174 | v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) | ||
175 | if err != nil { | ||
176 | return err | ||
177 | } | ||
178 | |||
179 | if result.Kind() == reflect.Interface { | ||
180 | result.Set(reflect.ValueOf(int(v))) | ||
181 | } else { | ||
182 | result.SetInt(v) | ||
183 | } | ||
184 | return nil | ||
185 | } | ||
186 | } | ||
187 | |||
188 | return &parser.PosError{ | ||
189 | Pos: node.Pos(), | ||
190 | Err: fmt.Errorf("%s: unknown type %T", name, node), | ||
191 | } | ||
192 | } | ||
193 | |||
// decodeInterface decodes node into an interface-typed result. It picks a
// concrete Go container based on the node's type (map, slice, bool, float,
// int, or string), sets it on the result, then re-runs decoding so the
// container gets populated.
func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
	// When we see an ast.Node, we retain the value to enable deferred decoding.
	// Very useful in situations where we want to preserve ast.Node information
	// like Pos
	if result.Type() == nodeType && result.CanSet() {
		result.Set(reflect.ValueOf(node))
		return nil
	}

	var set reflect.Value
	// NOTE(review): redecode is never set to false anywhere in this
	// function, so the trailing re-decode always runs.
	redecode := true

	// For testing types, ObjectType should just be treated as a list. We
	// set this to a temporary var because we want to pass in the real node.
	testNode := node
	if ot, ok := node.(*ast.ObjectType); ok {
		testNode = ot.List
	}

	switch n := testNode.(type) {
	case *ast.ObjectList:
		// If we're at the root or we're directly within a slice, then we
		// decode objects into map[string]interface{}, otherwise we decode
		// them into lists.
		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
			var temp map[string]interface{}
			tempVal := reflect.ValueOf(temp)
			result := reflect.MakeMap(
				reflect.MapOf(
					reflect.TypeOf(""),
					tempVal.Type().Elem()))

			set = result
		} else {
			var temp []map[string]interface{}
			tempVal := reflect.ValueOf(temp)
			result := reflect.MakeSlice(
				reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
			set = result
		}
	case *ast.ObjectType:
		// If we're at the root or we're directly within a slice, then we
		// decode objects into map[string]interface{}, otherwise we decode
		// them into lists.
		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
			var temp map[string]interface{}
			tempVal := reflect.ValueOf(temp)
			result := reflect.MakeMap(
				reflect.MapOf(
					reflect.TypeOf(""),
					tempVal.Type().Elem()))

			set = result
		} else {
			var temp []map[string]interface{}
			tempVal := reflect.ValueOf(temp)
			result := reflect.MakeSlice(
				reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
			set = result
		}
	case *ast.ListType:
		// Lists always decode into []interface{}.
		var temp []interface{}
		tempVal := reflect.ValueOf(temp)
		result := reflect.MakeSlice(
			reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
		set = result
	case *ast.LiteralType:
		// Scalars get a fresh addressable zero value of the matching Go type.
		switch n.Token.Type {
		case token.BOOL:
			var result bool
			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
		case token.FLOAT:
			var result float64
			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
		case token.NUMBER:
			var result int
			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
		case token.STRING, token.HEREDOC:
			set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
		default:
			return &parser.PosError{
				Pos: node.Pos(),
				Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
			}
		}
	default:
		return fmt.Errorf(
			"%s: cannot decode into interface: %T",
			name, node)
	}

	// Set the result to what its supposed to be, then reset
	// result so we don't reflect into this method anymore.
	result.Set(set)

	if redecode {
		// Revisit the node so that we can use the newly instantiated
		// thing and populate it.
		if err := d.decode(name, node, result); err != nil {
			return err
		}
	}

	return nil
}
299 | |||
// decodeMap decodes an object node into a map with string keys. Each
// object item becomes one map entry keyed by its first key; repeated keys
// with nested key paths are gathered via Filter and decoded together.
func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
	// A lone item is wrapped so the rest of the function only deals with
	// an ObjectList.
	if item, ok := node.(*ast.ObjectItem); ok {
		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
	}

	if ot, ok := node.(*ast.ObjectType); ok {
		node = ot.List
	}

	n, ok := node.(*ast.ObjectList)
	if !ok {
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
		}
	}

	// If we have an interface, then we can address the interface,
	// but not the slice itself, so get the element but set the interface
	set := result
	if result.Kind() == reflect.Interface {
		result = result.Elem()
	}

	resultType := result.Type()
	resultElemType := resultType.Elem()
	resultKeyType := resultType.Key()
	if resultKeyType.Kind() != reflect.String {
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("%s: map must have string keys", name),
		}
	}

	// Make a map if it is nil
	resultMap := result
	if result.IsNil() {
		resultMap = reflect.MakeMap(
			reflect.MapOf(resultKeyType, resultElemType))
	}

	// Go through each element and decode it.
	done := make(map[string]struct{})
	for _, item := range n.Items {
		if item.Val == nil {
			continue
		}

		// github.com/hashicorp/terraform/issue/5740
		if len(item.Keys) == 0 {
			return &parser.PosError{
				Pos: node.Pos(),
				Err: fmt.Errorf("%s: map must have string keys", name),
			}
		}

		// Get the key we're dealing with, which is the first item
		keyStr := item.Keys[0].Token.Value().(string)

		// If we've already processed this key, then ignore it
		if _, ok := done[keyStr]; ok {
			continue
		}

		// Determine the value. If we have more than one key, then we
		// get the objectlist of only these keys.
		itemVal := item.Val
		if len(item.Keys) > 1 {
			itemVal = n.Filter(keyStr)
			done[keyStr] = struct{}{}
		}

		// Make the field name
		fieldName := fmt.Sprintf("%s.%s", name, keyStr)

		// Get the key/value as reflection values
		key := reflect.ValueOf(keyStr)
		val := reflect.Indirect(reflect.New(resultElemType))

		// If we have a pre-existing value in the map, use that
		// so repeated keys merge instead of overwriting.
		oldVal := resultMap.MapIndex(key)
		if oldVal.IsValid() {
			val.Set(oldVal)
		}

		// Decode!
		if err := d.decode(fieldName, itemVal, val); err != nil {
			return err
		}

		// Set the value on the map
		resultMap.SetMapIndex(key, val)
	}

	// Set the final map if we can
	set.Set(resultMap)
	return nil
}
398 | |||
399 | func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { | ||
400 | // Create an element of the concrete (non pointer) type and decode | ||
401 | // into that. Then set the value of the pointer to this type. | ||
402 | resultType := result.Type() | ||
403 | resultElemType := resultType.Elem() | ||
404 | val := reflect.New(resultElemType) | ||
405 | if err := d.decode(name, node, reflect.Indirect(val)); err != nil { | ||
406 | return err | ||
407 | } | ||
408 | |||
409 | result.Set(val) | ||
410 | return nil | ||
411 | } | ||
412 | |||
// decodeSlice decodes an object list, object type, or list node into a
// slice-typed result, appending one decoded element per item.
func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
	// If we have an interface, then we can address the interface,
	// but not the slice itself, so get the element but set the interface
	set := result
	if result.Kind() == reflect.Interface {
		result = result.Elem()
	}
	// Create the slice if it is nil
	resultType := result.Type()
	resultElemType := resultType.Elem()
	if result.IsNil() {
		resultSliceType := reflect.SliceOf(resultElemType)
		result = reflect.MakeSlice(
			resultSliceType, 0, 0)
	}

	// Figure out the items we'll be copying into the slice
	var items []ast.Node
	switch n := node.(type) {
	case *ast.ObjectList:
		items = make([]ast.Node, len(n.Items))
		for i, item := range n.Items {
			items[i] = item
		}
	case *ast.ObjectType:
		// A single object decodes as a one-element slice.
		items = []ast.Node{n}
	case *ast.ListType:
		items = n.List
	default:
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("unknown slice type: %T", node),
		}
	}

	for i, item := range items {
		fieldName := fmt.Sprintf("%s[%d]", name, i)

		// Decode
		val := reflect.Indirect(reflect.New(resultElemType))

		// if item is an object that was decoded from ambiguous JSON and
		// flattened, make sure it's expanded if it needs to decode into a
		// defined structure.
		item := expandObject(item, val)

		if err := d.decode(fieldName, item, val); err != nil {
			return err
		}

		// Append it onto the slice
		result = reflect.Append(result, val)
	}

	set.Set(result)
	return nil
}
470 | |||
// expandObject detects if an ambiguous JSON object was flattened to a List which
// should be decoded into a struct, and expands the ast to properly decode.
//
// NOTE: when expansion applies, this mutates the passed item's Keys slice
// (the first key is stripped and re-wrapped in the new outer item).
func expandObject(node ast.Node, result reflect.Value) ast.Node {
	// Only flattened ObjectItems are candidates for expansion.
	item, ok := node.(*ast.ObjectItem)
	if !ok {
		return node
	}

	elemType := result.Type()

	// our target type must be a struct (directly or behind a pointer)
	switch elemType.Kind() {
	case reflect.Ptr:
		switch elemType.Elem().Kind() {
		case reflect.Struct:
			//OK
		default:
			return node
		}
	case reflect.Struct:
		//OK
	default:
		return node
	}

	// A list value will have a key and field name. If it had more fields,
	// it wouldn't have been flattened.
	if len(item.Keys) != 2 {
		return node
	}

	keyToken := item.Keys[0].Token
	item.Keys = item.Keys[1:]

	// we need to un-flatten the ast enough to decode: wrap the remaining
	// item in an object keyed by the stripped first key.
	newNode := &ast.ObjectItem{
		Keys: []*ast.ObjectKey{
			&ast.ObjectKey{
				Token: keyToken,
			},
		},
		Val: &ast.ObjectType{
			List: &ast.ObjectList{
				Items: []*ast.ObjectItem{item},
			},
		},
	}

	return newNode
}
521 | |||
522 | func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { | ||
523 | switch n := node.(type) { | ||
524 | case *ast.LiteralType: | ||
525 | switch n.Token.Type { | ||
526 | case token.NUMBER: | ||
527 | result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) | ||
528 | return nil | ||
529 | case token.STRING, token.HEREDOC: | ||
530 | result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) | ||
531 | return nil | ||
532 | } | ||
533 | } | ||
534 | |||
535 | return &parser.PosError{ | ||
536 | Pos: node.Pos(), | ||
537 | Err: fmt.Errorf("%s: unknown type for string %T", name, node), | ||
538 | } | ||
539 | } | ||
540 | |||
541 | func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { | ||
542 | var item *ast.ObjectItem | ||
543 | if it, ok := node.(*ast.ObjectItem); ok { | ||
544 | item = it | ||
545 | node = it.Val | ||
546 | } | ||
547 | |||
548 | if ot, ok := node.(*ast.ObjectType); ok { | ||
549 | node = ot.List | ||
550 | } | ||
551 | |||
552 | // Handle the special case where the object itself is a literal. Previously | ||
553 | // the yacc parser would always ensure top-level elements were arrays. The new | ||
554 | // parser does not make the same guarantees, thus we need to convert any | ||
555 | // top-level literal elements into a list. | ||
556 | if _, ok := node.(*ast.LiteralType); ok && item != nil { | ||
557 | node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} | ||
558 | } | ||
559 | |||
560 | list, ok := node.(*ast.ObjectList) | ||
561 | if !ok { | ||
562 | return &parser.PosError{ | ||
563 | Pos: node.Pos(), | ||
564 | Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), | ||
565 | } | ||
566 | } | ||
567 | |||
568 | // This slice will keep track of all the structs we'll be decoding. | ||
569 | // There can be more than one struct if there are embedded structs | ||
570 | // that are squashed. | ||
571 | structs := make([]reflect.Value, 1, 5) | ||
572 | structs[0] = result | ||
573 | |||
574 | // Compile the list of all the fields that we're going to be decoding | ||
575 | // from all the structs. | ||
576 | fields := make(map[*reflect.StructField]reflect.Value) | ||
577 | for len(structs) > 0 { | ||
578 | structVal := structs[0] | ||
579 | structs = structs[1:] | ||
580 | |||
581 | structType := structVal.Type() | ||
582 | for i := 0; i < structType.NumField(); i++ { | ||
583 | fieldType := structType.Field(i) | ||
584 | tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") | ||
585 | |||
586 | // Ignore fields with tag name "-" | ||
587 | if tagParts[0] == "-" { | ||
588 | continue | ||
589 | } | ||
590 | |||
591 | if fieldType.Anonymous { | ||
592 | fieldKind := fieldType.Type.Kind() | ||
593 | if fieldKind != reflect.Struct { | ||
594 | return &parser.PosError{ | ||
595 | Pos: node.Pos(), | ||
596 | Err: fmt.Errorf("%s: unsupported type to struct: %s", | ||
597 | fieldType.Name, fieldKind), | ||
598 | } | ||
599 | } | ||
600 | |||
601 | // We have an embedded field. We "squash" the fields down | ||
602 | // if specified in the tag. | ||
603 | squash := false | ||
604 | for _, tag := range tagParts[1:] { | ||
605 | if tag == "squash" { | ||
606 | squash = true | ||
607 | break | ||
608 | } | ||
609 | } | ||
610 | |||
611 | if squash { | ||
612 | structs = append( | ||
613 | structs, result.FieldByName(fieldType.Name)) | ||
614 | continue | ||
615 | } | ||
616 | } | ||
617 | |||
618 | // Normal struct field, store it away | ||
619 | fields[&fieldType] = structVal.Field(i) | ||
620 | } | ||
621 | } | ||
622 | |||
623 | usedKeys := make(map[string]struct{}) | ||
624 | decodedFields := make([]string, 0, len(fields)) | ||
625 | decodedFieldsVal := make([]reflect.Value, 0) | ||
626 | unusedKeysVal := make([]reflect.Value, 0) | ||
627 | for fieldType, field := range fields { | ||
628 | if !field.IsValid() { | ||
629 | // This should never happen | ||
630 | panic("field is not valid") | ||
631 | } | ||
632 | |||
633 | // If we can't set the field, then it is unexported or something, | ||
634 | // and we just continue onwards. | ||
635 | if !field.CanSet() { | ||
636 | continue | ||
637 | } | ||
638 | |||
639 | fieldName := fieldType.Name | ||
640 | |||
641 | tagValue := fieldType.Tag.Get(tagName) | ||
642 | tagParts := strings.SplitN(tagValue, ",", 2) | ||
643 | if len(tagParts) >= 2 { | ||
644 | switch tagParts[1] { | ||
645 | case "decodedFields": | ||
646 | decodedFieldsVal = append(decodedFieldsVal, field) | ||
647 | continue | ||
648 | case "key": | ||
649 | if item == nil { | ||
650 | return &parser.PosError{ | ||
651 | Pos: node.Pos(), | ||
652 | Err: fmt.Errorf("%s: %s asked for 'key', impossible", | ||
653 | name, fieldName), | ||
654 | } | ||
655 | } | ||
656 | |||
657 | field.SetString(item.Keys[0].Token.Value().(string)) | ||
658 | continue | ||
659 | case "unusedKeys": | ||
660 | unusedKeysVal = append(unusedKeysVal, field) | ||
661 | continue | ||
662 | } | ||
663 | } | ||
664 | |||
665 | if tagParts[0] != "" { | ||
666 | fieldName = tagParts[0] | ||
667 | } | ||
668 | |||
669 | // Determine the element we'll use to decode. If it is a single | ||
670 | // match (only object with the field), then we decode it exactly. | ||
671 | // If it is a prefix match, then we decode the matches. | ||
672 | filter := list.Filter(fieldName) | ||
673 | |||
674 | prefixMatches := filter.Children() | ||
675 | matches := filter.Elem() | ||
676 | if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { | ||
677 | continue | ||
678 | } | ||
679 | |||
680 | // Track the used key | ||
681 | usedKeys[fieldName] = struct{}{} | ||
682 | |||
683 | // Create the field name and decode. We range over the elements | ||
684 | // because we actually want the value. | ||
685 | fieldName = fmt.Sprintf("%s.%s", name, fieldName) | ||
686 | if len(prefixMatches.Items) > 0 { | ||
687 | if err := d.decode(fieldName, prefixMatches, field); err != nil { | ||
688 | return err | ||
689 | } | ||
690 | } | ||
691 | for _, match := range matches.Items { | ||
692 | var decodeNode ast.Node = match.Val | ||
693 | if ot, ok := decodeNode.(*ast.ObjectType); ok { | ||
694 | decodeNode = &ast.ObjectList{Items: ot.List.Items} | ||
695 | } | ||
696 | |||
697 | if err := d.decode(fieldName, decodeNode, field); err != nil { | ||
698 | return err | ||
699 | } | ||
700 | } | ||
701 | |||
702 | decodedFields = append(decodedFields, fieldType.Name) | ||
703 | } | ||
704 | |||
705 | if len(decodedFieldsVal) > 0 { | ||
706 | // Sort it so that it is deterministic | ||
707 | sort.Strings(decodedFields) | ||
708 | |||
709 | for _, v := range decodedFieldsVal { | ||
710 | v.Set(reflect.ValueOf(decodedFields)) | ||
711 | } | ||
712 | } | ||
713 | |||
714 | return nil | ||
715 | } | ||
716 | |||
717 | // findNodeType returns the type of ast.Node | ||
718 | func findNodeType() reflect.Type { | ||
719 | var nodeContainer struct { | ||
720 | Node ast.Node | ||
721 | } | ||
722 | value := reflect.ValueOf(nodeContainer).FieldByName("Node") | ||
723 | return value.Type() | ||
724 | } | ||
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 0000000..575a20b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go | |||
@@ -0,0 +1,11 @@ | |||
1 | // Package hcl decodes HCL into usable Go structures. | ||
2 | // | ||
3 | // hcl input can come in either pure HCL format or JSON format. | ||
4 | // It can be parsed into an AST, and then decoded into a structure, | ||
5 | // or it can be decoded directly from a string into a structure. | ||
6 | // | ||
7 | // If you choose to parse HCL into a raw AST, the benefit is that you | ||
8 | // can write custom visitor implementations to implement custom | ||
9 | // semantic checks. By default, HCL does not perform any semantic | ||
10 | // checks. | ||
11 | package hcl | ||
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 0000000..6e5ef65 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go | |||
@@ -0,0 +1,219 @@ | |||
1 | // Package ast declares the types used to represent syntax trees for HCL | ||
2 | // (HashiCorp Configuration Language) | ||
3 | package ast | ||
4 | |||
5 | import ( | ||
6 | "fmt" | ||
7 | "strings" | ||
8 | |||
9 | "github.com/hashicorp/hcl/hcl/token" | ||
10 | ) | ||
11 | |||
// Node is an element in the abstract syntax tree.
type Node interface {
	node()
	Pos() token.Pos
}

// The unexported node() marker methods below restrict Node implementations
// to the types declared in this package.
func (File) node() {}
func (ObjectList) node() {}
func (ObjectKey) node() {}
func (ObjectItem) node() {}
func (Comment) node() {}
func (CommentGroup) node() {}
func (ObjectType) node() {}
func (LiteralType) node() {}
func (ListType) node() {}
27 | |||
// File represents a single HCL file
type File struct {
	Node Node // usually a *ObjectList
	Comments []*CommentGroup // list of all comments in the source
}

// Pos returns the position of the file's root node.
func (f *File) Pos() token.Pos {
	return f.Node.Pos()
}
37 | |||
// ObjectList represents a list of ObjectItems. An HCL file itself is an
// ObjectList.
type ObjectList struct {
	Items []*ObjectItem
}

// Add appends an item to the list.
func (o *ObjectList) Add(item *ObjectItem) {
	o.Items = append(o.Items, item)
}

// Filter filters out the objects with the given key list as a prefix.
//
// The returned list of objects contain ObjectItems where the keys have
// this prefix already stripped off. This might result in objects with
// zero-length key lists if they have no children.
//
// If no matches are found, an empty ObjectList (non-nil) is returned.
//
// Key comparison is case-insensitive (strings.EqualFold).
func (o *ObjectList) Filter(keys ...string) *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		// If there aren't enough keys, then ignore this
		if len(item.Keys) < len(keys) {
			continue
		}

		match := true
		for i, key := range item.Keys[:len(keys)] {
			key := key.Token.Value().(string)
			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
				match = false
				break
			}
		}
		if !match {
			continue
		}

		// Strip off the prefix from the children
		newItem := *item
		newItem.Keys = newItem.Keys[len(keys):]
		result.Add(&newItem)
	}

	return &result
}

// Children returns further nested objects (key length > 0) within this
// ObjectList. This should be used with Filter to get at child items.
func (o *ObjectList) Children() *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		if len(item.Keys) > 0 {
			result.Add(item)
		}
	}

	return &result
}

// Elem returns items in the list that are direct element assignments
// (key length == 0). This should be used with Filter to get at elements.
func (o *ObjectList) Elem() *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		if len(item.Keys) == 0 {
			result.Add(item)
		}
	}

	return &result
}

// Pos returns the position of the first item; it panics on an empty list.
func (o *ObjectList) Pos() token.Pos {
	return o.Items[0].Pos()
}
114 | |||
// ObjectItem represents a HCL Object Item. An item is represented with a key
// (or keys). It can be an assignment or an object (both normal and nested)
type ObjectItem struct {
	// keys is only one length long if it's of type assignment. If it's a
	// nested object it can be larger than one. In that case "assign" is
	// invalid as there is no assignments for a nested object.
	Keys []*ObjectKey

	// assign contains the position of "=", if any
	Assign token.Pos

	// val is the item itself. It can be an object,list, number, bool or a
	// string. If key length is larger than one, val can be only of type
	// Object.
	Val Node

	LeadComment *CommentGroup // associated lead comment
	LineComment *CommentGroup // associated line comment
}

// Pos returns the position of the item's first key, or the zero position
// when the item has no keys.
func (o *ObjectItem) Pos() token.Pos {
	// I'm not entirely sure what causes this, but removing this causes
	// a test failure. We should investigate at some point.
	if len(o.Keys) == 0 {
		return token.Pos{}
	}

	return o.Keys[0].Pos()
}
144 | |||
// ObjectKey represents an object item key: either an identifier or a
// string token.
type ObjectKey struct {
	Token token.Token
}

// Pos returns the position of the key's token.
func (o *ObjectKey) Pos() token.Pos {
	return o.Token.Pos
}
153 | |||
// LiteralType represents a literal of basic type. Valid types are:
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
type LiteralType struct {
	Token token.Token

	// comment types, only used when in a list
	LeadComment *CommentGroup
	LineComment *CommentGroup
}

// Pos returns the position of the literal's token.
func (l *LiteralType) Pos() token.Pos {
	return l.Token.Pos
}
167 | |||
// ListType represents a HCL List type
type ListType struct {
	Lbrack token.Pos // position of "["
	Rbrack token.Pos // position of "]"
	List []Node // the elements in lexical order
}

// Pos returns the position of the opening bracket.
func (l *ListType) Pos() token.Pos {
	return l.Lbrack
}

// Add appends a node to the list's elements.
func (l *ListType) Add(node Node) {
	l.List = append(l.List, node)
}
182 | |||
// ObjectType represents a HCL Object Type
type ObjectType struct {
	Lbrace token.Pos // position of "{"
	Rbrace token.Pos // position of "}"
	List *ObjectList // the nodes in lexical order
}

// Pos returns the position of the opening brace.
func (o *ObjectType) Pos() token.Pos {
	return o.Lbrace
}
193 | |||
// Comment node represents a single //, # style or /* style comment
type Comment struct {
	Start token.Pos // position of / or #
	Text string
}

// Pos returns the starting position of the comment.
func (c *Comment) Pos() token.Pos {
	return c.Start
}
203 | |||
// CommentGroup node represents a sequence of comments with no other tokens and
// no empty lines between.
type CommentGroup struct {
	List []*Comment // len(List) > 0
}

// Pos returns the position of the first comment in the group.
func (c *CommentGroup) Pos() token.Pos {
	return c.List[0].Pos()
}
213 | |||
//-------------------------------------------------------------------
// GoStringer
//-------------------------------------------------------------------

// GoString implements fmt.GoStringer so %#v prints the dereferenced value.
func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }

// GoString implements fmt.GoStringer so %#v prints the dereferenced value.
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 0000000..ba07ad4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go | |||
@@ -0,0 +1,52 @@ | |||
1 | package ast | ||
2 | |||
3 | import "fmt" | ||
4 | |||
// WalkFunc describes a function to be called for each node during a Walk. The
// returned node can be used to rewrite the AST. Walking stops if the returned
// bool is false.
type WalkFunc func(Node) (Node, bool)

// Walk traverses an AST in depth-first order: It starts by calling fn(node);
// node must not be nil. If fn returns true, Walk invokes fn recursively for
// each of the non-nil children of node, followed by a call of fn(nil). The
// returned node of fn can be used to rewrite the passed node to fn.
func Walk(node Node, fn WalkFunc) Node {
	rewritten, ok := fn(node)
	if !ok {
		// fn asked to stop; do not descend into children.
		return rewritten
	}

	// Recurse into children, rewriting each child in place with the
	// node returned by the walk.
	switch n := node.(type) {
	case *File:
		n.Node = Walk(n.Node, fn)
	case *ObjectList:
		for i, item := range n.Items {
			n.Items[i] = Walk(item, fn).(*ObjectItem)
		}
	case *ObjectKey:
		// nothing to do
	case *ObjectItem:
		for i, k := range n.Keys {
			n.Keys[i] = Walk(k, fn).(*ObjectKey)
		}

		if n.Val != nil {
			n.Val = Walk(n.Val, fn)
		}
	case *LiteralType:
		// nothing to do
	case *ListType:
		for i, l := range n.List {
			n.List[i] = Walk(l, fn)
		}
	case *ObjectType:
		n.List = Walk(n.List, fn).(*ObjectList)
	default:
		// should we panic here?
		fmt.Printf("unknown type: %T\n", n)
	}

	// Signal to fn that this node's children are done.
	fn(nil)
	return rewritten
}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 0000000..5c99381 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go | |||
@@ -0,0 +1,17 @@ | |||
1 | package parser | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/hashicorp/hcl/hcl/token" | ||
7 | ) | ||
8 | |||
// PosError is a parse error that contains a position.
type PosError struct {
	Pos token.Pos
	Err error
}

// Error implements the error interface, prefixing the wrapped error's
// message with its source position.
func (e *PosError) Error() string {
	return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 0000000..b488180 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go | |||
@@ -0,0 +1,520 @@ | |||
1 | // Package parser implements a parser for HCL (HashiCorp Configuration | ||
2 | // Language) | ||
3 | package parser | ||
4 | |||
5 | import ( | ||
6 | "bytes" | ||
7 | "errors" | ||
8 | "fmt" | ||
9 | "strings" | ||
10 | |||
11 | "github.com/hashicorp/hcl/hcl/ast" | ||
12 | "github.com/hashicorp/hcl/hcl/scanner" | ||
13 | "github.com/hashicorp/hcl/hcl/token" | ||
14 | ) | ||
15 | |||
// Parser holds the state needed to turn a scanner's token stream into an
// abstract syntax tree.
type Parser struct {
	sc *scanner.Scanner // token source

	// Last read token
	tok       token.Token
	commaPrev token.Token

	comments    []*ast.CommentGroup // all comment groups collected so far
	leadComment *ast.CommentGroup   // last lead comment
	lineComment *ast.CommentGroup   // last line comment

	enableTrace bool // when true, printTrace emits parse traces
	indent      int  // current trace indentation depth
	n           int  // buffer size (max = 1)
}
31 | |||
32 | func newParser(src []byte) *Parser { | ||
33 | return &Parser{ | ||
34 | sc: scanner.New(src), | ||
35 | } | ||
36 | } | ||
37 | |||
38 | // Parse returns the fully parsed source and returns the abstract syntax tree. | ||
39 | func Parse(src []byte) (*ast.File, error) { | ||
40 | // normalize all line endings | ||
41 | // since the scanner and output only work with "\n" line endings, we may | ||
42 | // end up with dangling "\r" characters in the parsed data. | ||
43 | src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) | ||
44 | |||
45 | p := newParser(src) | ||
46 | return p.Parse() | ||
47 | } | ||
48 | |||
49 | var errEofToken = errors.New("EOF token found") | ||
50 | |||
51 | // Parse returns the fully parsed source and returns the abstract syntax tree. | ||
52 | func (p *Parser) Parse() (*ast.File, error) { | ||
53 | f := &ast.File{} | ||
54 | var err, scerr error | ||
55 | p.sc.Error = func(pos token.Pos, msg string) { | ||
56 | scerr = &PosError{Pos: pos, Err: errors.New(msg)} | ||
57 | } | ||
58 | |||
59 | f.Node, err = p.objectList(false) | ||
60 | if scerr != nil { | ||
61 | return nil, scerr | ||
62 | } | ||
63 | if err != nil { | ||
64 | return nil, err | ||
65 | } | ||
66 | |||
67 | f.Comments = p.comments | ||
68 | return f, nil | ||
69 | } | ||
70 | |||
71 | // objectList parses a list of items within an object (generally k/v pairs). | ||
72 | // The parameter" obj" tells this whether to we are within an object (braces: | ||
73 | // '{', '}') or just at the top level. If we're within an object, we end | ||
74 | // at an RBRACE. | ||
75 | func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { | ||
76 | defer un(trace(p, "ParseObjectList")) | ||
77 | node := &ast.ObjectList{} | ||
78 | |||
79 | for { | ||
80 | if obj { | ||
81 | tok := p.scan() | ||
82 | p.unscan() | ||
83 | if tok.Type == token.RBRACE { | ||
84 | break | ||
85 | } | ||
86 | } | ||
87 | |||
88 | n, err := p.objectItem() | ||
89 | if err == errEofToken { | ||
90 | break // we are finished | ||
91 | } | ||
92 | |||
93 | // we don't return a nil node, because might want to use already | ||
94 | // collected items. | ||
95 | if err != nil { | ||
96 | return node, err | ||
97 | } | ||
98 | |||
99 | node.Add(n) | ||
100 | |||
101 | // object lists can be optionally comma-delimited e.g. when a list of maps | ||
102 | // is being expressed, so a comma is allowed here - it's simply consumed | ||
103 | tok := p.scan() | ||
104 | if tok.Type != token.COMMA { | ||
105 | p.unscan() | ||
106 | } | ||
107 | } | ||
108 | return node, nil | ||
109 | } | ||
110 | |||
111 | func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { | ||
112 | endline = p.tok.Pos.Line | ||
113 | |||
114 | // count the endline if it's multiline comment, ie starting with /* | ||
115 | if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { | ||
116 | // don't use range here - no need to decode Unicode code points | ||
117 | for i := 0; i < len(p.tok.Text); i++ { | ||
118 | if p.tok.Text[i] == '\n' { | ||
119 | endline++ | ||
120 | } | ||
121 | } | ||
122 | } | ||
123 | |||
124 | comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} | ||
125 | p.tok = p.sc.Scan() | ||
126 | return | ||
127 | } | ||
128 | |||
129 | func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { | ||
130 | var list []*ast.Comment | ||
131 | endline = p.tok.Pos.Line | ||
132 | |||
133 | for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { | ||
134 | var comment *ast.Comment | ||
135 | comment, endline = p.consumeComment() | ||
136 | list = append(list, comment) | ||
137 | } | ||
138 | |||
139 | // add comment group to the comments list | ||
140 | comments = &ast.CommentGroup{List: list} | ||
141 | p.comments = append(p.comments, comments) | ||
142 | |||
143 | return | ||
144 | } | ||
145 | |||
// objectItem parses a single object item: one or more keys followed by a
// value introduced with '=' or a nested object opened with '{'.
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
	defer un(trace(p, "ParseObjectItem"))

	keys, err := p.objectKey()
	if len(keys) > 0 && err == errEofToken {
		// We ignore eof token here since it is an error if we didn't
		// receive a value (but we did receive a key) for the item.
		err = nil
	}
	if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
		// This is a strange boolean statement, but what it means is:
		// We have keys with no value, and we're likely in an object
		// (since RBrace ends an object). For this, we set err to nil so
		// we continue and get the error below of having the wrong value
		// type.
		err = nil

		// Reset the token type so we don't think it completed fine. See
		// objectType which uses p.tok.Type to check if we're done with
		// the object.
		p.tok.Type = token.EOF
	}
	if err != nil {
		return nil, err
	}

	o := &ast.ObjectItem{
		Keys: keys,
	}

	// Attach any pending lead comment (the comment block on the lines just
	// above this item) and clear it so it isn't attached twice.
	if p.leadComment != nil {
		o.LeadComment = p.leadComment
		p.leadComment = nil
	}

	// p.tok is the token that terminated objectKey: ASSIGN or LBRACE on
	// success, anything else is a malformed item.
	switch p.tok.Type {
	case token.ASSIGN:
		o.Assign = p.tok.Pos
		o.Val, err = p.object()
		if err != nil {
			return nil, err
		}
	case token.LBRACE:
		o.Val, err = p.objectType()
		if err != nil {
			return nil, err
		}
	default:
		keyStr := make([]string, 0, len(keys))
		for _, k := range keys {
			keyStr = append(keyStr, k.Token.Text)
		}

		return nil, fmt.Errorf(
			"key '%s' expected start of object ('{') or assignment ('=')",
			strings.Join(keyStr, " "))
	}

	// do a look-ahead for line comment; scanning may populate p.lineComment
	// as a side effect. Attach it only when the value ends on the same line
	// the item's first key started on.
	p.scan()
	if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
		o.LineComment = p.lineComment
		p.lineComment = nil
	}
	p.unscan()
	return o, nil
}
214 | |||
// objectKey parses an object key (one or more IDENT/STRING tokens) and
// returns the ObjectKey AST nodes. Parsing stops at the token that begins
// the value: ASSIGN or LBRACE, which is left in p.tok for the caller.
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
	keyCount := 0
	keys := make([]*ast.ObjectKey, 0)

	for {
		tok := p.scan()
		switch tok.Type {
		case token.EOF:
			// It is very important to also return the keys here as well as
			// the error. This is because we need to be able to tell if we
			// did parse keys prior to finding the EOF, or if we just found
			// a bare EOF.
			return keys, errEofToken
		case token.ASSIGN:
			// assignment or object only, but not nested objects. this is not
			// allowed: `foo bar = {}`
			if keyCount > 1 {
				return nil, &PosError{
					Pos: p.tok.Pos,
					Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
				}
			}

			// `= value` with no preceding key is a syntax error
			if keyCount == 0 {
				return nil, &PosError{
					Pos: p.tok.Pos,
					Err: errors.New("no object keys found!"),
				}
			}

			return keys, nil
		case token.LBRACE:
			var err error

			// If we have no keys, then it is a syntax error. i.e. {{}} is not
			// allowed.
			if len(keys) == 0 {
				err = &PosError{
					Pos: p.tok.Pos,
					Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
				}
			}

			// object
			return keys, err
		case token.IDENT, token.STRING:
			// accumulate another key component
			keyCount++
			keys = append(keys, &ast.ObjectKey{Token: p.tok})
		case token.ILLEGAL:
			return keys, &PosError{
				Pos: p.tok.Pos,
				Err: fmt.Errorf("illegal character"),
			}
		default:
			return keys, &PosError{
				Pos: p.tok.Pos,
				Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
			}
		}
	}
}
277 | |||
278 | // object parses any type of object, such as number, bool, string, object or | ||
279 | // list. | ||
280 | func (p *Parser) object() (ast.Node, error) { | ||
281 | defer un(trace(p, "ParseType")) | ||
282 | tok := p.scan() | ||
283 | |||
284 | switch tok.Type { | ||
285 | case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: | ||
286 | return p.literalType() | ||
287 | case token.LBRACE: | ||
288 | return p.objectType() | ||
289 | case token.LBRACK: | ||
290 | return p.listType() | ||
291 | case token.COMMENT: | ||
292 | // implement comment | ||
293 | case token.EOF: | ||
294 | return nil, errEofToken | ||
295 | } | ||
296 | |||
297 | return nil, &PosError{ | ||
298 | Pos: tok.Pos, | ||
299 | Err: fmt.Errorf("Unknown token: %+v", tok), | ||
300 | } | ||
301 | } | ||
302 | |||
// objectType parses an object type and returns a ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseObjectType"))

	// we assume that the currently scanned token is a LBRACE
	o := &ast.ObjectType{
		Lbrace: p.tok.Pos,
	}

	l, err := p.objectList(true)

	// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
	// not a RBRACE, it's a syntax error and we just return it.
	if err != nil && p.tok.Type != token.RBRACE {
		return nil, err
	}

	// No error, scan and expect the ending to be a brace
	if tok := p.scan(); tok.Type != token.RBRACE {
		return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
	}

	o.List = l
	o.Rbrace = p.tok.Pos // advanced via parseObjectList
	return o, nil
}
329 | |||
// listType parses a list type and returns a ListType AST. Elements may be
// literals, nested objects, or nested lists, optionally comma-separated.
func (p *Parser) listType() (*ast.ListType, error) {
	defer un(trace(p, "ParseListType"))

	// we assume that the currently scanned token is a LBRACK
	l := &ast.ListType{
		Lbrack: p.tok.Pos,
	}

	// needComma tracks whether the previous element requires a separator
	// (or the list end) before the next element may appear.
	needComma := false
	for {
		tok := p.scan()
		if needComma {
			switch tok.Type {
			case token.COMMA, token.RBRACK:
			default:
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error parsing list, expected comma or list end, got: %s",
						tok.Type),
				}
			}
		}
		switch tok.Type {
		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
			node, err := p.literalType()
			if err != nil {
				return nil, err
			}

			// If there is a lead comment, apply it
			if p.leadComment != nil {
				node.LeadComment = p.leadComment
				p.leadComment = nil
			}

			l.Add(node)
			needComma = true
		case token.COMMA:
			// get next list item or we are at the end
			// do a look-ahead for line comment; scanning may set
			// p.lineComment as a side effect, which belongs to the element
			// we just added.
			p.scan()
			if p.lineComment != nil && len(l.List) > 0 {
				lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
				if ok {
					lit.LineComment = p.lineComment
					l.List[len(l.List)-1] = lit
					p.lineComment = nil
				}
			}
			p.unscan()

			needComma = false
			continue
		case token.LBRACE:
			// Looks like a nested object, so parse it out
			node, err := p.objectType()
			if err != nil {
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error while trying to parse object within list: %s", err),
				}
			}
			l.Add(node)
			needComma = true
		case token.LBRACK:
			// nested list; NOTE(review): needComma is not set here, unlike
			// the literal and object cases — confirm whether intentional.
			node, err := p.listType()
			if err != nil {
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error while trying to parse list within list: %s", err),
				}
			}
			l.Add(node)
		case token.RBRACK:
			// finished
			l.Rbrack = p.tok.Pos
			return l, nil
		default:
			return nil, &PosError{
				Pos: tok.Pos,
				Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
			}
		}
	}
}
419 | |||
420 | // literalType parses a literal type and returns a LiteralType AST | ||
421 | func (p *Parser) literalType() (*ast.LiteralType, error) { | ||
422 | defer un(trace(p, "ParseLiteral")) | ||
423 | |||
424 | return &ast.LiteralType{ | ||
425 | Token: p.tok, | ||
426 | }, nil | ||
427 | } | ||
428 | |||
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead. In the process, it collects any
// comment groups encountered, and remembers the last lead and line comments.
func (p *Parser) scan() token.Token {
	// If we have a token on the buffer, then return it.
	if p.n != 0 {
		p.n = 0
		return p.tok
	}

	// Otherwise read the next token from the scanner and Save it to the buffer
	// in case we unscan later.
	prev := p.tok
	p.tok = p.sc.Scan()

	if p.tok.Type == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
		// p.tok.Pos.Line, prev.Pos.Line, endline)
		if p.tok.Pos.Line == prev.Pos.Line {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.tok.Pos.Line != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok.Type == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		// NOTE(review): the RBRACE check here is duplicated by the switch
		// below — presumably a leftover; confirm before simplifying.
		if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
			switch p.tok.Type {
			case token.RBRACE, token.RBRACK:
				// Do not count for these cases
			default:
				// The next token is following on the line immediately after the
				// comment group, thus the last comment group is a lead comment.
				p.leadComment = comment
			}
		}

	}

	return p.tok
}
482 | |||
// unscan pushes the previously read token back onto the buffer.
// Only a single token of lookahead is supported (p.n is at most 1).
func (p *Parser) unscan() {
	p.n = 1
}
487 | |||
488 | // ---------------------------------------------------------------------------- | ||
489 | // Parsing support | ||
490 | |||
491 | func (p *Parser) printTrace(a ...interface{}) { | ||
492 | if !p.enableTrace { | ||
493 | return | ||
494 | } | ||
495 | |||
496 | const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " | ||
497 | const n = len(dots) | ||
498 | fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) | ||
499 | |||
500 | i := 2 * p.indent | ||
501 | for i > n { | ||
502 | fmt.Print(dots) | ||
503 | i -= n | ||
504 | } | ||
505 | // i <= n | ||
506 | fmt.Print(dots[0:i]) | ||
507 | fmt.Println(a...) | ||
508 | } | ||
509 | |||
// trace prints the opening trace marker for msg and increases the trace
// indentation. It returns p so it composes with un in a defer statement.
func trace(p *Parser, msg string) *Parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
515 | |||
// un closes the trace opened by trace, restoring the indentation level.
// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
	p.indent--
	p.printTrace(")")
}
520 | } | ||
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 0000000..6966236 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go | |||
@@ -0,0 +1,651 @@ | |||
1 | // Package scanner implements a scanner for HCL (HashiCorp Configuration | ||
2 | // Language) source text. | ||
3 | package scanner | ||
4 | |||
5 | import ( | ||
6 | "bytes" | ||
7 | "fmt" | ||
8 | "os" | ||
9 | "regexp" | ||
10 | "unicode" | ||
11 | "unicode/utf8" | ||
12 | |||
13 | "github.com/hashicorp/hcl/hcl/token" | ||
14 | ) | ||
15 | |||
16 | // eof represents a marker rune for the end of the reader. | ||
17 | const eof = rune(0) | ||
18 | |||
// Scanner defines a lexical scanner
type Scanner struct {
	buf *bytes.Buffer // Source buffer for advancing and scanning
	src []byte        // Source buffer for immutable access (token text is sliced from here)

	// Source Position
	srcPos  token.Pos // current position
	prevPos token.Pos // previous position, used for peek() method

	lastCharLen int // length of last character in bytes
	lastLineLen int // length of last line in characters (for correct column reporting)

	tokStart int // token text start position
	tokEnd   int // token text end position

	// Error is called for each error encountered. If no Error
	// function is set, the error is reported to os.Stderr.
	Error func(pos token.Pos, msg string)

	// ErrorCount is incremented by one for each error encountered.
	ErrorCount int

	// tokPos is the start position of most recently scanned token; set by
	// Scan. The Filename field is always left untouched by the Scanner. If
	// an error is reported (via Error) and Position is invalid, the scanner is
	// not inside a token.
	tokPos token.Pos
}
47 | |||
48 | // New creates and initializes a new instance of Scanner using src as | ||
49 | // its source content. | ||
50 | func New(src []byte) *Scanner { | ||
51 | // even though we accept a src, we read from a io.Reader compatible type | ||
52 | // (*bytes.Buffer). So in the future we might easily change it to streaming | ||
53 | // read. | ||
54 | b := bytes.NewBuffer(src) | ||
55 | s := &Scanner{ | ||
56 | buf: b, | ||
57 | src: src, | ||
58 | } | ||
59 | |||
60 | // srcPosition always starts with 1 | ||
61 | s.srcPos.Line = 1 | ||
62 | return s | ||
63 | } | ||
64 | |||
// next reads the next rune from the buffered reader. Returns the rune(0) if
// an error occurs (or io.EOF is returned).
func (s *Scanner) next() rune {
	ch, size, err := s.buf.ReadRune()
	if err != nil {
		// advance for error reporting (size is 0 here, so only the column
		// moves)
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		return eof
	}

	// Invalid UTF-8: report but return RuneError (not eof) so scanning can
	// continue past the bad byte.
	if ch == utf8.RuneError && size == 1 {
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		s.err("illegal UTF-8 encoding")
		return ch
	}

	// remember last position
	s.prevPos = s.srcPos

	s.srcPos.Column++
	s.lastCharLen = size
	s.srcPos.Offset += size

	if ch == '\n' {
		s.srcPos.Line++
		s.lastLineLen = s.srcPos.Column
		s.srcPos.Column = 0
	}

	// If we see a null character with data left, then that is an error
	if ch == '\x00' && s.buf.Len() > 0 {
		s.err("unexpected null character (0x00)")
		return eof
	}

	// debug
	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
	return ch
}
108 | |||
// unread unreads the previous read Rune and updates the source position.
// Only one rune of pushback is available, since prevPos tracks a single
// prior position.
func (s *Scanner) unread() {
	if err := s.buf.UnreadRune(); err != nil {
		panic(err) // this is user fault, we should catch it
	}
	s.srcPos = s.prevPos // put back last position
}
116 | |||
117 | // peek returns the next rune without advancing the reader. | ||
118 | func (s *Scanner) peek() rune { | ||
119 | peek, _, err := s.buf.ReadRune() | ||
120 | if err != nil { | ||
121 | return eof | ||
122 | } | ||
123 | |||
124 | s.buf.UnreadRune() | ||
125 | return peek | ||
126 | } | ||
127 | |||
// Scan scans the next token and returns the token.
func (s *Scanner) Scan() token.Token {
	ch := s.next()

	// skip white space
	for isWhitespace(ch) {
		ch = s.next()
	}

	var tok token.Type

	// token text markings
	s.tokStart = s.srcPos.Offset - s.lastCharLen

	// token position, initial next() is moving the offset by one(size of rune
	// actually), though we are interested with the starting point
	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
	if s.srcPos.Column > 0 {
		// common case: last character was not a '\n'
		s.tokPos.Line = s.srcPos.Line
		s.tokPos.Column = s.srcPos.Column
	} else {
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		s.tokPos.Line = s.srcPos.Line - 1
		s.tokPos.Column = s.lastLineLen
	}

	switch {
	case isLetter(ch):
		// identifier or boolean literal
		tok = token.IDENT
		lit := s.scanIdentifier()
		if lit == "true" || lit == "false" {
			tok = token.BOOL
		}
	case isDecimal(ch):
		tok = s.scanNumber(ch)
	default:
		switch ch {
		case eof:
			tok = token.EOF
		case '"':
			tok = token.STRING
			s.scanString()
		case '#', '/':
			tok = token.COMMENT
			s.scanComment(ch)
		case '.':
			// '.' followed by a digit is a float like ".5"
			tok = token.PERIOD
			ch = s.peek()
			if isDecimal(ch) {
				tok = token.FLOAT
				ch = s.scanMantissa(ch)
				ch = s.scanExponent(ch)
			}
		case '<':
			tok = token.HEREDOC
			s.scanHeredoc()
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case '{':
			tok = token.LBRACE
		case '}':
			tok = token.RBRACE
		case ',':
			tok = token.COMMA
		case '=':
			tok = token.ASSIGN
		case '+':
			tok = token.ADD
		case '-':
			// '-' directly followed by a digit is a negative number,
			// otherwise a SUB operator token
			if isDecimal(s.peek()) {
				ch := s.next()
				tok = s.scanNumber(ch)
			} else {
				tok = token.SUB
			}
		default:
			s.err("illegal char")
		}
	}

	// finish token ending
	s.tokEnd = s.srcPos.Offset

	// create token literal
	var tokenText string
	if s.tokStart >= 0 {
		tokenText = string(s.src[s.tokStart:s.tokEnd])
	}
	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call

	return token.Token{
		Type: tok,
		Pos:  s.tokPos,
		Text: tokenText,
	}
}
229 | |||
// scanComment scans a single-line ('#' or '//') or multi-line ('/* ... */')
// comment. ch is the first rune of the comment ('#' or '/').
func (s *Scanner) scanComment(ch rune) {
	// single line comments
	if ch == '#' || (ch == '/' && s.peek() != '*') {
		if ch == '/' && s.peek() != '/' {
			s.err("expected '/' for comment")
			return
		}

		ch = s.next()
		for ch != '\n' && ch >= 0 && ch != eof {
			ch = s.next()
		}
		if ch != eof && ch >= 0 {
			// leave the terminating newline for the main scanner loop
			s.unread()
		}
		return
	}

	// be sure we get the character after /*. This allows us to find comments
	// that are not terminated
	if ch == '/' {
		s.next()
		ch = s.next() // read character after "/*"
	}

	// look for /* - style comments
	for {
		if ch < 0 || ch == eof {
			s.err("comment not terminated")
			break
		}

		ch0 := ch
		ch = s.next()
		if ch0 == '*' && ch == '/' {
			break
		}
	}
}
269 | |||
// scanNumber scans a HCL number definition starting with the given rune and
// classifies it as NUMBER (integer, possibly hex or octal) or FLOAT.
func (s *Scanner) scanNumber(ch rune) token.Type {
	if ch == '0' {
		// check for hexadecimal, octal or float
		ch = s.next()
		if ch == 'x' || ch == 'X' {
			// hexadecimal
			ch = s.next()
			found := false
			for isHexadecimal(ch) {
				ch = s.next()
				found = true
			}

			// "0x" with no digits after it is malformed
			if !found {
				s.err("illegal hexadecimal number")
			}

			if ch != eof {
				s.unread()
			}

			return token.NUMBER
		}

		// now it's either something like: 0421(octal) or 0.1231(float)
		illegalOctal := false
		for isDecimal(ch) {
			ch = s.next()
			if ch == '8' || ch == '9' {
				// this is just a possibility. For example 0159 is illegal, but
				// 0159.23 is valid. So we mark a possible illegal octal. If
				// the next character is not a period, we'll print the error.
				illegalOctal = true
			}
		}

		if ch == 'e' || ch == 'E' {
			ch = s.scanExponent(ch)
			return token.FLOAT
		}

		if ch == '.' {
			ch = s.scanFraction(ch)

			if ch == 'e' || ch == 'E' {
				ch = s.next()
				ch = s.scanExponent(ch)
			}
			return token.FLOAT
		}

		if illegalOctal {
			s.err("illegal octal number")
		}

		if ch != eof {
			s.unread()
		}
		return token.NUMBER
	}

	// leading digit 1-9: plain decimal, possibly with fraction/exponent
	s.scanMantissa(ch)
	ch = s.next() // seek forward
	if ch == 'e' || ch == 'E' {
		ch = s.scanExponent(ch)
		return token.FLOAT
	}

	if ch == '.' {
		ch = s.scanFraction(ch)
		if ch == 'e' || ch == 'E' {
			ch = s.next()
			ch = s.scanExponent(ch)
		}
		return token.FLOAT
	}

	if ch != eof {
		s.unread()
	}
	return token.NUMBER
}
353 | |||
354 | // scanMantissa scans the mantissa begining from the rune. It returns the next | ||
355 | // non decimal rune. It's used to determine wheter it's a fraction or exponent. | ||
356 | func (s *Scanner) scanMantissa(ch rune) rune { | ||
357 | scanned := false | ||
358 | for isDecimal(ch) { | ||
359 | ch = s.next() | ||
360 | scanned = true | ||
361 | } | ||
362 | |||
363 | if scanned && ch != eof { | ||
364 | s.unread() | ||
365 | } | ||
366 | return ch | ||
367 | } | ||
368 | |||
369 | // scanFraction scans the fraction after the '.' rune | ||
370 | func (s *Scanner) scanFraction(ch rune) rune { | ||
371 | if ch == '.' { | ||
372 | ch = s.peek() // we peek just to see if we can move forward | ||
373 | ch = s.scanMantissa(ch) | ||
374 | } | ||
375 | return ch | ||
376 | } | ||
377 | |||
378 | // scanExponent scans the remaining parts of an exponent after the 'e' or 'E' | ||
379 | // rune. | ||
380 | func (s *Scanner) scanExponent(ch rune) rune { | ||
381 | if ch == 'e' || ch == 'E' { | ||
382 | ch = s.next() | ||
383 | if ch == '-' || ch == '+' { | ||
384 | ch = s.next() | ||
385 | } | ||
386 | ch = s.scanMantissa(ch) | ||
387 | } | ||
388 | return ch | ||
389 | } | ||
390 | |||
// scanHeredoc scans a heredoc string ("<<ANCHOR ... ANCHOR" or the
// indented "<<-ANCHOR" form) up to and including the terminating anchor.
func (s *Scanner) scanHeredoc() {
	// Scan the second '<' in example: '<<EOF'
	if s.next() != '<' {
		s.err("heredoc expected second '<', didn't see it")
		return
	}

	// Get the original offset so we can read just the heredoc ident
	offs := s.srcPos.Offset

	// Scan the identifier
	ch := s.next()

	// Indented heredoc syntax
	if ch == '-' {
		ch = s.next()
	}

	for isLetter(ch) || isDigit(ch) {
		ch = s.next()
	}

	// If we reached an EOF then that is not good
	if ch == eof {
		s.err("heredoc not terminated")
		return
	}

	// Ignore the '\r' in Windows line endings
	if ch == '\r' {
		if s.peek() == '\n' {
			ch = s.next()
		}
	}

	// If we didn't reach a newline then that is also not good
	if ch != '\n' {
		s.err("invalid characters in heredoc anchor")
		return
	}

	// Read the identifier
	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
	if len(identBytes) == 0 {
		s.err("zero-length heredoc anchor")
		return
	}

	// Build a regexp matching the anchor at end-of-line; the indented form
	// ("<<-") additionally allows leading whitespace before the anchor.
	var identRegexp *regexp.Regexp
	if identBytes[0] == '-' {
		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
	} else {
		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
	}

	// Read the actual string value
	lineStart := s.srcPos.Offset
	for {
		ch := s.next()

		// Special newline handling.
		if ch == '\n' {
			// Math is fast, so we first compare the byte counts to see if we have a chance
			// of seeing the same identifier - if the length is less than the number of bytes
			// in the identifier, this cannot be a valid terminator.
			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
			if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
				break
			}

			// Not an anchor match, record the start of a new line
			lineStart = s.srcPos.Offset
		}

		if ch == eof {
			s.err("heredoc not terminated")
			return
		}
	}

	return
}
474 | |||
475 | // scanString scans a quoted string | ||
476 | func (s *Scanner) scanString() { | ||
477 | braces := 0 | ||
478 | for { | ||
479 | // '"' opening already consumed | ||
480 | // read character after quote | ||
481 | ch := s.next() | ||
482 | |||
483 | if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { | ||
484 | s.err("literal not terminated") | ||
485 | return | ||
486 | } | ||
487 | |||
488 | if ch == '"' && braces == 0 { | ||
489 | break | ||
490 | } | ||
491 | |||
492 | // If we're going into a ${} then we can ignore quotes for awhile | ||
493 | if braces == 0 && ch == '$' && s.peek() == '{' { | ||
494 | braces++ | ||
495 | s.next() | ||
496 | } else if braces > 0 && ch == '{' { | ||
497 | braces++ | ||
498 | } | ||
499 | if braces > 0 && ch == '}' { | ||
500 | braces-- | ||
501 | } | ||
502 | |||
503 | if ch == '\\' { | ||
504 | s.scanEscape() | ||
505 | } | ||
506 | } | ||
507 | |||
508 | return | ||
509 | } | ||
510 | |||
511 | // scanEscape scans an escape sequence | ||
512 | func (s *Scanner) scanEscape() rune { | ||
513 | // http://en.cppreference.com/w/cpp/language/escape | ||
514 | ch := s.next() // read character after '/' | ||
515 | switch ch { | ||
516 | case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': | ||
517 | // nothing to do | ||
518 | case '0', '1', '2', '3', '4', '5', '6', '7': | ||
519 | // octal notation | ||
520 | ch = s.scanDigits(ch, 8, 3) | ||
521 | case 'x': | ||
522 | // hexademical notation | ||
523 | ch = s.scanDigits(s.next(), 16, 2) | ||
524 | case 'u': | ||
525 | // universal character name | ||
526 | ch = s.scanDigits(s.next(), 16, 4) | ||
527 | case 'U': | ||
528 | // universal character name | ||
529 | ch = s.scanDigits(s.next(), 16, 8) | ||
530 | default: | ||
531 | s.err("illegal char escape") | ||
532 | } | ||
533 | return ch | ||
534 | } | ||
535 | |||
// scanDigits consumes up to n digits in the given base, starting with ch,
// and returns the first rune that is not a valid digit (or the rune at
// which scanning stopped). If fewer than n valid digits were consumed
// before a non-digit appeared, an "illegal char escape" error is reported.
// Note that hitting EOF breaks out before n is decremented, so a sequence
// cut short by EOF also reports the error.
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
	start := n
	for n > 0 && digitVal(ch) < base {
		ch = s.next()
		if ch == eof {
			// If we see an EOF, we halt any more scanning of digits
			// immediately.
			break
		}

		n--
	}
	if n > 0 {
		s.err("illegal char escape")
	}

	if n != start {
		// we scanned all digits, put the last non digit char back,
		// only if we read anything at all
		s.unread()
	}

	return ch
}
562 | |||
563 | // scanIdentifier scans an identifier and returns the literal string | ||
564 | func (s *Scanner) scanIdentifier() string { | ||
565 | offs := s.srcPos.Offset - s.lastCharLen | ||
566 | ch := s.next() | ||
567 | for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { | ||
568 | ch = s.next() | ||
569 | } | ||
570 | |||
571 | if ch != eof { | ||
572 | s.unread() // we got identifier, put back latest char | ||
573 | } | ||
574 | |||
575 | return string(s.src[offs:s.srcPos.Offset]) | ||
576 | } | ||
577 | |||
578 | // recentPosition returns the position of the character immediately after the | ||
579 | // character or token returned by the last call to Scan. | ||
580 | func (s *Scanner) recentPosition() (pos token.Pos) { | ||
581 | pos.Offset = s.srcPos.Offset - s.lastCharLen | ||
582 | switch { | ||
583 | case s.srcPos.Column > 0: | ||
584 | // common case: last character was not a '\n' | ||
585 | pos.Line = s.srcPos.Line | ||
586 | pos.Column = s.srcPos.Column | ||
587 | case s.lastLineLen > 0: | ||
588 | // last character was a '\n' | ||
589 | // (we cannot be at the beginning of the source | ||
590 | // since we have called next() at least once) | ||
591 | pos.Line = s.srcPos.Line - 1 | ||
592 | pos.Column = s.lastLineLen | ||
593 | default: | ||
594 | // at the beginning of the source | ||
595 | pos.Line = 1 | ||
596 | pos.Column = 1 | ||
597 | } | ||
598 | return | ||
599 | } | ||
600 | |||
601 | // err prints the error of any scanning to s.Error function. If the function is | ||
602 | // not defined, by default it prints them to os.Stderr | ||
603 | func (s *Scanner) err(msg string) { | ||
604 | s.ErrorCount++ | ||
605 | pos := s.recentPosition() | ||
606 | |||
607 | if s.Error != nil { | ||
608 | s.Error(pos, msg) | ||
609 | return | ||
610 | } | ||
611 | |||
612 | fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) | ||
613 | } | ||
614 | |||
// isLetter reports whether ch is a letter: an ASCII letter, an underscore,
// or a non-ASCII rune classified as a letter by the unicode package.
// (The original comment erroneously described isHexadecimal.)
func isLetter(ch rune) bool {
	switch {
	case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z':
		return true
	case ch == '_':
		return true
	case ch >= 0x80:
		return unicode.IsLetter(ch)
	}
	return false
}
619 | |||
// isDigit reports whether ch is a decimal digit: ASCII '0'-'9', or a
// non-ASCII rune classified as a digit by the unicode package.
func isDigit(ch rune) bool {
	if '0' <= ch && ch <= '9' {
		return true
	}
	return ch >= 0x80 && unicode.IsDigit(ch)
}
624 | |||
// isDecimal reports whether ch is an ASCII decimal digit ('0'-'9').
func isDecimal(ch rune) bool {
	return ch >= '0' && ch <= '9'
}
629 | |||
// isHexadecimal reports whether ch is a hexadecimal digit
// ('0'-'9', 'a'-'f', 'A'-'F').
func isHexadecimal(ch rune) bool {
	switch {
	case ch >= '0' && ch <= '9':
		return true
	case ch >= 'a' && ch <= 'f':
		return true
	case ch >= 'A' && ch <= 'F':
		return true
	}
	return false
}
634 | |||
// isWhitespace reports whether ch is a space, tab, newline or carriage return.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
639 | |||
// digitVal returns the integer value of an octal, decimal or hexadecimal
// digit rune. For any other rune it returns 16, which is larger than any
// legal digit value and therefore fails a digitVal(ch) < base test.
func digitVal(ch rune) int {
	if ch >= '0' && ch <= '9' {
		return int(ch - '0')
	}
	if ch >= 'a' && ch <= 'f' {
		return int(ch-'a') + 10
	}
	if ch >= 'A' && ch <= 'F' {
		return int(ch-'A') + 10
	}
	return 16 // larger than any legal digit val
}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 0000000..5f981ea --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go | |||
@@ -0,0 +1,241 @@ | |||
1 | package strconv | ||
2 | |||
3 | import ( | ||
4 | "errors" | ||
5 | "unicode/utf8" | ||
6 | ) | ||
7 | |||
// ErrSyntax indicates that a value does not have the right syntax for the
// target type.
var ErrSyntax = errors.New("invalid syntax")
10 | |||
11 | // Unquote interprets s as a single-quoted, double-quoted, | ||
12 | // or backquoted Go string literal, returning the string value | ||
13 | // that s quotes. (If s is single-quoted, it would be a Go | ||
14 | // character literal; Unquote returns the corresponding | ||
15 | // one-character string.) | ||
16 | func Unquote(s string) (t string, err error) { | ||
17 | n := len(s) | ||
18 | if n < 2 { | ||
19 | return "", ErrSyntax | ||
20 | } | ||
21 | quote := s[0] | ||
22 | if quote != s[n-1] { | ||
23 | return "", ErrSyntax | ||
24 | } | ||
25 | s = s[1 : n-1] | ||
26 | |||
27 | if quote != '"' { | ||
28 | return "", ErrSyntax | ||
29 | } | ||
30 | if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { | ||
31 | return "", ErrSyntax | ||
32 | } | ||
33 | |||
34 | // Is it trivial? Avoid allocation. | ||
35 | if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { | ||
36 | switch quote { | ||
37 | case '"': | ||
38 | return s, nil | ||
39 | case '\'': | ||
40 | r, size := utf8.DecodeRuneInString(s) | ||
41 | if size == len(s) && (r != utf8.RuneError || size != 1) { | ||
42 | return s, nil | ||
43 | } | ||
44 | } | ||
45 | } | ||
46 | |||
47 | var runeTmp [utf8.UTFMax]byte | ||
48 | buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. | ||
49 | for len(s) > 0 { | ||
50 | // If we're starting a '${}' then let it through un-unquoted. | ||
51 | // Specifically: we don't unquote any characters within the `${}` | ||
52 | // section. | ||
53 | if s[0] == '$' && len(s) > 1 && s[1] == '{' { | ||
54 | buf = append(buf, '$', '{') | ||
55 | s = s[2:] | ||
56 | |||
57 | // Continue reading until we find the closing brace, copying as-is | ||
58 | braces := 1 | ||
59 | for len(s) > 0 && braces > 0 { | ||
60 | r, size := utf8.DecodeRuneInString(s) | ||
61 | if r == utf8.RuneError { | ||
62 | return "", ErrSyntax | ||
63 | } | ||
64 | |||
65 | s = s[size:] | ||
66 | |||
67 | n := utf8.EncodeRune(runeTmp[:], r) | ||
68 | buf = append(buf, runeTmp[:n]...) | ||
69 | |||
70 | switch r { | ||
71 | case '{': | ||
72 | braces++ | ||
73 | case '}': | ||
74 | braces-- | ||
75 | } | ||
76 | } | ||
77 | if braces != 0 { | ||
78 | return "", ErrSyntax | ||
79 | } | ||
80 | if len(s) == 0 { | ||
81 | // If there's no string left, we're done! | ||
82 | break | ||
83 | } else { | ||
84 | // If there's more left, we need to pop back up to the top of the loop | ||
85 | // in case there's another interpolation in this string. | ||
86 | continue | ||
87 | } | ||
88 | } | ||
89 | |||
90 | if s[0] == '\n' { | ||
91 | return "", ErrSyntax | ||
92 | } | ||
93 | |||
94 | c, multibyte, ss, err := unquoteChar(s, quote) | ||
95 | if err != nil { | ||
96 | return "", err | ||
97 | } | ||
98 | s = ss | ||
99 | if c < utf8.RuneSelf || !multibyte { | ||
100 | buf = append(buf, byte(c)) | ||
101 | } else { | ||
102 | n := utf8.EncodeRune(runeTmp[:], c) | ||
103 | buf = append(buf, runeTmp[:n]...) | ||
104 | } | ||
105 | if quote == '\'' && len(s) != 0 { | ||
106 | // single-quoted must be single character | ||
107 | return "", ErrSyntax | ||
108 | } | ||
109 | } | ||
110 | return string(buf), nil | ||
111 | } | ||
112 | |||
// contains reports whether the string s contains the byte c.
func contains(s string, c byte) bool {
	for _, b := range []byte(s) {
		if b == c {
			return true
		}
	}
	return false
}
122 | |||
// unhex returns the numeric value of the hexadecimal digit b, and whether
// b was a valid hexadecimal digit.
func unhex(b byte) (v rune, ok bool) {
	c := rune(b)
	switch {
	case c >= '0' && c <= '9':
		return c - '0', true
	case c >= 'a' && c <= 'f':
		return c - 'a' + 10, true
	case c >= 'A' && c <= 'F':
		return c - 'A' + 10, true
	default:
		return 0, false
	}
}
135 | |||
// unquoteChar decodes the first character or escape sequence in s and
// returns: the decoded rune, whether it must be emitted as multi-byte
// UTF-8 (true for \u/\U escapes and literal non-ASCII runes — \x and
// octal escapes stay single-byte), the remaining tail of s, and any
// syntax error. quote is the surrounding quote character; an unescaped
// occurrence of it is an error, and only the matching quote may appear
// escaped.
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
	// easy cases
	switch c := s[0]; {
	case c == quote && (quote == '\'' || quote == '"'):
		// Unescaped quote character: invalid here.
		err = ErrSyntax
		return
	case c >= utf8.RuneSelf:
		// Literal non-ASCII rune: decode and pass through unchanged.
		r, size := utf8.DecodeRuneInString(s)
		return r, true, s[size:], nil
	case c != '\\':
		// Plain ASCII character, no escape.
		return rune(s[0]), false, s[1:], nil
	}

	// hard case: c is backslash
	if len(s) <= 1 {
		// A trailing backslash has nothing to escape.
		err = ErrSyntax
		return
	}
	c := s[1]
	s = s[2:]

	switch c {
	case 'a':
		value = '\a'
	case 'b':
		value = '\b'
	case 'f':
		value = '\f'
	case 'n':
		value = '\n'
	case 'r':
		value = '\r'
	case 't':
		value = '\t'
	case 'v':
		value = '\v'
	case 'x', 'u', 'U':
		// Fixed-width hex escapes: \xhh, \uhhhh, \Uhhhhhhhh.
		n := 0
		switch c {
		case 'x':
			n = 2
		case 'u':
			n = 4
		case 'U':
			n = 8
		}
		var v rune
		if len(s) < n {
			err = ErrSyntax
			return
		}
		for j := 0; j < n; j++ {
			x, ok := unhex(s[j])
			if !ok {
				err = ErrSyntax
				return
			}
			v = v<<4 | x
		}
		s = s[n:]
		if c == 'x' {
			// single-byte string, possibly not UTF-8
			value = v
			break
		}
		if v > utf8.MaxRune {
			err = ErrSyntax
			return
		}
		value = v
		multibyte = true
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// Octal escape: exactly three digits, value must fit in a byte.
		v := rune(c) - '0'
		if len(s) < 2 {
			err = ErrSyntax
			return
		}
		for j := 0; j < 2; j++ { // one digit already; two more
			x := rune(s[j]) - '0'
			if x < 0 || x > 7 {
				err = ErrSyntax
				return
			}
			v = (v << 3) | x
		}
		s = s[2:]
		if v > 255 {
			err = ErrSyntax
			return
		}
		value = v
	case '\\':
		value = '\\'
	case '\'', '"':
		// Only the surrounding quote character may be escaped.
		if c != quote {
			err = ErrSyntax
			return
		}
		value = rune(c)
	default:
		err = ErrSyntax
		return
	}
	tail = s
	return
}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 0000000..59c1bb7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go | |||
@@ -0,0 +1,46 @@ | |||
1 | package token | ||
2 | |||
3 | import "fmt" | ||
4 | |||
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
type Pos struct {
	Filename string // filename, if any
	Offset   int    // offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count)
}

// IsValid reports whether the position is valid: a valid position has a
// line number greater than zero.
func (p *Pos) IsValid() bool {
	return p.Line > 0
}

// String renders the position in one of several forms:
//
//	file:line:column    valid position with file name
//	line:column         valid position without file name
//	file                invalid position with file name
//	-                   invalid position without file name
func (p Pos) String() string {
	out := p.Filename
	if p.IsValid() {
		if out != "" {
			out += ":"
		}
		out += fmt.Sprintf("%d:%d", p.Line, p.Column)
	}
	if out == "" {
		return "-"
	}
	return out
}

// Before reports whether the position p is before u.
func (p Pos) Before(u Pos) bool {
	return p.Offset < u.Offset || p.Line < u.Line
}

// After reports whether the position p is after u.
func (p Pos) After(u Pos) bool {
	return p.Offset > u.Offset || p.Line > u.Line
}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 0000000..e37c066 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go | |||
@@ -0,0 +1,219 @@ | |||
1 | // Package token defines constants representing the lexical tokens for HCL | ||
2 | // (HashiCorp Configuration Language) | ||
3 | package token | ||
4 | |||
5 | import ( | ||
6 | "fmt" | ||
7 | "strconv" | ||
8 | "strings" | ||
9 | |||
10 | hclstrconv "github.com/hashicorp/hcl/hcl/strconv" | ||
11 | ) | ||
12 | |||
// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
	Type Type   // lexical type of the token
	Pos  Pos    // source position of the token
	Text string // literal text of the token as it appeared in the source
	JSON bool   // set when the token came from JSON input; Value then uses Go-style unquoting
}
20 | |||
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int

const (
	// Special tokens
	ILLEGAL Type = iota
	EOF
	COMMENT

	identifier_beg
	IDENT // literals
	literal_beg
	NUMBER  // 12345
	FLOAT   // 123.45
	BOOL    // true,false
	STRING  // "abc"
	HEREDOC // <<FOO\nbar\nFOO
	literal_end
	identifier_end

	operator_beg
	LBRACK // [
	LBRACE // {
	COMMA  // ,
	PERIOD // .

	RBRACK // ]
	RBRACE // }

	ASSIGN // =
	ADD    // +
	SUB    // -
	operator_end
)

// tokens maps each token type to its display name. Unnamed sentinel
// values (identifier_beg etc.) intentionally map to the empty string.
var tokens = [...]string{
	ILLEGAL: "ILLEGAL",

	EOF:     "EOF",
	COMMENT: "COMMENT",

	IDENT:   "IDENT",
	NUMBER:  "NUMBER",
	FLOAT:   "FLOAT",
	BOOL:    "BOOL",
	STRING:  "STRING",
	HEREDOC: "HEREDOC",

	LBRACK: "LBRACK",
	LBRACE: "LBRACE",
	COMMA:  "COMMA",
	PERIOD: "PERIOD",

	RBRACK: "RBRACK",
	RBRACE: "RBRACE",

	ASSIGN: "ASSIGN",
	ADD:    "ADD",
	SUB:    "SUB",
}

// String returns the name of the token type, or "token(N)" for values
// that have no name or fall outside the known range.
func (t Type) String() string {
	if t >= 0 && int(t) < len(tokens) {
		if name := tokens[t]; name != "" {
			return name
		}
	}
	return "token(" + strconv.Itoa(int(t)) + ")"
}

// IsIdentifier returns true for tokens corresponding to identifiers and basic
// type literals; it returns false otherwise.
func (t Type) IsIdentifier() bool {
	return t > identifier_beg && t < identifier_end
}

// IsLiteral returns true for tokens corresponding to basic type literals; it
// returns false otherwise.
func (t Type) IsLiteral() bool {
	return t > literal_beg && t < literal_end
}

// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
func (t Type) IsOperator() bool {
	return t > operator_beg && t < operator_end
}
105 | |||
106 | // String returns the token's literal text. Note that this is only | ||
107 | // applicable for certain token types, such as token.IDENT, | ||
108 | // token.STRING, etc.. | ||
109 | func (t Token) String() string { | ||
110 | return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) | ||
111 | } | ||
112 | |||
// Value returns the properly typed value for this token. The type of
// the returned interface{} is guaranteed based on the Type field:
// BOOL -> bool, FLOAT -> float64, NUMBER -> int64, IDENT -> string,
// HEREDOC -> string (unindented), STRING -> string (unquoted).
//
// This can only be called for literal types. If it is called for any other
// type, this will panic. It also panics when the token text does not parse
// as its declared type (the scanner is expected to have validated it).
func (t Token) Value() interface{} {
	switch t.Type {
	case BOOL:
		if t.Text == "true" {
			return true
		} else if t.Text == "false" {
			return false
		}

		panic("unknown bool value: " + t.Text)
	case FLOAT:
		v, err := strconv.ParseFloat(t.Text, 64)
		if err != nil {
			panic(err)
		}

		return float64(v)
	case NUMBER:
		// Base 0 allows 0x / 0 prefixes in the literal.
		v, err := strconv.ParseInt(t.Text, 0, 64)
		if err != nil {
			panic(err)
		}

		return int64(v)
	case IDENT:
		return t.Text
	case HEREDOC:
		return unindentHeredoc(t.Text)
	case STRING:
		// Determine the Unquote method to use. If it came from JSON,
		// then we need to use the built-in unquote since we have to
		// escape interpolations there.
		f := hclstrconv.Unquote
		if t.JSON {
			f = strconv.Unquote
		}

		// This case occurs if json null is used
		if t.Text == "" {
			return ""
		}

		v, err := f(t.Text)
		if err != nil {
			panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
		}

		return v
	default:
		panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
	}
}
170 | |||
// unindentHeredoc returns the string content of a HEREDOC if it is started with <<
// and the content of a HEREDOC with the hanging indent removed if it is started with
// a <<-, and the terminating line is at least as indented as the least indented line.
func unindentHeredoc(heredoc string) string {
	// We need to find the end of the marker
	idx := strings.IndexByte(heredoc, '\n')
	if idx == -1 {
		panic("heredoc doesn't contain newline")
	}

	// "<<-" (vs "<<") in the introducer marks an unindenting heredoc.
	unindent := heredoc[2] == '-'

	// We can optimize if the heredoc isn't marked for indentation.
	// NOTE(review): idx is the length of the "<<MARKER" introducer; the
	// slice arithmetic below assumes the trailing "\n" + terminator
	// occupies idx-1 bytes (marker without the "<<" prefix). TODO confirm
	// this holds when the terminator line carries leading whitespace.
	if !unindent {
		return string(heredoc[idx+1 : len(heredoc)-idx+1])
	}

	// We need to unindent each line based on the indentation level of the
	// marker. The slice end (len-idx+2) keeps the segment after the final
	// newline, which becomes the whitespace prefix to strip.
	lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
	whitespacePrefix := lines[len(lines)-1]

	isIndented := true
	for _, v := range lines {
		if strings.HasPrefix(v, whitespacePrefix) {
			continue
		}

		isIndented = false
		break
	}

	// If all lines are not at least as indented as the terminating mark, return the
	// heredoc as is, but trim the leading space from the marker on the final line.
	if !isIndented {
		return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
	}

	unindentedLines := make([]string, len(lines))
	for k, v := range lines {
		if k == len(lines)-1 {
			// The final segment is the terminator's indentation; drop it.
			unindentedLines[k] = ""
			break
		}

		unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
	}

	return strings.Join(unindentedLines, "\n")
}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go new file mode 100644 index 0000000..f652d6f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go | |||
@@ -0,0 +1,117 @@ | |||
1 | package parser | ||
2 | |||
3 | import "github.com/hashicorp/hcl/hcl/ast" | ||
4 | |||
// flattenObjects takes an AST node, walks it, and flattens: object and
// all-object-list values inside every ObjectList are expanded in place
// into repeated ObjectItems, the shape native HCL uses.
func flattenObjects(node ast.Node) {
	ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
		// We only care about lists, because this is what we modify
		list, ok := n.(*ast.ObjectList)
		if !ok {
			return n, true
		}

		// Rebuild the item list, using "frontier" as an explicit work
		// stack of items still to be examined.
		items := make([]*ast.ObjectItem, 0, len(list.Items))
		frontier := make([]*ast.ObjectItem, len(list.Items))
		copy(frontier, list.Items)
		for len(frontier) > 0 {
			// Pop the current item (this n shadows the outer node)
			n := len(frontier)
			item := frontier[n-1]
			frontier = frontier[:n-1]

			switch v := item.Val.(type) {
			case *ast.ObjectType:
				items, frontier = flattenObjectType(v, item, items, frontier)
			case *ast.ListType:
				items, frontier = flattenListType(v, item, items, frontier)
			default:
				items = append(items, item)
			}
		}

		// Reverse the list since the frontier model runs things backwards
		for i := len(items)/2 - 1; i >= 0; i-- {
			opp := len(items) - 1 - i
			items[i], items[opp] = items[opp], items[i]
		}

		// Done! Set the original items
		list.Items = items
		return n, true
	})
}
45 | |||
46 | func flattenListType( | ||
47 | ot *ast.ListType, | ||
48 | item *ast.ObjectItem, | ||
49 | items []*ast.ObjectItem, | ||
50 | frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { | ||
51 | // If the list is empty, keep the original list | ||
52 | if len(ot.List) == 0 { | ||
53 | items = append(items, item) | ||
54 | return items, frontier | ||
55 | } | ||
56 | |||
57 | // All the elements of this object must also be objects! | ||
58 | for _, subitem := range ot.List { | ||
59 | if _, ok := subitem.(*ast.ObjectType); !ok { | ||
60 | items = append(items, item) | ||
61 | return items, frontier | ||
62 | } | ||
63 | } | ||
64 | |||
65 | // Great! We have a match go through all the items and flatten | ||
66 | for _, elem := range ot.List { | ||
67 | // Add it to the frontier so that we can recurse | ||
68 | frontier = append(frontier, &ast.ObjectItem{ | ||
69 | Keys: item.Keys, | ||
70 | Assign: item.Assign, | ||
71 | Val: elem, | ||
72 | LeadComment: item.LeadComment, | ||
73 | LineComment: item.LineComment, | ||
74 | }) | ||
75 | } | ||
76 | |||
77 | return items, frontier | ||
78 | } | ||
79 | |||
80 | func flattenObjectType( | ||
81 | ot *ast.ObjectType, | ||
82 | item *ast.ObjectItem, | ||
83 | items []*ast.ObjectItem, | ||
84 | frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { | ||
85 | // If the list has no items we do not have to flatten anything | ||
86 | if ot.List.Items == nil { | ||
87 | items = append(items, item) | ||
88 | return items, frontier | ||
89 | } | ||
90 | |||
91 | // All the elements of this object must also be objects! | ||
92 | for _, subitem := range ot.List.Items { | ||
93 | if _, ok := subitem.Val.(*ast.ObjectType); !ok { | ||
94 | items = append(items, item) | ||
95 | return items, frontier | ||
96 | } | ||
97 | } | ||
98 | |||
99 | // Great! We have a match go through all the items and flatten | ||
100 | for _, subitem := range ot.List.Items { | ||
101 | // Copy the new key | ||
102 | keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) | ||
103 | copy(keys, item.Keys) | ||
104 | copy(keys[len(item.Keys):], subitem.Keys) | ||
105 | |||
106 | // Add it to the frontier so that we can recurse | ||
107 | frontier = append(frontier, &ast.ObjectItem{ | ||
108 | Keys: keys, | ||
109 | Assign: item.Assign, | ||
110 | Val: subitem.Val, | ||
111 | LeadComment: item.LeadComment, | ||
112 | LineComment: item.LineComment, | ||
113 | }) | ||
114 | } | ||
115 | |||
116 | return items, frontier | ||
117 | } | ||
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 0000000..125a5f0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go | |||
@@ -0,0 +1,313 @@ | |||
1 | package parser | ||
2 | |||
3 | import ( | ||
4 | "errors" | ||
5 | "fmt" | ||
6 | |||
7 | "github.com/hashicorp/hcl/hcl/ast" | ||
8 | hcltoken "github.com/hashicorp/hcl/hcl/token" | ||
9 | "github.com/hashicorp/hcl/json/scanner" | ||
10 | "github.com/hashicorp/hcl/json/token" | ||
11 | ) | ||
12 | |||
// Parser converts a stream of JSON tokens produced by the scanner into an
// HCL-compatible AST.
type Parser struct {
	sc *scanner.Scanner // token source

	// Last read token
	tok       token.Token
	commaPrev token.Token

	enableTrace bool // when true, parse tracing is enabled
	indent      int  // current trace indentation level
	n           int // buffer size (max = 1)
}
24 | |||
25 | func newParser(src []byte) *Parser { | ||
26 | return &Parser{ | ||
27 | sc: scanner.New(src), | ||
28 | } | ||
29 | } | ||
30 | |||
31 | // Parse returns the fully parsed source and returns the abstract syntax tree. | ||
32 | func Parse(src []byte) (*ast.File, error) { | ||
33 | p := newParser(src) | ||
34 | return p.Parse() | ||
35 | } | ||
36 | |||
// errEofToken is used internally to signal that the scanner ran out of
// tokens; it is treated as normal termination by objectList.
var errEofToken = errors.New("EOF token found")
38 | |||
// Parse returns the fully parsed source and returns the abstract syntax tree.
// Scanner errors take precedence over parse errors, since a parse error is
// usually a consequence of a scan error.
func (p *Parser) Parse() (*ast.File, error) {
	f := &ast.File{}
	var err, scerr error
	// Capture scanner errors; checked after parsing below.
	p.sc.Error = func(pos token.Pos, msg string) {
		scerr = fmt.Errorf("%s: %s", pos, msg)
	}

	// The root must be an object in JSON
	object, err := p.object()
	if scerr != nil {
		return nil, scerr
	}
	if err != nil {
		return nil, err
	}

	// We make our final node an object list so it is more HCL compatible
	f.Node = object.List

	// Flatten it, which finds patterns and turns them into more HCL-like
	// AST trees.
	flattenObjects(f.Node)

	return f, nil
}
65 | |||
// objectList parses a comma-separated list of object items. It stops
// without error at the first non-comma token following an item, or at EOF.
func (p *Parser) objectList() (*ast.ObjectList, error) {
	defer un(trace(p, "ParseObjectList"))
	node := &ast.ObjectList{}

	for {
		n, err := p.objectItem()
		if err == errEofToken {
			break // we are finished
		}

		// we don't return a nil node, because might want to use already
		// collected items.
		if err != nil {
			return node, err
		}

		node.Add(n)

		// Check for a followup comma. If it isn't a comma, then we're done.
		// NOTE(review): the terminating token is consumed here; objectType
		// appears to rely on p.tok still holding it — confirm against scan().
		if tok := p.scan(); tok.Type != token.COMMA {
			break
		}
	}

	return node, nil
}
92 | |||
// objectItem parses a single object item: a key (or key path) optionally
// followed by ':' and a value.
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
	defer un(trace(p, "ParseObjectItem"))

	keys, err := p.objectKey()
	if err != nil {
		return nil, err
	}

	o := &ast.ObjectItem{
		Keys: keys,
	}

	// objectKey only returns successfully on a COLON token, so this switch
	// is expected to always take the COLON branch.
	switch p.tok.Type {
	case token.COLON:
		pos := p.tok.Pos
		// Translate the JSON token position into an HCL token position.
		o.Assign = hcltoken.Pos{
			Filename: pos.Filename,
			Offset:   pos.Offset,
			Line:     pos.Line,
			Column:   pos.Column,
		}

		o.Val, err = p.objectValue()
		if err != nil {
			return nil, err
		}
	}

	return o, nil
}
124 | |||
125 | // objectKey parses an object key and returns a ObjectKey AST | ||
126 | func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { | ||
127 | keyCount := 0 | ||
128 | keys := make([]*ast.ObjectKey, 0) | ||
129 | |||
130 | for { | ||
131 | tok := p.scan() | ||
132 | switch tok.Type { | ||
133 | case token.EOF: | ||
134 | return nil, errEofToken | ||
135 | case token.STRING: | ||
136 | keyCount++ | ||
137 | keys = append(keys, &ast.ObjectKey{ | ||
138 | Token: p.tok.HCLToken(), | ||
139 | }) | ||
140 | case token.COLON: | ||
141 | // If we have a zero keycount it means that we never got | ||
142 | // an object key, i.e. `{ :`. This is a syntax error. | ||
143 | if keyCount == 0 { | ||
144 | return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) | ||
145 | } | ||
146 | |||
147 | // Done | ||
148 | return keys, nil | ||
149 | case token.ILLEGAL: | ||
150 | return nil, errors.New("illegal") | ||
151 | default: | ||
152 | return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) | ||
153 | } | ||
154 | } | ||
155 | } | ||
156 | |||
157 | // object parses any type of object, such as number, bool, string, object or | ||
158 | // list. | ||
159 | func (p *Parser) objectValue() (ast.Node, error) { | ||
160 | defer un(trace(p, "ParseObjectValue")) | ||
161 | tok := p.scan() | ||
162 | |||
163 | switch tok.Type { | ||
164 | case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: | ||
165 | return p.literalType() | ||
166 | case token.LBRACE: | ||
167 | return p.objectType() | ||
168 | case token.LBRACK: | ||
169 | return p.listType() | ||
170 | case token.EOF: | ||
171 | return nil, errEofToken | ||
172 | } | ||
173 | |||
174 | return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) | ||
175 | } | ||
176 | |||
177 | // object parses any type of object, such as number, bool, string, object or | ||
178 | // list. | ||
179 | func (p *Parser) object() (*ast.ObjectType, error) { | ||
180 | defer un(trace(p, "ParseType")) | ||
181 | tok := p.scan() | ||
182 | |||
183 | switch tok.Type { | ||
184 | case token.LBRACE: | ||
185 | return p.objectType() | ||
186 | case token.EOF: | ||
187 | return nil, errEofToken | ||
188 | } | ||
189 | |||
190 | return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) | ||
191 | } | ||
192 | |||
193 | // objectType parses an object type and returns a ObjectType AST | ||
194 | func (p *Parser) objectType() (*ast.ObjectType, error) { | ||
195 | defer un(trace(p, "ParseObjectType")) | ||
196 | |||
197 | // we assume that the currently scanned token is a LBRACE | ||
198 | o := &ast.ObjectType{} | ||
199 | |||
200 | l, err := p.objectList() | ||
201 | |||
202 | // if we hit RBRACE, we are good to go (means we parsed all Items), if it's | ||
203 | // not a RBRACE, it's an syntax error and we just return it. | ||
204 | if err != nil && p.tok.Type != token.RBRACE { | ||
205 | return nil, err | ||
206 | } | ||
207 | |||
208 | o.List = l | ||
209 | return o, nil | ||
210 | } | ||
211 | |||
212 | // listType parses a list type and returns a ListType AST | ||
213 | func (p *Parser) listType() (*ast.ListType, error) { | ||
214 | defer un(trace(p, "ParseListType")) | ||
215 | |||
216 | // we assume that the currently scanned token is a LBRACK | ||
217 | l := &ast.ListType{} | ||
218 | |||
219 | for { | ||
220 | tok := p.scan() | ||
221 | switch tok.Type { | ||
222 | case token.NUMBER, token.FLOAT, token.STRING: | ||
223 | node, err := p.literalType() | ||
224 | if err != nil { | ||
225 | return nil, err | ||
226 | } | ||
227 | |||
228 | l.Add(node) | ||
229 | case token.COMMA: | ||
230 | continue | ||
231 | case token.LBRACE: | ||
232 | node, err := p.objectType() | ||
233 | if err != nil { | ||
234 | return nil, err | ||
235 | } | ||
236 | |||
237 | l.Add(node) | ||
238 | case token.BOOL: | ||
239 | // TODO(arslan) should we support? not supported by HCL yet | ||
240 | case token.LBRACK: | ||
241 | // TODO(arslan) should we support nested lists? Even though it's | ||
242 | // written in README of HCL, it's not a part of the grammar | ||
243 | // (not defined in parse.y) | ||
244 | case token.RBRACK: | ||
245 | // finished | ||
246 | return l, nil | ||
247 | default: | ||
248 | return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) | ||
249 | } | ||
250 | |||
251 | } | ||
252 | } | ||
253 | |||
254 | // literalType parses a literal type and returns a LiteralType AST | ||
255 | func (p *Parser) literalType() (*ast.LiteralType, error) { | ||
256 | defer un(trace(p, "ParseLiteral")) | ||
257 | |||
258 | return &ast.LiteralType{ | ||
259 | Token: p.tok.HCLToken(), | ||
260 | }, nil | ||
261 | } | ||
262 | |||
263 | // scan returns the next token from the underlying scanner. If a token has | ||
264 | // been unscanned then read that instead. | ||
265 | func (p *Parser) scan() token.Token { | ||
266 | // If we have a token on the buffer, then return it. | ||
267 | if p.n != 0 { | ||
268 | p.n = 0 | ||
269 | return p.tok | ||
270 | } | ||
271 | |||
272 | p.tok = p.sc.Scan() | ||
273 | return p.tok | ||
274 | } | ||
275 | |||
276 | // unscan pushes the previously read token back onto the buffer. | ||
277 | func (p *Parser) unscan() { | ||
278 | p.n = 1 | ||
279 | } | ||
280 | |||
281 | // ---------------------------------------------------------------------------- | ||
282 | // Parsing support | ||
283 | |||
284 | func (p *Parser) printTrace(a ...interface{}) { | ||
285 | if !p.enableTrace { | ||
286 | return | ||
287 | } | ||
288 | |||
289 | const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " | ||
290 | const n = len(dots) | ||
291 | fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) | ||
292 | |||
293 | i := 2 * p.indent | ||
294 | for i > n { | ||
295 | fmt.Print(dots) | ||
296 | i -= n | ||
297 | } | ||
298 | // i <= n | ||
299 | fmt.Print(dots[0:i]) | ||
300 | fmt.Println(a...) | ||
301 | } | ||
302 | |||
303 | func trace(p *Parser, msg string) *Parser { | ||
304 | p.printTrace(msg, "(") | ||
305 | p.indent++ | ||
306 | return p | ||
307 | } | ||
308 | |||
309 | // Usage pattern: defer un(trace(p, "...")) | ||
310 | func un(p *Parser) { | ||
311 | p.indent-- | ||
312 | p.printTrace(")") | ||
313 | } | ||
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 0000000..dd5c72b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go | |||
@@ -0,0 +1,451 @@ | |||
1 | package scanner | ||
2 | |||
3 | import ( | ||
4 | "bytes" | ||
5 | "fmt" | ||
6 | "os" | ||
7 | "unicode" | ||
8 | "unicode/utf8" | ||
9 | |||
10 | "github.com/hashicorp/hcl/json/token" | ||
11 | ) | ||
12 | |||
// eof represents a marker rune for the end of the reader; next() and
// peek() return it once the source buffer is exhausted.
const eof = rune(0)
15 | |||
// Scanner defines a lexical scanner for the JSON variant of HCL input.
// Create instances with New; the zero value is not usable (buf is nil).
type Scanner struct {
	buf *bytes.Buffer // Source buffer for advancing and scanning
	src []byte        // Source buffer for immutable access (token text is sliced from here)

	// Source Position
	srcPos  token.Pos // current position
	prevPos token.Pos // previous position, used for peek() method

	lastCharLen int // length of last character in bytes
	lastLineLen int // length of last line in characters (for correct column reporting)

	tokStart int // token text start position
	tokEnd   int // token text end position

	// Error is called for each error encountered. If no Error
	// function is set, the error is reported to os.Stderr.
	Error func(pos token.Pos, msg string)

	// ErrorCount is incremented by one for each error encountered.
	ErrorCount int

	// tokPos is the start position of most recently scanned token; set by
	// Scan. The Filename field is always left untouched by the Scanner. If
	// an error is reported (via Error) and Position is invalid, the scanner is
	// not inside a token.
	tokPos token.Pos
}
44 | |||
45 | // New creates and initializes a new instance of Scanner using src as | ||
46 | // its source content. | ||
47 | func New(src []byte) *Scanner { | ||
48 | // even though we accept a src, we read from a io.Reader compatible type | ||
49 | // (*bytes.Buffer). So in the future we might easily change it to streaming | ||
50 | // read. | ||
51 | b := bytes.NewBuffer(src) | ||
52 | s := &Scanner{ | ||
53 | buf: b, | ||
54 | src: src, | ||
55 | } | ||
56 | |||
57 | // srcPosition always starts with 1 | ||
58 | s.srcPos.Line = 1 | ||
59 | return s | ||
60 | } | ||
61 | |||
// next reads the next rune from the buffered reader. Returns the rune(0)
// eof marker if an error occurs (or io.EOF is returned).
func (s *Scanner) next() rune {
	ch, size, err := s.buf.ReadRune()
	if err != nil {
		// advance for error reporting (size is 0 on a failed read)
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		return eof
	}

	if ch == utf8.RuneError && size == 1 {
		// Invalid UTF-8 byte: report it, but return the replacement rune
		// so scanning can continue past the bad byte.
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		s.err("illegal UTF-8 encoding")
		return ch
	}

	// remember last position so unread() can restore it
	s.prevPos = s.srcPos

	s.srcPos.Column++
	s.lastCharLen = size
	s.srcPos.Offset += size

	if ch == '\n' {
		// Record the finished line's length so columns can be reported
		// correctly after the newline (see recentPosition / Scan).
		s.srcPos.Line++
		s.lastLineLen = s.srcPos.Column
		s.srcPos.Column = 0
	}

	// debug
	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
	return ch
}
99 | |||
100 | // unread unreads the previous read Rune and updates the source position | ||
101 | func (s *Scanner) unread() { | ||
102 | if err := s.buf.UnreadRune(); err != nil { | ||
103 | panic(err) // this is user fault, we should catch it | ||
104 | } | ||
105 | s.srcPos = s.prevPos // put back last position | ||
106 | } | ||
107 | |||
108 | // peek returns the next rune without advancing the reader. | ||
109 | func (s *Scanner) peek() rune { | ||
110 | peek, _, err := s.buf.ReadRune() | ||
111 | if err != nil { | ||
112 | return eof | ||
113 | } | ||
114 | |||
115 | s.buf.UnreadRune() | ||
116 | return peek | ||
117 | } | ||
118 | |||
// Scan scans the next token and returns the token. Position and literal
// text are recorded on the returned token; errors are reported through
// s.err (so a token is always returned, possibly with an error counted).
func (s *Scanner) Scan() token.Token {
	ch := s.next()

	// skip white space
	for isWhitespace(ch) {
		ch = s.next()
	}

	var tok token.Type

	// token text markings
	s.tokStart = s.srcPos.Offset - s.lastCharLen

	// token position, initial next() is moving the offset by one(size of rune
	// actually), though we are interested with the starting point
	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
	if s.srcPos.Column > 0 {
		// common case: last character was not a '\n'
		s.tokPos.Line = s.srcPos.Line
		s.tokPos.Column = s.srcPos.Column
	} else {
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		s.tokPos.Line = s.srcPos.Line - 1
		s.tokPos.Column = s.lastLineLen
	}

	switch {
	case isLetter(ch):
		// Only the bare words "true", "false" and "null" are legal here.
		lit := s.scanIdentifier()
		if lit == "true" || lit == "false" {
			tok = token.BOOL
		} else if lit == "null" {
			tok = token.NULL
		} else {
			s.err("illegal char")
		}
	case isDecimal(ch):
		tok = s.scanNumber(ch)
	default:
		switch ch {
		case eof:
			tok = token.EOF
		case '"':
			tok = token.STRING
			s.scanString()
		case '.':
			// A lone '.' is a PERIOD; if digits follow it is a FLOAT
			// (e.g. ".5").
			tok = token.PERIOD
			ch = s.peek()
			if isDecimal(ch) {
				tok = token.FLOAT
				ch = s.scanMantissa(ch)
				ch = s.scanExponent(ch)
			}
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case '{':
			tok = token.LBRACE
		case '}':
			tok = token.RBRACE
		case ',':
			tok = token.COMMA
		case ':':
			tok = token.COLON
		case '-':
			// A '-' is only legal as the sign of a number.
			if isDecimal(s.peek()) {
				ch := s.next()
				tok = s.scanNumber(ch)
			} else {
				s.err("illegal char")
			}
		default:
			s.err("illegal char: " + string(ch))
		}
	}

	// finish token ending
	s.tokEnd = s.srcPos.Offset

	// create token literal
	var tokenText string
	if s.tokStart >= 0 {
		tokenText = string(s.src[s.tokStart:s.tokEnd])
	}
	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call

	return token.Token{
		Type: tok,
		Pos:  s.tokPos,
		Text: tokenText,
	}
}
215 | |||
// scanNumber scans a HCL number definition starting with the given rune.
// It returns NUMBER for integers and FLOAT when a fraction or exponent
// follows.
func (s *Scanner) scanNumber(ch rune) token.Type {
	// Remember whether the number starts with '0' and where scanning
	// started, so multi-digit numbers with a leading zero can be
	// rejected below.
	zero := ch == '0'
	pos := s.srcPos

	s.scanMantissa(ch)
	ch = s.next() // seek forward
	if ch == 'e' || ch == 'E' {
		ch = s.scanExponent(ch)
		return token.FLOAT
	}

	if ch == '.' {
		ch = s.scanFraction(ch)
		if ch == 'e' || ch == 'E' {
			ch = s.next()
			ch = s.scanExponent(ch)
		}
		return token.FLOAT
	}

	// Put back the rune that terminated the number (unless at eof).
	if ch != eof {
		s.unread()
	}

	// If we have a larger number and this is zero, error
	if zero && pos != s.srcPos {
		s.err("numbers cannot start with 0")
	}

	return token.NUMBER
}
248 | |||
249 | // scanMantissa scans the mantissa begining from the rune. It returns the next | ||
250 | // non decimal rune. It's used to determine wheter it's a fraction or exponent. | ||
251 | func (s *Scanner) scanMantissa(ch rune) rune { | ||
252 | scanned := false | ||
253 | for isDecimal(ch) { | ||
254 | ch = s.next() | ||
255 | scanned = true | ||
256 | } | ||
257 | |||
258 | if scanned && ch != eof { | ||
259 | s.unread() | ||
260 | } | ||
261 | return ch | ||
262 | } | ||
263 | |||
264 | // scanFraction scans the fraction after the '.' rune | ||
265 | func (s *Scanner) scanFraction(ch rune) rune { | ||
266 | if ch == '.' { | ||
267 | ch = s.peek() // we peek just to see if we can move forward | ||
268 | ch = s.scanMantissa(ch) | ||
269 | } | ||
270 | return ch | ||
271 | } | ||
272 | |||
273 | // scanExponent scans the remaining parts of an exponent after the 'e' or 'E' | ||
274 | // rune. | ||
275 | func (s *Scanner) scanExponent(ch rune) rune { | ||
276 | if ch == 'e' || ch == 'E' { | ||
277 | ch = s.next() | ||
278 | if ch == '-' || ch == '+' { | ||
279 | ch = s.next() | ||
280 | } | ||
281 | ch = s.scanMantissa(ch) | ||
282 | } | ||
283 | return ch | ||
284 | } | ||
285 | |||
286 | // scanString scans a quoted string | ||
287 | func (s *Scanner) scanString() { | ||
288 | braces := 0 | ||
289 | for { | ||
290 | // '"' opening already consumed | ||
291 | // read character after quote | ||
292 | ch := s.next() | ||
293 | |||
294 | if ch == '\n' || ch < 0 || ch == eof { | ||
295 | s.err("literal not terminated") | ||
296 | return | ||
297 | } | ||
298 | |||
299 | if ch == '"' { | ||
300 | break | ||
301 | } | ||
302 | |||
303 | // If we're going into a ${} then we can ignore quotes for awhile | ||
304 | if braces == 0 && ch == '$' && s.peek() == '{' { | ||
305 | braces++ | ||
306 | s.next() | ||
307 | } else if braces > 0 && ch == '{' { | ||
308 | braces++ | ||
309 | } | ||
310 | if braces > 0 && ch == '}' { | ||
311 | braces-- | ||
312 | } | ||
313 | |||
314 | if ch == '\\' { | ||
315 | s.scanEscape() | ||
316 | } | ||
317 | } | ||
318 | |||
319 | return | ||
320 | } | ||
321 | |||
322 | // scanEscape scans an escape sequence | ||
323 | func (s *Scanner) scanEscape() rune { | ||
324 | // http://en.cppreference.com/w/cpp/language/escape | ||
325 | ch := s.next() // read character after '/' | ||
326 | switch ch { | ||
327 | case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': | ||
328 | // nothing to do | ||
329 | case '0', '1', '2', '3', '4', '5', '6', '7': | ||
330 | // octal notation | ||
331 | ch = s.scanDigits(ch, 8, 3) | ||
332 | case 'x': | ||
333 | // hexademical notation | ||
334 | ch = s.scanDigits(s.next(), 16, 2) | ||
335 | case 'u': | ||
336 | // universal character name | ||
337 | ch = s.scanDigits(s.next(), 16, 4) | ||
338 | case 'U': | ||
339 | // universal character name | ||
340 | ch = s.scanDigits(s.next(), 16, 8) | ||
341 | default: | ||
342 | s.err("illegal char escape") | ||
343 | } | ||
344 | return ch | ||
345 | } | ||
346 | |||
347 | // scanDigits scans a rune with the given base for n times. For example an | ||
348 | // octal notation \184 would yield in scanDigits(ch, 8, 3) | ||
349 | func (s *Scanner) scanDigits(ch rune, base, n int) rune { | ||
350 | for n > 0 && digitVal(ch) < base { | ||
351 | ch = s.next() | ||
352 | n-- | ||
353 | } | ||
354 | if n > 0 { | ||
355 | s.err("illegal char escape") | ||
356 | } | ||
357 | |||
358 | // we scanned all digits, put the last non digit char back | ||
359 | s.unread() | ||
360 | return ch | ||
361 | } | ||
362 | |||
363 | // scanIdentifier scans an identifier and returns the literal string | ||
364 | func (s *Scanner) scanIdentifier() string { | ||
365 | offs := s.srcPos.Offset - s.lastCharLen | ||
366 | ch := s.next() | ||
367 | for isLetter(ch) || isDigit(ch) || ch == '-' { | ||
368 | ch = s.next() | ||
369 | } | ||
370 | |||
371 | if ch != eof { | ||
372 | s.unread() // we got identifier, put back latest char | ||
373 | } | ||
374 | |||
375 | return string(s.src[offs:s.srcPos.Offset]) | ||
376 | } | ||
377 | |||
378 | // recentPosition returns the position of the character immediately after the | ||
379 | // character or token returned by the last call to Scan. | ||
380 | func (s *Scanner) recentPosition() (pos token.Pos) { | ||
381 | pos.Offset = s.srcPos.Offset - s.lastCharLen | ||
382 | switch { | ||
383 | case s.srcPos.Column > 0: | ||
384 | // common case: last character was not a '\n' | ||
385 | pos.Line = s.srcPos.Line | ||
386 | pos.Column = s.srcPos.Column | ||
387 | case s.lastLineLen > 0: | ||
388 | // last character was a '\n' | ||
389 | // (we cannot be at the beginning of the source | ||
390 | // since we have called next() at least once) | ||
391 | pos.Line = s.srcPos.Line - 1 | ||
392 | pos.Column = s.lastLineLen | ||
393 | default: | ||
394 | // at the beginning of the source | ||
395 | pos.Line = 1 | ||
396 | pos.Column = 1 | ||
397 | } | ||
398 | return | ||
399 | } | ||
400 | |||
401 | // err prints the error of any scanning to s.Error function. If the function is | ||
402 | // not defined, by default it prints them to os.Stderr | ||
403 | func (s *Scanner) err(msg string) { | ||
404 | s.ErrorCount++ | ||
405 | pos := s.recentPosition() | ||
406 | |||
407 | if s.Error != nil { | ||
408 | s.Error(pos, msg) | ||
409 | return | ||
410 | } | ||
411 | |||
412 | fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) | ||
413 | } | ||
414 | |||
// isLetter returns true if the given rune is a letter: an ASCII letter,
// an underscore, or any non-ASCII rune for which unicode.IsLetter holds.
// (The previous comment wrongly described this as isHexadecimal.)
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
419 | |||
// isDigit returns true if the given rune is a digit: ASCII 0-9 or any
// non-ASCII rune for which unicode.IsDigit holds.
// (The previous comment wrongly described this as isHexadecimal.)
func isDigit(ch rune) bool {
	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
424 | |||
// isDecimal returns true if the given rune is an ASCII decimal digit.
// (The previous comment wrongly described this as isHexadecimal.)
func isDecimal(ch rune) bool {
	return '0' <= ch && ch <= '9'
}
429 | |||
// isHexadecimal returns true if the given rune is a hexadecimal digit
// (0-9, a-f, or A-F).
func isHexadecimal(ch rune) bool {
	switch {
	case ch >= '0' && ch <= '9', ch >= 'a' && ch <= 'f', ch >= 'A' && ch <= 'F':
		return true
	}
	return false
}
434 | |||
// isWhitespace returns true if the rune is a space, tab, newline or
// carriage return.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
439 | |||
// digitVal returns the integer value of a given octal, decimal or
// hexadecimal digit rune, or 16 — larger than any legal digit value —
// when the rune is not a digit in any of those bases.
func digitVal(ch rune) int {
	if ch >= '0' && ch <= '9' {
		return int(ch - '0')
	}
	if ch >= 'a' && ch <= 'f' {
		return int(ch-'a') + 10
	}
	if ch >= 'A' && ch <= 'F' {
		return int(ch-'A') + 10
	}
	return 16
}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go new file mode 100644 index 0000000..59c1bb7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/position.go | |||
@@ -0,0 +1,46 @@ | |||
1 | package token | ||
2 | |||
3 | import "fmt" | ||
4 | |||
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
type Pos struct {
	Filename string // filename, if any
	Offset   int    // offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count)
}

// IsValid returns true if the position is valid.
func (p *Pos) IsValid() bool {
	// Line numbering starts at 1; a zero Line means "no position".
	return p.Line > 0
}

// String returns a string in one of several forms:
//
//	file:line:column    valid position with file name
//	line:column         valid position without file name
//	file                invalid position with file name
//	-                   invalid position without file name
func (p Pos) String() string {
	out := p.Filename
	if p.IsValid() {
		if out != "" {
			out += ":"
		}
		out += fmt.Sprintf("%d:%d", p.Line, p.Column)
	}
	if out == "" {
		out = "-"
	}
	return out
}

// Before reports whether the position p is before u.
func (p Pos) Before(u Pos) bool {
	return p.Offset < u.Offset || p.Line < u.Line
}

// After reports whether the position p is after u.
func (p Pos) After(u Pos) bool {
	return p.Offset > u.Offset || p.Line > u.Line
}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 0000000..95a0c3e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go | |||
@@ -0,0 +1,118 @@ | |||
1 | package token | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "strconv" | ||
6 | |||
7 | hcltoken "github.com/hashicorp/hcl/hcl/token" | ||
8 | ) | ||
9 | |||
// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
	Type Type   // lexical class of the token
	Pos  Pos    // position of the token's first character
	Text string // literal text as it appears in the source
}
16 | |||
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int

const (
	// Special tokens
	ILLEGAL Type = iota
	EOF

	// identifier_beg/identifier_end and literal_beg/literal_end are
	// unexported range markers consumed by IsIdentifier and IsLiteral;
	// they are never produced by the scanner.
	identifier_beg
	literal_beg
	NUMBER // 12345
	FLOAT  // 123.45
	BOOL   // true,false
	STRING // "abc"
	NULL   // null
	literal_end
	identifier_end

	// operator_beg/operator_end delimit the operator/delimiter range
	// consumed by IsOperator.
	operator_beg
	LBRACK // [
	LBRACE // {
	COMMA  // ,
	PERIOD // .
	COLON  // :

	RBRACK // ]
	RBRACE // }

	operator_end
)
47 | |||
// tokens maps each Type to its human-readable name, used by
// Type.String. Indices for the unexported range markers are left empty.
var tokens = [...]string{
	ILLEGAL: "ILLEGAL",

	EOF: "EOF",

	NUMBER: "NUMBER",
	FLOAT:  "FLOAT",
	BOOL:   "BOOL",
	STRING: "STRING",
	NULL:   "NULL",

	LBRACK: "LBRACK",
	LBRACE: "LBRACE",
	COMMA:  "COMMA",
	PERIOD: "PERIOD",
	COLON:  "COLON",

	RBRACK: "RBRACK",
	RBRACE: "RBRACE",
}
68 | |||
69 | // String returns the string corresponding to the token tok. | ||
70 | func (t Type) String() string { | ||
71 | s := "" | ||
72 | if 0 <= t && t < Type(len(tokens)) { | ||
73 | s = tokens[t] | ||
74 | } | ||
75 | if s == "" { | ||
76 | s = "token(" + strconv.Itoa(int(t)) + ")" | ||
77 | } | ||
78 | return s | ||
79 | } | ||
80 | |||
81 | // IsIdentifier returns true for tokens corresponding to identifiers and basic | ||
82 | // type literals; it returns false otherwise. | ||
83 | func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } | ||
84 | |||
85 | // IsLiteral returns true for tokens corresponding to basic type literals; it | ||
86 | // returns false otherwise. | ||
87 | func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } | ||
88 | |||
89 | // IsOperator returns true for tokens corresponding to operators and | ||
90 | // delimiters; it returns false otherwise. | ||
91 | func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } | ||
92 | |||
93 | // String returns the token's literal text. Note that this is only | ||
94 | // applicable for certain token types, such as token.IDENT, | ||
95 | // token.STRING, etc.. | ||
96 | func (t Token) String() string { | ||
97 | return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) | ||
98 | } | ||
99 | |||
// HCLToken converts this token to an HCL token.
//
// The token type must be a literal type or this will panic.
func (t Token) HCLToken() hcltoken.Token {
	switch t.Type {
	case BOOL:
		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
	case FLOAT:
		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
	case NULL:
		// Note: JSON null is mapped to an HCL string with empty text.
		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
	case NUMBER:
		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
	case STRING:
		// JSON: true marks the literal as originating from JSON input.
		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
	default:
		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
	}
}
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 0000000..d9993c2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go | |||
@@ -0,0 +1,38 @@ | |||
1 | package hcl | ||
2 | |||
3 | import ( | ||
4 | "unicode" | ||
5 | "unicode/utf8" | ||
6 | ) | ||
7 | |||
// lexModeValue identifies which concrete syntax (HCL or JSON) a piece
// of input should be parsed with.
type lexModeValue byte

const (
	lexModeUnknown lexModeValue = iota
	lexModeHcl
	lexModeJson
)

// lexMode returns whether we're going to be parsing in JSON
// mode or HCL mode: input whose first non-whitespace rune is '{'
// is treated as JSON, everything else (including empty input) as HCL.
func lexMode(v []byte) lexModeValue {
	for i := 0; i < len(v); {
		r, size := utf8.DecodeRune(v[i:])
		i += size
		if unicode.IsSpace(r) {
			continue
		}
		if r == '{' {
			return lexModeJson
		}
		// First significant rune is not '{': fall through to HCL.
		break
	}

	return lexModeHcl
}
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 0000000..1fca53c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go | |||
@@ -0,0 +1,39 @@ | |||
1 | package hcl | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | |||
6 | "github.com/hashicorp/hcl/hcl/ast" | ||
7 | hclParser "github.com/hashicorp/hcl/hcl/parser" | ||
8 | jsonParser "github.com/hashicorp/hcl/json/parser" | ||
9 | ) | ||
10 | |||
// ParseBytes accepts as input byte slice and returns ast tree.
//
// Input can be either JSON or HCL; the format is auto-detected (see
// lexMode via parse).
func ParseBytes(in []byte) (*ast.File, error) {
	return parse(in)
}
17 | |||
// ParseString accepts input as a string and returns ast tree.
// The format (JSON or HCL) is auto-detected, as with ParseBytes.
func ParseString(input string) (*ast.File, error) {
	return parse([]byte(input))
}
22 | |||
23 | func parse(in []byte) (*ast.File, error) { | ||
24 | switch lexMode(in) { | ||
25 | case lexModeHcl: | ||
26 | return hclParser.Parse(in) | ||
27 | case lexModeJson: | ||
28 | return jsonParser.Parse(in) | ||
29 | } | ||
30 | |||
31 | return nil, fmt.Errorf("unknown config format") | ||
32 | } | ||
33 | |||
// Parse parses the given input and returns the root object.
//
// The input format can be either HCL or JSON; it is auto-detected.
func Parse(input string) (*ast.File, error) {
	return parse([]byte(input))
}