install pushgateway
This commit is contained in:
parent
560075055c
commit
9e3a70f925
|
@ -1,6 +1,15 @@
|
|||
{
|
||||
"version": 1,
|
||||
"dependencies": [
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/latchmihay/kube-prometheus-pushgateway.git",
|
||||
"subdir": "prometheus-pushgateway"
|
||||
}
|
||||
},
|
||||
"version": "master"
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
|
|
|
@ -61,6 +61,17 @@
|
|||
"version": "d68f9a6e0b1af7c4c4056dc2b43fb8f3bac01f43",
|
||||
"sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/ksonnet/ksonnet-lib.git",
|
||||
"subdir": ""
|
||||
}
|
||||
},
|
||||
"version": "0d2f82676817bbf9e4acf6495b2090205f323b9f",
|
||||
"sum": "h28BXZ7+vczxYJ2sCt8JuR9+yznRtU/iA6DCpQUrtEg=",
|
||||
"name": "ksonnet"
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
|
@ -91,6 +102,16 @@
|
|||
"version": "f1288f943a49344b00ed3d02cc07799da7226414",
|
||||
"sum": "u8gaydJoxEjzizQ8jY8xSjYgWooPmxw+wIWdDxifMAk="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/latchmihay/kube-prometheus-pushgateway.git",
|
||||
"subdir": "prometheus-pushgateway"
|
||||
}
|
||||
},
|
||||
"version": "8bccad50dcad36d6f65ea7d2ee90e9e54c03e0a0",
|
||||
"sum": "hz7E205TO/mHdpOlZGhGE/7qCbF7QWgsGAKWrdGoj1o="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
local kp = (import 'kube-prometheus/main.libsonnet') + {
|
||||
local kp =
|
||||
(import 'kube-prometheus/main.libsonnet') +
|
||||
(import 'prometheus-pushgateway/pushgateway.libsonnet') +
|
||||
{
|
||||
values+:: {
|
||||
common+: {
|
||||
namespace: 'monitoring',
|
||||
|
@ -20,4 +23,5 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
|
|||
[kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane)] +
|
||||
[kp.prometheus[name] for name in std.objectFields(kp.prometheus)] +
|
||||
[kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter)] +
|
||||
[kp.grafana[name] for name in std.objectFields(kp.grafana)]
|
||||
[kp.grafana[name] for name in std.objectFields(kp.grafana)] +
|
||||
[kp.pushgateway[name] for name in std.objectFields(kp.pushgateway)]
|
||||
|
|
5
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/.editorconfig
generated
vendored
Normal file
5
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/.editorconfig
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
|||
root = true
|
||||
|
||||
[*.jsonnet]
|
||||
indent_size = 2
|
||||
indent_style = space
|
41
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/.gitignore
generated
vendored
Normal file
41
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,41 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# External packages folder
|
||||
vendor/
|
||||
tmp/
|
||||
|
||||
# Project-specific working space
|
||||
/charts/
|
||||
|
||||
#stray unwanted in fork
|
||||
.DS_Store
|
||||
|
||||
/ksonnet-gen/ksonnet-gen
|
||||
|
||||
.vscode
|
27
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/.travis.yml
generated
vendored
Normal file
27
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
language: go
|
||||
|
||||
env:
|
||||
global:
|
||||
- JSONNET_BIN_URL=https://s3-us-west-2.amazonaws.com/ksonnet-ci/dist/linux-amd64/jsonnet
|
||||
- JSONNET_BIN=/tmp/jsonnet
|
||||
|
||||
go:
|
||||
- "1.10.x"
|
||||
- master
|
||||
|
||||
matrix:
|
||||
# allow master to fail
|
||||
allow_failures:
|
||||
- go: master
|
||||
|
||||
# don't wait for items with allowed failures to finish on error
|
||||
fast_finish: true
|
||||
|
||||
before_install:
|
||||
- sudo apt-get -qq update
|
||||
- sudo apt-get install libstdc++6
|
||||
|
||||
script:
|
||||
- curl -o ${JSONNET_BIN} ${JSONNET_BIN_URL}
|
||||
- chmod 755 ${JSONNET_BIN}
|
||||
- go test -v -race ./...
|
38
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/CODE-OF-CONDUCT.md
generated
vendored
Normal file
38
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/CODE-OF-CONDUCT.md
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
## ksonnet Community Code of Conduct
|
||||
|
||||
### Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of fostering
|
||||
an open and welcoming community, we pledge to respect all people who contribute
|
||||
through reporting issues, posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project a harassment-free experience for
|
||||
everyone, regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
|
||||
religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information, such as physical or electronic addresses,
|
||||
without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are not
|
||||
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
|
||||
commit themselves to fairly and consistently applying these principles to every aspect
|
||||
of managing this project. Project maintainers who do not follow or enforce the Code of
|
||||
Conduct may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a project maintainer, Alex Clemmer (alex@heptio.com) and TBD.
|
||||
|
||||
This Code of Conduct is adapted from the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) and [Contributor Covenant](http://contributor-covenant.org/version/1/2/0/), version 1.2.0.
|
||||
|
58
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/CONTRIBUTING.md
generated
vendored
Normal file
58
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/CONTRIBUTING.md
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
|
|||
## DCO Sign off
|
||||
|
||||
All authors to the project retain copyright to their work. However, to ensure
|
||||
that they are only submitting work that they have rights to, we are requiring
|
||||
everyone to acknowldge this by signing their work.
|
||||
|
||||
Any copyright notices in this repos should specify the authors as "The
|
||||
heptio/aws-quickstart authors".
|
||||
|
||||
To sign your work, just add a line like this at the end of your commit message:
|
||||
|
||||
```
|
||||
Signed-off-by: Joe Beda <joe@heptio.com>
|
||||
```
|
||||
|
||||
This can easily be done with the `--signoff` option to `git commit`.
|
||||
|
||||
By doing this you state that you can certify the following (from https://developercertificate.org/):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
1 Letterman Drive
|
||||
Suite D4700
|
||||
San Francisco, CA, 94129
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
63
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/Dockerfile
generated
vendored
Normal file
63
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/Dockerfile
generated
vendored
Normal file
|
@ -0,0 +1,63 @@
|
|||
# Builds a Docker image that allows you to run Jsonnet, kubecfg, and/or ksonnet
|
||||
# on a file in your local directory. Specifically, this image contains:
|
||||
#
|
||||
# 1. Jsonnet, added to /usr/local/bin
|
||||
# 2. ksonnet-lib, added to the Jsonnet library paths, so you can
|
||||
# compile against the ksonnet libraries without specifying the -J
|
||||
# flag.
|
||||
# 3. kubecfg binary, added to /usr/local/bin
|
||||
# 4. kubecfg lib, included in Jsonnet library paths via KUBECFG_JPATH,
|
||||
# similarly to (2) ksonnet-lib.
|
||||
#
|
||||
# USAGE: Define a function like `ksonnet` below, and then run:
|
||||
#
|
||||
# `ksonnet <jsonnet-file-and-options-here>`
|
||||
#
|
||||
# ksonnet() {
|
||||
# docker run -it --rm \
|
||||
# --volume "$PWD":/wd \
|
||||
# --workdir /wd \
|
||||
# ksonnet \
|
||||
# jsonnet "$@"
|
||||
# }
|
||||
#
|
||||
# You can also define a similar function for `kubecfg`. Note that any required
|
||||
# Jsonnet libraries specified by -J (required for compilation) need to be
|
||||
# described relative to your working directory.
|
||||
|
||||
##############################################
|
||||
# STAGE 1: build kubecfg
|
||||
##############################################
|
||||
|
||||
FROM golang:1.8 as kubecfg-builder
|
||||
# Keep this in sync with the corresponding ENV in stage 2
|
||||
ENV KUBECFG_VERSION v0.5.0
|
||||
|
||||
RUN go get github.com/ksonnet/kubecfg
|
||||
WORKDIR /go/src/github.com/ksonnet/kubecfg
|
||||
RUN git checkout tags/${KUBECFG_VERSION} -b ${KUBECFG_VERSION}
|
||||
RUN CGO_ENABLED=1 GOOS=linux go install -a --ldflags '-linkmode external -extldflags "-static"' .
|
||||
|
||||
##############################################
|
||||
# STAGE 2: build jsonnet and download ksonnet
|
||||
##############################################
|
||||
|
||||
FROM alpine:3.6
|
||||
ENV KUBECFG_VERSION v0.5.0
|
||||
ENV JSONNET_VERSION v0.9.4
|
||||
|
||||
# Copy kubecfg executable and lib files from previous stage
|
||||
RUN mkdir -p /usr/share/kubecfg/${KUBECFG_VERSION}
|
||||
COPY --from=kubecfg-builder /go/bin/kubecfg /usr/local/bin/
|
||||
COPY --from=kubecfg-builder /go/src/github.com/ksonnet/kubecfg/lib/ /usr/share/kubecfg/${KUBECFG_VERSION}/
|
||||
ENV KUBECFG_JPATH /usr/share/kubecfg/${KUBECFG_VERSION}
|
||||
|
||||
# Get Jsonnet.
|
||||
RUN apk update && apk add git make g++
|
||||
RUN git clone https://github.com/google/jsonnet.git
|
||||
RUN cd jsonnet && git checkout tags/${JSONNET_VERSION} -b ${JSONNET_VERSION} && make -j4 && mv jsonnet /usr/local/bin
|
||||
|
||||
# Get ksonnet-lib, add to the Jsonnet -J path.
|
||||
RUN git clone https://github.com/ksonnet/ksonnet-lib.git
|
||||
RUN mkdir -p /usr/share/${JSONNET_VERSION}
|
||||
RUN cp -r ksonnet-lib/ksonnet.beta.2 /usr/share/${JSONNET_VERSION}
|
201
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/LICENSE
generated
vendored
Normal file
201
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
221
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/README.md
generated
vendored
Normal file
221
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/README.md
generated
vendored
Normal file
|
@ -0,0 +1,221 @@
|
|||
# ksonnet: Simplify working with Kubernetes
|
||||
|
||||
**ksonnet** (currently in beta testing) provides a simpler alternative
|
||||
to writing complex YAML for your Kubernetes configurations. Instead,
|
||||
you write template functions against the [Kubernetes application
|
||||
API][v1] using the data templating language [Jsonnet][jsonnet] .
|
||||
Components called **mixins** also help simplify the work that's
|
||||
required to extend your configuration as your application scales up.
|
||||
|
||||
![Jsonnet syntax highlighting][jsonnet-demo]
|
||||
|
||||
Other projects help simplify the work of writing a Kubernetes
|
||||
configuration by creating a simpler API that wraps the Kubernetes
|
||||
API. These projects include [Kompose][Kompose],
|
||||
[OpenCompose][OpenCompose], and [compose2kube][compose2kube].
|
||||
|
||||
**ksonnet** instead streamlines the process of writing
|
||||
configurations that create native Kubernetes objects.
|
||||
|
||||
## Install
|
||||
|
||||
First, install Jsonnet.
|
||||
|
||||
### Mac OS X
|
||||
|
||||
If you do not have Homebrew installed, [install it now](https://brew.sh/).
|
||||
|
||||
Then run:
|
||||
|
||||
`brew install jsonnet`
|
||||
|
||||
### Linux
|
||||
|
||||
You must build the binary. For details, [see the GitHub
|
||||
repository](https://github.com/google/jsonnet).
|
||||
|
||||
## Run
|
||||
|
||||
Fork or clone this repository, using a command such as:
|
||||
|
||||
```shell
|
||||
git clone git@github.com:ksonnet/ksonnet-lib.git
|
||||
```
|
||||
|
||||
Then add the appropriate import
|
||||
statements for the library to your Jsonnet code:
|
||||
|
||||
```jsonnet
|
||||
local k = import "ksonnet.beta.2/k.libsonnet";
|
||||
```
|
||||
|
||||
Jsonnet `import` statements look along a "search path" specified using
|
||||
`jsonnet -J <path>`. To use **ksonnet**, the search path should
|
||||
include the root of the `ksonnet-lib` git repository. You should add
|
||||
additional `-J` paths as you build up your own local libraries.
|
||||
|
||||
Jsonnet does not yet support [ES2016-style](https://github.com/google/jsonnet/issues/307) imports,
|
||||
so it is common to "unpack" an import with a series of `local` definitions:
|
||||
|
||||
```jsonnet
|
||||
local container = k.core.v1.container;
|
||||
local deployment = k.extensions.v1beta1.deployment;
|
||||
```
|
||||
|
||||
### Tools
|
||||
|
||||
Developed in tandem with `ksonnet-lib` is
|
||||
[`vscode-jsonnet`](https://github.com/heptio/vscode-jsonnet), a static
|
||||
analysis toolset written as a [Visual Studio
|
||||
Code](https://code.visualstudio.com/) plugin, meant to provide
|
||||
features such as autocomplete, syntax highlighting, and static
|
||||
analysis.
|
||||
|
||||
### Get started
|
||||
|
||||
If you're not familiar with **Jsonnet**, check out the
|
||||
[website](http://jsonnet.org/index.html) and [their
|
||||
tutorial](http://jsonnet.org/docs/tutorial.html). For usage, see the [command
|
||||
line tool](http://jsonnet.org/implementation/commandline.html).
|
||||
|
||||
You can also start writing `.libsonnet` or `.jsonnet` files based on
|
||||
the examples in this readme. Then run the
|
||||
following command:
|
||||
|
||||
```bash
|
||||
jsonnet -J /path/to/ksonnet-lib <filename.libsonnet>
|
||||
```
|
||||
|
||||
This command produces a JSON file that you can then run the
|
||||
appropriate `kubectl`
|
||||
commands against, with the following syntax:
|
||||
|
||||
```bash
|
||||
kubectl <command> -<options> <filename.json>
|
||||
```
|
||||
|
||||
## Write your config files with ksonnet
|
||||
|
||||
The YAML for the Kubernetes
|
||||
[nginx hello world tutorial][helloworld] looks
|
||||
like this:
|
||||
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-deployment
|
||||
spec:
|
||||
replicas: 2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:1.7.9
|
||||
ports:
|
||||
- containerPort: 80
|
||||
```
|
||||
|
||||
Instead, you can write the following **ksonnet** code:
|
||||
|
||||
```javascript
|
||||
local k = import "ksonnet.beta.2/k.libsonnet";
|
||||
|
||||
// Specify the import objects that we need
|
||||
local container = k.extensions.v1beta1.deployment.mixin.spec.template.spec.containersType;
|
||||
local containerPort = container.portsType;
|
||||
local deployment = k.extensions.v1beta1.deployment;
|
||||
|
||||
local targetPort = 80;
|
||||
local podLabels = {app: "nginx"};
|
||||
|
||||
local nginxContainer =
|
||||
container.new("nginx", "nginx:1.7.9") +
|
||||
container.ports(containerPort.containerPort(targetPort));
|
||||
|
||||
local nginxDeployment =
|
||||
deployment.new("nginx-deployment", 2, nginxContainer, podLabels);
|
||||
|
||||
k.core.v1.list.new(nginxDeployment)
|
||||
```
|
||||
|
||||
Save the file as `helloworld.libsonnet`, then run:
|
||||
|
||||
```bash
|
||||
jsonnet -J </path/to/ksonnet-lib> helloworld.libsonnet > deployment.json
|
||||
```
|
||||
|
||||
This command creates the `deployment.json` file that the
|
||||
**ksonnet** snippet defines.
|
||||
|
||||
You can now apply this deployment to your Kubernetes cluster
|
||||
by running the following command:
|
||||
|
||||
```bash
|
||||
kubectl apply -f deployment.json
|
||||
```
|
||||
|
||||
## The **ksonnet** libraries
|
||||
|
||||
The **ksonnet** project organizes libraries by the level of
|
||||
abstraction they approach. For most users, the right entry point is:
|
||||
|
||||
* `ksonnet.beta.2/k.libsonnet`: higher-level abstractions and methods
|
||||
to help create complex Kubernetes objects out of smaller objects
|
||||
|
||||
`k.libsonnet` is built on top of a utility library, `k8s.libsonnet`,
|
||||
that is generated directly from the OpenAPI definition.
|
||||
|
||||
## Mixins
|
||||
|
||||
Mixins are a core feature of **ksonnet**. Conceptually, they provide dynamic inheritance, at
|
||||
runtime instead of compile time, which lets you combine them freely to modify objects or
|
||||
create new ones.
|
||||
|
||||
**ksonnet** ships with a large library of built-in mixins, or you can write your own custom mixins.
|
||||
The [tutorial](/docs/TUTORIAL.md) shows you how to create a custom mixin that you can then
|
||||
easily add as a Sidecar container to your Kubernetes cluster.
|
||||
|
||||
## Contributing
|
||||
|
||||
Thanks for taking the time to join our community and start
|
||||
contributing!
|
||||
|
||||
### Before you start
|
||||
|
||||
* Please familiarize yourself with the [Code of
|
||||
Conduct](https://github.com/ksonnet/ksonnet-lib/blob/master/CODE-OF-CONDUCT.md) before contributing.
|
||||
* See [CONTRIBUTING.md](https://github.com/ksonnet/ksonnet-lib/blob/master/CONTRIBUTING.md) for instructions on the
|
||||
developer certificate of origin that we require.
|
||||
|
||||
### Pull requests
|
||||
|
||||
* We welcome pull requests. Feel free to dig through the
|
||||
[issues](https://github.com/ksonnet/ksonnet-lib/issues) and jump in.
|
||||
|
||||
## Contact us
|
||||
|
||||
Have any questions or long-form feedback? You can always find us here:
|
||||
|
||||
* Our [Slack channel](https://ksonnet.slack.com) [working having an auto-invite system!)
|
||||
* Our [mailing list](https://groups.google.com/forum/#!forum/ksonnet).
|
||||
* We monitor the [ksonnet
|
||||
tag](https://stackoverflow.com/questions/tagged/ksonnet) on Stack
|
||||
Overflow.
|
||||
|
||||
[jsonnet]: http://jsonnet.org/ "Jsonnet"
|
||||
[v1]: https://kubernetes.io/docs/api-reference/v1/definitions/ "V1 API objects"
|
||||
[v1Container]: https://kubernetes.io/docs/api-reference/v1/definitions/#_v1_container "v1.Container"
|
||||
[Kompose]: https://github.com/kubernetes-incubator/kompose "Kompose"
|
||||
[OpenCompose]: https://github.com/redhat-developer/opencompose "OpenCompose"
|
||||
[compose2kube]: https://github.com/kelseyhightower/compose2kube "compose2kube"
|
||||
|
||||
[helloworld]: https://kubernetes.io/docs/tutorials/stateless-application/run-stateless-application-deployment/ "Hello, Kubernetes!"
|
||||
[v1hellojsonnet]: https://github.com/ksonnet/ksonnet-lib/blob/master/examples/hello-world/hello.v1.jsonnet "Hello, Jsonnet (v1)!"
|
||||
[v2hellojsonnet]: https://github.com/ksonnet/ksonnet-lib/blob/master/examples/hello-world/hello.v2.jsonnet "Hello, Jsonnet (v2)!"
|
||||
[deploymentspec]: https://kubernetes.io/docs/api-reference/extensions/v1beta1/definitions/#_v1beta1_deploymentspec "v1.DeploymentSpec"
|
||||
|
||||
[jsonnet-demo]: docs/images/kube-demo.gif
|
26
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ROADMAP.md
generated
vendored
Normal file
26
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ROADMAP.md
generated
vendored
Normal file
|
@ -0,0 +1,26 @@
|
|||
# ksonnet Roadmap
|
||||
|
||||
## State of the release
|
||||
This is a living document that in the coming weeks will be organized and grouped by release.
|
||||
ksonnet is currently considered to be in a pre-generally available (0.1) state.
|
||||
Current versions are intended for prototyping activities and we don't recommend production use at this time.
|
||||
|
||||
## Upcoming features
|
||||
|
||||
### Usability Enhancements
|
||||
* Higher level abstractions, starter kits
|
||||
* App centric grouping of components; e.g. config is “child” of code
|
||||
* Reference pattern for SCM based config workflow
|
||||
* CI/CD integration toolkit
|
||||
|
||||
### Tooling Enhancements
|
||||
* Extend kubectl
|
||||
* IDE: type suggestion, arg types, more editors
|
||||
* Server side evaluation, realize template as operator
|
||||
* Configuration linting and validation
|
||||
|
||||
### Packaging Enhancements
|
||||
* Integrate with Helm
|
||||
* Package management of ksonnet libraries
|
||||
* Network library download
|
||||
* Published library of sidecar mix-ins
|
20
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/docs/TODO.md
generated
vendored
Normal file
20
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/docs/TODO.md
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
# TODO
|
||||
|
||||
A list of things to do before we share outside the company.
|
||||
|
||||
* [ ] Finish making the README useful. (Incorporate Joe's suggestions.)
|
||||
* [ ] Explicitly declare the goals of the project, and add a roadmap that articulates how we are (currently) planning on accomplishing them.
|
||||
* [ ] Create a specific "how to use" section for (_e.g._) the GitLab folks.
|
||||
* [x] Create compelling "Hello World" example.
|
||||
* [ ] Move to a constructor-based paradigm. (_e.g._,
|
||||
`container.New(...) + container.Ports(...)`.)
|
||||
* This should be both more familiar to developers, and also enables
|
||||
us to do things like verify that we're `+`'ing components together
|
||||
that make sense.
|
||||
* [ ] Add some rudimentary type checking for the `+` operator.
|
||||
(_e.g._, we should error if you try to add a `container` to a
|
||||
`deployment`, or something.)
|
||||
* [ ] Add the ability to add a container to the containers list.
|
||||
(_e.g._, `pod.Container(...) + pod.Container(...)` vs
|
||||
`pod.Container([...])`.)
|
||||
* [ ] Move from `kube.v1` -> `kube.core.v1`.
|
299
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/docs/TUTORIAL.md
generated
vendored
Normal file
299
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/docs/TUTORIAL.md
generated
vendored
Normal file
|
@ -0,0 +1,299 @@
|
|||
# Tutorial
|
||||
|
||||
One of the strengths of **ksonnet** mixin libraries is their ability
|
||||
to allow users to separate a Kubernetes application into several
|
||||
modular components.
|
||||
|
||||
For example, a team might split into application and logging subteams.
|
||||
Rather than writing a single YAML file that combines them into a
|
||||
single Kubernetes app, the logging team can simply write a mixin
|
||||
library that the application team can use to add logging to their
|
||||
Kubernetes application definition.
|
||||
|
||||
In this tutorial, we will explore how such libraries are constructed,
|
||||
using a mixin library for [fluentd][fluentd] (hosted in the official
|
||||
[mixins repository][fluentd-mixin]). Specifically, we see how one team
|
||||
writing an app using [Elasticsearch][elastic] can use the Fluentd
|
||||
mixin library to easily configure Fluentd to tail the
|
||||
Elasticsearch logs and pass them to Kibana to be rendered in a dashboard.
|
||||
|
||||
For more information about Elasticsearch and Kibana, see [the Elastic
|
||||
website][elastic]. For `fluentd`, see [the Fluentd website][fluentd].
|
||||
|
||||
## Requirements to build your own mixins
|
||||
|
||||
If you want to build your own mixin libraries, or write **ksonnet**
|
||||
using the built-in mixins, you need to perform the following tasks.
|
||||
For details, see the [readme][readme].
|
||||
|
||||
* Install **Jsonnet**, version 0.9.4 or later
|
||||
* Clone the **ksonnet** repository locally
|
||||
* Install and configure the Visual Studio Code extension (optional)
|
||||
* Create a test Kubernetes cluster
|
||||
|
||||
## Architecture and design
|
||||
|
||||
The idea of the application is for Elasticsearch to emit logs to
|
||||
standard out, and for Fluentd to tail those logs and send them to
|
||||
Kibana for rendering.
|
||||
|
||||
In Kubernetes, accessing the `Pod` logs involves:
|
||||
|
||||
* Giving the Fluentd container permissions to access the `Pod` logs,
|
||||
and
|
||||
* Appending volume mounts that contain the `Pod` logs, to the Fluentd
|
||||
container, so that it can access them.
|
||||
|
||||
We'll walk through the key parts of the example files in detail,
|
||||
but at a high level this implementation is broken up as:
|
||||
|
||||
* A `DaemonSet` that causes Fluentd to run once on every machine, so
|
||||
that it can tail `Pod` logs for Elasticsearch running anywhere in
|
||||
the cluster.
|
||||
|
||||
On its own, this `DaemonSet` only contains the core Fluentd
|
||||
application definition. For example, it has no permissions to access
|
||||
(_e.g._) `Pod` Logs, or the volume mounts required to access them.
|
||||
* A separate mixin that defines the `VolumeMounts` and `Volumes` that
|
||||
the `DaemonSet` requires to access the `Pod` Logs.
|
||||
* A separate mixin that configures the access permissions for the
|
||||
`DaemonSet`
|
||||
* The RBAC objects that the cluster administrator must send to the
|
||||
cluster so that the `ServiceAccount` associated with Fluentd can be
|
||||
granted permission to obtain the `Pod` logs.
|
||||
|
||||
The power of this approach lies in its separation of concerns: an
|
||||
application developer can define the `DaemonSet`, while a cluster
|
||||
admin can define the access permissions that this or any other
|
||||
`DaemonSet` might require. The `DaemonSet` or the access permissions
|
||||
can be modified as needed without requiring a complete cluster
|
||||
reconfiguration. Indeed, as the `DaemonSet` mixin demonstrates, the
|
||||
details of the `DaemonSet` (in this case, the `Volumes` and
|
||||
`VolumeMounts`) can also be adjusted without having to touch the base
|
||||
`DaemonSet` definition.
|
||||
|
||||
### Define mixins to configure access to pod logs
|
||||
|
||||
Let's look at how we can decouple the pieces of a complete Fluentd
|
||||
configuration, so that your logging team, for example, can write just
|
||||
the core of a Fluentd DaemonSet, and then write a **ksonnet** library
|
||||
that lets you customize key details of the configuration as needed.
|
||||
|
||||
`fluentd-es-ds.jsonnet` defines a basic DaemonSet, and then adds access permissions to it.
|
||||
|
||||
```javascript
|
||||
// daemonset
|
||||
local ds =
|
||||
// base daemonset
|
||||
fluentd.app.daemonSetBuilder.new(config) +
|
||||
// add configuration for access to pod logs
|
||||
fluentd.app.daemonSetBuilder.configureForPodLogs(config);
|
||||
|
||||
// create access permissions for pod logs
|
||||
local rbacObjs = fluentd.app.admin.rbacForPodLogs(config);
|
||||
```
|
||||
|
||||
Note that our base DaemonSet can't do anything. It doesn't know where
|
||||
the pod logs that it needs are -- it needs Volumes and VolumeMounts to
|
||||
provide this information. It also needs access permissions, provided
|
||||
with RBAC. So we add these items separately. Let's look more closely
|
||||
at the advantages of this approach.
|
||||
|
||||
In `fluentd.libsonnet`, we define the `daemonSet` mixin. Here is where
|
||||
we start to see the real power of **ksonnet** mixins at work. This
|
||||
mixin specifies the VolumeMounts and Volumes that Fluentd requires
|
||||
separately from the DaemonSet definition itself. This approach lets us
|
||||
decouple application definitions from deployment details.
|
||||
|
||||
Note particularly in the following snippet the `containerSelector`
|
||||
parameter to `addHostMountedPodLogs`. We pass this function to
|
||||
`ds.mapContainers` to iterate over our containers (in this case, our
|
||||
Fluentd containers) and add the VolumeMounts that they need. (The
|
||||
details of the pod logs have also been abstracted away to their own
|
||||
function.)
|
||||
|
||||
```javascript
|
||||
mixin:: {
|
||||
daemonSet:: {
|
||||
// Takes two volume names and produces a
|
||||
// mixin that mounts the Kubernetes pod logs into a set of
|
||||
// containers specified by `containerSelector`.
|
||||
addHostMountedPodLogs(
|
||||
varlogName, podlogsName, containerSelector=function(c) true
|
||||
)::
|
||||
local podLogs = $.parts.podLogs(varlogName, podlogsName);
|
||||
|
||||
// Add volume to DaemonSet.
|
||||
ds.mixin.spec.template.spec.volumes([
|
||||
podLogs.varLogVolume,
|
||||
podLogs.podLogVolume,
|
||||
]) +
|
||||
|
||||
// Iterate over a specified set of containers to add the VolumeMounts
|
||||
ds.mapContainers(
|
||||
function (c)
|
||||
if containerSelector(c)
|
||||
then
|
||||
c + container.volumeMounts([
|
||||
podLogs.varLogMount,
|
||||
podLogs.podLogMount,
|
||||
])
|
||||
else c),
|
||||
},
|
||||
},
|
||||
```
|
||||
|
||||
The `daemonSetBuilder` that we used to create the DaemonSet calls our
|
||||
`daemonSet` mixin, and also defines the `configureForPodLogs` function
|
||||
that the DaemonSet needs. But the DaemonSet itself, from our first
|
||||
code snippet, doesn't need to know any of these details:
|
||||
|
||||
```javascript
|
||||
daemonSetBuilder:: {
|
||||
new(config):: {
|
||||
toArray():: [self.daemonSet],
|
||||
daemonSet:: $.parts.daemonSet(config.daemonSet.name, config.container.name, config.container.tag, config.namespace)
|
||||
},
|
||||
|
||||
// access configuration
|
||||
configureForPodLogs(
|
||||
config,
|
||||
varlogVolName="varlog",
|
||||
podLogsVolName="varlibdockercontainers",
|
||||
)::
|
||||
{} + {
|
||||
daemonSet+::
|
||||
$.mixin.daemonSet.addHostMountedPodLogs(
|
||||
varlogVolName,
|
||||
podLogsVolName,
|
||||
$.util.containerNameInSet(config.container.name)) +
|
||||
// RBAC and service account
|
||||
ds.mixin.spec.template.spec.serviceAccountName(config.rbac.accountName)
|
||||
},
|
||||
},
|
||||
```
|
||||
|
||||
In the previous snippet, we notice that we're specifying a Service
|
||||
Account, and RBAC is involved. It's time to define our RBAC objects so
|
||||
that our Fluentd access permissions mean something.
|
||||
|
||||
### Define RBAC objects
|
||||
|
||||
We define RBAC objects separately so that they can be managed
|
||||
independently of the rest of the cluster configuration. This approach
|
||||
lets cluster admins and application developers work independently.
|
||||
Your cluster admins can determine and define access permissions that
|
||||
can be applied to application configurations with a few lines of code.
|
||||
|
||||
Defining access permissions in Kubernetes requires definition of the
|
||||
RBAC objects that are encapsulated in this definition (from
|
||||
`fluentd.libsonnet`).
|
||||
|
||||
```javascript
|
||||
admin:: {
|
||||
rbacForPodLogs(config)::
|
||||
$.parts.rbac(config.rbac.accountName, config.namespace),
|
||||
},
|
||||
```
|
||||
|
||||
Let's unpack this snippet.
|
||||
|
||||
`fluentd.libsonnet` also defines all the required RBAC objects. Note
|
||||
especially that we abstract the attributes of the Service Account
|
||||
separately and assign their values in a separate `config` object. This
|
||||
approach lets us make sure that the correct Service Account is
|
||||
appropriately associated with all required objects.
|
||||
|
||||
```javascript
|
||||
rbac(name, namespace)::
|
||||
local metadata = svcAccount.mixin.metadata.name(name) +
|
||||
svcAccount.mixin.metadata.namespace(namespace);
|
||||
|
||||
local hcServiceAccount = svcAccount.new() +
|
||||
metadata;
|
||||
|
||||
local hcClusterRole =
|
||||
clRole.new() +
|
||||
metadata +
|
||||
clRole.rules(
|
||||
rule.new() +
|
||||
rule.apiGroups("*") +
|
||||
rule.resources(["pods", "nodes"]) +
|
||||
rule.verbs(["list", "watch"])
|
||||
);
|
||||
|
||||
local hcClusterRoleBinding =
|
||||
clRoleBinding.new() +
|
||||
metadata +
|
||||
clRoleBinding.mixin.roleRef.apiGroup("rbac.authorization.k8s.io") +
|
||||
clRoleBinding.mixin.roleRef.name(name) +
|
||||
clRoleBinding.mixin.roleRef.mixinInstance({kind: "ClusterRole"}) +
|
||||
clRoleBinding.subjects(
|
||||
subject.new() +
|
||||
subject.name(name) +
|
||||
subject.namespace(namespace)
|
||||
{kind: "ServiceAccount"}
|
||||
);
|
||||
|
||||
```
|
||||
|
||||
In `fluentd-es-ds.jsonnet` we define our config thus:
|
||||
|
||||
```javascript
|
||||
local config = {
|
||||
namespace:: "elasticsearch",
|
||||
container:: {
|
||||
name:: "fluentd-es",
|
||||
tag:: "1.22",
|
||||
},
|
||||
daemonSet:: {
|
||||
name:: "fluentd-es-v1.22",
|
||||
},
|
||||
rbac:: {
|
||||
accountName:: "fluentd-serviceaccount"
|
||||
},
|
||||
};
|
||||
```
|
||||
|
||||
The relevant fields here are `namespace` and `AccountName`, which we
|
||||
pass as the arguments that our RBAC snippet needs when it calls the
|
||||
`rbac` function.
|
||||
|
||||
## Wrap it all up
|
||||
|
||||
Here's where we started, with our simple DaemonSet, its pod logging,
|
||||
and its access permissions. But now you've seen what's going on
|
||||
underneath -- not just how the functions for adding pod logs and
|
||||
permissions are clearly separated, but how we can customize them as
|
||||
needed without having to rewrite the entire configuration.
|
||||
|
||||
```javascript
|
||||
// daemonset
|
||||
local ds =
|
||||
// base daemonset
|
||||
fluentd.app.daemonSetBuilder.new(config) +
|
||||
// add configuration for access to pod logs
|
||||
fluentd.app.daemonSetBuilder.configureForPodLogs(config);
|
||||
|
||||
// create access permissions for pod logs
|
||||
local rbacObjs = fluentd.app.admin.rbacForPodLogs(config);
|
||||
```
|
||||
|
||||
## Explore further
|
||||
|
||||
The GitHub example directory also includes the generated JSON files.
|
||||
Examine them to help understand the details of how **ksonnet**'s
|
||||
decomposition and abstraction are compiled into complete
|
||||
configurations.
|
||||
|
||||
As you start to write your own custom mixins, look also at how we
|
||||
break down the basic **ksonnet** imports into smaller component
|
||||
objects for easier manipulation.
|
||||
|
||||
And feel free to contribute your own examples to our mixins
|
||||
repository!
|
||||
|
||||
[readme]: ../readme.md "ksonnet readme"
|
||||
[fluentd-mixin]: https://github.com/ksonnet/mixins/tree/master/incubator/fluentd
|
||||
[fluentd]: http://www.fluentd.org/architecture
|
||||
[elastic]: https://www.elastic.co/products
|
BIN
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/docs/images/kube-demo.gif
generated
vendored
Normal file
BIN
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/docs/images/kube-demo.gif
generated
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 177 KiB |
215
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/docs/jsonnetIntro.md
generated
vendored
Normal file
215
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/docs/jsonnetIntro.md
generated
vendored
Normal file
|
@ -0,0 +1,215 @@
|
|||
# Introduction to Jsonnet
|
||||
|
||||
If you're not familiar with **Jsonnet**, this brief introduction can
|
||||
help you get started with **ksonnet**. See also the **Jsonnet**
|
||||
[tutorial][jsonnetTutorial].
|
||||
|
||||
## References, variables, simple JSON templating
|
||||
|
||||
You can think of **Jsonnet** as a domain-specific language
|
||||
that can be extended to provide templating for other
|
||||
languages. Think JSON, but with:
|
||||
|
||||
* variables ([lexically-scoped locals][jsonnetLocals] and
|
||||
JsonPath-style [references][jsonnetReferences])
|
||||
* [functions][jsonnetFunctions]
|
||||
* some notion of [object-oriented inheritance between JSON
|
||||
objects][jsonnetOO]
|
||||
* the ability to define libraries and [import][jsonnetImports] them
|
||||
* [cleaner syntax][jsonnetSyntax].
|
||||
|
||||
This introduction focuses on the first three items.
|
||||
|
||||
## Local variables and references
|
||||
|
||||
In Jsonnet, you can define lexically-scoped local variables:
|
||||
|
||||
```javascript
|
||||
{
|
||||
local foo = "bar",
|
||||
baz: foo,
|
||||
}
|
||||
```
|
||||
|
||||
which produces:
|
||||
|
||||
```json
|
||||
{ "baz": "bar" }
|
||||
```
|
||||
|
||||
**Jsonnet** also exposes a `self` to access properties of the
|
||||
current object, and a JsonPath-style `$`, which refers to the root
|
||||
object (the grandparent that is farthest away from the `$`):
|
||||
|
||||
```javascript
|
||||
{
|
||||
foo: "bar",
|
||||
baz: self.foo,
|
||||
cow: {
|
||||
moo: $.foo,
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"foo": "bar",
|
||||
"baz": "bar",
|
||||
"cow": { "moo": "bar" }
|
||||
}
|
||||
```
|
||||
|
||||
It is worth noting that both `local` variables and references are
|
||||
_order-independent_, which is a decision that largely falls out of
|
||||
JSON's design. Notice, for example, that if we re-order `foo` and
|
||||
`baz`, it does not affect the output of Jsonnet:
|
||||
|
||||
```javascript
|
||||
{
|
||||
baz: self.foo,
|
||||
cow: {
|
||||
moo: $.foo,
|
||||
},
|
||||
|
||||
// This is perfectly legal.
|
||||
foo: "bar",
|
||||
}
|
||||
```
|
||||
|
||||
## Functions
|
||||
|
||||
Jsonnet implements lexically-scoped functions, but they can be
|
||||
declared in a few ways, and it's worth pointing them out.
|
||||
|
||||
In the example below, note the use of the double colon (`::`) in
|
||||
the declaration of `function2`. This marks the field as _hidden_,
|
||||
which is a concept we will look closer at in the section on
|
||||
object-orientation. For now, it is only important to understand that a
|
||||
function must be either `local` or hidden with `::`, because Jsonnet
|
||||
doesn't know how to render a function as JSON data. (Instead of
|
||||
rendering it, Jsonnet will complain and crash.)
|
||||
|
||||
```javascript
|
||||
{
|
||||
local function1(arg1) = { foo: arg1 },
|
||||
function2(arg1="cluck"):: { bar: arg1 },
|
||||
cow: function1("moo"),
|
||||
chicken: self.function2(),
|
||||
}
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"chicken": {
|
||||
"bar": "cluck"
|
||||
},
|
||||
"cow": {
|
||||
"foo": "moo"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Object-orientation (inheritance, mixins)
|
||||
|
||||
One of Jsonnet's most powerful features, which we use liberally in
|
||||
this tutorial and in **ksonnet**, is its object model, which
|
||||
implements a concise, [well-specified _algebra_][jsonnetAlgebra] for
|
||||
combining JSON-like objects.
|
||||
|
||||
The primary tool for combining objects is the `+` operator. In this
|
||||
example we see two objects (the first is called the _parent_, or
|
||||
_base_, and the second is called the _child_) that are combined with
|
||||
the `+`. The child (which is said to _inherit_ from the parent)
|
||||
overwrites the `bar` property that was defined in the parent:
|
||||
|
||||
```javascript
|
||||
|
||||
{
|
||||
// Parent object.
|
||||
foo: "foo",
|
||||
bar: "bar",
|
||||
} + {
|
||||
// Child object.
|
||||
bar: "fubar",
|
||||
}
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"bar": "fubar",
|
||||
"foo": "foo"
|
||||
}
|
||||
```
|
||||
|
||||
It is sometimes convenient for a child to reference members of the
|
||||
parent, so Jsonnet also exposes `super`, which behaves a lot like
|
||||
`self`, except in reference to the parent:
|
||||
|
||||
```javascript
|
||||
{
|
||||
foo: "foo",
|
||||
} + {
|
||||
bar: super.foo + "bar",
|
||||
}
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"bar": "foobar",
|
||||
"foo": "foo"
|
||||
}
|
||||
```
|
||||
|
||||
One interesting aspect of `super` is that it can be "mixed in",
|
||||
meaning that if you have an object that refers to `super.bar`, then it
|
||||
can dynamically be made to inherit from _any object_ that has a `bar`
|
||||
property. For example:
|
||||
|
||||
```javascript
|
||||
local fooTheBar = { bar: super.bar + "foo" };
|
||||
{
|
||||
bar: "bar",
|
||||
} + fooTheBar
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"bar": "barfoo"
|
||||
}
|
||||
```
|
||||
|
||||
This stands in contrast to the object model of (say) Java, where you
|
||||
would have to declare at compile time an `Animal` class before a `Dog`
|
||||
class could be made to inherit from it. The technique above (called a
|
||||
_mixin_) causes the object to inherit dynamically, at runtime rather
|
||||
than compile time.
|
||||
|
||||
Lastly, Jsonnet allows you to create hidden properties, not included
|
||||
when we generate the final JSON. Denoted with a `::`, they are
|
||||
also visible to all descendent objects (_i.e._, children,
|
||||
grandchildren, _etc_.), and are useful for holding data you'd like to
|
||||
use to construct other properties, but not expose as part of the
|
||||
generated JSON itself:
|
||||
|
||||
```javascript
|
||||
{
|
||||
foo:: "foo",
|
||||
} + {
|
||||
bar: super.foo + "bar",
|
||||
}
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"bar": "foobar"
|
||||
}
|
||||
```
|
||||
|
||||
[jsonnetTutorial]: http://jsonnet.org/docs/tutorial.html "Jsonnet tutorial"
|
||||
[jsonnetSyntax]: http://jsonnet.org/docs/tutorial.html#syntax_improvements "Jsonnet syntax improvements"
|
||||
[jsonnetFunctions]: http://jsonnet.org/docs/tutorial.html#functions "Jsonnet functions"
|
||||
[jsonnetLocals]: http://jsonnet.org/docs/tutorial.html#locals "Jsonnet local variables"
|
||||
[jsonnetReferences]: http://jsonnet.org/docs/tutorial.html#references "Jsonnet references"
|
||||
[jsonnetImports]: http://jsonnet.org/docs/tutorial.html#imports "Jsonnet imports"
|
||||
[jsonnetOO]: http://jsonnet.org/docs/tutorial.html#oo "Jsonnet OO"
|
||||
[jsonnetAlgebra]: http://jsonnet.org/language/spec.html#properties "Jsonnet inheritance algebra"
|
19
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/examples/readme/hello-nginx.jsonnet
generated
vendored
Normal file
19
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/examples/readme/hello-nginx.jsonnet
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
|||
// This expects to be run with `jsonnet -J <path to ksonnet-lib>`
local k = import "ksonnet.beta.2/k.libsonnet";

// Shorthand aliases for the API objects used below.
local container = k.extensions.v1beta1.deployment.mixin.spec.template.spec.containersType;
local containerPort = container.portsType;
local deployment = k.extensions.v1beta1.deployment;

local targetPort = 80;
local podLabels = {app: "nginx"};

// A single nginx container listening on targetPort.
local webContainer =
  container.new("nginx", "nginx:1.7.9") +
  container.ports(containerPort.containerPort(targetPort));

// A two-replica deployment wrapping the container above.
local webDeployment =
  deployment.new("nginx-deployment", 2, webContainer, podLabels);

k.core.v1.list.new(webDeployment)
|
27
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/Gopkg.toml
generated
vendored
Normal file
27
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/Gopkg.toml
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
[[constraint]]
|
||||
name = "github.com/blang/semver"
|
||||
version = "3.5.1"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/spec"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/swag"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/google/go-jsonnet"
|
||||
version = "0.10.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/pkg/errors"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/stretchr/testify"
|
||||
version = "1.2.1"
|
||||
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
19
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/README.md
generated
vendored
Normal file
19
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/README.md
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
|||
# ksonnet-gen
|
||||
|
||||
`ksonnet-gen` takes the OpenAPI Kubernetes specification and generates
|
||||
a Jsonnet file representing that API definition.
|
||||
|
||||
## Build
|
||||
|
||||
```bash
|
||||
dep ensure # Fetch dependencies
|
||||
go build -o ksonnet-gen .
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
`ksonnet-gen [path to k8s OpenAPI swagger.json] [output dir]`
|
||||
|
||||
Typically the swagger spec is in something like
|
||||
`k8s.io/kubernetes/api/openapi-spec`, where `k8s.io` is in your Go src
|
||||
folder.
|
62
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/astext/astext.go
generated
vendored
Normal file
62
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/astext/astext.go
generated
vendored
Normal file
|
@ -0,0 +1,62 @@
|
|||
package astext
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
|
||||
"github.com/google/go-jsonnet/ast"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ObjectFields is a slice of ObjectField.
|
||||
type ObjectFields []ObjectField
|
||||
|
||||
// ObjectField wraps ast.ObjectField and adds commenting and the ability to
|
||||
// be printed on one line.
|
||||
type ObjectField struct {
|
||||
ast.ObjectField
|
||||
|
||||
// Comment is a comment for the object field.
|
||||
Comment *Comment
|
||||
|
||||
// Oneline prints this field on a single line.
|
||||
Oneline bool
|
||||
}
|
||||
|
||||
// Object wraps ast.Object and adds the ability to be printed on one line.
|
||||
type Object struct {
|
||||
ast.Object
|
||||
|
||||
Fields []ObjectField
|
||||
|
||||
// Oneline prints this field on a single line.
|
||||
Oneline bool
|
||||
}
|
||||
|
||||
var (
|
||||
// reFieldStr matches a field id that should be enclosed in quotes.
|
||||
reFieldStr = regexp.MustCompile(`^([_A-Za-z0-9\.]?[A-Za-z0-9\-_\.]+(\.[A-Za-z0-9\-_]+)*)?$`)
|
||||
// reField matches a field id.
|
||||
reField = regexp.MustCompile(`^[_A-Za-z]+[_A-Za-z0-9]*$`)
|
||||
)
|
||||
|
||||
// CreateField creates an ObjectField with a name. If the name matches `reFieldStr`, it will
|
||||
// create an ObjectField with Kind `ObjectFieldStr`. If not, it will create an identifier
|
||||
// and set the ObjectField kind to `ObjectFieldId`.
|
||||
func CreateField(name string) (*ObjectField, error) {
|
||||
of := ObjectField{ObjectField: ast.ObjectField{}}
|
||||
if reField.MatchString(name) {
|
||||
id := ast.Identifier(name)
|
||||
of.Kind = ast.ObjectFieldID
|
||||
of.Id = &id
|
||||
} else if reFieldStr.MatchString(name) {
|
||||
of.Expr1 = &ast.LiteralString{
|
||||
Value: name,
|
||||
Kind: ast.StringDouble,
|
||||
}
|
||||
of.Kind = ast.ObjectFieldStr
|
||||
} else {
|
||||
return nil, errors.Errorf("invalid field name %q", name)
|
||||
}
|
||||
|
||||
return &of, nil
|
||||
}
|
107
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/astext/astext_test.go
generated
vendored
Normal file
107
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/astext/astext_test.go
generated
vendored
Normal file
|
@ -0,0 +1,107 @@
|
|||
package astext
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-jsonnet/ast"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCreateField(t *testing.T) {
|
||||
id := ast.Identifier("name")
|
||||
uID := ast.Identifier("underscore_name")
|
||||
leadingID := ast.Identifier("__leading")
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
isErr bool
|
||||
expected *ObjectField
|
||||
}{
|
||||
{
|
||||
name: "name",
|
||||
expected: &ObjectField{
|
||||
ObjectField: ast.ObjectField{
|
||||
Kind: ast.ObjectFieldID, Id: &id}},
|
||||
},
|
||||
{
|
||||
name: "underscore_name",
|
||||
expected: &ObjectField{
|
||||
ObjectField: ast.ObjectField{
|
||||
Kind: ast.ObjectFieldID, Id: &uID}},
|
||||
},
|
||||
{
|
||||
name: "underscore_field-",
|
||||
expected: &ObjectField{
|
||||
ObjectField: ast.ObjectField{
|
||||
Kind: ast.ObjectFieldStr,
|
||||
Expr1: &ast.LiteralString{
|
||||
Value: "underscore_field-",
|
||||
Kind: ast.StringDouble,
|
||||
}}},
|
||||
},
|
||||
{
|
||||
name: "dashed-name",
|
||||
expected: &ObjectField{
|
||||
ObjectField: ast.ObjectField{
|
||||
Kind: ast.ObjectFieldStr,
|
||||
Expr1: &ast.LiteralString{
|
||||
Value: "dashed-name",
|
||||
Kind: ast.StringDouble,
|
||||
}}},
|
||||
},
|
||||
{
|
||||
name: "__leading",
|
||||
expected: &ObjectField{
|
||||
ObjectField: ast.ObjectField{
|
||||
Kind: ast.ObjectFieldID,
|
||||
Id: &leadingID,
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "dot.name",
|
||||
expected: &ObjectField{
|
||||
ObjectField: ast.ObjectField{
|
||||
Kind: ast.ObjectFieldStr,
|
||||
Expr1: &ast.LiteralString{
|
||||
Value: "dot.name",
|
||||
Kind: ast.StringDouble,
|
||||
}}},
|
||||
},
|
||||
{
|
||||
name: ".",
|
||||
expected: &ObjectField{
|
||||
ObjectField: ast.ObjectField{
|
||||
Kind: ast.ObjectFieldStr,
|
||||
Expr1: &ast.LiteralString{
|
||||
Value: ".",
|
||||
Kind: ast.StringDouble,
|
||||
}}},
|
||||
},
|
||||
{
|
||||
name: "9p",
|
||||
expected: &ObjectField{
|
||||
ObjectField: ast.ObjectField{
|
||||
Kind: ast.ObjectFieldStr,
|
||||
Expr1: &ast.LiteralString{
|
||||
Value: "9p",
|
||||
Kind: ast.StringDouble,
|
||||
}}},
|
||||
},
|
||||
{
|
||||
name: "invalid$",
|
||||
isErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got, err := CreateField(tc.name)
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
8
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/astext/extensions.go
generated
vendored
Normal file
8
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/astext/extensions.go
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
package astext
|
||||
|
||||
// extensions for ast that could live upstream
|
||||
|
||||
// Comment is a comment.
|
||||
type Comment struct {
|
||||
Text string // represents a single line comment
|
||||
}
|
123
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/jsonnet/rewrite.go
generated
vendored
Normal file
123
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/jsonnet/rewrite.go
generated
vendored
Normal file
|
@ -0,0 +1,123 @@
|
|||
// Package jsonnet contains a collection of simple rewriting
|
||||
// facilities that allow us to easily map text from the OpenAPI spec
|
||||
// to things that are Jsonnet-friendly (e.g., renaming identifiers
|
||||
// that are Jsonnet keywords, lowerCamelCase'ing names, and so on).
|
||||
package jsonnet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec"
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubeversion"
|
||||
)
|
||||
|
||||
// FieldKey represents the literal text of a key for some JSON object
|
||||
// field, after rewriting to avoid collisions with Jsonnet keywords.
|
||||
// For example, for `{foo: ...}`, the `FieldKey` would be `foo`, while
|
||||
// for `{error: ...}`, the `FieldKey` would be `"error"` (with
|
||||
// quotation marks, to avoid collisions).
|
||||
type FieldKey string
|
||||
|
||||
// FuncParam represents the parameter to a Jsonnet function, after
|
||||
// being rewritten to avoid collisions with Jsonnet keywords and
|
||||
// normalized to fit the Jsonnet style (i.e., lowerCamelCase) using a
|
||||
// manual set of custom transformations that change per Kubernetes
|
||||
// version. For example, in `foo(BarAPI) {...}`, `FuncParam` would be
|
||||
// `barApi`, and in `foo(error) {...}`, `FuncParam` would be
|
||||
// `errorParam`.
|
||||
type FuncParam string
|
||||
|
||||
// Identifier represents any identifier in a Jsonnet program, after
|
||||
// being normalized to fit the Jsonnet style (i.e., lowerCamelCase)
|
||||
// using a manual set of custom transformations that change per
|
||||
// Kubernetes version. For example, `fooAPI` becomes `fooApi`.
|
||||
type Identifier string
|
||||
|
||||
func (id Identifier) ToSetterID() Identifier {
|
||||
return Identifier("with" + strings.Title(string(id)))
|
||||
}
|
||||
|
||||
func (id Identifier) ToMixinID() Identifier {
|
||||
return Identifier("with" + strings.Title(string(id)) + "Mixin")
|
||||
}
|
||||
|
||||
// RewriteAsFieldKey takes a `PropertyName` and converts it to a valid
|
||||
// Jsonnet field name. For example, if the `PropertyName` has a value
|
||||
// of `"error"`, then this would generate an invalid object, `{error:
|
||||
// ...}`. Hence, this function will quote this string, so that it ends
|
||||
// up like: `{"error": ...}`.
|
||||
func RewriteAsFieldKey(text kubespec.PropertyName) FieldKey {
|
||||
// NOTE: Because the field needs to have precisely the same text as
|
||||
// the Kubernetes API spec, we do not compute a version-specific ID
|
||||
// alias as we do for other rewrites.
|
||||
if _, ok := jsonnetKeywordSet[text]; ok {
|
||||
return FieldKey(fmt.Sprintf("\"%s\"", text))
|
||||
}
|
||||
return FieldKey(text)
|
||||
}
|
||||
|
||||
// RewriteAsFuncParam takes a `PropertyName` and converts it to a
|
||||
// valid Jsonnet function parameter. For example, if the
|
||||
// `PropertyName` has a value of `"error"`, then this would generate
|
||||
// an invalid function parameter, `function(error) ...`. Hence, this
|
||||
// function will alter the identifier, so that it ends up like:
|
||||
// `function(errorParam) ...`.
|
||||
//
|
||||
// NOTE: This transformation involves a hand-curated style change to
|
||||
// lowerCamelCase (e.g., `fooAPI` -> `fooApi`). This list changes per
|
||||
// Kubernetes version, according to identifiers that don't conform to
|
||||
// this style.
|
||||
func RewriteAsFuncParam(
|
||||
k8sVersion string, text kubespec.PropertyName,
|
||||
) FuncParam {
|
||||
id := RewriteAsIdentifier(k8sVersion, text)
|
||||
if _, ok := jsonnetKeywordSet[kubespec.PropertyName(id)]; ok {
|
||||
return FuncParam(fmt.Sprintf("%sParam", id))
|
||||
}
|
||||
return FuncParam(id)
|
||||
}
|
||||
|
||||
// RewriteAsIdentifier takes a `GroupName`, `ObjectKind`,
|
||||
// `PropertyName`, or `string`, and converts it to a Jsonnet-style
|
||||
// Identifier. Typically this includes lower-casing the first letter,
|
||||
// but also changing initialisms like fooAPI -> fooApi.
|
||||
//
|
||||
// NOTE: This transformation involves a hand-curated style change to
|
||||
// lowerCamelCase (e.g., `fooAPI` -> `fooApi`). This list changes per
|
||||
// Kubernetes version, according to identifiers that don't conform to
|
||||
// this style.
|
||||
func RewriteAsIdentifier(
|
||||
k8sVersion string, rawID fmt.Stringer,
|
||||
) Identifier {
|
||||
var id = rawID.String()
|
||||
|
||||
if len(id) == 0 {
|
||||
log.Fatalf("Can't lowercase first letter of 0-rune string")
|
||||
}
|
||||
kindString := kubeversion.MapIdentifier(k8sVersion, id)
|
||||
|
||||
upper := strings.ToLower(kindString[:1])
|
||||
return Identifier(upper + kindString[1:])
|
||||
}
|
||||
|
||||
var jsonnetKeywordSet = map[kubespec.PropertyName]string{
|
||||
"assert": "assert",
|
||||
"else": "else",
|
||||
"error": "error",
|
||||
"false": "false",
|
||||
"for": "for",
|
||||
"function": "function",
|
||||
"if": "if",
|
||||
"import": "import",
|
||||
"importstr": "importstr",
|
||||
"in": "in",
|
||||
"local": "local",
|
||||
"null": "null",
|
||||
"tailstrict": "tailstrict",
|
||||
"then": "then",
|
||||
"self": "self",
|
||||
"super": "super",
|
||||
"true": "true",
|
||||
}
|
122
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/jsonnet/rewrite_test.go
generated
vendored
Normal file
122
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/jsonnet/rewrite_test.go
generated
vendored
Normal file
|
@ -0,0 +1,122 @@
|
|||
package jsonnet
|
||||
|
||||
import "testing"
|
||||
import "github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec"
|
||||
|
||||
var fieldKeyTests = map[kubespec.PropertyName]FieldKey{
|
||||
"assert": "\"assert\"",
|
||||
"else": "\"else\"",
|
||||
"error": "\"error\"",
|
||||
"false": "\"false\"",
|
||||
"for": "\"for\"",
|
||||
"function": "\"function\"",
|
||||
"if": "\"if\"",
|
||||
"import": "\"import\"",
|
||||
"importstr": "\"importstr\"",
|
||||
"in": "\"in\"",
|
||||
// TODO: this needs to be resolved
|
||||
// "local": "\"local\"",
|
||||
"null": "\"null\"",
|
||||
"tailstrict": "\"tailstrict\"",
|
||||
"then": "\"then\"",
|
||||
"self": "\"self\"",
|
||||
"super": "\"super\"",
|
||||
"true": "\"true\"",
|
||||
}
|
||||
|
||||
var funcParamTests = map[kubespec.PropertyName]FuncParam{
|
||||
"assert": "assertParam",
|
||||
"else": "elseParam",
|
||||
"error": "errorParam",
|
||||
"false": "falseParam",
|
||||
"for": "forParam",
|
||||
"function": "functionParam",
|
||||
"if": "ifParam",
|
||||
"import": "importParam",
|
||||
"importstr": "importstrParam",
|
||||
"in": "inParam",
|
||||
// TODO: this needs to be resolved
|
||||
// "local": "localParam",
|
||||
"null": "nullParam",
|
||||
"tailstrict": "tailstrictParam",
|
||||
"then": "thenParam",
|
||||
"self": "selfParam",
|
||||
"super": "superParam",
|
||||
"true": "trueParam",
|
||||
}
|
||||
|
||||
var identifierTests = map[kubespec.PropertyName]Identifier{
|
||||
"hostIPC": "hostIpc",
|
||||
"hostPID": "hostPid",
|
||||
"targetCPUUtilizationPercentage": "targetCpuUtilizationPercentage",
|
||||
"externalID": "externalId",
|
||||
"podCIDR": "podCidr",
|
||||
"providerID": "providerId",
|
||||
"bootID": "bootId",
|
||||
"machineID": "machineId",
|
||||
"systemUUID": "systemUuid",
|
||||
"volumeID": "volumeId",
|
||||
"diskURI": "diskUri",
|
||||
"targetWWNs": "targetWwns",
|
||||
"datasetUUID": "datasetUuid",
|
||||
"pdID": "pdId",
|
||||
"scaleIO": "scaleIo",
|
||||
"podIP": "podIp",
|
||||
"hostIP": "hostIp",
|
||||
"clusterIP": "clusterIp",
|
||||
"externalIPs": "externalIps",
|
||||
"loadBalancerIP": "loadBalancerIp",
|
||||
}
|
||||
|
||||
func TestRewriteAsFieldKey(t *testing.T) {
|
||||
for keyword, target := range fieldKeyTests {
|
||||
actual := RewriteAsFieldKey(keyword)
|
||||
if target != actual {
|
||||
t.Errorf("Expected '%s' got '%s'", target, actual)
|
||||
}
|
||||
}
|
||||
|
||||
// Test rewrite is a no-op for other identifiers.
|
||||
for id := range identifierTests {
|
||||
target := FieldKey(id)
|
||||
actual := RewriteAsFieldKey(kubespec.PropertyName(id))
|
||||
if target != actual {
|
||||
t.Errorf("Expected '%s' got '%s'", target, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRewriteAsFuncParam(t *testing.T) {
|
||||
for keyword, target := range funcParamTests {
|
||||
actual := RewriteAsFuncParam("v1.7.0", keyword)
|
||||
if target != actual {
|
||||
t.Errorf("Expected '%s' got '%s'", target, actual)
|
||||
}
|
||||
}
|
||||
|
||||
// Test we also do aliasing for func parameters
|
||||
for id, target := range identifierTests {
|
||||
actual := RewriteAsFuncParam("v1.7.0", id)
|
||||
if FuncParam(target) != actual {
|
||||
t.Errorf("Expected '%s' got '%s'", target, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRewriteAsIdentifier(t *testing.T) {
|
||||
for id, target := range identifierTests {
|
||||
actual := RewriteAsIdentifier("v1.7.0", id)
|
||||
if target != actual {
|
||||
t.Errorf("Expected '%s' got '%s'", target, actual)
|
||||
}
|
||||
}
|
||||
|
||||
// Test rewrite is a no-op for keywords.
|
||||
for keyword := range fieldKeyTests {
|
||||
target := Identifier(keyword)
|
||||
actual := RewriteAsIdentifier("v1.7.0", kubespec.PropertyName(keyword))
|
||||
if target != actual {
|
||||
t.Errorf("Expected '%s' got '%s'", target, actual)
|
||||
}
|
||||
}
|
||||
}
|
99
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/api_object.go
generated
vendored
Normal file
99
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/api_object.go
generated
vendored
Normal file
|
@ -0,0 +1,99 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// APIObject is an API object.
|
||||
type APIObject struct {
|
||||
resource Object
|
||||
renderFieldsFn renderFieldsFn
|
||||
}
|
||||
|
||||
// NewAPIObject creates an instance of APIObject.
|
||||
func NewAPIObject(resource Object) *APIObject {
|
||||
ao := &APIObject{
|
||||
resource: resource,
|
||||
renderFieldsFn: renderFields,
|
||||
}
|
||||
|
||||
return ao
|
||||
}
|
||||
|
||||
// Kind is the kind of api object this is.
|
||||
func (a *APIObject) Kind() string {
|
||||
return FormatKind(a.resource.Kind())
|
||||
}
|
||||
|
||||
// Description is the description of this API object.
|
||||
func (a *APIObject) Description() string {
|
||||
return a.resource.Description()
|
||||
}
|
||||
|
||||
// Node returns an AST node for this api object.
|
||||
func (a *APIObject) Node(catalog *Catalog) (*nm.Object, error) {
|
||||
return apiObjectNode(catalog, a)
|
||||
}
|
||||
|
||||
func (a *APIObject) initNode(catalog *Catalog) (*nm.Object, error) {
|
||||
o := nm.NewObject()
|
||||
|
||||
if a.resource.IsType() {
|
||||
kindObject := nm.OnelineObject()
|
||||
kind := a.resource.Kind()
|
||||
kindObject.Set(nm.InheritedKey("kind"), nm.NewStringDouble(kind))
|
||||
o.Set(nm.LocalKey("kind"), kindObject)
|
||||
|
||||
ctorBase := []nm.Noder{
|
||||
nm.NewVar("apiVersion"),
|
||||
nm.NewVar("kind"),
|
||||
}
|
||||
|
||||
a.setConstructors(o, ctorBase, objectConstructor())
|
||||
} else {
|
||||
a.setConstructors(o, nil, nm.OnelineObject())
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
func (a *APIObject) setConstructors(parent *nm.Object, ctorBase []nm.Noder, defaultCtorBody nm.Noder) error {
|
||||
desc := makeDescriptor(a.resource.Codebase(), a.resource.Group(), a.resource.Kind())
|
||||
ctors := locateConstructors(desc)
|
||||
|
||||
if len(ctors) > 0 {
|
||||
for _, ctor := range ctors {
|
||||
key, err := ctor.Key()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "generate constructor key")
|
||||
}
|
||||
|
||||
parent.Set(key, ctor.Body(ctorBase...))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
parent.Set(nm.FunctionKey("new", []string{}), defaultCtorBody)
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func objectConstructor() *nm.Binary {
|
||||
return nm.NewBinary(nm.NewVar("apiVersion"), nm.NewVar("kind"), nm.BopPlus)
|
||||
}
|
||||
|
||||
func apiObjectNode(catalog *Catalog, a *APIObject) (*nm.Object, error) {
|
||||
if catalog == nil {
|
||||
return nil, errors.New("catalog is nil")
|
||||
}
|
||||
|
||||
o, err := a.initNode(catalog)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := a.renderFieldsFn(catalog, o, "", a.resource.Properties()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
86
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/api_object_test.go
generated
vendored
Normal file
86
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/api_object_test.go
generated
vendored
Normal file
|
@ -0,0 +1,86 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAPIObject_Kind(t *testing.T) {
|
||||
c1 := Component{Group: "group2", Version: "v1", Kind: "Deployment"}
|
||||
o1 := NewType("alpha", "desc", "codebase", "group", c1, nil)
|
||||
ao := NewAPIObject(&o1)
|
||||
|
||||
require.Equal(t, "deployment", ao.Kind())
|
||||
}
|
||||
|
||||
func TestAPIObject_Description(t *testing.T) {
|
||||
c1 := Component{Group: "group2", Version: "v1", Kind: "Deployment"}
|
||||
o1 := NewType("alpha", "desc", "codebase", "group", c1, nil)
|
||||
ao := NewAPIObject(&o1)
|
||||
|
||||
require.Equal(t, "desc", ao.Description())
|
||||
}
|
||||
|
||||
func TestAPIObject_Node_with_type(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
t1 := NewField("io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", "desc", "apimachinery", "group", "ver", "Kind", nil)
|
||||
ao := NewAPIObject(t1)
|
||||
|
||||
n, err := ao.Node(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, ok := n.Get("kind").(*nm.Object)
|
||||
require.False(t, ok)
|
||||
|
||||
require.NotNil(t, n.Get("new"))
|
||||
}
|
||||
|
||||
func TestAPIObject_Node_with_field(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
c1 := Component{Group: "group2", Version: "v1", Kind: "Deployment"}
|
||||
|
||||
o1 := NewType("io.k8s.codebase.pkg.api.version.kind", "desc", "codebase", "group", c1, nil)
|
||||
ao := NewAPIObject(&o1)
|
||||
|
||||
n, err := ao.Node(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
kindo, ok := n.Get("kind").(*nm.Object)
|
||||
require.True(t, ok)
|
||||
require.IsType(t, nm.NewObject(), kindo)
|
||||
|
||||
kind, ok := kindo.Get("kind").(*nm.StringDouble)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, nm.NewStringDouble("Deployment"), kind)
|
||||
|
||||
require.NotNil(t, n.Get("new"))
|
||||
}
|
||||
|
||||
func TestAPIObject_Node_with_nil_catalog(t *testing.T) {
|
||||
c1 := Component{Group: "group2", Version: "v1", Kind: "Deployment"}
|
||||
o1 := NewType("alpha", "desc", "codebase", "group", c1, nil)
|
||||
ao := NewAPIObject(&o1)
|
||||
|
||||
_, err := ao.Node(nil)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestAPIObject_Node_fails_when_renderer_fails(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
c1 := Component{Group: "group2", Version: "v1", Kind: "Deployment"}
|
||||
o1 := NewType("alpha", "desc", "codebase", "group", c1, nil)
|
||||
ao := NewAPIObject(&o1)
|
||||
|
||||
ao.renderFieldsFn = func(typeLookup, *nm.Object, string, map[string]Property) error {
|
||||
return errors.New("failed")
|
||||
}
|
||||
|
||||
_, err := ao.Node(c)
|
||||
require.Error(t, err)
|
||||
}
|
58
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/buffer.go
generated
vendored
Normal file
58
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/buffer.go
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// indentWriter abstracts the task of writing out indented text to a
|
||||
// buffer. Different components can call `indent` and `dedent` as
|
||||
// appropriate to specify how indentation needs to change, rather than
|
||||
// to keep track of the current indentation.
|
||||
//
|
||||
// For example, if one component is responsible for writing an array,
|
||||
// and an element in that array is a function, the component
|
||||
// responsible for the array need only know to call `indent` after the
|
||||
// '[' character and `dedent` before the ']' character, while the
|
||||
// routine responsible for writing out the function can handle its own
|
||||
// indentation independently.
|
||||
type indentWriter struct {
|
||||
depth int
|
||||
err error
|
||||
buffer bytes.Buffer
|
||||
}
|
||||
|
||||
func newIndentWriter() *indentWriter {
|
||||
var buffer bytes.Buffer
|
||||
return &indentWriter{
|
||||
depth: 0,
|
||||
err: nil,
|
||||
buffer: buffer,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *indentWriter) writeLine(text string) {
|
||||
if m.err != nil {
|
||||
return
|
||||
}
|
||||
prefix := strings.Repeat(" ", m.depth)
|
||||
line := fmt.Sprintf("%s%s\n", prefix, text)
|
||||
_, m.err = m.buffer.WriteString(line)
|
||||
}
|
||||
|
||||
func (m *indentWriter) bytes() ([]byte, error) {
|
||||
if m.err != nil {
|
||||
return nil, m.err
|
||||
}
|
||||
|
||||
return m.buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
func (m *indentWriter) indent() {
|
||||
m.depth++
|
||||
}
|
||||
|
||||
func (m *indentWriter) dedent() {
|
||||
m.depth--
|
||||
}
|
336
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/catalog.go
generated
vendored
Normal file
336
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/catalog.go
generated
vendored
Normal file
|
@ -0,0 +1,336 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
blockedReferences = []string{
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Status",
|
||||
}
|
||||
|
||||
blockedPropertyNames = []string{
|
||||
"status",
|
||||
"apiVersion",
|
||||
"kind",
|
||||
}
|
||||
)
|
||||
|
||||
// ExtractFn is a function which extracts properties from a schema.
|
||||
type ExtractFn func(*Catalog, map[string]spec.Schema, []string) (map[string]Property, error)
|
||||
|
||||
// CatalogOpt is an option for configuring Catalog.
|
||||
type CatalogOpt func(*Catalog)
|
||||
|
||||
// CatalogOptExtractProperties is a Catalog option for setting the property
|
||||
// extractor.
|
||||
func CatalogOptExtractProperties(fn ExtractFn) CatalogOpt {
|
||||
return func(c *Catalog) {
|
||||
c.extractFn = fn
|
||||
}
|
||||
}
|
||||
|
||||
// CatalogOptChecksum is a Catalog option for setting the checksum of the swagger schema.
|
||||
func CatalogOptChecksum(checksum string) CatalogOpt {
|
||||
return func(c *Catalog) {
|
||||
c.checksum = checksum
|
||||
}
|
||||
}
|
||||
|
||||
// Catalog is a catalog definitions
|
||||
type Catalog struct {
|
||||
apiSpec *spec.Swagger
|
||||
extractFn ExtractFn
|
||||
apiVersion semver.Version
|
||||
paths map[string]Component
|
||||
checksum string
|
||||
|
||||
// memos
|
||||
typesCache []Type
|
||||
fieldsCache []Field
|
||||
}
|
||||
|
||||
// NewCatalog creates an instance of Catalog.
|
||||
func NewCatalog(apiSpec *spec.Swagger, opts ...CatalogOpt) (*Catalog, error) {
|
||||
if apiSpec == nil {
|
||||
return nil, errors.New("apiSpec is nil")
|
||||
}
|
||||
|
||||
if apiSpec.Info == nil {
|
||||
return nil, errors.New("apiSpec Info is nil")
|
||||
}
|
||||
|
||||
parts := strings.SplitN(apiSpec.Info.Version, ".", 3)
|
||||
parts[0] = strings.TrimPrefix(parts[0], "v")
|
||||
vers := strings.Join(parts, ".")
|
||||
apiVersion, err := semver.Parse(vers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid apiSpec version")
|
||||
}
|
||||
|
||||
paths, err := parsePaths(apiSpec)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "parse apiSpec paths")
|
||||
}
|
||||
|
||||
c := &Catalog{
|
||||
apiSpec: apiSpec,
|
||||
extractFn: extractProperties,
|
||||
apiVersion: apiVersion,
|
||||
paths: paths,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(c)
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Checksum returns the checksum of the swagger schema.
|
||||
func (c *Catalog) Checksum() string {
|
||||
return c.checksum
|
||||
}
|
||||
|
||||
// Version returns the Kubernetes API version represented by this Catalog.
|
||||
func (c *Catalog) Version() string {
|
||||
return c.apiVersion.String()
|
||||
}
|
||||
|
||||
// Types returns a slice of all types.
|
||||
func (c *Catalog) Types() ([]Type, error) {
|
||||
if c.typesCache != nil {
|
||||
return c.typesCache, nil
|
||||
}
|
||||
|
||||
var resources []Type
|
||||
|
||||
for name, schema := range c.definitions() {
|
||||
desc, err := ParseDescription(name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "parse description for %s", name)
|
||||
}
|
||||
|
||||
// If there is a path, we can update it as a first class object
|
||||
// in the API. This makes this schema a type.
|
||||
component, ok := c.paths[name]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
props, err := c.extractFn(c, schema.Properties, schema.Required)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "extract propererties from %s", name)
|
||||
}
|
||||
|
||||
kind := NewType(name, schema.Description, desc.Codebase, desc.Group, component, props)
|
||||
|
||||
resources = append(resources, kind)
|
||||
}
|
||||
|
||||
c.typesCache = resources
|
||||
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
// Fields returns a slice of all fields.
|
||||
func (c *Catalog) Fields() ([]Field, error) {
|
||||
if c.fieldsCache != nil {
|
||||
return c.fieldsCache, nil
|
||||
}
|
||||
|
||||
var types []Field
|
||||
|
||||
for name, schema := range c.definitions() {
|
||||
desc, err := ParseDescription(name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "parse description for %s", name)
|
||||
}
|
||||
|
||||
// If there is a path, this should not be a hidden object. This
|
||||
// makes this schema a field.
|
||||
if _, ok := c.paths[name]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
props, err := c.extractFn(c, schema.Properties, schema.Required)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "extract propererties from %s", name)
|
||||
}
|
||||
t := NewField(name, schema.Description, desc.Codebase, desc.Group, desc.Version, desc.Kind, props)
|
||||
types = append(types, *t)
|
||||
}
|
||||
|
||||
c.fieldsCache = types
|
||||
return types, nil
|
||||
}
|
||||
|
||||
func (c *Catalog) isFormatRef(name string) (bool, error) {
|
||||
schema, ok := c.apiSpec.Definitions[name]
|
||||
if !ok {
|
||||
return false, errors.Errorf("%s was not found", name)
|
||||
}
|
||||
|
||||
if schema.Format != "" {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Field returns a field by definition id. If the type cannot be found, it returns an error.
|
||||
func (c *Catalog) Field(name string) (*Field, error) {
|
||||
types, err := c.Fields()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, ty := range types {
|
||||
if ty.Identifier() == name {
|
||||
return &ty, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.Errorf("%s was not found", name)
|
||||
}
|
||||
|
||||
// Resource returns a resource by group, version, kind. If the field cannot be found,
|
||||
// it returns an error
|
||||
func (c *Catalog) Resource(group, version, kind string) (*Type, error) {
|
||||
resources, err := c.Types()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, resource := range resources {
|
||||
if group == resource.Group() &&
|
||||
version == resource.Version() &&
|
||||
kind == resource.Kind() {
|
||||
return &resource, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.Errorf("unable to find %s.%s.%s",
|
||||
group, version, kind)
|
||||
}
|
||||
|
||||
// TypeByID returns a type by identifier.
|
||||
func (c *Catalog) TypeByID(id string) (*Type, error) {
|
||||
resources, err := c.Types()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, resource := range resources {
|
||||
if resource.Identifier() == id {
|
||||
return &resource, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.Errorf("unable to find type %q", id)
|
||||
}
|
||||
|
||||
// TypesWithDescendant returns types who have the specified definition as a descendant.
|
||||
// This list does not include List types (e.g. DeploymentList).
|
||||
func (c *Catalog) TypesWithDescendant(definition string) ([]Type, error) {
|
||||
types, err := c.Types()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "retrieve types")
|
||||
}
|
||||
|
||||
var out []Type
|
||||
for _, ty := range types {
|
||||
|
||||
if strings.HasSuffix(ty.Kind(), "List") {
|
||||
continue
|
||||
}
|
||||
tf, err := c.descend(definition, ty.Properties())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if tf {
|
||||
out = append(out, ty)
|
||||
}
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *Catalog) find(id string) (Object, error) {
|
||||
f, err := c.Field(id)
|
||||
if err == nil {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
t, err := c.TypeByID(id)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("unable to find object %q", id)
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (c *Catalog) descend(definition string, m map[string]Property) (bool, error) {
|
||||
|
||||
for _, prop := range m {
|
||||
if ref := prop.Ref(); ref != "" {
|
||||
|
||||
if ref == definition {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// NOTE: if this is a reference to json schema, bail out because this is recursive.
|
||||
if ref == "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps" {
|
||||
continue
|
||||
}
|
||||
|
||||
f, err := c.find(ref)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "find field %s", ref)
|
||||
}
|
||||
|
||||
tf, err := c.descend(definition, f.Properties())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if tf {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func isValidDefinition(name string, ver semver.Version) bool {
|
||||
checkVer := semver.Version{Major: 1, Minor: 7}
|
||||
if ver.GTE(checkVer) {
|
||||
return !strings.HasPrefix(name, "io.k8s.kubernetes.pkg.api")
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// extractRef extracts a ref from a schema.
|
||||
func extractRef(schema spec.Schema) string {
|
||||
return strings.TrimPrefix(schema.Ref.String(), "#/definitions/")
|
||||
}
|
||||
|
||||
func (c *Catalog) definitions() spec.Definitions {
|
||||
out := spec.Definitions{}
|
||||
|
||||
for name, schema := range c.apiSpec.Definitions {
|
||||
if isValidDefinition(name, c.apiVersion) {
|
||||
out[name] = schema
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
258
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/catalog_test.go
generated
vendored
Normal file
258
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/catalog_test.go
generated
vendored
Normal file
|
@ -0,0 +1,258 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
apiSpecCache = map[string]*spec.Swagger{}
|
||||
)
|
||||
|
||||
func initCatalog(t *testing.T, file string, opts ...CatalogOpt) *Catalog {
|
||||
apiSpec := apiSpecCache[file]
|
||||
if apiSpec == nil {
|
||||
var err error
|
||||
apiSpec, _, err = kubespec.Import(testdata(file))
|
||||
require.NoError(t, err)
|
||||
|
||||
apiSpecCache[file] = apiSpec
|
||||
}
|
||||
|
||||
c, err := NewCatalog(apiSpec, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func TestCatalog_nil_apiSpec(t *testing.T) {
|
||||
_, err := NewCatalog(nil)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCatalog_Types(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
resources, err := c.Types()
|
||||
require.NoError(t, err)
|
||||
|
||||
var found bool
|
||||
for _, resource := range resources {
|
||||
if resource.Identifier() == "io.k8s.api.apps.v1beta1.Deployment" {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
require.True(t, found)
|
||||
}
|
||||
|
||||
func TestCatalog_Resources_invalid_description(t *testing.T) {
|
||||
source, err := ioutil.ReadFile("testdata/invalid_definition.json")
|
||||
require.NoError(t, err)
|
||||
|
||||
apiSpec, err := kubespec.CreateAPISpec(source)
|
||||
require.NoError(t, err)
|
||||
|
||||
c, err := NewCatalog(apiSpec)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = c.Types()
|
||||
assert.Error(t, err)
|
||||
|
||||
_, err = c.Resource("group", "version", "kind")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCatalog_Resources_invalid_field_properties(t *testing.T) {
|
||||
fn := func(*Catalog, map[string]spec.Schema, []string) (map[string]Property, error) {
|
||||
return nil, errors.New("failed")
|
||||
}
|
||||
|
||||
opt := CatalogOptExtractProperties(fn)
|
||||
|
||||
c := initCatalog(t, "swagger-1.8.json", opt)
|
||||
|
||||
_, err := c.Types()
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCatalog_Resource(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
group string
|
||||
version string
|
||||
kind string
|
||||
isErr bool
|
||||
}{
|
||||
{name: "valid id", group: "apps", version: "v1beta2", kind: "Deployment"},
|
||||
{name: "unknown kind", group: "apps", version: "v1beta2", kind: "Foo", isErr: true},
|
||||
{name: "unknown version", group: "apps", version: "Foo", kind: "Foo", isErr: true},
|
||||
{name: "unknown group", group: "Foo", version: "Foo", kind: "Foo", isErr: true},
|
||||
}
|
||||
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
r, err := c.Resource(tc.group, tc.version, tc.kind)
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("id is %s", r.Identifier())
|
||||
|
||||
require.Equal(t, tc.group, r.Group())
|
||||
require.Equal(t, tc.version, r.Version())
|
||||
require.Equal(t, tc.kind, r.Kind())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCatalog_Fields(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
fields, err := c.Fields()
|
||||
require.NoError(t, err)
|
||||
|
||||
var found bool
|
||||
for _, field := range fields {
|
||||
if field.Identifier() == "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
require.True(t, found)
|
||||
}
|
||||
|
||||
func TestCatalog_Fields_invalid_description(t *testing.T) {
|
||||
source, err := ioutil.ReadFile("testdata/invalid_definition.json")
|
||||
require.NoError(t, err)
|
||||
|
||||
apiSpec, err := kubespec.CreateAPISpec(source)
|
||||
require.NoError(t, err)
|
||||
|
||||
c, err := NewCatalog(apiSpec)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = c.Fields()
|
||||
assert.Error(t, err)
|
||||
|
||||
_, err = c.Field("anything")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCatalog_Fields_invalid_field_properties(t *testing.T) {
|
||||
fn := func(*Catalog, map[string]spec.Schema, []string) (map[string]Property, error) {
|
||||
return nil, errors.New("failed")
|
||||
}
|
||||
|
||||
opt := CatalogOptExtractProperties(fn)
|
||||
|
||||
c := initCatalog(t, "swagger-1.8.json", opt)
|
||||
|
||||
_, err := c.Fields()
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCatalog_Field(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
id string
|
||||
isErr bool
|
||||
}{
|
||||
{name: "valid id", id: "io.k8s.apimachinery.pkg.apis.meta.v1.Initializers"},
|
||||
{name: "missing", id: "missing", isErr: true},
|
||||
}
|
||||
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ty, err := c.Field(tc.id)
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tc.id, ty.Identifier())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCatalog_TypesWithDescendant(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
types, err := c.TypesWithDescendant("io.k8s.api.core.v1.PodSpec")
|
||||
require.NoError(t, err)
|
||||
|
||||
var names []string
|
||||
for _, ty := range types {
|
||||
names = append(names, ty.component.String())
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
|
||||
expected := []string{
|
||||
"apps.v1beta1.Deployment",
|
||||
"apps.v1beta1.StatefulSet",
|
||||
"apps.v1beta2.DaemonSet",
|
||||
"apps.v1beta2.Deployment",
|
||||
"apps.v1beta2.ReplicaSet",
|
||||
"apps.v1beta2.StatefulSet",
|
||||
"batch.v1.Job",
|
||||
"batch.v1beta1.CronJob",
|
||||
"batch.v2alpha1.CronJob",
|
||||
"core.v1.Pod",
|
||||
"core.v1.PodTemplate",
|
||||
"core.v1.ReplicationController",
|
||||
"extensions.v1beta1.DaemonSet",
|
||||
"extensions.v1beta1.Deployment",
|
||||
"extensions.v1beta1.ReplicaSet",
|
||||
}
|
||||
require.Equal(t, expected, names)
|
||||
}
|
||||
|
||||
func TestCatalog_isFormatRef(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
isFormatRef bool
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
name: "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta",
|
||||
},
|
||||
{
|
||||
name: "missing",
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
name: "io.k8s.apimachinery.pkg.util.intstr.IntOrString",
|
||||
isFormatRef: true,
|
||||
},
|
||||
}
|
||||
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tf, err := c.isFormatRef(tc.name)
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tc.isFormatRef, tf)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
77
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/component.go
generated
vendored
Normal file
77
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/component.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
extensionGroupVersionKind = "x-kubernetes-group-version-kind"
|
||||
)
|
||||
|
||||
// Component is resource information provided in the k8s swagger schema
|
||||
// which contains the group, kind, and version for a definition.
|
||||
type Component struct {
|
||||
Group string
|
||||
Kind string
|
||||
Version string
|
||||
}
|
||||
|
||||
// NewComponent extracts component information from a schema.
|
||||
func NewComponent(s spec.Schema) (*Component, error) {
|
||||
re := componentExtractor{schema: s}
|
||||
group := re.extract("group")
|
||||
kind := re.extract("kind")
|
||||
version := re.extract("version")
|
||||
|
||||
if re.err != nil {
|
||||
return nil, re.err
|
||||
}
|
||||
|
||||
return &Component{
|
||||
Group: group,
|
||||
Kind: kind,
|
||||
Version: version,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Component) String() string {
|
||||
group := c.Group
|
||||
if group == "" {
|
||||
group = "core"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s.%s.%s", group, c.Version, c.Kind)
|
||||
}
|
||||
|
||||
type componentExtractor struct {
|
||||
err error
|
||||
schema spec.Schema
|
||||
}
|
||||
|
||||
func (re *componentExtractor) extract(key string) string {
|
||||
if re.err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
i, ok := re.schema.Extensions[extensionGroupVersionKind]
|
||||
if !ok {
|
||||
re.err = errors.New("no group/kind/version extension")
|
||||
return ""
|
||||
}
|
||||
|
||||
s, ok := i.([]interface{})
|
||||
if ok {
|
||||
m, ok := s[0].(map[string]interface{})
|
||||
if ok {
|
||||
str, ok := m[key].(string)
|
||||
if ok {
|
||||
return str
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
58
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/component_test.go
generated
vendored
Normal file
58
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/component_test.go
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func testdata(name string) string {
|
||||
return filepath.Join("testdata", name)
|
||||
}
|
||||
|
||||
func TestComponent(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
expected *Component
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
name: "io.k8s.api.apps.v1beta2.Deployment",
|
||||
expected: &Component{
|
||||
Group: "apps",
|
||||
Version: "v1beta2",
|
||||
Kind: "Deployment",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta",
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing",
|
||||
isErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
apiSpec, _, err := kubespec.Import(testdata("swagger-1.8.json"))
|
||||
require.NoError(t, err)
|
||||
|
||||
schema := apiSpec.Definitions[tc.name]
|
||||
|
||||
c, err := NewComponent(schema)
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expected, c)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
}
|
187
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/constructors.go
generated
vendored
Normal file
187
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/constructors.go
generated
vendored
Normal file
|
@ -0,0 +1,187 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sort"
|
||||
|
||||
"github.com/google/go-jsonnet/ast"
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// reCtorSetter is a regex that matches function names. It'll successfully
|
||||
// match `withName`, `foo.withName`, and `foo.bar.withName`.
|
||||
reCtorSetter = regexp.MustCompile(`((^.*?)\.)*(with\w+|mixinInstance)$`)
|
||||
)
|
||||
|
||||
func matchCtorSetter(in string) (string, string, error) {
|
||||
match := reCtorSetter.FindAllStringSubmatch(in, -1)
|
||||
if len(match) == 0 {
|
||||
return "", "", errors.New("no match")
|
||||
}
|
||||
|
||||
cur := match[0]
|
||||
if cur[1] == "" {
|
||||
return "self", cur[3], nil
|
||||
}
|
||||
|
||||
return "self." + cur[2], cur[3], nil
|
||||
}
|
||||
|
||||
type constructor struct {
|
||||
name string
|
||||
params []constructorParam
|
||||
}
|
||||
|
||||
func newConstructor(name string, params ...constructorParam) *constructor {
|
||||
return &constructor{
|
||||
name: name,
|
||||
params: params,
|
||||
}
|
||||
}
|
||||
|
||||
// Key creates an object key for the constructor.
|
||||
func (c *constructor) Key() (nm.Key, error) {
|
||||
var args []nm.OptionalArg
|
||||
|
||||
for _, param := range c.params {
|
||||
option, err := param.Option()
|
||||
if err != nil {
|
||||
return nm.Key{}, errors.Wrap(err, "unable to create key from param")
|
||||
}
|
||||
|
||||
args = append(args, option)
|
||||
}
|
||||
|
||||
key := nm.FunctionKey(c.name, []string{}, nm.KeyOptNamedParams(args...))
|
||||
return key, nil
|
||||
}
|
||||
|
||||
func (c *constructor) Body(baseNodes ...nm.Noder) nm.Noder {
|
||||
var items []nm.Noder
|
||||
for _, node := range baseNodes {
|
||||
items = append(items, node)
|
||||
}
|
||||
|
||||
// collection functions so they can be de-duplicated.
|
||||
funs := make(map[string][]argRef)
|
||||
for _, param := range c.params {
|
||||
path, fn, err := matchCtorSetter(param.function)
|
||||
if err != nil {
|
||||
// TODO should we handle this error?
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := funs[path]; !ok {
|
||||
funs[path] = make([]argRef, 0)
|
||||
}
|
||||
|
||||
funs[path] = append(funs[path], argRef{name: param.name, fn: fn})
|
||||
}
|
||||
|
||||
var funNames []string
|
||||
for funName := range funs {
|
||||
funNames = append(funNames, funName)
|
||||
}
|
||||
sort.Strings(funNames)
|
||||
|
||||
for _, funName := range funNames {
|
||||
|
||||
call := nm.NewCall(funName)
|
||||
|
||||
var curApply *ctorApply
|
||||
var addedCall bool
|
||||
|
||||
ars := funs[funName]
|
||||
sort.Slice(ars, func(i, j int) bool {
|
||||
return ars[i].fn < ars[j].fn
|
||||
})
|
||||
|
||||
for _, ar := range ars {
|
||||
indexID := ast.Identifier(ar.fn)
|
||||
index := &ast.Index{Id: &indexID}
|
||||
if !addedCall {
|
||||
index.Target = call.Node()
|
||||
addedCall = true
|
||||
} else {
|
||||
index.Target = curApply.Node()
|
||||
}
|
||||
|
||||
arg := &ast.Var{Id: ast.Identifier(ar.name)}
|
||||
apply := ast.Apply{
|
||||
Arguments: ast.Arguments{Positional: ast.Nodes{arg}},
|
||||
Target: index,
|
||||
}
|
||||
|
||||
curApply = &ctorApply{Apply: apply}
|
||||
}
|
||||
|
||||
items = append(items, curApply)
|
||||
}
|
||||
|
||||
return nm.Combine(items...)
|
||||
}
|
||||
|
||||
type ctorApply struct {
|
||||
ast.Apply
|
||||
}
|
||||
|
||||
func (ca *ctorApply) Node() ast.Node {
|
||||
return &ca.Apply
|
||||
}
|
||||
|
||||
type argRef struct {
|
||||
name string
|
||||
fn string
|
||||
}
|
||||
|
||||
type constructorParam struct {
|
||||
name string
|
||||
function string
|
||||
defaultValue interface{}
|
||||
}
|
||||
|
||||
func newConstructorParam(name, function string, defaultValue interface{}) *constructorParam {
|
||||
if defaultValue == nil {
|
||||
defaultValue = ""
|
||||
}
|
||||
|
||||
return &constructorParam{
|
||||
name: name,
|
||||
function: function,
|
||||
defaultValue: defaultValue,
|
||||
}
|
||||
}
|
||||
|
||||
func (cp *constructorParam) Option() (nm.OptionalArg, error) {
|
||||
var node nm.Noder
|
||||
|
||||
var err error
|
||||
|
||||
switch t := cp.defaultValue.(type) {
|
||||
case string:
|
||||
node = nm.NewStringDouble(t)
|
||||
case map[string]interface{}:
|
||||
node, err = nm.KVFromMap(t)
|
||||
if err != nil {
|
||||
return nm.OptionalArg{}, errors.Wrap(err, "invalid parameter")
|
||||
}
|
||||
case []string:
|
||||
var items []nm.Noder
|
||||
for _, item := range t {
|
||||
items = append(items, nm.NewStringDouble(item))
|
||||
}
|
||||
node = nm.NewArray(items)
|
||||
case float64:
|
||||
node = nm.NewFloat(t)
|
||||
case int:
|
||||
node = nm.NewInt(t)
|
||||
case bool:
|
||||
node = nm.NewBoolean(t)
|
||||
default:
|
||||
return nm.OptionalArg{}, errors.Errorf("unable to use type %T in param", t)
|
||||
}
|
||||
|
||||
return nm.OptionalArg{Name: cp.name, Default: node}, nil
|
||||
}
|
172
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/constructors_test.go
generated
vendored
Normal file
172
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/constructors_test.go
generated
vendored
Normal file
|
@ -0,0 +1,172 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_matchCtorSetter(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
in string
|
||||
path string
|
||||
fn string
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
name: "with no path",
|
||||
in: "withName",
|
||||
fn: "withName",
|
||||
path: "self",
|
||||
},
|
||||
{
|
||||
name: "with a path",
|
||||
in: "foo.bar.baz.withName",
|
||||
path: "self.foo.bar.baz",
|
||||
fn: "withName",
|
||||
},
|
||||
{
|
||||
name: "unrecognized",
|
||||
in: "invalid",
|
||||
isErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
path, fn, err := matchCtorSetter(tc.in)
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
assert.Equal(t, tc.path, path)
|
||||
assert.Equal(t, tc.fn, fn)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_constructor(t *testing.T) {
|
||||
obj := map[string]interface{}{
|
||||
"key": "val",
|
||||
}
|
||||
|
||||
array := []string{"val"}
|
||||
|
||||
params := []constructorParam{
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("nestedName", "foo.bar.baz.withName", nil),
|
||||
*newConstructorParam("nestedItem", "foo.bar.baz.withItem", nil),
|
||||
*newConstructorParam("str", "withStr", "val"),
|
||||
*newConstructorParam("obj", "withObj", obj),
|
||||
*newConstructorParam("array", "withArray", array),
|
||||
*newConstructorParam("other", "other.withArray", nil),
|
||||
*newConstructorParam("foo", "last.path.withFoo", nil),
|
||||
}
|
||||
|
||||
c := newConstructor("new", params...)
|
||||
|
||||
o := nm.NewObject()
|
||||
|
||||
ctorBase := []nm.Noder{
|
||||
nm.NewVar("apiVersion"),
|
||||
nm.NewVar("kind"),
|
||||
}
|
||||
|
||||
key, err := c.Key()
|
||||
require.NoError(t, err)
|
||||
o.Set(key, c.Body(ctorBase...))
|
||||
|
||||
var buf bytes.Buffer
|
||||
err = printer.Fprint(&buf, o.Node())
|
||||
require.NoError(t, err)
|
||||
|
||||
testData, err := ioutil.ReadFile("testdata/constructor.libsonnet")
|
||||
require.NoError(t, err)
|
||||
|
||||
got := strings.TrimSpace(buf.String())
|
||||
expected := strings.TrimSpace(string(testData))
|
||||
assert.Equal(t, expected, got)
|
||||
}
|
||||
|
||||
func Test_constructorParam(t *testing.T) {
|
||||
obj, err := nm.KVFromMap(map[string]interface{}{"alpha": "beta"})
|
||||
require.NoError(t, err)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
cp *constructorParam
|
||||
option nm.OptionalArg
|
||||
isOptErr bool
|
||||
}{
|
||||
{
|
||||
name: "local property",
|
||||
cp: newConstructorParam("name", "withName", nil),
|
||||
option: nm.OptionalArg{Name: "name", Default: nm.NewStringDouble("")},
|
||||
},
|
||||
{
|
||||
name: "nested property",
|
||||
cp: newConstructorParam("name", "foo.bar.baz.withName", nil),
|
||||
option: nm.OptionalArg{Name: "name", Default: nm.NewStringDouble("")},
|
||||
},
|
||||
{
|
||||
name: "string",
|
||||
cp: newConstructorParam("name", "withName", "name"),
|
||||
option: nm.OptionalArg{Name: "name", Default: nm.NewStringDouble("name")},
|
||||
},
|
||||
{
|
||||
name: "map[string]interface{}",
|
||||
cp: newConstructorParam("name", "withName", map[string]interface{}{"alpha": "beta"}),
|
||||
option: nm.OptionalArg{Name: "name", Default: obj},
|
||||
},
|
||||
{
|
||||
name: "invalid item in map[string]interface{}",
|
||||
cp: newConstructorParam("name", "withName", map[string]interface{}{"alpha": []int{1}}),
|
||||
isOptErr: true,
|
||||
},
|
||||
{
|
||||
name: "array of strings",
|
||||
cp: newConstructorParam("name", "withName", []string{"one", "two"}),
|
||||
option: nm.OptionalArg{Name: "name",
|
||||
Default: nm.NewArray([]nm.Noder{nm.NewStringDouble("one"), nm.NewStringDouble("two")})},
|
||||
},
|
||||
{
|
||||
name: "float64",
|
||||
cp: newConstructorParam("name", "withName", 1.0),
|
||||
option: nm.OptionalArg{Name: "name", Default: nm.NewFloat(1.0)},
|
||||
},
|
||||
{
|
||||
name: "int",
|
||||
cp: newConstructorParam("name", "withName", 1),
|
||||
option: nm.OptionalArg{Name: "name", Default: nm.NewInt(1)},
|
||||
},
|
||||
{
|
||||
name: "bool",
|
||||
cp: newConstructorParam("name", "withName", true),
|
||||
option: nm.OptionalArg{Name: "name", Default: nm.NewBoolean(true)},
|
||||
},
|
||||
{
|
||||
name: "unknown type",
|
||||
cp: newConstructorParam("name", "withName", []int{1}),
|
||||
isOptErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
option, err := tc.cp.Option()
|
||||
if tc.isOptErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.option, option)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
215
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/custom_constructor.go
generated
vendored
Normal file
215
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/custom_constructor.go
generated
vendored
Normal file
|
@ -0,0 +1,215 @@
|
|||
package ksonnet
|
||||
|
||||
// NOTE: custom constructors will be removed at ksonnet 0.11
|
||||
|
||||
func locateConstructors(desc Description) []constructor {
|
||||
ctors, ok := customConstructors[desc]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return ctors
|
||||
}
|
||||
|
||||
func makeDescriptor(codebase, group, kind string) Description {
|
||||
return Description{
|
||||
Codebase: codebase,
|
||||
Group: group,
|
||||
Kind: kind,
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
customConstructors = map[Description][]constructor{
|
||||
makeDescriptor("api", "apps", "Deployment"): deploymentCtor,
|
||||
makeDescriptor("api", "apps", "DeploymentList"): objectList,
|
||||
makeDescriptor("api", "apps", "DeploymentRollback"): deploymentRollbackCtor,
|
||||
makeDescriptor("api", "apps", "Scale"): scaleCtor,
|
||||
makeDescriptor("api", "apps", "StatefulSet"): statefulSetCtor,
|
||||
makeDescriptor("api", "apps", "StatefulSetList"): objectList,
|
||||
|
||||
makeDescriptor("api", "extensions", "Deployment"): deploymentCtor,
|
||||
makeDescriptor("api", "extensions", "DeploymentList"): objectList,
|
||||
makeDescriptor("api", "extensions", "DeploymentRollback"): deploymentRollbackCtor,
|
||||
makeDescriptor("api", "extensions", "Scale"): scaleCtor,
|
||||
makeDescriptor("api", "extensions", "StatefulSet"): statefulSetCtor,
|
||||
makeDescriptor("api", "extensions", "StatefulSetList"): objectList,
|
||||
|
||||
makeDescriptor("api", "authentication", "TokenReview"): []constructor{
|
||||
*newConstructor(
|
||||
"new",
|
||||
*newConstructorParam("token", "mixin.spec.withToken", nil),
|
||||
),
|
||||
},
|
||||
|
||||
makeDescriptor("api", "autoscaling", "HorizontalPodAutoscalerList"): objectList,
|
||||
makeDescriptor("api", "autoscaling", "Scale"): scaleCtor,
|
||||
|
||||
makeDescriptor("api", "batch", "JobList"): objectList,
|
||||
makeDescriptor("api", "batch", "CronJobList"): objectList,
|
||||
|
||||
makeDescriptor("api", "certificates", "CertificateSigningRequestList"): objectList,
|
||||
|
||||
makeDescriptor("api", "core", "ConfigMap"): []constructor{
|
||||
*newConstructor(
|
||||
"new",
|
||||
*newConstructorParam("name", "mixin.metadata.withName", nil),
|
||||
*newConstructorParam("data", "withData", nil),
|
||||
),
|
||||
},
|
||||
makeDescriptor("api", "core", "ConfigMapList"): objectList,
|
||||
makeDescriptor("api", "core", "Container"): []constructor{
|
||||
*newConstructor(
|
||||
"new",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("image", "withImage", nil),
|
||||
),
|
||||
},
|
||||
makeDescriptor("api", "core", "ContainerPort"): []constructor{
|
||||
*newConstructor("new", *newConstructorParam("containerPort", "withContainerPort", nil)),
|
||||
*newConstructor("newNamed",
|
||||
*newConstructorParam("containerPort", "withContainerPort", nil),
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
),
|
||||
},
|
||||
makeDescriptor("api", "core", "EndpointsList"): objectList,
|
||||
makeDescriptor("api", "core", "EnvVar"): []constructor{
|
||||
*newConstructor("new",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("value", "withValue", nil)),
|
||||
*newConstructor("fromSecretRef",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("secretRefName", "mixin.valueFrom.secretKeyRef.withName", nil),
|
||||
*newConstructorParam("secretRefKey", "mixin.valueFrom.secretKeyRef.withKey", nil)),
|
||||
*newConstructor("fromFieldPath",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("fieldPath", "mixin.valueFrom.fieldRef.withFieldPath", nil)),
|
||||
},
|
||||
makeDescriptor("api", "core", "EventList"): objectList,
|
||||
makeDescriptor("api", "core", "KeyToPath"): []constructor{
|
||||
*newConstructor("new",
|
||||
*newConstructorParam("key", "withKey", nil),
|
||||
*newConstructorParam("path", "withPath", nil)),
|
||||
},
|
||||
makeDescriptor("api", "core", "LimitRangeList"): objectList,
|
||||
makeDescriptor("api", "core", "Namespace"): []constructor{
|
||||
*newConstructor("new",
|
||||
*newConstructorParam("name", "mixin.metadata.withName", nil)),
|
||||
},
|
||||
makeDescriptor("api", "core", "NamespaceList"): objectList,
|
||||
makeDescriptor("api", "core", "NodeList"): objectList,
|
||||
makeDescriptor("api", "core", "PersistentVolumeClaimList"): objectList,
|
||||
makeDescriptor("api", "core", "PersistentVolumeList"): objectList,
|
||||
makeDescriptor("api", "core", "PodList"): objectList,
|
||||
makeDescriptor("api", "core", "PodTemplateList"): objectList,
|
||||
makeDescriptor("api", "core", "ReplicationControllerList"): objectList,
|
||||
makeDescriptor("api", "core", "ResourceQuotaList"): objectList,
|
||||
makeDescriptor("api", "core", "Secret"): []constructor{
|
||||
*newConstructor("new",
|
||||
*newConstructorParam("name", "mixin.metadata.withName", nil),
|
||||
*newConstructorParam("data", "withData", nil),
|
||||
*newConstructorParam("type", "withType", "Opaque")),
|
||||
*newConstructor("new",
|
||||
*newConstructorParam("name", "mixin.metadata.withName", nil),
|
||||
*newConstructorParam("stringDate", "withStringData", nil),
|
||||
*newConstructorParam("type", "withType", "Opaque")),
|
||||
},
|
||||
makeDescriptor("api", "core", "SecretList"): objectList,
|
||||
makeDescriptor("api", "core", "Service"): []constructor{
|
||||
*newConstructor("new",
|
||||
*newConstructorParam("name", "mixin.metadata.withName", nil),
|
||||
*newConstructorParam("selector", "mixin.spec.withSelector", nil),
|
||||
*newConstructorParam("ports", "mixin.spec.withPorts", nil)),
|
||||
},
|
||||
makeDescriptor("api", "core", "ServiceAccount"): []constructor{
|
||||
*newConstructor("new",
|
||||
*newConstructorParam("name", "mixin.metadata.withName", nil)),
|
||||
},
|
||||
makeDescriptor("api", "core", "ServiceAccountList"): objectList,
|
||||
makeDescriptor("api", "core", "ServiceList"): objectList,
|
||||
makeDescriptor("api", "core", "ServicePort"): []constructor{
|
||||
*newConstructor("new",
|
||||
*newConstructorParam("port", "withPort", nil),
|
||||
*newConstructorParam("targetPort", "withTargetPort", nil)),
|
||||
*newConstructor("newNamed",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("port", "withPort", nil),
|
||||
*newConstructorParam("targetPort", "withTargetPort", nil)),
|
||||
},
|
||||
makeDescriptor("api", "core", "Volume"): []constructor{
|
||||
*newConstructor(
|
||||
"fromConfigMap",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("configMapName", "mixin.configMap.withName", nil),
|
||||
*newConstructorParam("configMapItems", "mixin.configMap.withItems", nil)),
|
||||
*newConstructor("fromEmptyDir",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("emptyDir", "mixin.emptyDir.mixinInstance",
|
||||
map[string]interface{}{})),
|
||||
*newConstructor("fromPersistentVolumeClaim",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("emptyDir", "mixin.persistentVolumeClaim.withClaimName", nil)),
|
||||
*newConstructor("fromHostPath",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("hostPath", "mixin.hostPath.withPath", nil)),
|
||||
*newConstructor("fromSecret",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("secretName", "mixin.secret.withSecretName", nil)),
|
||||
},
|
||||
makeDescriptor("api", "core", "VolumeMount"): []constructor{
|
||||
*newConstructor("new",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
*newConstructorParam("mountPath", "withMountPath", nil),
|
||||
*newConstructorParam("readOnly", "withReadOnly", false)),
|
||||
},
|
||||
}
|
||||
|
||||
// customConstructor definitions
|
||||
|
||||
deploymentCtor = []constructor{
|
||||
*newConstructor(
|
||||
"new",
|
||||
*newConstructorParam("name", "mixin.metadata.withName", nil),
|
||||
*newConstructorParam("replicas", "mixin.spec.withReplicas", 1),
|
||||
*newConstructorParam("containers", "mixin.spec.template.spec.withContainers", nil),
|
||||
*newConstructorParam("podLabels", "mixin.spec.template.metadata.withLabels",
|
||||
map[string]interface{}{"app": "name"}),
|
||||
),
|
||||
}
|
||||
deploymentRollbackCtor = []constructor{
|
||||
*newConstructor(
|
||||
"new",
|
||||
*newConstructorParam("name", "withName", nil),
|
||||
),
|
||||
}
|
||||
objectList = []constructor{
|
||||
*newConstructor(
|
||||
"new",
|
||||
*newConstructorParam("items", "withItems", nil),
|
||||
),
|
||||
}
|
||||
scaleCtor = []constructor{
|
||||
*newConstructor(
|
||||
"new",
|
||||
*newConstructorParam("replicas", "mixin.spec.withReplicas", 1),
|
||||
),
|
||||
}
|
||||
statefulSetCtor = []constructor{
|
||||
*newConstructor(
|
||||
"new",
|
||||
*newConstructorParam("name", "mixin.metadata.withName", nil),
|
||||
*newConstructorParam("replicas", "mixin.spec.withReplicas", 1),
|
||||
*newConstructorParam("containers", "mixin.spec.template.spec.withContainers", nil),
|
||||
*newConstructorParam("volumeClaims", "mixin.spec.withVolumeClaimTemplates", nil),
|
||||
*newConstructorParam("podLabels", "mixin.spec.template.metadata.withLabels", map[string]interface{}{
|
||||
"app": "name",
|
||||
}),
|
||||
),
|
||||
}
|
||||
tokenReviewCtor = []constructor{
|
||||
*newConstructor(
|
||||
"new",
|
||||
*newConstructorParam("token", "mixin.spec.withToken", nil),
|
||||
),
|
||||
}
|
||||
)
|
97
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/description.go
generated
vendored
Normal file
97
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/description.go
generated
vendored
Normal file
|
@ -0,0 +1,97 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
const (
|
||||
groupCore = "core"
|
||||
)
|
||||
|
||||
var (
|
||||
reNames = []*regexp.Regexp{
|
||||
// Core API, pre-1.8 Kubernetes OR non-Kubernetes codebase APIs
|
||||
regexp.MustCompile(`io\.k8s\.(?P<codebase>\S+)\.pkg\.api\.(?P<version>\S+)\.(?P<kind>\S+)`),
|
||||
// Core API, 1.8+ Kubernetes
|
||||
regexp.MustCompile(`io\.k8s\.api\.(?P<packageType>core)\.(?P<version>\S+)\.(?P<kind>\S+)`),
|
||||
// Other APIs, pre-1.8 Kubernetes OR non-Kubernetes codebase APIs
|
||||
regexp.MustCompile(`io\.k8s\.(?P<codebase>\S+)\.pkg\.(?P<packageType>apis)\.(?P<group>\S+)\.(?P<version>\S+)\.(?P<kind>\S+)`),
|
||||
// Other APIs, 1.8+ Kubernetes
|
||||
regexp.MustCompile(`io\.k8s\.api\.(?P<group>\S+)\.(?P<version>\S+)\.(?P<kind>\S+)`),
|
||||
// Util packageType
|
||||
regexp.MustCompile(`io\.k8s\.(?P<codebase>\S+)\.pkg\.(?P<packageType>util)\.(?P<version>\S+)\.(?P<kind>\S+)`),
|
||||
// Version packageType
|
||||
regexp.MustCompile(`io\.k8s\.(?P<codebase>\S+)\.pkg\.(?P<packageType>version)\.(?P<kind>\S+)`),
|
||||
// Runtime packageType
|
||||
regexp.MustCompile(`io\.k8s\.(?P<codebase>\S+)\.pkg\.(?P<packageType>runtime)\.(?P<kind>\S+)`),
|
||||
}
|
||||
)
|
||||
|
||||
// UnknownDefinitionError is an error signifying an unknown definition.
|
||||
type UnknownDefinitionError struct {
|
||||
name string
|
||||
}
|
||||
|
||||
var _ error = (*UnknownDefinitionError)(nil)
|
||||
|
||||
// NewUnknownDefinitionError creates an instance of UnknownDefinitionError.
|
||||
func NewUnknownDefinitionError(name string) *UnknownDefinitionError {
|
||||
return &UnknownDefinitionError{
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *UnknownDefinitionError) Error() string {
|
||||
return fmt.Sprintf("%q is not a known definition name", e.name)
|
||||
}
|
||||
|
||||
// Description is a description of a Kubernetes definition name.
|
||||
type Description struct {
|
||||
Name string
|
||||
Version string
|
||||
Kind string
|
||||
Group string
|
||||
Codebase string
|
||||
}
|
||||
|
||||
// Validate validates the Description. A description is valid if it has a version.
|
||||
func (d *Description) Validate() error {
|
||||
if d.Version == "" {
|
||||
return fmt.Errorf("version is nil for %q", d.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseDescription takes a definition name and returns a Description.
|
||||
func ParseDescription(name string) (*Description, error) {
|
||||
for _, r := range reNames {
|
||||
if match := r.FindStringSubmatch(name); len(match) > 0 {
|
||||
|
||||
result := make(map[string]string)
|
||||
for i, name := range r.SubexpNames() {
|
||||
if i != 0 {
|
||||
result[name] = match[i]
|
||||
}
|
||||
}
|
||||
|
||||
codebase := result["codebase"]
|
||||
if codebase == "" {
|
||||
codebase = "api"
|
||||
}
|
||||
|
||||
d := &Description{
|
||||
Name: name,
|
||||
Version: result["version"],
|
||||
Kind: result["kind"],
|
||||
Group: result["group"],
|
||||
Codebase: codebase,
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, &UnknownDefinitionError{name: name}
|
||||
}
|
103
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/description_test.go
generated
vendored
Normal file
103
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/description_test.go
generated
vendored
Normal file
|
@ -0,0 +1,103 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_UnknownDefinitionError(t *testing.T) {
|
||||
err := NewUnknownDefinitionError("name")
|
||||
require.Equal(t, `"name" is not a known definition name`, err.Error())
|
||||
}
|
||||
|
||||
func Test_Description_Validate(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
description Description
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
name: "with version",
|
||||
description: Description{Version: "version"},
|
||||
},
|
||||
{
|
||||
name: "with out version",
|
||||
description: Description{},
|
||||
isErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := tc.description.Validate()
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ParseDescription(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
description Description
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "foo.bar",
|
||||
err: NewUnknownDefinitionError("foo.bar"),
|
||||
},
|
||||
{
|
||||
name: "io.k8s.apimachinery.pkg.version.Info",
|
||||
description: Description{
|
||||
Name: "io.k8s.apimachinery.pkg.version.Info",
|
||||
Kind: "Info",
|
||||
Codebase: "apimachinery",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "io.k8s.apimachinery.pkg.apis.meta.v1.Status",
|
||||
description: Description{
|
||||
Name: "io.k8s.apimachinery.pkg.apis.meta.v1.Status",
|
||||
Kind: "Status",
|
||||
Version: "v1",
|
||||
Group: "meta",
|
||||
Codebase: "apimachinery",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "io.k8s.api.admissionregistration.v1alpha1.AdmissionHookClientConfig",
|
||||
description: Description{
|
||||
Name: "io.k8s.api.admissionregistration.v1alpha1.AdmissionHookClientConfig",
|
||||
Version: "v1alpha1",
|
||||
Kind: "AdmissionHookClientConfig",
|
||||
Group: "admissionregistration",
|
||||
Codebase: "api",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "io.k8s.codebase.pkg.api.version.kind",
|
||||
description: Description{
|
||||
Name: "io.k8s.codebase.pkg.api.version.kind",
|
||||
Version: "version",
|
||||
Kind: "kind",
|
||||
Codebase: "codebase",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
d, err := ParseDescription(tc.name)
|
||||
if tc.err == nil {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.description, *d)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
188
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/document.go
generated
vendored
Normal file
188
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/document.go
generated
vendored
Normal file
|
@ -0,0 +1,188 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type renderNodeFn func(c *Catalog, a *APIObject) (*nm.Object, error)
|
||||
|
||||
// Document represents a ksonnet lib document.
|
||||
type Document struct {
|
||||
catalog *Catalog
|
||||
|
||||
// these are defined to aid testing Document
|
||||
typesFn func() ([]Type, error)
|
||||
fieldsFn func() ([]Field, error)
|
||||
renderFn func(fn renderNodeFn, c *Catalog, o *nm.Object, groups []Group) error
|
||||
renderGroups func(doc *Document, container *nm.Object) error
|
||||
renderHiddenGroups func(doc *Document, container *nm.Object) error
|
||||
objectNodeFn func(c *Catalog, a *APIObject) (*nm.Object, error)
|
||||
}
|
||||
|
||||
// NewDocument creates an instance of Document.
|
||||
func NewDocument(catalog *Catalog) (*Document, error) {
|
||||
if catalog == nil {
|
||||
return nil, errors.New("catalog is nil")
|
||||
}
|
||||
|
||||
return &Document{
|
||||
catalog: catalog,
|
||||
typesFn: catalog.Types,
|
||||
fieldsFn: catalog.Fields,
|
||||
renderFn: render,
|
||||
renderGroups: renderGroups,
|
||||
renderHiddenGroups: renderHiddenGroups,
|
||||
objectNodeFn: apiObjectNode,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Groups returns an alphabetically sorted list of groups.
|
||||
func (d *Document) Groups() ([]Group, error) {
|
||||
resources, err := d.typesFn()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "retrieve resources")
|
||||
}
|
||||
|
||||
var nodeObjects []Object
|
||||
for _, resource := range resources {
|
||||
res := resource
|
||||
nodeObjects = append(nodeObjects, &res)
|
||||
}
|
||||
|
||||
return d.groups(nodeObjects)
|
||||
}
|
||||
|
||||
// HiddenGroups returns an alphabetically sorted list of hidden groups.
|
||||
func (d *Document) HiddenGroups() ([]Group, error) {
|
||||
resources, err := d.fieldsFn()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "retrieve types")
|
||||
}
|
||||
|
||||
var nodeObjects []Object
|
||||
for _, resource := range resources {
|
||||
res := resource
|
||||
nodeObjects = append(nodeObjects, &res)
|
||||
}
|
||||
|
||||
return d.groups(nodeObjects)
|
||||
}
|
||||
|
||||
func (d *Document) groups(resources []Object) ([]Group, error) {
|
||||
gMap := make(map[string]*Group)
|
||||
|
||||
for i := range resources {
|
||||
res := resources[i]
|
||||
name := res.Group()
|
||||
|
||||
g, ok := gMap[name]
|
||||
if !ok {
|
||||
g = NewGroup(name)
|
||||
gMap[name] = g
|
||||
}
|
||||
|
||||
g.AddResource(res)
|
||||
gMap[name] = g
|
||||
}
|
||||
|
||||
var groupNames []string
|
||||
|
||||
for name := range gMap {
|
||||
groupNames = append(groupNames, name)
|
||||
}
|
||||
|
||||
sort.Strings(groupNames)
|
||||
|
||||
var groups []Group
|
||||
|
||||
for _, name := range groupNames {
|
||||
g := gMap[name]
|
||||
groups = append(groups, *g)
|
||||
}
|
||||
|
||||
return groups, nil
|
||||
}
|
||||
|
||||
// Node converts a document to a node.
|
||||
func (d *Document) Node() (*nm.Object, error) {
|
||||
out := nm.NewObject()
|
||||
|
||||
metadata := map[string]interface{}{
|
||||
"kubernetesVersion": d.catalog.Version(),
|
||||
"checksum": d.catalog.Checksum(),
|
||||
}
|
||||
metadataObj, err := nm.KVFromMap(metadata)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "create metadata key")
|
||||
}
|
||||
out.Set(nm.InheritedKey("__ksonnet"), metadataObj)
|
||||
|
||||
if err := d.renderGroups(d, out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hidden := nm.NewObject()
|
||||
|
||||
if err := d.renderHiddenGroups(d, hidden); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out.Set(nm.LocalKey("hidden"), hidden)
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func render(fn renderNodeFn, catalog *Catalog, o *nm.Object, groups []Group) error {
|
||||
for _, group := range groups {
|
||||
groupNode := group.Node()
|
||||
for _, version := range group.Versions() {
|
||||
versionNode := version.Node()
|
||||
for _, apiObject := range version.APIObjects() {
|
||||
objectNode, err := fn(catalog, &apiObject)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "create node %s", apiObject.Kind())
|
||||
}
|
||||
|
||||
versionNode.Set(
|
||||
nm.NewKey(apiObject.Kind(), nm.KeyOptComment(apiObject.Description())),
|
||||
objectNode)
|
||||
}
|
||||
|
||||
groupNode.Set(nm.NewKey(version.Name()), versionNode)
|
||||
}
|
||||
|
||||
o.Set(nm.NewKey(group.Name()), groupNode)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func renderGroups(d *Document, container *nm.Object) error {
|
||||
groups, err := d.Groups()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "retrieve groups")
|
||||
}
|
||||
|
||||
if err = d.renderFn(d.objectNodeFn, d.catalog, container, groups); err != nil {
|
||||
return errors.Wrap(err, "render groups")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func renderHiddenGroups(d *Document, container *nm.Object) error {
|
||||
groups, err := d.HiddenGroups()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "retrieve hidden groups")
|
||||
}
|
||||
|
||||
if err = d.renderFn(d.objectNodeFn, d.catalog, container, groups); err != nil {
|
||||
return errors.Wrap(err, "render hidden groups")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
98
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/document_integration_test.go
generated
vendored
Normal file
98
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/document_integration_test.go
generated
vendored
Normal file
|
@ -0,0 +1,98 @@
|
|||
package ksonnet_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func testdata(name string) string {
|
||||
return filepath.Join("testdata", name)
|
||||
}
|
||||
|
||||
func TestDocument_Integration(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "document")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
lib := genDoc(t, "testdata/swagger-1.8.json")
|
||||
|
||||
k8sPath := filepath.Join(dir, "k8s.libsonnet")
|
||||
writeFile(t, k8sPath, lib.K8s)
|
||||
verifyJsonnet(t, dir, "k8s.libsonnet")
|
||||
|
||||
ksPath := filepath.Join(dir, "k.libsonnet")
|
||||
writeFile(t, ksPath, lib.Extensions)
|
||||
verifyJsonnet(t, dir, "k.libsonnet")
|
||||
|
||||
compPath := filepath.Join(dir, "component.libsonnet")
|
||||
copyFile(t, testdata("component.libsonnet"), compPath)
|
||||
|
||||
cmd := exec.Command(jsonnetCmd(), "component.libsonnet")
|
||||
cmd.Dir = dir
|
||||
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatal(string(out))
|
||||
}
|
||||
|
||||
expected, err := ioutil.ReadFile(testdata("component.json"))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, string(expected), string(out))
|
||||
}
|
||||
|
||||
func jsonnetCmd() string {
|
||||
bin := os.Getenv("JSONNET_BIN")
|
||||
if bin == "" {
|
||||
bin = "jsonnet"
|
||||
}
|
||||
|
||||
return bin
|
||||
}
|
||||
|
||||
func verifyJsonnet(t *testing.T, dir, fileName string) {
|
||||
cmd := exec.Command(jsonnetCmd(), "fmt", fileName)
|
||||
cmd.Dir = dir
|
||||
|
||||
var b bytes.Buffer
|
||||
cmd.Stderr = &b
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
t.Fatalf("%s verification failed: %v", fileName, b.String())
|
||||
}
|
||||
}
|
||||
|
||||
func genDoc(t *testing.T, input string) *ksonnet.Lib {
|
||||
lib, err := ksonnet.GenerateLib(input)
|
||||
require.NoError(t, err)
|
||||
|
||||
return lib
|
||||
}
|
||||
|
||||
func writeFile(t *testing.T, name string, content []byte) {
|
||||
err := ioutil.WriteFile(name, content, 0600)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func copyFile(t *testing.T, src, dest string) {
|
||||
from, err := os.Open(src)
|
||||
require.NoError(t, err)
|
||||
defer from.Close()
|
||||
|
||||
to, err := os.OpenFile(dest, os.O_RDWR|os.O_CREATE, 0666)
|
||||
require.NoError(t, err)
|
||||
defer to.Close()
|
||||
|
||||
_, err = io.Copy(to, from)
|
||||
require.NoError(t, err)
|
||||
}
|
211
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/document_test.go
generated
vendored
Normal file
211
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/document_test.go
generated
vendored
Normal file
|
@ -0,0 +1,211 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/go-openapi/spec"
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDocument_nil_catalog(t *testing.T) {
|
||||
_, err := NewDocument(nil)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestDocument_Groups(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
groups, err := doc.Groups()
|
||||
require.NoError(t, err)
|
||||
|
||||
var names []string
|
||||
for _, group := range groups {
|
||||
names = append(names, group.Name())
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
expected := []string{"admissionregistration", "apiextensions", "apiregistration", "apps",
|
||||
"authentication", "authorization", "autoscaling", "batch", "certificates", "core",
|
||||
"extensions", "meta", "networking", "policy", "rbac", "scheduling", "settings", "storage"}
|
||||
require.Equal(t, expected, names)
|
||||
}
|
||||
|
||||
func TestDocument_Groups_types_error(t *testing.T) {
|
||||
fn := func(*Catalog, map[string]spec.Schema, []string) (map[string]Property, error) {
|
||||
return nil, errors.New("fail")
|
||||
}
|
||||
|
||||
c := initCatalog(t, "swagger-1.8.json", CatalogOptExtractProperties(fn))
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = doc.Groups()
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestDocument_HiddenGroups(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
groups, err := doc.HiddenGroups()
|
||||
require.NoError(t, err)
|
||||
|
||||
var names []string
|
||||
for _, group := range groups {
|
||||
names = append(names, group.Name())
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
|
||||
expected := []string{"admissionregistration", "apiextensions", "apiregistration", "apps",
|
||||
"authentication", "authorization", "autoscaling", "batch", "certificates", "core",
|
||||
"extensions", "meta", "networking", "policy", "rbac", "scheduling", "settings",
|
||||
"storage"}
|
||||
require.Equal(t, expected, names)
|
||||
}
|
||||
|
||||
func TestDocument_HiddenGroups_fields_error(t *testing.T) {
|
||||
fn := func(*Catalog, map[string]spec.Schema, []string) (map[string]Property, error) {
|
||||
return nil, errors.New("fail")
|
||||
}
|
||||
|
||||
c := initCatalog(t, "swagger-1.8.json", CatalogOptExtractProperties(fn))
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = doc.HiddenGroups()
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestDocument_Node(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
n, err := doc.Node()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, name := range []string{"apps", "apiextensions", "core"} {
|
||||
_, ok := n.Get(name).(*nm.Object)
|
||||
assert.True(t, ok, "node %s was not found", name)
|
||||
}
|
||||
|
||||
local, ok := n.Get("hidden").(*nm.Object)
|
||||
require.True(t, ok)
|
||||
|
||||
for _, name := range []string{"apps", "core", "meta"} {
|
||||
_, ok := local.Get(name).(*nm.Object)
|
||||
assert.True(t, ok, "hidden node %s was not found", name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDocument_Node_groups_error(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
doc.renderGroups = func(*Document, *nm.Object) error {
|
||||
return errors.New("fail")
|
||||
}
|
||||
|
||||
_, err = doc.Node()
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestDocument_Node_hidden_groups_error(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
doc.renderHiddenGroups = func(*Document, *nm.Object) error {
|
||||
return errors.New("fail")
|
||||
}
|
||||
|
||||
_, err = doc.Node()
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestDocument_Node_api_object_error(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
doc.objectNodeFn = func(*Catalog, *APIObject) (*nm.Object, error) {
|
||||
return nil, errors.New("fail")
|
||||
}
|
||||
|
||||
_, err = doc.Node()
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func Test_renderGroups_groups_error(t *testing.T) {
|
||||
fn := func(*Catalog, map[string]spec.Schema, []string) (map[string]Property, error) {
|
||||
return nil, errors.New("fail")
|
||||
}
|
||||
|
||||
c := initCatalog(t, "swagger-1.8.json", CatalogOptExtractProperties(fn))
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = renderGroups(doc, nm.NewObject())
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestDocument_renderGroups_render_error(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
doc.renderFn = func(fn renderNodeFn, c *Catalog, o *nm.Object, groups []Group) error {
|
||||
return errors.New("fail")
|
||||
}
|
||||
|
||||
err = renderGroups(doc, nm.NewObject())
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func Test_renderHiddenGroups_hidden_groups_error(t *testing.T) {
|
||||
fn := func(*Catalog, map[string]spec.Schema, []string) (map[string]Property, error) {
|
||||
return nil, errors.New("fail")
|
||||
}
|
||||
|
||||
c := initCatalog(t, "swagger-1.8.json", CatalogOptExtractProperties(fn))
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = renderHiddenGroups(doc, nm.NewObject())
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestDocument_renderHiddenGroups_render_error(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
doc, err := NewDocument(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
doc.renderFn = func(fn renderNodeFn, c *Catalog, o *nm.Object, groups []Group) error {
|
||||
return errors.New("fail")
|
||||
}
|
||||
|
||||
err = renderHiddenGroups(doc, nm.NewObject())
|
||||
require.Error(t, err)
|
||||
}
|
1110
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/emit.go
generated
vendored
Normal file
1110
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/emit.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
286
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/extension.go
generated
vendored
Normal file
286
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/extension.go
generated
vendored
Normal file
|
@ -0,0 +1,286 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
localK8s = "k8s"
|
||||
)
|
||||
|
||||
// Extension represents a ksonnet lib extension document.
|
||||
type Extension struct {
|
||||
catalog *Catalog
|
||||
}
|
||||
|
||||
// NewExtension creates an an instance of Extension.
|
||||
func NewExtension(catalog *Catalog) *Extension {
|
||||
return &Extension{
|
||||
catalog: catalog,
|
||||
}
|
||||
}
|
||||
|
||||
// Node converts an extension to a node.
|
||||
func (e *Extension) Node() (nm.Noder, error) {
|
||||
ext, err := e.genK8sExtension()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
extBinary := nm.NewBinary(nm.NewVar(localK8s), ext, nm.BopPlus)
|
||||
|
||||
fns := genMapContainers(extBinary)
|
||||
|
||||
k8sImportFile := nm.NewImport("k8s.libsonnet")
|
||||
k8sImport := nm.NewLocal(localK8s, k8sImportFile, fns)
|
||||
|
||||
return k8sImport, nil
|
||||
}
|
||||
|
||||
func (e *Extension) genK8sExtension() (*nm.Object, error) {
|
||||
gi := makeGroupItems()
|
||||
|
||||
e.listExtension(gi)
|
||||
|
||||
if err := e.mapContainersExtension(gi); err != nil {
|
||||
return nil, errors.Wrap(err, "map container extensions")
|
||||
}
|
||||
|
||||
return gi.Node(), nil
|
||||
}
|
||||
|
||||
func (e *Extension) mapContainersExtension(gi *groupItems) error {
|
||||
types, err := e.catalog.TypesWithDescendant("io.k8s.api.core.v1.PodSpec")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "find types with PodSec")
|
||||
}
|
||||
|
||||
mapping := nm.NewObject()
|
||||
mapping.Set(
|
||||
nm.FunctionKey("mapContainers", []string{"f"}),
|
||||
nm.ApplyCall("fn.mapContainers", nm.NewVar("f")),
|
||||
)
|
||||
|
||||
mapping.Set(
|
||||
nm.FunctionKey("mapContainersWithName", []string{"names", "f"}),
|
||||
nm.ApplyCall("fn.mapContainersWithName", nm.NewVar("names"), nm.NewVar("f")),
|
||||
)
|
||||
|
||||
for _, ty := range types {
|
||||
parts := strings.Split(ty.component.String(), ".")
|
||||
gi.add(parts[0], parts[1], FormatKind(parts[2]), mapping, true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Extension) listExtension(gi *groupItems) {
|
||||
apiVersion := nm.NewObject()
|
||||
apiVersion.Set(nm.InheritedKey("apiVersion"), nm.NewStringDouble("v1"))
|
||||
|
||||
kind := nm.NewObject()
|
||||
kind.Set(nm.InheritedKey("kind"), nm.NewStringDouble("List"))
|
||||
|
||||
items := nm.ApplyCall("self.items", nm.NewVar("items"))
|
||||
|
||||
o := nm.NewObject()
|
||||
o.Set(
|
||||
nm.FunctionKey("new", []string{"items"}),
|
||||
nm.Combine(apiVersion, kind, items),
|
||||
)
|
||||
o.Set(
|
||||
nm.FunctionKey("items", []string{"items"}),
|
||||
convertToArray("items", "", true),
|
||||
)
|
||||
|
||||
gi.add("core", "v1", "list", o, false)
|
||||
}
|
||||
|
||||
type nodeMixin struct {
|
||||
node nm.Noder
|
||||
isMixin bool
|
||||
}
|
||||
|
||||
type groupItems struct {
|
||||
groups map[string]map[string]map[string]nodeMixin
|
||||
}
|
||||
|
||||
func makeGroupItems() *groupItems {
|
||||
return &groupItems{
|
||||
groups: make(map[string]map[string]map[string]nodeMixin),
|
||||
}
|
||||
}
|
||||
|
||||
func (gi *groupItems) add(group, version, key string, node nm.Noder, isMixin bool) {
|
||||
g, ok := gi.groups[group]
|
||||
if !ok {
|
||||
g = make(map[string]map[string]nodeMixin)
|
||||
gi.groups[group] = g
|
||||
}
|
||||
|
||||
v, ok := g[version]
|
||||
if !ok {
|
||||
v = make(map[string]nodeMixin)
|
||||
g[version] = v
|
||||
}
|
||||
|
||||
v[key] = nodeMixin{node: node, isMixin: isMixin}
|
||||
}
|
||||
|
||||
func (gi *groupItems) Node() *nm.Object {
|
||||
var groupNames []string
|
||||
for name := range gi.groups {
|
||||
groupNames = append(groupNames, name)
|
||||
}
|
||||
sort.Strings(groupNames)
|
||||
|
||||
o := nm.NewObject()
|
||||
|
||||
for _, groupName := range groupNames {
|
||||
group := gi.groups[groupName]
|
||||
groupObject := nm.NewObject()
|
||||
|
||||
var versionNames []string
|
||||
for name := range group {
|
||||
versionNames = append(versionNames, name)
|
||||
}
|
||||
sort.Strings(versionNames)
|
||||
|
||||
for _, versionName := range versionNames {
|
||||
version := group[versionName]
|
||||
versionObject := nm.NewObject()
|
||||
|
||||
var keyNames []string
|
||||
for name := range version {
|
||||
keyNames = append(keyNames, name)
|
||||
}
|
||||
sort.Strings(keyNames)
|
||||
|
||||
for _, keyName := range keyNames {
|
||||
node := version[keyName].node
|
||||
isMixin := version[keyName].isMixin
|
||||
|
||||
if isMixin {
|
||||
parent := nm.NewCall(fmt.Sprintf("k8s.%s.%s.%s", groupName, versionName, keyName))
|
||||
node = nm.NewBinary(parent, node, nm.BopPlus)
|
||||
}
|
||||
versionObject.Set(nm.NewKey(keyName), node)
|
||||
}
|
||||
|
||||
parent := nm.NewCall(fmt.Sprintf("k8s.%s.%s", groupName, versionName))
|
||||
groupObject.Set(nm.NewKey(versionName), nm.NewBinary(parent, versionObject, nm.BopPlus))
|
||||
}
|
||||
|
||||
parent := nm.NewCall(fmt.Sprintf("k8s.%s", groupName))
|
||||
o.Set(nm.NewKey(groupName), nm.NewBinary(parent, groupObject, nm.BopPlus))
|
||||
}
|
||||
|
||||
return o
|
||||
}
|
||||
|
||||
func genMapContainers(body nm.Noder) *nm.Local {
|
||||
o := nm.NewObject()
|
||||
|
||||
o.Set(
|
||||
nm.FunctionKey("mapContainers", []string{"f"}),
|
||||
createMapContainersFn(),
|
||||
)
|
||||
|
||||
o.Set(
|
||||
nm.FunctionKey("mapContainersWithName", []string{"names", "f"}),
|
||||
createMapContainersWithName(),
|
||||
)
|
||||
|
||||
return nm.NewLocal("fn", o, body)
|
||||
}
|
||||
|
||||
func createMapContainersFn() *nm.Object {
|
||||
o := nm.NewObject()
|
||||
o.Set(
|
||||
nm.LocalKey("podContainers"),
|
||||
nm.NewCall("super.spec.template.spec.containers"),
|
||||
)
|
||||
|
||||
templateSpecObject := nm.NewObject()
|
||||
templateSpecObject.Set(
|
||||
nm.InheritedKey("containers"),
|
||||
nm.ApplyCall("std.map", nm.NewVar("f"), nm.NewVar("podContainers")),
|
||||
)
|
||||
|
||||
templateObject := nm.NewObject()
|
||||
templateObject.Set(
|
||||
nm.InheritedKey("spec", nm.KeyOptMixin(true)),
|
||||
templateSpecObject,
|
||||
)
|
||||
|
||||
specObject := nm.NewObject()
|
||||
specObject.Set(
|
||||
nm.InheritedKey("template", nm.KeyOptMixin(true)),
|
||||
templateObject,
|
||||
)
|
||||
|
||||
o.Set(
|
||||
nm.InheritedKey("spec", nm.KeyOptMixin(true)),
|
||||
specObject,
|
||||
)
|
||||
|
||||
return o
|
||||
}
|
||||
|
||||
func createMapContainersWithName() *nm.Local {
|
||||
c1Binary := nm.NewBinary(
|
||||
nm.ApplyCall("std.objectHas", nm.NewVar("c"), nm.NewStringDouble("name")),
|
||||
nm.ApplyCall("inNameSet", nm.NewCall("c.name")),
|
||||
nm.BopAnd,
|
||||
)
|
||||
|
||||
c1True := nm.ApplyCall("f", nm.NewVar("c"))
|
||||
c1False := nm.NewVar("c")
|
||||
|
||||
c1 := nm.NewConditional(c1Binary, c1True, c1False)
|
||||
|
||||
apply := nm.NewApply(c1, []nm.Noder{nm.NewVar("c")}, nil)
|
||||
|
||||
runMap := nm.ApplyCall("self.mapContainers", apply)
|
||||
|
||||
a := nm.NewVar("nameSet")
|
||||
b := nm.ApplyCall("std.set", nm.NewArray([]nm.Noder{nm.NewVar("name")}))
|
||||
|
||||
inNameSet := nm.NewLocal(
|
||||
"inNameSet",
|
||||
nm.NewFunction([]string{"name"}, genIsIntersection(a, b)),
|
||||
runMap,
|
||||
)
|
||||
nameSet := nm.NewLocal("nameSet", setArray("names"), inNameSet)
|
||||
|
||||
return nameSet
|
||||
}
|
||||
|
||||
func setArray(varName string) *nm.Conditional {
|
||||
bin := nm.NewBinary(
|
||||
nm.ApplyCall("std.type", nm.NewVar(varName)),
|
||||
nm.NewStringDouble("array"),
|
||||
nm.BopEqual,
|
||||
)
|
||||
|
||||
tBranch := nm.ApplyCall("std.set", nm.NewVar(varName))
|
||||
fBranch := nm.ApplyCall("std.set", nm.NewArray([]nm.Noder{nm.NewVar(varName)}))
|
||||
|
||||
return nm.NewConditional(bin, tBranch, fBranch)
|
||||
}
|
||||
|
||||
func genIsIntersection(a, b nm.Noder) *nm.Binary {
|
||||
intersection := nm.ApplyCall("std.setInter", a, b)
|
||||
checkLen := nm.ApplyCall("std.length", intersection)
|
||||
|
||||
return nm.NewBinary(
|
||||
checkLen,
|
||||
nm.NewInt(0),
|
||||
nm.BopGreater,
|
||||
)
|
||||
}
|
20
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/extension_test.go
generated
vendored
Normal file
20
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/extension_test.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestExtension_Output(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
e := NewExtension(c)
|
||||
|
||||
node, err := e.Node()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, printer.Fprint(ioutil.Discard, node.Node()))
|
||||
}
|
76
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/field.go
generated
vendored
Normal file
76
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/field.go
generated
vendored
Normal file
|
@ -0,0 +1,76 @@
|
|||
package ksonnet
|
||||
|
||||
// Field is a Kubernetes field.
|
||||
type Field struct {
|
||||
kind string
|
||||
description string
|
||||
properties map[string]Property
|
||||
version string
|
||||
group string
|
||||
codebase string
|
||||
identifier string
|
||||
}
|
||||
|
||||
var _ Object = (*Field)(nil)
|
||||
|
||||
// NewField creates an instance of Field.
|
||||
func NewField(id, desc, codebase, group, ver, kind string, props map[string]Property) *Field {
|
||||
return &Field{
|
||||
identifier: id,
|
||||
description: desc,
|
||||
group: group,
|
||||
codebase: codebase,
|
||||
version: ver,
|
||||
kind: kind,
|
||||
properties: props,
|
||||
}
|
||||
}
|
||||
|
||||
// Kind is the kind for this field.
|
||||
func (f *Field) Kind() string {
|
||||
return f.kind
|
||||
}
|
||||
|
||||
// Version is the version for this field.
|
||||
func (f *Field) Version() string {
|
||||
return f.version
|
||||
}
|
||||
|
||||
// Codebase is the codebase for this field.
|
||||
func (f *Field) Codebase() string {
|
||||
return f.codebase
|
||||
}
|
||||
|
||||
// Group is the group for this field.
|
||||
func (f *Field) Group() string {
|
||||
if f.group == "" {
|
||||
return "core"
|
||||
}
|
||||
|
||||
return f.group
|
||||
}
|
||||
|
||||
// QualifiedGroup is the group for this field.
|
||||
func (f *Field) QualifiedGroup() string {
|
||||
return f.Group()
|
||||
}
|
||||
|
||||
// Description is the description for this field.
|
||||
func (f *Field) Description() string {
|
||||
return f.description
|
||||
}
|
||||
|
||||
// Identifier is the identifier for this field.
|
||||
func (f *Field) Identifier() string {
|
||||
return f.identifier
|
||||
}
|
||||
|
||||
// IsType returns if this item is a type. It always returns false.
|
||||
func (f *Field) IsType() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Properties are the properties for this field.
|
||||
func (f *Field) Properties() map[string]Property {
|
||||
return f.properties
|
||||
}
|
35
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/field_test.go
generated
vendored
Normal file
35
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/field_test.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestField(t *testing.T) {
|
||||
props := make(map[string]Property)
|
||||
props["foo"] = NewLiteralField("name", "integer", "desc", "ref")
|
||||
|
||||
ty := NewField("id", "desc", "codebase", "group", "ver", "kind", props)
|
||||
|
||||
assert.Equal(t, "id", ty.Identifier())
|
||||
assert.Equal(t, "desc", ty.Description())
|
||||
assert.Equal(t, "codebase", ty.Codebase())
|
||||
assert.Equal(t, "group", ty.Group())
|
||||
assert.Equal(t, "group", ty.QualifiedGroup())
|
||||
assert.Equal(t, "ver", ty.Version())
|
||||
assert.Equal(t, "kind", ty.Kind())
|
||||
assert.False(t, ty.IsType())
|
||||
|
||||
assert.Len(t, ty.Properties(), 1)
|
||||
}
|
||||
|
||||
func TestField_no_group(t *testing.T) {
|
||||
props := make(map[string]Property)
|
||||
props["foo"] = NewLiteralField("name", "integer", "desc", "ref")
|
||||
|
||||
ty := NewField("id", "desc", "codebase", "", "ver", "kind", props)
|
||||
|
||||
assert.Equal(t, "core", ty.Group())
|
||||
assert.Equal(t, "core", ty.QualifiedGroup())
|
||||
}
|
64
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/group.go
generated
vendored
Normal file
64
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/group.go
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
)
|
||||
|
||||
// Group is group of definitions.
|
||||
type Group struct {
|
||||
versions map[string]*Version
|
||||
name string
|
||||
}
|
||||
|
||||
// NewGroup creates an instance of Group.
|
||||
func NewGroup(name string) *Group {
|
||||
return &Group{
|
||||
versions: make(map[string]*Version),
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// Name is the name of the group.
|
||||
func (g *Group) Name() string {
|
||||
return g.name
|
||||
}
|
||||
|
||||
// Versions returns the versions available for this group.
|
||||
func (g *Group) Versions() []Version {
|
||||
var names []string
|
||||
for name := range g.versions {
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
|
||||
var versions []Version
|
||||
for _, name := range names {
|
||||
versions = append(versions, *g.versions[name])
|
||||
}
|
||||
|
||||
return versions
|
||||
}
|
||||
|
||||
// AddResource adds a resource to a version.
|
||||
func (g *Group) AddResource(r Object) {
|
||||
name := r.Version()
|
||||
if name == "" {
|
||||
return
|
||||
}
|
||||
|
||||
v, ok := g.versions[name]
|
||||
if !ok {
|
||||
v = NewVersion(name, r.QualifiedGroup())
|
||||
g.versions[name] = v
|
||||
}
|
||||
|
||||
v.AddResource(r)
|
||||
}
|
||||
|
||||
// Node returns an ast node for this group.
|
||||
func (g *Group) Node() *nm.Object {
|
||||
return nm.NewObject()
|
||||
}
|
46
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/group_test.go
generated
vendored
Normal file
46
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/group_test.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGroup_Name(t *testing.T) {
|
||||
g := NewGroup("groupName")
|
||||
require.Equal(t, "groupName", g.Name())
|
||||
}
|
||||
|
||||
func TestGroup_Node(t *testing.T) {
|
||||
g := NewGroup("groupName")
|
||||
versions := nm.NewObject()
|
||||
|
||||
require.Equal(t, versions, g.Node())
|
||||
}
|
||||
|
||||
func TestGroup_Versions(t *testing.T) {
|
||||
g := NewGroup("groupName")
|
||||
g.versions = map[string]*Version{
|
||||
"v1": &Version{},
|
||||
"v2": &Version{},
|
||||
}
|
||||
|
||||
require.Len(t, g.Versions(), 2)
|
||||
}
|
||||
|
||||
func TestGroup_AddResource(t *testing.T) {
|
||||
c1 := Component{Group: "group2", Version: "v1", Kind: "kind"}
|
||||
o1 := NewType("alpha", "desc", "codebase", "group", c1, nil)
|
||||
|
||||
g := NewGroup("groupName")
|
||||
g.AddResource(&o1)
|
||||
|
||||
require.Len(t, g.Versions(), 1)
|
||||
|
||||
c2 := Component{Group: "group2", Version: "", Kind: "kind"}
|
||||
o2 := NewType("beta", "desc", "codebase", "group", c2, nil)
|
||||
g.AddResource(&o2)
|
||||
|
||||
require.Len(t, g.Versions(), 1)
|
||||
}
|
84
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/ksonnet.go
generated
vendored
Normal file
84
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/ksonnet.go
generated
vendored
Normal file
|
@ -0,0 +1,84 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec"
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Lib is a ksonnet lib.
|
||||
type Lib struct {
|
||||
K8s []byte
|
||||
Extensions []byte
|
||||
Version string
|
||||
}
|
||||
|
||||
// GenerateLib generates ksonnet lib.
|
||||
func GenerateLib(source string) (*Lib, error) {
|
||||
apiSpec, checksum, err := kubespec.Import(source)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "import Kubernetes spec")
|
||||
}
|
||||
|
||||
c, err := NewCatalog(apiSpec, CatalogOptChecksum(checksum))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "create ksonnet catalog")
|
||||
}
|
||||
|
||||
k8s, err := createK8s(c)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "create k8s.libsonnet")
|
||||
}
|
||||
|
||||
k, err := createK(c)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "create k.libsonnet")
|
||||
}
|
||||
|
||||
lib := &Lib{
|
||||
K8s: k8s,
|
||||
Extensions: k,
|
||||
Version: c.apiVersion.String(),
|
||||
}
|
||||
|
||||
return lib, nil
|
||||
}
|
||||
|
||||
func createK8s(c *Catalog) ([]byte, error) {
|
||||
doc, err := NewDocument(c)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "create document")
|
||||
}
|
||||
|
||||
node, err := doc.Node()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "build document node")
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
if err := printer.Fprint(&buf, node.Node()); err != nil {
|
||||
return nil, errors.Wrap(err, "print AST")
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func createK(c *Catalog) ([]byte, error) {
|
||||
e := NewExtension(c)
|
||||
|
||||
node, err := e.Node()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "build extension node")
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
if err := printer.Fprint(&buf, node.Node()); err != nil {
|
||||
return nil, errors.Wrap(err, "print AST")
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
94
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/object.go
generated
vendored
Normal file
94
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/object.go
generated
vendored
Normal file
|
@ -0,0 +1,94 @@
|
|||
package ksonnet
|
||||
|
||||
// Object is an object that can be turned into a node by APIObject.
|
||||
type Object interface {
|
||||
Kind() string
|
||||
Description() string
|
||||
IsType() bool
|
||||
Properties() map[string]Property
|
||||
Version() string
|
||||
Group() string
|
||||
Codebase() string
|
||||
QualifiedGroup() string
|
||||
Identifier() string
|
||||
}
|
||||
|
||||
// Property is a field in a resource
|
||||
type Property interface {
|
||||
Description() string
|
||||
Name() string
|
||||
Ref() string
|
||||
}
|
||||
|
||||
// LiteralField is a literal field. (e.g. string, number, int, array)
|
||||
type LiteralField struct {
|
||||
name string
|
||||
fieldType string
|
||||
description string
|
||||
ref string
|
||||
}
|
||||
|
||||
var _ Property = (*LiteralField)(nil)
|
||||
|
||||
// NewLiteralField creates an instance of LiteralField.
|
||||
func NewLiteralField(name, fieldType, description, ref string) *LiteralField {
|
||||
return &LiteralField{
|
||||
name: name,
|
||||
fieldType: fieldType,
|
||||
description: description,
|
||||
ref: ref,
|
||||
}
|
||||
}
|
||||
|
||||
// FieldType returns the field type of the LiteralField.
|
||||
func (f *LiteralField) FieldType() string {
|
||||
return f.fieldType
|
||||
}
|
||||
|
||||
// Name returns the name of the LiteralField.
|
||||
func (f *LiteralField) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Description returns the description of the LiteralField.
|
||||
func (f *LiteralField) Description() string {
|
||||
return f.description
|
||||
}
|
||||
|
||||
// Ref returns the ref of the LiteralField.
|
||||
func (f *LiteralField) Ref() string {
|
||||
return f.ref
|
||||
}
|
||||
|
||||
// ReferenceField is a reference field.
|
||||
type ReferenceField struct {
|
||||
name string
|
||||
description string
|
||||
ref string
|
||||
}
|
||||
|
||||
var _ Property = (*ReferenceField)(nil)
|
||||
|
||||
// NewReferenceField creates an instance of ReferenceField.
|
||||
func NewReferenceField(name, description, ref string) *ReferenceField {
|
||||
return &ReferenceField{
|
||||
name: name,
|
||||
description: description,
|
||||
ref: ref,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the ReferenceField.
|
||||
func (f *ReferenceField) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Description returns the description of the ReferenceField.
|
||||
func (f *ReferenceField) Description() string {
|
||||
return f.description
|
||||
}
|
||||
|
||||
// Ref returns the defintion this ReferenceField represents.
|
||||
func (f *ReferenceField) Ref() string {
|
||||
return f.ref
|
||||
}
|
24
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/object_test.go
generated
vendored
Normal file
24
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/object_test.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLiteralField(t *testing.T) {
|
||||
f := NewLiteralField("name", "string", "desc", "ref")
|
||||
|
||||
assert.Equal(t, "name", f.Name())
|
||||
assert.Equal(t, "string", f.FieldType())
|
||||
assert.Equal(t, "desc", f.Description())
|
||||
assert.Equal(t, "ref", f.Ref())
|
||||
}
|
||||
|
||||
func TestReferenceField(t *testing.T) {
|
||||
f := NewReferenceField("name", "desc", "ref")
|
||||
|
||||
assert.Equal(t, "name", f.Name())
|
||||
assert.Equal(t, "desc", f.Description())
|
||||
assert.Equal(t, "ref", f.Ref())
|
||||
}
|
75
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/paths.go
generated
vendored
Normal file
75
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/paths.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func parsePaths(apiSpec *spec.Swagger) (map[string]Component, error) {
|
||||
m := make(map[string]Component)
|
||||
|
||||
if apiSpec.Paths == nil {
|
||||
return nil, errors.New("api spec has zero paths")
|
||||
}
|
||||
paths := apiSpec.Paths.Paths
|
||||
for _, pathItem := range paths {
|
||||
verbs := []*spec.Operation{pathItem.Post, pathItem.Patch, pathItem.Put}
|
||||
for _, verb := range verbs {
|
||||
if verb == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var body spec.Parameter
|
||||
var hasBody bool
|
||||
for _, param := range verb.Parameters {
|
||||
if param.Name == "body" {
|
||||
body = param // shallow copy
|
||||
hasBody = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !hasBody {
|
||||
continue
|
||||
}
|
||||
|
||||
if body.Schema == nil {
|
||||
return nil, errors.Errorf("invalid body parameter - missing required field: schema")
|
||||
}
|
||||
ref := extractRef(*body.Schema)
|
||||
|
||||
component, exists, err := pathExtensionComponent(verb.Extensions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "extract component for %s", ref)
|
||||
}
|
||||
|
||||
if exists {
|
||||
m[ref] = component
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// pathExtensionComponent generates a component from a method tpe extension
|
||||
func pathExtensionComponent(extensions spec.Extensions) (Component, bool, error) {
|
||||
for x, v := range extensions {
|
||||
if x == extensionGroupVersionKind {
|
||||
gvk, ok := v.(map[string]interface{})
|
||||
if !ok {
|
||||
return Component{}, false, errors.New("gvk extension was invalid")
|
||||
}
|
||||
|
||||
component := Component{
|
||||
Group: gvk["group"].(string),
|
||||
Version: gvk["version"].(string),
|
||||
Kind: gvk["kind"].(string),
|
||||
}
|
||||
return component, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return Component{}, false, nil
|
||||
}
|
24
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/paths_test.go
generated
vendored
Normal file
24
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/paths_test.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_parsePaths(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
m, err := parsePaths(c.apiSpec)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, m)
|
||||
|
||||
expected := Component{
|
||||
Group: "rbac.authorization.k8s.io",
|
||||
Version: "v1alpha1",
|
||||
Kind: "ClusterRoleBinding",
|
||||
}
|
||||
|
||||
assert.Equal(t, expected, m["io.k8s.api.rbac.v1alpha1.ClusterRoleBinding"])
|
||||
}
|
95
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/properties.go
generated
vendored
Normal file
95
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/properties.go
generated
vendored
Normal file
|
@ -0,0 +1,95 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
recursiveRefs = []string{
|
||||
"io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps",
|
||||
}
|
||||
)
|
||||
|
||||
func extractProperties(c *Catalog, properties map[string]spec.Schema, required []string) (map[string]Property, error) {
|
||||
if c == nil {
|
||||
return nil, errors.New("catalog is nil")
|
||||
}
|
||||
|
||||
out := make(map[string]Property)
|
||||
|
||||
for name, schema := range properties {
|
||||
if isSkippedProperty(name, schema) {
|
||||
if !stringInSlice(name, required) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
ref := extractRef(schema)
|
||||
|
||||
if ref != "" && stringInSlice(ref, recursiveRefs) {
|
||||
out[name] = NewLiteralField(name, "object", schema.Description, ref)
|
||||
continue
|
||||
}
|
||||
|
||||
// literal
|
||||
if t := schema.Type; len(t) == 1 {
|
||||
out[name] = buildLiteralField(t[0], name, schema)
|
||||
continue
|
||||
}
|
||||
|
||||
ifr, err := c.isFormatRef(ref)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "check for format ref")
|
||||
}
|
||||
|
||||
if ifr {
|
||||
// don't have to check for existence here because isFormatRef does the same thing
|
||||
formatSchema := c.apiSpec.Definitions[ref]
|
||||
out[name] = buildLiteralField(fieldType(formatSchema), name, schema)
|
||||
continue
|
||||
}
|
||||
|
||||
// must be a mixin
|
||||
f := NewReferenceField(name, schema.Description, ref)
|
||||
out[name] = f
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func buildLiteralField(fieldType, name string, schema spec.Schema) *LiteralField {
|
||||
var itemRef string
|
||||
if schema.Items != nil && schema.Items.Schema != nil {
|
||||
itemRef = extractRef(*schema.Items.Schema)
|
||||
}
|
||||
|
||||
return NewLiteralField(name, fieldType, schema.Description, itemRef)
|
||||
}
|
||||
|
||||
func isSkippedProperty(name string, schema spec.Schema) bool {
|
||||
if stringInSlice(name, blockedPropertyNames) {
|
||||
return true
|
||||
}
|
||||
|
||||
if strings.Contains(strings.ToLower(schema.Description), "read-only") && name != "readOnly" {
|
||||
return true
|
||||
}
|
||||
|
||||
ref := extractRef(schema)
|
||||
if stringInSlice(ref, blockedReferences) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func fieldType(schema spec.Schema) string {
|
||||
if t := schema.Type; len(t) == 1 {
|
||||
return t[0]
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
182
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/properties_test.go
generated
vendored
Normal file
182
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/properties_test.go
generated
vendored
Normal file
|
@ -0,0 +1,182 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_extractProperties_nil_catalog(t *testing.T) {
|
||||
_, err := extractProperties(nil, nil, nil)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func Test_extractProperties_nil_properties(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
m, err := extractProperties(c, nil, []string{})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, m)
|
||||
}
|
||||
|
||||
func Test_extractProperties_literal(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
s, ok := c.apiSpec.Definitions["io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"]
|
||||
require.True(t, ok)
|
||||
|
||||
props, err := extractProperties(c, s.Properties, []string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
i, ok := props["clusterName"]
|
||||
require.True(t, ok)
|
||||
|
||||
prop, ok := i.(*LiteralField)
|
||||
require.True(t, ok)
|
||||
|
||||
assert.Equal(t, "string", prop.FieldType())
|
||||
assert.Equal(t, "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", prop.Description())
|
||||
assert.Equal(t, "", prop.Ref())
|
||||
assert.Equal(t, "clusterName", prop.Name())
|
||||
}
|
||||
|
||||
func Test_extractProperties_json_schema_props(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
s, ok := c.apiSpec.Definitions["io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceValidation"]
|
||||
require.True(t, ok)
|
||||
|
||||
props, err := extractProperties(c, s.Properties, s.Required)
|
||||
require.NoError(t, err)
|
||||
|
||||
i, ok := props["openAPIV3Schema"]
|
||||
require.True(t, ok)
|
||||
|
||||
prop, ok := i.(*LiteralField)
|
||||
require.True(t, ok)
|
||||
|
||||
assert.Equal(t, "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps", prop.Ref())
|
||||
}
|
||||
|
||||
func Test_extractProperties_kind_required(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
s, ok := c.apiSpec.Definitions["io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionNames"]
|
||||
require.True(t, ok)
|
||||
|
||||
props, err := extractProperties(c, s.Properties, s.Required)
|
||||
require.NoError(t, err)
|
||||
|
||||
i, ok := props["kind"]
|
||||
require.True(t, ok)
|
||||
|
||||
prop, ok := i.(*LiteralField)
|
||||
require.True(t, ok)
|
||||
|
||||
assert.Equal(t, "string", prop.FieldType())
|
||||
assert.Equal(t, "Kind is the serialized kind of the resource. It is normally CamelCase and singular.", prop.Description())
|
||||
assert.Equal(t, "", prop.Ref())
|
||||
assert.Equal(t, "kind", prop.Name())
|
||||
}
|
||||
|
||||
func Test_extractProperties_kind_not_required(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
s, ok := c.apiSpec.Definitions["io.k8s.api.apps.v1beta2.Deployment"]
|
||||
require.True(t, ok)
|
||||
|
||||
props, err := extractProperties(c, s.Properties, s.Required)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, ok = props["kind"]
|
||||
require.False(t, ok)
|
||||
}
|
||||
|
||||
func Test_extractProperties_type_ref(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
s, ok := c.apiSpec.Definitions["io.k8s.api.apps.v1beta2.RollingUpdateDeployment"]
|
||||
require.True(t, ok)
|
||||
|
||||
props, err := extractProperties(c, s.Properties, []string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
i, ok := props["maxSurge"]
|
||||
require.True(t, ok)
|
||||
|
||||
prop, ok := i.(*LiteralField)
|
||||
require.True(t, ok)
|
||||
|
||||
assert.Equal(t, "string", prop.FieldType())
|
||||
assert.Equal(t, "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", prop.Description())
|
||||
assert.Equal(t, "", prop.Ref())
|
||||
assert.Equal(t, "maxSurge", prop.Name())
|
||||
}
|
||||
|
||||
func Test_extractProperties_ref(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
s, ok := c.apiSpec.Definitions["io.k8s.api.apps.v1beta2.Deployment"]
|
||||
require.True(t, ok)
|
||||
|
||||
props, err := extractProperties(c, s.Properties, []string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
i, ok := props["metadata"]
|
||||
require.True(t, ok)
|
||||
|
||||
prop, ok := i.(*ReferenceField)
|
||||
require.True(t, ok)
|
||||
|
||||
assert.Equal(t, "Standard object metadata.", prop.Description())
|
||||
assert.Equal(t, "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", prop.Ref())
|
||||
assert.Equal(t, "metadata", prop.Name())
|
||||
}
|
||||
|
||||
func Test_extractProperties_invalid_format_ref(t *testing.T) {
|
||||
c := initCatalog(t, "invalid_ref.json")
|
||||
|
||||
s, ok := c.apiSpec.Definitions["io.k8s.api.apps.v1beta2.RollingUpdateDeployment"]
|
||||
require.True(t, ok)
|
||||
|
||||
_, err := extractProperties(c, s.Properties, []string{})
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func Test_fieldType(t *testing.T) {
|
||||
|
||||
var (
|
||||
s1 = spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: spec.StringOrArray{"string"},
|
||||
},
|
||||
}
|
||||
|
||||
s2 = spec.Schema{}
|
||||
)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
schema spec.Schema
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "with an item",
|
||||
schema: s1,
|
||||
expected: "string",
|
||||
},
|
||||
{
|
||||
name: "with no items",
|
||||
schema: s2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := fieldType(tc.schema)
|
||||
require.Equal(t, tc.expected, got)
|
||||
})
|
||||
}
|
||||
}
|
366
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/renderer.go
generated
vendored
Normal file
366
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/renderer.go
generated
vendored
Normal file
|
@ -0,0 +1,366 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// renderer is an item that can be rendered.
|
||||
type renderer interface {
|
||||
Render(parent *nm.Object) error
|
||||
}
|
||||
|
||||
type baseRenderer struct {
|
||||
name string
|
||||
description string
|
||||
parent string
|
||||
ref string
|
||||
}
|
||||
|
||||
func newBaseRenderer(field Property, parent string) baseRenderer {
|
||||
return baseRenderer{
|
||||
name: field.Name(),
|
||||
description: field.Description(),
|
||||
parent: parent,
|
||||
ref: field.Ref(),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *baseRenderer) setter() string {
|
||||
return fieldName(r.name, false)
|
||||
}
|
||||
|
||||
func (r *baseRenderer) mixin() string {
|
||||
return fieldName(r.name, true)
|
||||
}
|
||||
|
||||
// LiteralFieldRenderer renders a literal field.
|
||||
type LiteralFieldRenderer struct {
|
||||
lf *LiteralField
|
||||
parentName string
|
||||
}
|
||||
|
||||
// NewLiteralFieldRenderer creates an instance of LiteralField.
|
||||
func NewLiteralFieldRenderer(lf *LiteralField, parentName string) *LiteralFieldRenderer {
|
||||
return &LiteralFieldRenderer{
|
||||
lf: lf,
|
||||
parentName: parentName,
|
||||
}
|
||||
}
|
||||
|
||||
// Render renders the literal field in the container.
|
||||
func (r *LiteralFieldRenderer) Render(container *nm.Object) error {
|
||||
var rndr renderer
|
||||
|
||||
switch ft := r.lf.FieldType(); ft {
|
||||
case "array":
|
||||
rndr = NewArrayRenderer(r.lf, r.parentName)
|
||||
case "object":
|
||||
rndr = NewObjectRenderer(r.lf, r.parentName)
|
||||
case "string", "boolean", "integer", "number":
|
||||
rndr = NewItemRenderer(r.lf, r.parentName)
|
||||
default:
|
||||
return errors.Errorf("unknown literal field type %s", ft)
|
||||
}
|
||||
|
||||
return rndr.Render(container)
|
||||
}
|
||||
|
||||
// ReferenceRenderer renders a reference field.
|
||||
type ReferenceRenderer struct {
|
||||
baseRenderer
|
||||
rf *ReferenceField
|
||||
tl typeLookup
|
||||
}
|
||||
|
||||
// NewReferenceRenderer creates an instance of ReferenceRenderer.
|
||||
func NewReferenceRenderer(rf *ReferenceField, tl typeLookup, parent string) *ReferenceRenderer {
|
||||
return &ReferenceRenderer{
|
||||
baseRenderer: newBaseRenderer(rf, parent),
|
||||
tl: tl,
|
||||
rf: rf,
|
||||
}
|
||||
}
|
||||
|
||||
// Render renders the reference in the container.
|
||||
func (r *ReferenceRenderer) Render(container *nm.Object) error {
|
||||
name := r.rf.Name()
|
||||
desc := r.rf.Description()
|
||||
|
||||
mo := nm.NewObject()
|
||||
mixinPreamble(mo, r.parent, name)
|
||||
|
||||
ref := r.rf.Ref()
|
||||
|
||||
ty, err := r.tl.Field(ref)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "fetch type %s", ref)
|
||||
}
|
||||
|
||||
renderFields(r.tl, mo, name, ty.Properties())
|
||||
|
||||
formattedName := FormatKind(r.rf.Name())
|
||||
|
||||
container.Set(nm.NewKey(formattedName, nm.KeyOptComment(desc)), mo)
|
||||
_ = genTypeAliasEntry(container, name, ref)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ObjectRenderer renders an object field.
|
||||
type ObjectRenderer struct {
|
||||
baseRenderer
|
||||
}
|
||||
|
||||
// NewObjectRenderer creates an instance of ObjectRenderer
|
||||
func NewObjectRenderer(field Property, parent string) *ObjectRenderer {
|
||||
return &ObjectRenderer{
|
||||
baseRenderer: newBaseRenderer(field, parent),
|
||||
}
|
||||
}
|
||||
|
||||
// Render renders the object field in the container.
|
||||
func (r *ObjectRenderer) Render(container *nm.Object) error {
|
||||
wrapper := mixinName(r.parent)
|
||||
setterFn := createObjectWithField(r.name, wrapper, false)
|
||||
setProperty(container, r.setter(), r.description, []string{FormatKind(r.name)}, setterFn)
|
||||
|
||||
mixinFn := createObjectWithField(r.name, wrapper, true)
|
||||
setProperty(container, r.mixin(), r.description, []string{FormatKind(r.name)}, mixinFn)
|
||||
|
||||
_ = genTypeAliasEntry(container, r.name, r.ref)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ItemRenderer renders items.
|
||||
type ItemRenderer struct {
|
||||
baseRenderer
|
||||
}
|
||||
|
||||
var _ renderer = (*ItemRenderer)(nil)
|
||||
|
||||
// NewItemRenderer creates an instance of ItemRenderer.
|
||||
func NewItemRenderer(f Property, parent string) *ItemRenderer {
|
||||
return &ItemRenderer{baseRenderer: newBaseRenderer(f, parent)}
|
||||
}
|
||||
|
||||
// Render renders an item in its parent object.
|
||||
func (r *ItemRenderer) Render(parent *nm.Object) error {
|
||||
noder := createObjectWithField(r.name, mixinName(r.parent), false)
|
||||
setProperty(parent, r.setter(), r.description, []string{FormatKind(r.name)}, noder)
|
||||
|
||||
_ = genTypeAliasEntry(parent, r.name, r.ref)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ArrayRenderer renders arrays.
|
||||
type ArrayRenderer struct {
|
||||
baseRenderer
|
||||
}
|
||||
|
||||
// NewArrayRenderer creates an instance of ArrayRenderer.
|
||||
func NewArrayRenderer(f Property, parent string) *ArrayRenderer {
|
||||
return &ArrayRenderer{baseRenderer: newBaseRenderer(f, parent)}
|
||||
}
|
||||
|
||||
// Render renders an item in its parent object.
|
||||
func (r *ArrayRenderer) Render(container *nm.Object) error {
|
||||
wrapper := mixinName(r.parent)
|
||||
setterFn := convertToArray(r.name, wrapper, false)
|
||||
setProperty(container, r.setter(), r.description, []string{FormatKind(r.name)}, setterFn)
|
||||
|
||||
mixinFn := convertToArray(r.name, wrapper, true)
|
||||
setProperty(container, r.mixin(), r.description, []string{FormatKind(r.name)}, mixinFn)
|
||||
|
||||
_ = genTypeAliasEntry(container, r.name, r.ref)
|
||||
return nil
|
||||
}
|
||||
|
||||
func convertToArray(varName, parent string, mixin bool) nm.Noder {
|
||||
apply := nm.NewApply(
|
||||
nm.NewCall("std.type"),
|
||||
[]nm.Noder{nm.NewVar(FormatKind(varName))},
|
||||
nil)
|
||||
|
||||
test := nm.NewBinary(apply, nm.NewStringDouble("array"), nm.BopEqual)
|
||||
|
||||
var trueBranch nm.Noder
|
||||
var falseBranch nm.Noder
|
||||
|
||||
trueO := nm.OnelineObject()
|
||||
trueO.Set(
|
||||
nm.InheritedKey(varName, nm.KeyOptMixin(mixin)),
|
||||
nm.NewVar(FormatKind(varName)))
|
||||
|
||||
falseO := nm.OnelineObject()
|
||||
falseO.Set(
|
||||
nm.InheritedKey(varName, nm.KeyOptMixin(mixin)),
|
||||
nm.NewArray([]nm.Noder{nm.NewVar(FormatKind(varName))}))
|
||||
|
||||
if parent == "" {
|
||||
trueBranch = trueO
|
||||
falseBranch = falseO
|
||||
} else {
|
||||
trueBranch = nm.NewApply(nm.NewCall(parent), []nm.Noder{trueO}, nil)
|
||||
falseBranch = nm.NewApply(nm.NewCall(parent), []nm.Noder{falseO}, nil)
|
||||
}
|
||||
|
||||
return nm.NewConditional(test, trueBranch, falseBranch)
|
||||
}
|
||||
|
||||
// createObjectWithField creates an object with a field. Creates {field: field} or {field+: field}
|
||||
// if mixin. If it has a parent, it create __parentNameMixin({field: field}).
|
||||
func createObjectWithField(name, parentName string, mixin bool) nm.Noder {
|
||||
var noder nm.Noder
|
||||
io := nm.OnelineObject()
|
||||
io.Set(nm.InheritedKey(name, nm.KeyOptMixin(mixin)), nm.NewVar(FormatKind(name)))
|
||||
|
||||
if parentName == "" {
|
||||
noder = io
|
||||
} else {
|
||||
noder = nm.NewApply(nm.NewCall(parentName), []nm.Noder{io}, nil)
|
||||
}
|
||||
|
||||
return noder
|
||||
}
|
||||
|
||||
func setProperty(o *nm.Object, fnName, desc string, args []string, node nm.Noder) {
|
||||
node = nm.NewBinary(&nm.Self{}, node, nm.BopPlus)
|
||||
key := nm.FunctionKey(fnName, args, nm.KeyOptComment(desc))
|
||||
o.Set(key, node)
|
||||
}
|
||||
|
||||
func mixinPreamble(o *nm.Object, parent, name string) error {
|
||||
if o == nil {
|
||||
return errors.New("parent object is nil")
|
||||
}
|
||||
name = FormatKind(name)
|
||||
|
||||
formattedName := mixinName(name)
|
||||
|
||||
var noder nm.Noder
|
||||
|
||||
io := nm.OnelineObject()
|
||||
io.Set(nm.InheritedKey(name, nm.KeyOptMixin(true)), nm.NewVar(name))
|
||||
|
||||
if parent == "" {
|
||||
noder = io
|
||||
} else {
|
||||
noder = nm.ApplyCall(mixinName(parent), io)
|
||||
}
|
||||
|
||||
o.Set(nm.LocalKey(formattedName, nm.KeyOptParams([]string{name})), noder)
|
||||
|
||||
miFn := nm.NewCall(formattedName)
|
||||
o.Set(nm.FunctionKey("mixinInstance", []string{name}), nm.NewApply(miFn, []nm.Noder{nm.NewVar(name)}, nil))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func genTypeAliasEntry(container *nm.Object, name, refName string) error {
|
||||
if refName == "" {
|
||||
return errors.New("ref name is blank")
|
||||
}
|
||||
|
||||
rd, err := ParseDescription(refName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "parse ref name from %q and %q", name, refName)
|
||||
}
|
||||
|
||||
if rd.Group == "" {
|
||||
rd.Group = "core"
|
||||
}
|
||||
|
||||
if rd.Version == "" {
|
||||
return errors.Errorf("there is no version in the ref name for %q and %q",
|
||||
name, refName)
|
||||
}
|
||||
|
||||
kind := FormatKind(rd.Kind)
|
||||
path := []string{"hidden", rd.Group, rd.Version, kind}
|
||||
location := strings.Join(path, ".")
|
||||
|
||||
typeAliasName := fmt.Sprintf("%sType", name)
|
||||
|
||||
c := nm.NewCall(location)
|
||||
|
||||
container.Set(nm.NewKey(typeAliasName), c)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Generates a field name.
|
||||
func fieldName(name string, isMixin bool) string {
|
||||
var out string
|
||||
|
||||
name = FormatKind(name)
|
||||
|
||||
out = fmt.Sprintf("with%s", strings.Title(name))
|
||||
if isMixin {
|
||||
return fmt.Sprintf("%s%s", out, "Mixin")
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func mixinName(name string) string {
|
||||
if name == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
name = FormatKind(name)
|
||||
|
||||
return fmt.Sprintf("__%sMixin", name)
|
||||
}
|
||||
|
||||
// typeLookup can look up types by id.
|
||||
type typeLookup interface {
|
||||
Field(id string) (*Field, error)
|
||||
}
|
||||
|
||||
type renderFieldsFn func(tl typeLookup, parent *nm.Object, parentName string, props map[string]Property) error
|
||||
|
||||
// renderFields renders fields from a property map.
|
||||
func renderFields(tl typeLookup, parent *nm.Object, parentName string, props map[string]Property) error {
|
||||
container := parent
|
||||
if parentName == "" {
|
||||
container = nm.NewObject()
|
||||
}
|
||||
|
||||
var names []string
|
||||
for name := range props {
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
|
||||
for _, name := range names {
|
||||
field := props[name]
|
||||
|
||||
switch t := field.(type) {
|
||||
case *LiteralField:
|
||||
r := NewLiteralFieldRenderer(t, parentName)
|
||||
if err := r.Render(parent); err != nil {
|
||||
return errors.Wrap(err, "render literal field")
|
||||
}
|
||||
case *ReferenceField:
|
||||
r := NewReferenceRenderer(t, tl, parentName)
|
||||
if err := r.Render(container); err != nil {
|
||||
return errors.Wrap(err, "render reference field")
|
||||
}
|
||||
default:
|
||||
return errors.Errorf("unknown field type %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
if parentName == "" {
|
||||
parent.Set(nm.NewKey("mixin"), container)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
429
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/renderer_test.go
generated
vendored
Normal file
429
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/renderer_test.go
generated
vendored
Normal file
|
@ -0,0 +1,429 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLiteralFieldRenderer(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
fieldType string
|
||||
hasMixin bool
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
name: "item",
|
||||
fieldType: "string",
|
||||
},
|
||||
{
|
||||
name: "array",
|
||||
fieldType: "array",
|
||||
hasMixin: true,
|
||||
},
|
||||
{
|
||||
name: "object",
|
||||
fieldType: "object",
|
||||
hasMixin: true,
|
||||
},
|
||||
{
|
||||
name: "unknown field type",
|
||||
fieldType: "unknown",
|
||||
isErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
f := NewLiteralField("name", tc.fieldType, "desc", "")
|
||||
r := NewLiteralFieldRenderer(f, "")
|
||||
|
||||
o := nm.NewObject()
|
||||
err := r.Render(o)
|
||||
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, o.Get(fieldName("name", false)))
|
||||
if tc.hasMixin {
|
||||
assert.NotNil(t, o.Get(fieldName("name", true)))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReferenceRenderer(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
ref string
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
name: "with a reference",
|
||||
ref: "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta",
|
||||
},
|
||||
{
|
||||
name: "without a resource",
|
||||
isErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
|
||||
f := NewReferenceField("name", "desc", tc.ref)
|
||||
|
||||
r := NewReferenceRenderer(f, c, "")
|
||||
|
||||
o := nm.NewObject()
|
||||
err := r.Render(o)
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, o.Get("name"))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_setProperty(t *testing.T) {
|
||||
o := nm.NewObject()
|
||||
setProperty(o, "fnName", "desc", []string{"arg1"}, nm.NewObject())
|
||||
|
||||
expected := nm.NewObject()
|
||||
node := nm.NewBinary(&nm.Self{}, nm.NewObject(), nm.BopPlus)
|
||||
expected.Set(
|
||||
nm.FunctionKey("fnName", []string{"arg1"}, nm.KeyOptComment("desc")),
|
||||
node)
|
||||
|
||||
require.Equal(t, expected, o)
|
||||
|
||||
}
|
||||
|
||||
func Test_createObjectWithField(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
parent string
|
||||
}{
|
||||
{
|
||||
name: "without parent",
|
||||
parent: "",
|
||||
},
|
||||
{
|
||||
name: "with parent",
|
||||
parent: "parent",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := createObjectWithField("varName", tc.parent, false)
|
||||
|
||||
io := nm.OnelineObject()
|
||||
io.Set(nm.InheritedKey("varName"), nm.NewVar("varName"))
|
||||
|
||||
var expected nm.Noder
|
||||
if tc.parent == "" {
|
||||
expected = io
|
||||
} else {
|
||||
expected = nm.NewApply(nm.NewCall(tc.parent), []nm.Noder{io}, nil)
|
||||
}
|
||||
|
||||
require.Equal(t, expected, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_convertToArray(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
mixin bool
|
||||
parent string
|
||||
}{
|
||||
{
|
||||
name: "no mixin",
|
||||
mixin: false,
|
||||
},
|
||||
{
|
||||
name: "mixin",
|
||||
mixin: true,
|
||||
},
|
||||
{
|
||||
name: "parent",
|
||||
parent: "parent",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
call := nm.NewCall("std.type")
|
||||
args := nm.NewVar("varName")
|
||||
apply := nm.NewApply(call, []nm.Noder{args}, nil)
|
||||
|
||||
key := nm.InheritedKey("varName", nm.KeyOptMixin(tc.mixin))
|
||||
|
||||
var trueBranch, falseBranch nm.Noder
|
||||
|
||||
bo := nm.NewBinary(apply, nm.NewStringDouble("array"), nm.BopEqual)
|
||||
trueObject := nm.OnelineObject()
|
||||
trueObject.Set(key, nm.NewVar("varName"))
|
||||
|
||||
falseObject := nm.OnelineObject()
|
||||
falseObject.Set(key, nm.NewArray([]nm.Noder{nm.NewVar("varName")}))
|
||||
|
||||
if tc.parent == "" {
|
||||
trueBranch = trueObject
|
||||
falseBranch = falseObject
|
||||
} else {
|
||||
trueBranch = nm.NewApply(nm.NewCall(tc.parent), []nm.Noder{trueObject}, nil)
|
||||
falseBranch = nm.NewApply(nm.NewCall(tc.parent), []nm.Noder{falseObject}, nil)
|
||||
}
|
||||
|
||||
expected := nm.NewConditional(bo, trueBranch, falseBranch)
|
||||
|
||||
got := convertToArray("varName", tc.parent, tc.mixin)
|
||||
|
||||
require.Equal(t, expected, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_genTypeAlias(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
propName string
|
||||
ref string
|
||||
keyName string
|
||||
alias string
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
name: "with a ref",
|
||||
propName: "prop",
|
||||
ref: "io.k8s.api.group.v1.Prop",
|
||||
keyName: "propType",
|
||||
alias: "hidden.group.v1.prop",
|
||||
},
|
||||
{
|
||||
name: "with no ref",
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
name: "with an un-parsable ref",
|
||||
ref: "none",
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
name: "with an item ref",
|
||||
propName: "prop",
|
||||
ref: "io.k8s.api.group.v1.Prop",
|
||||
keyName: "propType",
|
||||
alias: "hidden.group.v1.prop",
|
||||
},
|
||||
{
|
||||
name: "without a group",
|
||||
propName: "prop",
|
||||
ref: "io.k8s.codebase.pkg.api.version.kind",
|
||||
keyName: "propType",
|
||||
alias: "hidden.core.version.kind",
|
||||
},
|
||||
{
|
||||
name: "without a version",
|
||||
propName: "prop",
|
||||
ref: "io.k8s.codebase.pkg.runtime.kind",
|
||||
isErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
o := nm.NewObject()
|
||||
err := genTypeAliasEntry(o, tc.propName, tc.ref)
|
||||
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
|
||||
n := o.Get(tc.keyName)
|
||||
assert.NotNil(t, n)
|
||||
|
||||
expectedCall := nm.NewCall(tc.alias)
|
||||
assert.Equal(t, expectedCall, n)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_mixinPreamble(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
container *nm.Object
|
||||
parentName string
|
||||
mixinName string
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
name: "with an empty parent container",
|
||||
mixinName: "name",
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
name: "without a parent name",
|
||||
container: nm.NewObject(),
|
||||
mixinName: "name",
|
||||
},
|
||||
{
|
||||
name: "with a parent name",
|
||||
container: nm.NewObject(),
|
||||
parentName: "parent",
|
||||
mixinName: "name",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := mixinPreamble(tc.container, tc.parentName, tc.mixinName)
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, tc.container.Get("mixinInstance"))
|
||||
|
||||
n := tc.container.Get(mixinName(tc.mixinName))
|
||||
require.NotNil(t, n)
|
||||
|
||||
if tc.parentName == "" {
|
||||
require.IsType(t, &nm.Object{}, n)
|
||||
} else {
|
||||
require.IsType(t, &nm.Apply{}, n)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func Test_fieldName(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
in string
|
||||
isMixin bool
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "is mixin",
|
||||
in: "name",
|
||||
isMixin: true,
|
||||
expected: "withNameMixin",
|
||||
},
|
||||
{
|
||||
name: "is not mixin",
|
||||
in: "name",
|
||||
isMixin: false,
|
||||
expected: "withName",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := fieldName(tc.in, tc.isMixin)
|
||||
require.Equal(t, tc.expected, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_mixinName(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
in string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
},
|
||||
{
|
||||
name: "valid",
|
||||
in: "name",
|
||||
expected: "__nameMixin",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := mixinName(tc.in)
|
||||
require.Equal(t, tc.expected, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_renderFields(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
o := nm.NewObject()
|
||||
props := map[string]Property{
|
||||
"name": NewLiteralField("name", "string", "desc", ""),
|
||||
"aref": NewReferenceField("aref", "desc", "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"),
|
||||
}
|
||||
|
||||
err := renderFields(c, o, "", props)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = printer.Fprint(ioutil.Discard, o.Node())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, o.Get(fieldName("name", false)))
|
||||
mo, ok := o.Get("mixin").(*nm.Object)
|
||||
require.True(t, ok)
|
||||
require.NotNil(t, mo.Get("aref"))
|
||||
}
|
||||
|
||||
type customField struct{}
|
||||
|
||||
func (cf *customField) Description() string { return "desc" }
|
||||
func (cf *customField) Name() string { return "name" }
|
||||
func (cf *customField) Ref() string { return "" }
|
||||
|
||||
func Test_renderFields_unknown_type(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
o := nm.NewObject()
|
||||
props := map[string]Property{
|
||||
"name": &customField{},
|
||||
}
|
||||
|
||||
err := renderFields(c, o, "", props)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func Test_renderFields_literal_field_error(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
o := nm.NewObject()
|
||||
props := map[string]Property{
|
||||
"name": NewLiteralField("name", "unknown", "desc", ""),
|
||||
}
|
||||
|
||||
err := renderFields(c, o, "", props)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func Test_renderFields_reference_field_error(t *testing.T) {
|
||||
c := initCatalog(t, "swagger-1.8.json")
|
||||
o := nm.NewObject()
|
||||
props := map[string]Property{
|
||||
"aref": NewReferenceField("aref", "desc", "unknown-id"),
|
||||
}
|
||||
|
||||
err := renderFields(c, o, "", props)
|
||||
require.Error(t, err)
|
||||
}
|
103
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/strings.go
generated
vendored
Normal file
103
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/strings.go
generated
vendored
Normal file
|
@ -0,0 +1,103 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var (
|
||||
jsonnetKeywords = []string{"assert", "else", "error", "false", "for", "function", "if",
|
||||
"import", "importstr", "in", "null", "tailstrict", "then", "self", "super",
|
||||
"true"}
|
||||
)
|
||||
|
||||
// camelCase converts a string to camel case.
|
||||
func camelCase(in string) string {
|
||||
out := ""
|
||||
|
||||
for i, r := range in {
|
||||
if i == 0 {
|
||||
out += strings.ToLower(string(r))
|
||||
continue
|
||||
}
|
||||
|
||||
out += string(r)
|
||||
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// stringInSlice returns true if the string is in the slice.
|
||||
func stringInSlice(a string, list []string) bool {
|
||||
for _, b := range list {
|
||||
if b == a {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// capitalizer adjusts the case of terms found in a string.
|
||||
func toLower(b byte) byte {
|
||||
return byte(unicode.ToLower(rune(b)))
|
||||
}
|
||||
|
||||
func isUpper(b byte) bool {
|
||||
return unicode.IsUpper(rune(b))
|
||||
}
|
||||
|
||||
// capitalize adjusts the case of terms found in a string. It will convert `HTTPHeader` into
|
||||
// `HttpHeader`.
|
||||
func capitalize(in string) string {
|
||||
l := len(in) - 1
|
||||
|
||||
if l == 0 {
|
||||
// nothing to do when there is a one character strings
|
||||
return in
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
b.WriteByte(in[0])
|
||||
|
||||
for i := 1; i <= l; i++ {
|
||||
if isUpper(in[i-1]) {
|
||||
if i < l {
|
||||
if isUpper(in[i+1]) || (isUpper(in[i]) && i+1 == l) {
|
||||
b.WriteByte(toLower(in[i]))
|
||||
} else {
|
||||
b.WriteByte(in[i])
|
||||
}
|
||||
} else if i == l && isUpper(in[i]) {
|
||||
b.WriteByte(toLower(in[i]))
|
||||
} else {
|
||||
b.WriteByte(in[i])
|
||||
}
|
||||
} else {
|
||||
b.WriteByte(in[i])
|
||||
}
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// FormatKind formats a string in kind format. i.e camel case with jsonnet keywords massaged.
|
||||
func FormatKind(s string) string {
|
||||
if strings.ToLower(s) == "local" {
|
||||
return "localStorage"
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "$") {
|
||||
s = "dollar" + strings.Title(strings.TrimPrefix(s, "$"))
|
||||
return s
|
||||
}
|
||||
s = capitalize(s)
|
||||
s = camelCase(s)
|
||||
|
||||
if stringInSlice(s, jsonnetKeywords) {
|
||||
s = s + "Param"
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
167
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/strings_test.go
generated
vendored
Normal file
167
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/strings_test.go
generated
vendored
Normal file
|
@ -0,0 +1,167 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_camelCase(t *testing.T) {
|
||||
cases := []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{
|
||||
in: "foo",
|
||||
out: "foo",
|
||||
},
|
||||
{
|
||||
in: "Foo",
|
||||
out: "foo",
|
||||
},
|
||||
{
|
||||
in: "PascalCase",
|
||||
out: "pascalCase",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.in, func(t *testing.T) {
|
||||
out := camelCase(tc.in)
|
||||
require.Equal(t, tc.out, out)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_stringInSlice(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
s string
|
||||
sl []string
|
||||
found bool
|
||||
}{
|
||||
{
|
||||
name: "item present",
|
||||
s: "a",
|
||||
sl: []string{"a", "b", "c"},
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "item not present",
|
||||
s: "d",
|
||||
sl: []string{"a", "b", "c"},
|
||||
found: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
require.Equal(t, tc.found, stringInSlice(tc.s, tc.sl))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_capitalizer_capitalize(t *testing.T) {
|
||||
tests := []struct {
|
||||
in string
|
||||
want string
|
||||
}{
|
||||
{in: "hostIPC", want: "hostIpc"},
|
||||
{in: "hostPID", want: "hostPid"},
|
||||
{in: "targetCPUUtilizationPercentage", want: "targetCpuUtilizationPercentage"},
|
||||
{in: "externalID", want: "externalId"},
|
||||
{in: "podCIDR", want: "podCidr"},
|
||||
{in: "providerID", want: "providerId"},
|
||||
{in: "bootID", want: "bootId"},
|
||||
{in: "machineID", want: "machineId"},
|
||||
{in: "systemUUID", want: "systemUuid"},
|
||||
{in: "volumeID", want: "volumeId"},
|
||||
{in: "diskURI", want: "diskUri"},
|
||||
{in: "targetWWNs", want: "targetWwns"},
|
||||
{in: "datasetUUID", want: "datasetUuid"},
|
||||
{in: "pdID", want: "pdId"},
|
||||
{in: "scaleIO", want: "scaleIo"},
|
||||
{in: "podIP", want: "podIp"},
|
||||
{in: "hostIP", want: "hostIp"},
|
||||
{in: "clusterIP", want: "clusterIp"},
|
||||
{in: "externalIPs", want: "externalIps"},
|
||||
{in: "loadBalancerIP", want: "loadBalancerIp"},
|
||||
{in: "containerID", want: "containerId"},
|
||||
{in: "imageID", want: "imageId"},
|
||||
{in: "serverAddressByClientCIDRs", want: "serverAddressByClientCidrs"},
|
||||
{in: "clientCIDR", want: "clientCidr"},
|
||||
{in: "nonResourceURLs", want: "nonResourceUrls"},
|
||||
{in: "currentCPUUtilizationPercentage", want: "currentCpuUtilizationPercentage"},
|
||||
{in: "downwardAPI", want: "downwardApi"},
|
||||
{in: "AWSElasticBlockStoreVolumeSource", want: "AwsElasticBlockStoreVolumeSource"},
|
||||
{in: "CephFSVolumeSource", want: "CephFsVolumeSource"},
|
||||
{in: "DownwardAPIProjection", want: "DownwardApiProjection"},
|
||||
{in: "DownwardAPIVolumeFile", want: "DownwardApiVolumeFile"},
|
||||
{in: "DownwardAPIVolumeSource", want: "DownwardApiVolumeSource"},
|
||||
{in: "FCVolumeSource", want: "FcVolumeSource"},
|
||||
{in: "GCEPersistentDiskVolumeSource", want: "GcePersistentDiskVolumeSource"},
|
||||
{in: "HTTPGetAction", want: "HttpGetAction"},
|
||||
{in: "HTTPHeader", want: "HttpHeader"},
|
||||
{in: "ISCSIVolumeSource", want: "IscsiVolumeSource"},
|
||||
{in: "NFSVolumeSource", want: "NfsVolumeSource"},
|
||||
{in: "RBDVolumeSource", want: "RbdVolumeSource"},
|
||||
{in: "SELinuxOptions", want: "SeLinuxOptions"},
|
||||
{in: "ScaleIOVolumeSource", want: "ScaleIoVolumeSource"},
|
||||
{in: "TCPSocketAction", want: "TcpSocketAction"},
|
||||
{in: "APIVersion", want: "ApiVersion"},
|
||||
{in: "FSGroupStrategyOptions", want: "FsGroupStrategyOptions"},
|
||||
{in: "HTTPIngressPath", want: "HttpIngressPath"},
|
||||
{in: "HTTPIngressRuleValue", want: "HttpIngressRuleValue"},
|
||||
{in: "IDRange", want: "IdRange"},
|
||||
{in: "IngressTLS", want: "IngressTls"},
|
||||
{in: "SELinuxStrategyOptions", want: "SeLinuxStrategyOptions"},
|
||||
{in: "APIGroup", want: "ApiGroup"},
|
||||
{in: "APIGroupList", want: "ApiGroupList"},
|
||||
{in: "APIResource", want: "ApiResource"},
|
||||
{in: "APIResourceList", want: "ApiResourceList"},
|
||||
{in: "APIVersions", want: "ApiVersions"},
|
||||
{in: "ServerAddressByClientCIDR", want: "ServerAddressByClientCidr"},
|
||||
{in: "a", want: "a"},
|
||||
{in: "A", want: "A"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.in, func(t *testing.T) {
|
||||
require.Equal(t, tt.want, capitalize(tt.in), "c.capitalize(%s)", tt.in)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_FormatKind(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "local",
|
||||
expected: "localStorage",
|
||||
},
|
||||
{
|
||||
name: "error",
|
||||
expected: "errorParam",
|
||||
},
|
||||
{
|
||||
name: "foo",
|
||||
expected: "foo",
|
||||
},
|
||||
{
|
||||
name: "CIDRType",
|
||||
expected: "cidrType",
|
||||
},
|
||||
{
|
||||
name: "$ref",
|
||||
expected: "dollarRef",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := FormatKind(tc.name)
|
||||
require.Equal(t, tc.expected, got)
|
||||
})
|
||||
}
|
||||
}
|
36
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/component.json
generated
vendored
Normal file
36
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/component.json
generated
vendored
Normal file
|
@ -0,0 +1,36 @@
|
|||
{
|
||||
"apiVersion": "v1",
|
||||
"items": [
|
||||
{
|
||||
"apiVersion": "apps/v1beta2",
|
||||
"kind": "Deployment",
|
||||
"metadata": {
|
||||
"name": "appName"
|
||||
},
|
||||
"spec": {
|
||||
"replicas": 2,
|
||||
"template": {
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "customName"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [
|
||||
{
|
||||
"image": "nginx:latest",
|
||||
"name": "appName",
|
||||
"ports": [
|
||||
{
|
||||
"containerPort": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"kind": "List"
|
||||
}
|
66
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/component.libsonnet
generated
vendored
Normal file
66
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/component.libsonnet
generated
vendored
Normal file
|
@ -0,0 +1,66 @@
|
|||
local k = import 'k.libsonnet';
|
||||
|
||||
local params = {
|
||||
version: 'v1beta2',
|
||||
name: 'appName',
|
||||
replicas: 3,
|
||||
containerPort: 80,
|
||||
image: 'nginx:latest',
|
||||
labels: { app: 'customName' },
|
||||
};
|
||||
|
||||
// defining the deployment version as a variable means you potentially have the ability to
|
||||
// set versions in params. It also means a single prototype can support multiple versions of a
|
||||
// resource.
|
||||
local deploymentVersion = params.version;
|
||||
|
||||
// container creates a container object
|
||||
local container = function(version, name, image, containerPort)
|
||||
// create a local variable with our resource
|
||||
local deployment = k.apps[deploymentVersion].deployment;
|
||||
|
||||
local containersType = deployment.mixin.spec.template.spec.containersType;
|
||||
local portsType = containersType.portsType;
|
||||
|
||||
local port = portsType.withContainerPort(containerPort);
|
||||
|
||||
containersType
|
||||
.withName(name)
|
||||
.withImage(image)
|
||||
.withPorts(port);
|
||||
|
||||
|
||||
// createDeployment is our function for creating a deployment
|
||||
local createDeployment = function(version, name, containers, podLabels={}, replicas=1)
|
||||
// create a local variable with our resource
|
||||
local deployment = k.apps[version].deployment;
|
||||
|
||||
local labels = { app: name } + podLabels;
|
||||
local metadata = deployment.mixin.metadata.withName(name);
|
||||
local spec = deployment.mixin.spec.withReplicas(replicas);
|
||||
local templateSpec = spec.template.spec.withContainers(containers);
|
||||
local templateMetadata = spec.template.metadata.withLabels(labels);
|
||||
|
||||
deployment
|
||||
.new()
|
||||
+ metadata
|
||||
+ spec
|
||||
+ templateSpec
|
||||
+ templateMetadata;
|
||||
|
||||
|
||||
local containers = [
|
||||
container(deploymentVersion, params.name, params.image, params.containerPort),
|
||||
];
|
||||
|
||||
// The createDeployment function allows authors to generate the objects they would like rather
|
||||
// than being confined to what is generated in ksonnet-lib.
|
||||
local appDeployment = createDeployment(
|
||||
deploymentVersion,
|
||||
params.name,
|
||||
containers,
|
||||
podLabels=params.labels,
|
||||
replicas=2
|
||||
);
|
||||
|
||||
k.core.v1.list.new([appDeployment])
|
3
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/constructor.libsonnet
generated
vendored
Normal file
3
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/constructor.libsonnet
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
new(name='', nestedName='', nestedItem='', str='val', obj={ key: 'val' }, array=['val'], other='', foo=''):: apiVersion + kind + self.withArray(array).withName(name).withObj(obj).withStr(str) + self.foo.bar.baz.withItem(nestedItem).withName(nestedName) + self.last.path.withFoo(foo) + self.other.withArray(other),
|
||||
}
|
111
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/generated_k.libsonnet
generated
vendored
Normal file
111
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/generated_k.libsonnet
generated
vendored
Normal file
|
@ -0,0 +1,111 @@
|
|||
local k8s = import 'k8s.libsonnet';
|
||||
local fn = {
|
||||
mapContainers(f):: {
|
||||
local podContainers = super.spec.template.spec.containers,
|
||||
spec+: {
|
||||
template+: {
|
||||
spec+: {
|
||||
containers: std.map(f, podContainers),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mapContainersWithName(names, f)::
|
||||
local nameSet = if std.type(names) == 'array' then std.set(names) else std.set([names]);
|
||||
local inNameSet(name) = std.length(std.setInter(nameSet, std.set([name]))) > 0;
|
||||
|
||||
self.mapContainers(function(c) if std.objectHas(c, 'name') && inNameSet(c.name) then f(c) else c),
|
||||
};
|
||||
|
||||
k8s {
|
||||
apps:: k8s.apps {
|
||||
v1beta1:: k8s.apps.v1beta1 {
|
||||
deployment:: k8s.apps.v1beta1.deployment {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
statefulSet:: k8s.apps.v1beta1.statefulSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
v1beta2:: k8s.apps.v1beta2 {
|
||||
daemonSet:: k8s.apps.v1beta2.daemonSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
deployment:: k8s.apps.v1beta2.deployment {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
replicaSet:: k8s.apps.v1beta2.replicaSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
statefulSet:: k8s.apps.v1beta2.statefulSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
batch:: k8s.batch {
|
||||
v1:: k8s.batch.v1 {
|
||||
job:: k8s.batch.v1.job {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
v1beta1:: k8s.batch.v1beta1 {
|
||||
cronJob:: k8s.batch.v1beta1.cronJob {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
v2alpha1:: k8s.batch.v2alpha1 {
|
||||
cronJob:: k8s.batch.v2alpha1.cronJob {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
core:: k8s.core {
|
||||
v1:: k8s.core.v1 {
|
||||
list:: {
|
||||
new(items):: {
|
||||
apiVersion: 'v1',
|
||||
} + {
|
||||
kind: 'List',
|
||||
} + self.items(items),
|
||||
items(items):: if std.type(items) == 'array' then { items+: items } else { items+: [items] },
|
||||
},
|
||||
pod:: k8s.core.v1.pod {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
podTemplate:: k8s.core.v1.podTemplate {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
replicationController:: k8s.core.v1.replicationController {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
extensions:: k8s.extensions {
|
||||
v1beta1:: k8s.extensions.v1beta1 {
|
||||
daemonSet:: k8s.extensions.v1beta1.daemonSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
deployment:: k8s.extensions.v1beta1.deployment {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
replicaSet:: k8s.extensions.v1beta1.replicaSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
12
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/invalid_definition.json
generated
vendored
Normal file
12
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/invalid_definition.json
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"version": "v1.8.0"
|
||||
},
|
||||
"paths": {
|
||||
"/invalid": {}
|
||||
},
|
||||
"definitions": {
|
||||
"invalid": {}
|
||||
}
|
||||
}
|
24
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/invalid_ref.json
generated
vendored
Normal file
24
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/invalid_ref.json
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"version": "v1.8.0"
|
||||
},
|
||||
"paths": {
|
||||
"/invalid": {}
|
||||
},
|
||||
"definitions": {
|
||||
"io.k8s.api.apps.v1beta2.RollingUpdateDeployment": {
|
||||
"description": "Spec to control the desired behavior of rolling update.",
|
||||
"properties": {
|
||||
"maxSurge": {
|
||||
"description": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"
|
||||
},
|
||||
"maxUnavailable": {
|
||||
"description": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
111
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/k.libsonnet
generated
vendored
Normal file
111
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/k.libsonnet
generated
vendored
Normal file
|
@ -0,0 +1,111 @@
|
|||
local k8s = import 'k8s.libsonnet';
|
||||
local fn = {
|
||||
mapContainers(f):: {
|
||||
local podContainers = super.spec.template.spec.containers,
|
||||
spec+: {
|
||||
template+: {
|
||||
spec+: {
|
||||
containers: std.map(f, podContainers),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mapContainersWithName(names, f)::
|
||||
local nameSet = if std.type(names) == 'array' then std.set(names) else std.set([names]);
|
||||
local inNameSet(name) = std.length(std.setInter(nameSet, std.set([name]))) > 0;
|
||||
|
||||
self.mapContainers(function(c) if std.objectHas(c, 'name') && inNameSet(c.name) then f(c) else c),
|
||||
};
|
||||
|
||||
k8s {
|
||||
apps:: k8s.apps {
|
||||
v1beta1:: k8s.apps.v1beta1 {
|
||||
deployment:: k8s.apps.v1beta1.deployment {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
statefulSet:: k8s.apps.v1beta1.statefulSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
v1beta2:: k8s.apps.v1beta2 {
|
||||
daemonSet:: k8s.apps.v1beta2.daemonSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
deployment:: k8s.apps.v1beta2.deployment {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
replicaSet:: k8s.apps.v1beta2.replicaSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
statefulSet:: k8s.apps.v1beta2.statefulSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
batch:: k8s.batch {
|
||||
v1:: k8s.batch.v1 {
|
||||
job:: k8s.batch.v1.job {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
v1beta1:: k8s.batch.v1beta1 {
|
||||
cronJob:: k8s.batch.v1beta1.cronJob {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
v2alpha1:: k8s.batch.v2alpha1 {
|
||||
cronJob:: k8s.batch.v2alpha1.cronJob {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
core:: k8s.core {
|
||||
v1:: k8s.core.v1 {
|
||||
list:: {
|
||||
new(items):: {
|
||||
apiVersion: 'v1',
|
||||
} + {
|
||||
kind: 'List',
|
||||
} + self.items(items),
|
||||
items(items):: if std.type(items) == 'array' then { items+: items } else { items+: [items] },
|
||||
},
|
||||
pod:: k8s.core.v1.pod {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
podTemplate:: k8s.core.v1.podTemplate {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
replicationController:: k8s.core.v1.replicationController {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
extensions:: k8s.extensions {
|
||||
v1beta1:: k8s.extensions.v1beta1 {
|
||||
daemonSet:: k8s.extensions.v1beta1.daemonSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
deployment:: k8s.extensions.v1beta1.deployment {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
replicaSet:: k8s.extensions.v1beta1.replicaSet {
|
||||
mapContainers(f):: fn.mapContainers(f),
|
||||
mapContainersWithName(names, f):: fn.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
73741
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/swagger-1.8.json
generated
vendored
Normal file
73741
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/testdata/swagger-1.8.json
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
74
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/type.go
generated
vendored
Normal file
74
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/type.go
generated
vendored
Normal file
|
@ -0,0 +1,74 @@
|
|||
package ksonnet
|
||||
|
||||
// Type is a Kubernetes kind.
|
||||
type Type struct {
|
||||
description string
|
||||
properties map[string]Property
|
||||
component Component
|
||||
group string
|
||||
codebase string
|
||||
identifier string
|
||||
}
|
||||
|
||||
var _ Object = (*Type)(nil)
|
||||
|
||||
// NewType creates an instance of Type.
|
||||
func NewType(identifier, description, codebase, group string, component Component, props map[string]Property) Type {
|
||||
return Type{
|
||||
description: description,
|
||||
group: group,
|
||||
codebase: codebase,
|
||||
component: component,
|
||||
properties: props,
|
||||
identifier: identifier,
|
||||
}
|
||||
}
|
||||
|
||||
// Kind is the kind for this type
|
||||
func (t *Type) Kind() string {
|
||||
return t.component.Kind
|
||||
}
|
||||
|
||||
// Version is the version for this type
|
||||
func (t *Type) Version() string {
|
||||
return t.component.Version
|
||||
}
|
||||
|
||||
// Codebase is the codebase for this field.
|
||||
func (t *Type) Codebase() string {
|
||||
return t.codebase
|
||||
}
|
||||
|
||||
// Group is the group for this type
|
||||
func (t *Type) Group() string {
|
||||
if t.group == "" {
|
||||
return "core"
|
||||
}
|
||||
|
||||
return t.group
|
||||
}
|
||||
|
||||
// QualifiedGroup is the group for this type
|
||||
func (t *Type) QualifiedGroup() string {
|
||||
return t.component.Group
|
||||
}
|
||||
|
||||
// Description is description for this type
|
||||
func (t *Type) Description() string {
|
||||
return t.description
|
||||
}
|
||||
|
||||
// Identifier is identifier for this type
|
||||
func (t *Type) Identifier() string {
|
||||
return t.identifier
|
||||
}
|
||||
|
||||
// IsType returns if this item is a type. It always returns true.
|
||||
func (t *Type) IsType() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Properties are the properties for this type.
|
||||
func (t *Type) Properties() map[string]Property {
|
||||
return t.properties
|
||||
}
|
47
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/type_test.go
generated
vendored
Normal file
47
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/type_test.go
generated
vendored
Normal file
|
@ -0,0 +1,47 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestType(t *testing.T) {
|
||||
props := make(map[string]Property)
|
||||
props["foo"] = NewLiteralField("name", "integer", "desc", "ref")
|
||||
|
||||
c := Component{
|
||||
Group: "group2",
|
||||
Version: "ver",
|
||||
Kind: "kind",
|
||||
}
|
||||
|
||||
r := NewType("id", "desc", "codebase", "group1", c, props)
|
||||
|
||||
assert.Equal(t, "id", r.Identifier())
|
||||
assert.Equal(t, "desc", r.Description())
|
||||
assert.Equal(t, "group1", r.Group())
|
||||
assert.Equal(t, "ver", r.Version())
|
||||
assert.Equal(t, "kind", r.Kind())
|
||||
assert.Equal(t, "group2", r.QualifiedGroup())
|
||||
assert.True(t, r.IsType())
|
||||
|
||||
assert.Len(t, r.Properties(), 1)
|
||||
}
|
||||
|
||||
func TestType_no_group(t *testing.T) {
|
||||
props := make(map[string]Property)
|
||||
props["foo"] = NewLiteralField("name", "integer", "desc", "ref")
|
||||
|
||||
c := Component{
|
||||
Group: "group2",
|
||||
Version: "ver",
|
||||
Kind: "kind",
|
||||
}
|
||||
|
||||
r := NewType("id", "desc", "codebase", "", c, props)
|
||||
|
||||
assert.Equal(t, "core", r.Group())
|
||||
assert.Equal(t, "group2", r.QualifiedGroup())
|
||||
|
||||
}
|
27
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/util.go
generated
vendored
Normal file
27
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/util.go
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec"
|
||||
)
|
||||
|
||||
const constructorName = "new"
|
||||
|
||||
var (
|
||||
specialProperties = map[kubespec.PropertyName]kubespec.PropertyName{
|
||||
"apiVersion": "apiVersion",
|
||||
"kind": "kind",
|
||||
}
|
||||
|
||||
specialPropertiesList []string
|
||||
)
|
||||
|
||||
func init() {
|
||||
for k := range specialProperties {
|
||||
specialPropertiesList = append(specialPropertiesList, string(k))
|
||||
}
|
||||
}
|
||||
|
||||
func isSpecialProperty(pn kubespec.PropertyName) bool {
|
||||
_, ok := specialProperties[pn]
|
||||
return ok
|
||||
}
|
78
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/version.go
generated
vendored
Normal file
78
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/version.go
generated
vendored
Normal file
|
@ -0,0 +1,78 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/google/go-jsonnet/ast"
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
)
|
||||
|
||||
// Version is an API version.
|
||||
type Version struct {
|
||||
name string
|
||||
group string
|
||||
|
||||
resources []*APIObject
|
||||
}
|
||||
|
||||
// NewVersion creates an instance of Version.
|
||||
func NewVersion(name, group string) *Version {
|
||||
v := &Version{
|
||||
name: name,
|
||||
group: group,
|
||||
resources: make([]*APIObject, 0),
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// APIObjects returns a slice of APIObjects sorted by name.
|
||||
func (v *Version) APIObjects() []APIObject {
|
||||
var objects []APIObject
|
||||
for _, resource := range v.resources {
|
||||
objects = append(objects, *resource)
|
||||
}
|
||||
|
||||
sort.Slice(objects, func(i, j int) bool {
|
||||
return objects[i].Kind() < objects[j].Kind()
|
||||
})
|
||||
|
||||
return objects
|
||||
}
|
||||
|
||||
// Name is the name of the version.
|
||||
func (v *Version) Name() string {
|
||||
return v.name
|
||||
}
|
||||
|
||||
// AddResource adds a resource to the version.
|
||||
func (v *Version) AddResource(resource Object) {
|
||||
ao := NewAPIObject(resource)
|
||||
v.resources = append(v.resources, ao)
|
||||
}
|
||||
|
||||
// APIVersion returns the version.
|
||||
func (v *Version) APIVersion() string {
|
||||
if v.group == "core" || v.group == "" {
|
||||
return v.name
|
||||
}
|
||||
return fmt.Sprintf("%s/%s", v.group, v.name)
|
||||
}
|
||||
|
||||
// Node returns an ast node for this version.
|
||||
func (v *Version) Node() *nm.Object {
|
||||
o := nm.NewObject()
|
||||
|
||||
avo := nm.OnelineObject()
|
||||
avo.Set(
|
||||
nm.NewKey(
|
||||
"apiVersion",
|
||||
nm.KeyOptCategory(ast.ObjectFieldID),
|
||||
nm.KeyOptVisibility(ast.ObjectFieldInherit)),
|
||||
nm.NewStringDouble(v.APIVersion()))
|
||||
|
||||
o.Set(nm.LocalKey("apiVersion"), avo)
|
||||
|
||||
return o
|
||||
}
|
73
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/version_test.go
generated
vendored
Normal file
73
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet/version_test.go
generated
vendored
Normal file
|
@ -0,0 +1,73 @@
|
|||
package ksonnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
nm "github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestVersion_Name(t *testing.T) {
|
||||
v := NewVersion("v1", "groupName")
|
||||
require.Equal(t, "v1", v.Name())
|
||||
}
|
||||
|
||||
func TestVersion_APIVersion(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
groupName string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "groupName group",
|
||||
groupName: "groupName",
|
||||
expected: "groupName/v1",
|
||||
},
|
||||
{
|
||||
name: "core group",
|
||||
groupName: "core",
|
||||
expected: "v1",
|
||||
},
|
||||
{
|
||||
name: "empty group",
|
||||
groupName: "",
|
||||
expected: "v1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
v := NewVersion("v1", tc.groupName)
|
||||
require.Equal(t, tc.expected, v.APIVersion())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVersion_AddResource(t *testing.T) {
|
||||
v := NewVersion("v1", "groupName")
|
||||
|
||||
c1 := Component{Group: "group2", Version: "v1", Kind: "kind1"}
|
||||
o1 := NewType("alpha", "desc", "codebase", "group", c1, nil)
|
||||
v.AddResource(&o1)
|
||||
|
||||
c2 := Component{Group: "group2", Version: "v1", Kind: "kind2"}
|
||||
o2 := NewType("beta", "desc", "codebase", "group", c2, nil)
|
||||
v.AddResource(&o2)
|
||||
|
||||
require.Len(t, v.APIObjects(), 2)
|
||||
}
|
||||
|
||||
func TestVersion_Node(t *testing.T) {
|
||||
v := NewVersion("v1", "groupName")
|
||||
|
||||
n := v.Node()
|
||||
require.NotNil(t, n)
|
||||
|
||||
av, ok := n.Get("apiVersion").(*nm.Object)
|
||||
require.True(t, ok)
|
||||
|
||||
vStr, ok := av.Get("apiVersion").(*nm.StringDouble)
|
||||
require.True(t, ok)
|
||||
|
||||
require.Equal(t, nm.NewStringDouble(v.APIVersion()), vStr)
|
||||
}
|
41
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/importer.go
generated
vendored
Normal file
41
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/importer.go
generated
vendored
Normal file
|
@ -0,0 +1,41 @@
|
|||
package kubespec
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Import imports an OpenAPI swagger schema.
|
||||
func Import(path string) (*spec.Swagger, string, error) {
|
||||
b, err := swag.LoadFromFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrap(err, "load schema from path")
|
||||
}
|
||||
|
||||
h := sha256.New()
|
||||
h.Write(b)
|
||||
|
||||
checksum := fmt.Sprintf("%x", h.Sum(nil))
|
||||
|
||||
spec, err := CreateAPISpec(b)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return spec, checksum, nil
|
||||
}
|
||||
|
||||
// CreateAPISpec a swagger file into a *spec.Swagger.
|
||||
func CreateAPISpec(b []byte) (*spec.Swagger, error) {
|
||||
var apiSpec spec.Swagger
|
||||
if err := json.Unmarshal(b, &apiSpec); err != nil {
|
||||
return nil, errors.Wrap(err, "parse swagger JSON")
|
||||
}
|
||||
|
||||
return &apiSpec, nil
|
||||
}
|
66
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/importer_test.go
generated
vendored
Normal file
66
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/importer_test.go
generated
vendored
Normal file
|
@ -0,0 +1,66 @@
|
|||
package kubespec_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func testdata(name string) string {
|
||||
return filepath.Join("testdata", name)
|
||||
}
|
||||
|
||||
func TestImporter_Import(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
location string
|
||||
checksum string
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
name: "missing file",
|
||||
location: "missing.json",
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid file",
|
||||
location: testdata("invalid.json"),
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
name: "valid file",
|
||||
location: testdata("deployment.json"),
|
||||
checksum: "0958866ac95c381dc661136396c73456038854df20b06688332a91a463857135",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
t.Logf("path = %s", r.URL.Path)
|
||||
if r.URL.Path != "/swagger.json" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintln(w, `{"swagger": "2.0", "info": {"title": "Kubernetes"}}`)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
apiSpec, checksum, err := kubespec.Import(tc.location)
|
||||
if tc.isErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, apiSpec)
|
||||
require.Equal(t, tc.checksum, checksum)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
90
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/new.go
generated
vendored
Normal file
90
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/new.go
generated
vendored
Normal file
|
@ -0,0 +1,90 @@
|
|||
package kubespec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var (
|
||||
regexes = []*regexp.Regexp{
|
||||
// Core API, pre-1.8 Kubernetes OR non-Kubernetes codebase APIs
|
||||
regexp.MustCompile(`io\.k8s\.(?P<codebase>\S+)\.pkg\.api\.(?P<version>\S+)\.(?P<kind>\S+)`),
|
||||
// Core API, 1.8+ Kubernetes
|
||||
regexp.MustCompile(`io\.k8s\.api\.(?P<packageType>core)\.(?P<version>\S+)\.(?P<kind>\S+)`),
|
||||
// Other APIs, pre-1.8 Kubernetes OR non-Kubernetes codebase APIs
|
||||
regexp.MustCompile(`io\.k8s\.(?P<codebase>\S+)\.pkg\.(?P<packageType>apis)\.(?P<group>\S+)\.(?P<version>\S+)\.(?P<kind>\S+)`),
|
||||
// Other APIs, 1.8+ Kubernetes
|
||||
regexp.MustCompile(`io\.k8s\.api\.(?P<group>\S+)\.(?P<version>\S+)\.(?P<kind>\S+)`),
|
||||
// Util packageType
|
||||
regexp.MustCompile(`io\.k8s\.(?P<codebase>\S+)\.pkg\.(?P<packageType>util)\.(?P<version>\S+)\.(?P<kind>\S+)`),
|
||||
// Version packageType
|
||||
regexp.MustCompile(`io\.k8s\.(?P<codebase>\S+)\.pkg\.(?P<packageType>version)\.(?P<kind>\S+)`),
|
||||
// Runtime packageType
|
||||
regexp.MustCompile(`io\.k8s\.(?P<codebase>\S+)\.pkg\.(?P<packageType>runtime)\.(?P<kind>\S+)`),
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
packageTypeMap = map[string]Package{
|
||||
"core": Core,
|
||||
"apis": APIs,
|
||||
"util": Util,
|
||||
"runtime": Runtime,
|
||||
"version": Version,
|
||||
}
|
||||
)
|
||||
|
||||
type description struct {
|
||||
name string
|
||||
codebase string
|
||||
version string
|
||||
kind ObjectKind
|
||||
packageType Package
|
||||
group string
|
||||
}
|
||||
|
||||
func (d *description) Validate() error {
|
||||
if d.version == "" {
|
||||
return fmt.Errorf("version is nil for %q", d.name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func describeDefinition(name string) (*description, error) {
|
||||
for _, r := range regexes {
|
||||
if match := r.FindStringSubmatch(name); len(match) > 0 {
|
||||
|
||||
result := make(map[string]string)
|
||||
for i, name := range r.SubexpNames() {
|
||||
if i != 0 {
|
||||
result[name] = match[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Hacky heuristics to fix missing fields
|
||||
if result["codebase"] == "" {
|
||||
result["codebase"] = "kubernetes"
|
||||
}
|
||||
if result["packageType"] == "" && result["group"] == "" {
|
||||
result["packageType"] = "core"
|
||||
}
|
||||
if result["packageType"] == "" && result["group"] != "" {
|
||||
result["packageType"] = "apis"
|
||||
}
|
||||
|
||||
d := &description{
|
||||
name: name,
|
||||
codebase: result["codebase"],
|
||||
version: result["version"],
|
||||
kind: ObjectKind(result["kind"]),
|
||||
packageType: packageTypeMap[result["packageType"]],
|
||||
group: result["group"],
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown definition %q", name)
|
||||
}
|
201
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/parsing.go
generated
vendored
Normal file
201
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/parsing.go
generated
vendored
Normal file
|
@ -0,0 +1,201 @@
|
|||
package kubespec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Utility methods for `DefinitionName` and `ObjectRef`.
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// Parse will parse a `DefinitionName` into a structured
|
||||
// `ParsedDefinitionName`.
|
||||
func (dn *DefinitionName) Parse() (*ParsedDefinitionName, error) {
|
||||
name := string(*dn)
|
||||
|
||||
desc, err := describeDefinition(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("describe definition: %#v", err)
|
||||
}
|
||||
|
||||
pd := ParsedDefinitionName{
|
||||
Codebase: desc.codebase,
|
||||
Kind: desc.kind,
|
||||
PackageType: desc.packageType,
|
||||
}
|
||||
|
||||
if desc.group != "" {
|
||||
group := GroupName(desc.group)
|
||||
pd.Group = &group
|
||||
}
|
||||
|
||||
if desc.version != "" {
|
||||
version := VersionString(desc.version)
|
||||
pd.Version = &version
|
||||
}
|
||||
|
||||
return &pd, nil
|
||||
}
|
||||
|
||||
// Name parses a `DefinitionName` from an `ObjectRef`. `ObjectRef`s
|
||||
// that refer to a definition contain two parts: (1) a special prefix,
|
||||
// and (2) a `DefinitionName`, so this function simply strips the
|
||||
// prefix off.
|
||||
func (or *ObjectRef) Name() *DefinitionName {
|
||||
defn := "#/definitions/"
|
||||
ref := string(*or)
|
||||
if !strings.HasPrefix(ref, defn) {
|
||||
log.Fatalln(ref)
|
||||
}
|
||||
name := DefinitionName(strings.TrimPrefix(ref, defn))
|
||||
return &name
|
||||
}
|
||||
|
||||
func (dn DefinitionName) AsObjectRef() *ObjectRef {
|
||||
or := ObjectRef("#/definitions/" + dn)
|
||||
return &or
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Parsed definition name.
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// Package represents the type of the definition, either `APIs`, which
|
||||
// have API groups (e.g., extensions, apps, meta, and so on), or
|
||||
// `Core`, which does not.
|
||||
type Package int
|
||||
|
||||
const (
|
||||
// Core is a package that contains the Kubernetes Core objects.
|
||||
Core Package = iota
|
||||
|
||||
// APIs is a set of non-core packages grouped loosely by semantic
|
||||
// functionality (e.g., apps, extensions, and so on).
|
||||
APIs
|
||||
|
||||
//
|
||||
// Internal packages.
|
||||
//
|
||||
|
||||
// Util is a package that contains utilities used for both testing
|
||||
// and running Kubernetes.
|
||||
Util
|
||||
|
||||
// Runtime is a package that contains various utilities used in the
|
||||
// Kubernetes runtime.
|
||||
Runtime
|
||||
|
||||
// Version is a package that supplies version information collected
|
||||
// at build time.
|
||||
Version
|
||||
)
|
||||
|
||||
// ParsedDefinitionName is a parsed version of a fully-qualified
|
||||
// OpenAPI spec name. For example,
|
||||
// `io.k8s.kubernetes.pkg.api.v1.Container` would parse into an
|
||||
// instance of the struct below.
|
||||
type ParsedDefinitionName struct {
|
||||
PackageType Package
|
||||
Codebase string
|
||||
Group *GroupName // Pointer because it's optional.
|
||||
Version *VersionString // Pointer because it's optional.
|
||||
Kind ObjectKind
|
||||
}
|
||||
|
||||
// GroupName represetents a Kubernetes group name (e.g., apps,
|
||||
// extensions, etc.)
|
||||
type GroupName string
|
||||
|
||||
func (gn GroupName) String() string {
|
||||
return string(gn)
|
||||
}
|
||||
|
||||
// ObjectKind represents the `kind` of a Kubernetes API object (e.g.,
|
||||
// Service, Deployment, etc.)
|
||||
type ObjectKind string
|
||||
|
||||
func (ok ObjectKind) String() string {
|
||||
return string(ok)
|
||||
}
|
||||
|
||||
// VersionString is the string representation of an API version (e.g.,
|
||||
// v1, v1beta1, etc.)
|
||||
type VersionString string
|
||||
|
||||
func (vs VersionString) String() string {
|
||||
return string(vs)
|
||||
}
|
||||
|
||||
// Unparse transforms a `ParsedDefinitionName` back into its
|
||||
// corresponding string, e.g.,
|
||||
// `io.k8s.kubernetes.pkg.api.v1.Container`.
|
||||
func (p *ParsedDefinitionName) Unparse(isLegacySchema bool) (DefinitionName, error) {
|
||||
withNewSchema := !isLegacySchema
|
||||
|
||||
k8s := "kubernetes"
|
||||
switch p.PackageType {
|
||||
case Core:
|
||||
{
|
||||
if withNewSchema && p.Codebase == k8s {
|
||||
return DefinitionName(fmt.Sprintf(
|
||||
"io.k8s.api.core.%s.%s",
|
||||
*p.Version,
|
||||
p.Kind)), nil
|
||||
}
|
||||
|
||||
return DefinitionName(fmt.Sprintf(
|
||||
"io.k8s.%s.pkg.api.%s.%s",
|
||||
p.Codebase,
|
||||
*p.Version,
|
||||
p.Kind)), nil
|
||||
}
|
||||
case Util:
|
||||
{
|
||||
return DefinitionName(fmt.Sprintf(
|
||||
"io.k8s.%s.pkg.util.%s.%s",
|
||||
p.Codebase,
|
||||
*p.Version,
|
||||
p.Kind)), nil
|
||||
}
|
||||
case APIs:
|
||||
{
|
||||
if withNewSchema && p.Codebase == k8s {
|
||||
return DefinitionName(fmt.Sprintf(
|
||||
"io.k8s.api.%s.%s.%s",
|
||||
*p.Group,
|
||||
*p.Version,
|
||||
p.Kind)), nil
|
||||
}
|
||||
return DefinitionName(fmt.Sprintf(
|
||||
"io.k8s.%s.pkg.apis.%s.%s.%s",
|
||||
p.Codebase,
|
||||
*p.Group,
|
||||
*p.Version,
|
||||
p.Kind)), nil
|
||||
}
|
||||
case Version:
|
||||
{
|
||||
return DefinitionName(fmt.Sprintf(
|
||||
"io.k8s.%s.pkg.version.%s",
|
||||
p.Codebase,
|
||||
p.Kind)), nil
|
||||
}
|
||||
case Runtime:
|
||||
{
|
||||
return DefinitionName(fmt.Sprintf(
|
||||
"io.k8s.%s.pkg.runtime.%s",
|
||||
p.Codebase,
|
||||
p.Kind)), nil
|
||||
}
|
||||
default:
|
||||
{
|
||||
return "",
|
||||
fmt.Errorf(
|
||||
"Failed to unparse definition name, did not recognize kind '%d'",
|
||||
p.PackageType)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
382
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/parsing_test.go
generated
vendored
Normal file
382
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/parsing_test.go
generated
vendored
Normal file
|
@ -0,0 +1,382 @@
|
|||
package kubespec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var legacyNamespaces = []string{
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeAffinity",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResource",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleRef",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ConfigMapKeySelector",
|
||||
"io.k8s.kubernetes.pkg.api.v1.LimitRangeItem",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimStatus",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodTemplateList",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetSpec",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta",
|
||||
"io.k8s.kubernetes.pkg.api.v1.LocalObjectReference",
|
||||
"io.k8s.kubernetes.pkg.api.v1.FlockerVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolume",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Toleration",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscaler",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicy",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ConfigMapProjection",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ContainerStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudget",
|
||||
"io.k8s.kubernetes.pkg.api.v1.GitRepoVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentList",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetList",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Binding",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Pod",
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJob",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleBindingList",
|
||||
"io.k8s.kubernetes.pkg.apis.storage.v1.StorageClass",
|
||||
"io.k8s.kubernetes.pkg.api.v1.LimitRange",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimList",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentRollback",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SubjectAccessReview",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.PolicyRule",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Affinity",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.SubjectAccessReviewStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicySpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ResourceQuotaSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.Role",
|
||||
"io.k8s.kubernetes.pkg.apis.storage.v1beta1.StorageClassList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.LoadBalancerStatus",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Probe",
|
||||
"io.k8s.kubernetes.pkg.api.v1.GlusterfsVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeList",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.SubjectAccessReviewSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.CrossVersionObjectReference",
|
||||
"io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequest",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.APIVersions",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ConfigMap",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeList",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBinding",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeAddress",
|
||||
"io.k8s.kubernetes.pkg.apis.storage.v1.StorageClassList",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ReplicationControllerStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentCondition",
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReviewSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.AzureFileVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.EnvFromSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Node",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPort",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ContainerState",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRole",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ResourceQuota",
|
||||
"io.k8s.kubernetes.pkg.apis.settings.v1alpha1.PodPresetSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Service",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.ResourceAttributes",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDaemonSet",
|
||||
"io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudgetSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodAntiAffinity",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PortworxVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.RBDVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.LocalSubjectAccessReview",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.LocalSubjectAccessReview",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.NonResourceAttributes",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.MetricStatus",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta",
|
||||
"io.k8s.kubernetes.pkg.api.v1.FlexVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressBackend",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ComponentCondition",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaim",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.SELinuxStrategyOptions",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.Role",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Container",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ResourceMetricSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.HostAlias",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeSystemInfo",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ResourceRequirements",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ScaleIOVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1.UserInfo",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions",
|
||||
"io.k8s.apimachinery.pkg.version.Info",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressPath",
|
||||
"io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestList",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.HostPortRange",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetStatus",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Lifecycle",
|
||||
"io.k8s.kubernetes.pkg.api.v1.VolumeMount",
|
||||
"io.k8s.kubernetes.pkg.api.v1.SecretEnvSource",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetCondition",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ComponentStatusList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Secret",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ObjectFieldSelector",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodTemplate",
|
||||
"io.k8s.kubernetes.pkg.api.v1.SecretProjection",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.SubjectAccessReview",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.EndpointsList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ExecAction",
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReview",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyIngressRule",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ObjectReference",
|
||||
"io.k8s.kubernetes.pkg.api.v1.SecurityContext",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.MetricSpec",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Status",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeCondition",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.SelfSubjectAccessReview",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.ScaleSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Namespace",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServiceStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDeployment",
|
||||
"io.k8s.kubernetes.pkg.api.v1.FCVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.ResourceAttributes",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ProjectedVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.VsphereVirtualDiskVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleBindingList",
|
||||
"io.k8s.kubernetes.pkg.apis.storage.v1beta1.StorageClass",
|
||||
"io.k8s.kubernetes.pkg.api.v1.EnvVarSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PhotonPersistentDiskVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ObjectMetricSource",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleRef",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Initializer",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Event",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ResourceQuotaStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SelfSubjectAccessReviewSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.ScaleSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudgetStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.settings.v1alpha1.PodPresetList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ConfigMapEnvSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeSelectorRequirement",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Endpoints",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SelfSubjectAccessReview",
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v2alpha1.JobTemplateSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.APIVersion",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicyList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.DaemonEndpoint",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ISCSIVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.GCEPersistentDiskVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeSelector",
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReviewStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobStatus",
|
||||
"io.k8s.apimachinery.pkg.runtime.RawExtension",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ContainerImage",
|
||||
"io.k8s.kubernetes.pkg.api.v1.SecretKeySelector",
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobList",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPeer",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicySpec",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ScaleSpec",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ReplicationController",
|
||||
"io.k8s.kubernetes.pkg.api.v1.TCPSocketAction",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetList",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.SelfSubjectAccessReviewSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SubjectAccessReviewStatus",
|
||||
"io.k8s.kubernetes.pkg.api.v1.AttachedVolume",
|
||||
"io.k8s.kubernetes.pkg.api.v1.KeyToPath",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ObjectMetricStatus",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Patch",
|
||||
"io.k8s.kubernetes.pkg.api.v1.SecretList",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.ScaleStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReview",
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v1.JobStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.Subject",
|
||||
"io.k8s.apimachinery.pkg.api.resource.Quantity",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ReplicationControllerList",
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.EnvVar",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetList",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleBinding",
|
||||
"io.k8s.kubernetes.pkg.api.v1.EndpointAddress",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.ScaleStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v1.JobCondition",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.FSGroupStrategyOptions",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Ingress",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRole",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleBinding",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeDaemonEndpoints",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.CrossVersionObjectReference",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ResourceMetricStatus",
|
||||
"io.k8s.kubernetes.pkg.api.v1.AzureDiskVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ResourceQuotaList",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodCondition",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceList",
|
||||
"io.k8s.apimachinery.pkg.util.intstr.IntOrString",
|
||||
"io.k8s.kubernetes.pkg.api.v1.LoadBalancerIngress",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RollbackConfig",
|
||||
"io.k8s.kubernetes.pkg.apis.settings.v1alpha1.PodPreset",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Taint",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ContainerStateRunning",
|
||||
"io.k8s.kubernetes.pkg.api.v1.VolumeProjection",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PreferredSchedulingTerm",
|
||||
"io.k8s.kubernetes.pkg.api.v1.SecretVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentCondition",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ConfigMapList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServicePort",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSet",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStrategy",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressTLS",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ContainerStateTerminated",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetUpdateStrategy",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyList",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Scale",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ReplicationControllerSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1beta1.UserInfo",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeStatus",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressRuleValue",
|
||||
"io.k8s.kubernetes.pkg.apis.policy.v1beta1.Eviction",
|
||||
"io.k8s.kubernetes.pkg.api.v1.DownwardAPIVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NamespaceSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ComponentStatus",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleList",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.PolicyRule",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NamespaceStatus",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.DownwardAPIVolumeFile",
|
||||
"io.k8s.kubernetes.pkg.api.v1.EndpointSubset",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Handler",
|
||||
"io.k8s.kubernetes.pkg.api.v1.WeightedPodAffinityTerm",
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v1.JobList",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleBindingList",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.APIResource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.AWSElasticBlockStoreVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NFSVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentStrategy",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentRollback",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.CinderVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.EmptyDirVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodTemplateSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.SELinuxOptions",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.RollingUpdateDeployment",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.PodsMetricStatus",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Capabilities",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodSecurityContext",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ContainerPort",
|
||||
"io.k8s.kubernetes.pkg.api.v1.EventSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.HostPathVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodAffinity",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodSpec",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ReplicationControllerCondition",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails",
|
||||
"io.k8s.kubernetes.pkg.api.v1.CephFSVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.Scale",
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v1.Job",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleBinding",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServiceList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServiceSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReviewSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v1.JobSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetStatus",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NamespaceList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServiceAccountList",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.NonResourceAttributes",
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SubjectAccessReviewSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSet",
|
||||
"io.k8s.kubernetes.pkg.api.v1.HTTPGetAction",
|
||||
"io.k8s.kubernetes.pkg.api.v1.LimitRangeList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ContainerStateWaiting",
|
||||
"io.k8s.kubernetes.pkg.api.v1.HTTPHeader",
|
||||
"io.k8s.kubernetes.pkg.api.v1.QuobyteVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ResourceFieldSelector",
|
||||
"io.k8s.kubernetes.pkg.api.v1.Volume",
|
||||
"io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestCondition",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Initializers",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Time",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RunAsUserStrategyOptions",
|
||||
"io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudgetList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.LimitRangeSpec",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector",
|
||||
"io.k8s.kubernetes.pkg.api.v1.DownwardAPIProjection",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.RollbackConfig",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IDRange",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement",
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeSelectorTerm",
|
||||
"io.k8s.kubernetes.pkg.api.v1.EventList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodAffinityTerm",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServiceAccount",
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSet",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ScaleStatus",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.Subject",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.ConfigMapVolumeSource",
|
||||
"io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestSpec",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Deployment",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressRule",
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicy",
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodList",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.Scale",
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.PodsMetricSource",
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleList",
|
||||
"io.k8s.kubernetes.pkg.api.v1.EndpointPort",
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReviewStatus",
|
||||
}
|
||||
|
||||
// Definition naming schema for Kubernetes 1.8.x+
|
||||
var newSchemaNamespaces = []string{
|
||||
"io.k8s.api.core.v1.ConfigMapList",
|
||||
"io.k8s.api.policy.v1beta1.Eviction",
|
||||
"io.k8s.api.apps.v1beta2.ControllerRevision",
|
||||
"io.k8s.api.batch.v2alpha1.CronJobList",
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Status",
|
||||
}
|
||||
|
||||
func testDefinitionName(namespace string, withNewSchema bool, t *testing.T) {
|
||||
dn := DefinitionName(namespace)
|
||||
parsed, err := dn.Parse()
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error while parsing: %v", err)
|
||||
}
|
||||
|
||||
unparsed, err := parsed.Unparse(withNewSchema)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error while unparsing: %v", err)
|
||||
}
|
||||
|
||||
if dn != unparsed {
|
||||
t.Errorf("Expected '%s' got '%s'", string(dn), unparsed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNamespaceParser(t *testing.T) {
|
||||
for _, namespace := range legacyNamespaces {
|
||||
t.Run(fmt.Sprintf("legacy namespace: %s", namespace), func(t *testing.T) {
|
||||
testDefinitionName(namespace, true, t)
|
||||
})
|
||||
}
|
||||
|
||||
for _, namespace := range newSchemaNamespaces {
|
||||
t.Run(fmt.Sprintf("namespace: %s", namespace), func(t *testing.T) {
|
||||
testDefinitionName(namespace, false, t)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
165
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/swagger.go
generated
vendored
Normal file
165
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/swagger.go
generated
vendored
Normal file
|
@ -0,0 +1,165 @@
|
|||
package kubespec
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// APISpec represents an OpenAPI specification of an API.
|
||||
type APISpec struct {
|
||||
SwaggerVersion string `json:"swagger"`
|
||||
Info *SchemaInfo `json:"info"`
|
||||
Definitions SchemaDefinitions `json:"definitions"`
|
||||
|
||||
// Fields we currently ignore:
|
||||
// - paths
|
||||
// - securityDefinitions
|
||||
// - security
|
||||
|
||||
// Not part of the OpenAPI spec. Filled in later.
|
||||
FilePath string
|
||||
Text []byte
|
||||
}
|
||||
|
||||
// SchemaInfo contains information about the the API represented with
|
||||
// `APISpec`. For example, `title` might be `"Kubernetes"`, and
|
||||
// `version` might be `"v1.7.0"`.
|
||||
type SchemaInfo struct {
|
||||
Title string `json:"title"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
// SchemaDefinition is an API object definition. For example, this
|
||||
// might contain a name (e.g., `v1.APIGroup`), a set of properties
|
||||
// (e.g., `apiVersion`, `kind`, and so on), and the names of required
|
||||
// properties.
|
||||
type SchemaDefinition struct {
|
||||
Type *SchemaType `json:"type"`
|
||||
Description string `json:"description"` // nullable.
|
||||
Required []string `json:"required"` // nullable.
|
||||
Properties Properties `json:"properties"` // nullable.
|
||||
TopLevelSpecs TopLevelSpecs `json:"x-kubernetes-group-version-kind"`
|
||||
Ref string `json:"$ref,omitempty"`
|
||||
}
|
||||
|
||||
// IsDeprecated returns true if the definition has a description
|
||||
// that starts with "Deprecated" and a $ref that is not empty.
|
||||
func (sd *SchemaDefinition) IsDeprecated() bool {
|
||||
if strings.HasPrefix(sd.Description, "Deprecated") {
|
||||
if sd.Ref != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsCRD returns true if the definition represents a CRD.
|
||||
func (sd *SchemaDefinition) IsCRD() bool {
|
||||
_, ok := sd.Properties["Schema"]
|
||||
return ok
|
||||
}
|
||||
|
||||
// QualifiedGroupName is the qualified group name. It is retrieved
|
||||
// from the x-kubernetes-group-version-kind field. If it doesn't
|
||||
// exist, the group name is returned.
|
||||
func (sd *SchemaDefinition) QualifiedGroupName(groupName string) string {
|
||||
if len(sd.TopLevelSpecs) > 0 && sd.TopLevelSpecs[0].Group != "" {
|
||||
return string(sd.TopLevelSpecs[0].Group)
|
||||
}
|
||||
|
||||
return groupName
|
||||
}
|
||||
|
||||
// TopLevelSpec is a property that exists on `SchemaDefinition`s for
|
||||
// top-level API objects.
|
||||
type TopLevelSpec struct {
|
||||
Group GroupName `json:"Group"`
|
||||
Version VersionString `json:"Version"`
|
||||
Kind ObjectKind `json:"Kind"`
|
||||
}
|
||||
type TopLevelSpecs []*TopLevelSpec
|
||||
|
||||
// SchemaDefinitions is a named collection of `SchemaDefinition`s,
|
||||
// represented as a collection mapping definition name ->
|
||||
// `SchemaDefinition`.
|
||||
type SchemaDefinitions map[DefinitionName]*SchemaDefinition
|
||||
|
||||
// Property represents an object property for some API object. For
|
||||
// example, `v1.APIGroup` might contain a property called
|
||||
// `apiVersion`, which would be specifid by a `Property`.
|
||||
type Property struct {
|
||||
Description string `json:"description"`
|
||||
Type *SchemaType `json:"type"`
|
||||
Ref *ObjectRef `json:"$ref"`
|
||||
Items Items `json:"items"` // nil unless Type == "array".
|
||||
}
|
||||
|
||||
// Properties is a named collection of `Properties`s, represented as a
|
||||
// collection mapping definition name -> `Properties`.
|
||||
type Properties map[PropertyName]*Property
|
||||
|
||||
// Items represents the type of an element in an array. Usually this
|
||||
// is used to fully specify a `Property` object whose `type` field is
|
||||
// `"array"`.
|
||||
type Items struct {
|
||||
Ref *ObjectRef `json:"$ref"`
|
||||
|
||||
// Ignored fields:
|
||||
// - Type *SchemaType `json:"type"`
|
||||
// - Format *string `json:"format"`
|
||||
}
|
||||
|
||||
// SchemaType represents the type of some object in an API spec. For
|
||||
// example, a property might have type `string`.
|
||||
type SchemaType string
|
||||
|
||||
func (st SchemaType) String() string {
|
||||
return string(st)
|
||||
}
|
||||
|
||||
// ObjectRef represents a reference to some API object. For example,
|
||||
// `#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta`
|
||||
type ObjectRef string
|
||||
|
||||
func (or ObjectRef) String() string {
|
||||
return string(or)
|
||||
}
|
||||
|
||||
// IsMixinRef will check whether a `ObjectRef` refers to an API object
|
||||
// that can be turned into a mixin. This should be true of the vast
|
||||
// majority of non-nil `ObjectRef`s. The most common exception is
|
||||
// `IntOrString`, which should not be turned into a mixin, and should
|
||||
// instead by transformed into a property method that behaves
|
||||
// identically to one taking an int or a ref as argument.
|
||||
func (or *ObjectRef) IsMixinRef() bool {
|
||||
if or == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return *or != "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"
|
||||
}
|
||||
|
||||
func stringInSlice(a string, list []string) bool {
|
||||
for _, b := range list {
|
||||
if b == a {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PropertyName represents the name of a property. For example,
|
||||
// `apiVersion` or `kind`.
|
||||
type PropertyName string
|
||||
|
||||
func (pn PropertyName) String() string {
|
||||
return string(pn)
|
||||
}
|
||||
|
||||
// DefinitionName represents the name of a definition. For example,
|
||||
// `v1.APIGroup`.
|
||||
type DefinitionName string
|
||||
|
||||
func (dn DefinitionName) String() string {
|
||||
return string(dn)
|
||||
}
|
678
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/testdata/deployment.json
generated
vendored
Normal file
678
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/testdata/deployment.json
generated
vendored
Normal file
|
@ -0,0 +1,678 @@
|
|||
{
|
||||
"swagger": "2.0",
|
||||
"definitions": {
|
||||
"io.k8s.api.apps.v1beta2.Deployment": {
|
||||
"description": "Deployment enables declarative updates for Pods and ReplicaSets.",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
|
||||
"type": "string"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Standard object metadata.",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Specification of the desired behavior of the Deployment.",
|
||||
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.DeploymentSpec"
|
||||
},
|
||||
"status": {
|
||||
"description": "Most recently observed status of the Deployment.",
|
||||
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.DeploymentStatus"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-group-version-kind": [
|
||||
{
|
||||
"group": "apps",
|
||||
"kind": "Deployment",
|
||||
"version": "v1beta2"
|
||||
}
|
||||
]
|
||||
},
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": {
|
||||
"description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"clusterName": {
|
||||
"description": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.",
|
||||
"type": "string"
|
||||
},
|
||||
"creationTimestamp": {
|
||||
"description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
|
||||
},
|
||||
"deletionGracePeriodSeconds": {
|
||||
"description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"deletionTimestamp": {
|
||||
"description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
|
||||
},
|
||||
"finalizers": {
|
||||
"description": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"x-kubernetes-patch-strategy": "merge"
|
||||
},
|
||||
"generateName": {
|
||||
"description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency",
|
||||
"type": "string"
|
||||
},
|
||||
"generation": {
|
||||
"description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"initializers": {
|
||||
"description": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Initializers"
|
||||
},
|
||||
"labels": {
|
||||
"description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
|
||||
"type": "string"
|
||||
},
|
||||
"ownerReferences": {
|
||||
"description": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference"
|
||||
},
|
||||
"x-kubernetes-patch-merge-key": "uid",
|
||||
"x-kubernetes-patch-strategy": "merge"
|
||||
},
|
||||
"resourceVersion": {
|
||||
"description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
|
||||
"type": "string"
|
||||
},
|
||||
"selfLink": {
|
||||
"description": "SelfLink is a URL representing this object. Populated by the system. Read-only.",
|
||||
"type": "string"
|
||||
},
|
||||
"uid": {
|
||||
"description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Time": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Initializers": {
|
||||
"description": "Initializers tracks the progress of initialization.",
|
||||
"required": [
|
||||
"pending"
|
||||
],
|
||||
"properties": {
|
||||
"pending": {
|
||||
"description": "Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Initializer"
|
||||
},
|
||||
"x-kubernetes-patch-merge-key": "name",
|
||||
"x-kubernetes-patch-strategy": "merge"
|
||||
},
|
||||
"result": {
|
||||
"description": "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Status": {
|
||||
"description": "Status is a return value for calls that don't return other objects.",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
|
||||
"type": "string"
|
||||
},
|
||||
"code": {
|
||||
"description": "Suggested HTTP return code for this status, 0 if not set.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"details": {
|
||||
"description": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails"
|
||||
},
|
||||
"kind": {
|
||||
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
|
||||
"type": "string"
|
||||
},
|
||||
"message": {
|
||||
"description": "A human-readable description of the status of this operation.",
|
||||
"type": "string"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
|
||||
},
|
||||
"reason": {
|
||||
"description": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.",
|
||||
"type": "string"
|
||||
},
|
||||
"status": {
|
||||
"description": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-group-version-kind": [
|
||||
{
|
||||
"group": "",
|
||||
"kind": "Status",
|
||||
"version": "v1"
|
||||
}
|
||||
]
|
||||
},
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails": {
|
||||
"description": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.",
|
||||
"properties": {
|
||||
"causes": {
|
||||
"description": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause"
|
||||
}
|
||||
},
|
||||
"group": {
|
||||
"description": "The group attribute of the resource associated with the status StatusReason.",
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).",
|
||||
"type": "string"
|
||||
},
|
||||
"retryAfterSeconds": {
|
||||
"description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"uid": {
|
||||
"description": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta": {
|
||||
"description": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
|
||||
"properties": {
|
||||
"continue": {
|
||||
"description": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response.",
|
||||
"type": "string"
|
||||
},
|
||||
"resourceVersion": {
|
||||
"description": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
|
||||
"type": "string"
|
||||
},
|
||||
"selfLink": {
|
||||
"description": "selfLink is a URL representing this object. Populated by the system. Read-only.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.apps.v1beta2.DeploymentSpec": {
|
||||
"description": "DeploymentSpec is the specification of the desired behavior of the Deployment.",
|
||||
"required": [
|
||||
"template"
|
||||
],
|
||||
"properties": {
|
||||
"minReadySeconds": {
|
||||
"description": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"paused": {
|
||||
"description": "Indicates that the deployment is paused.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"progressDeadlineSeconds": {
|
||||
"description": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"replicas": {
|
||||
"description": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"revisionHistoryLimit": {
|
||||
"description": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"selector": {
|
||||
"description": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"
|
||||
},
|
||||
"strategy": {
|
||||
"description": "The deployment strategy to use to replace existing pods with new ones.",
|
||||
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.DeploymentStrategy"
|
||||
},
|
||||
"template": {
|
||||
"description": "Template describes the pods that will be created.",
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector": {
|
||||
"description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
|
||||
"properties": {
|
||||
"matchExpressions": {
|
||||
"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement"
|
||||
}
|
||||
},
|
||||
"matchLabels": {
|
||||
"description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.apps.v1beta2.DeploymentStrategy": {
|
||||
"description": "DeploymentStrategy describes how to replace existing pods with new ones.",
|
||||
"properties": {
|
||||
"rollingUpdate": {
|
||||
"description": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.",
|
||||
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.RollingUpdateDeployment"
|
||||
},
|
||||
"type": {
|
||||
"description": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.apps.v1beta2.RollingUpdateDeployment": {
|
||||
"description": "Spec to control the desired behavior of rolling update.",
|
||||
"properties": {
|
||||
"maxSurge": {
|
||||
"description": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"
|
||||
},
|
||||
"maxUnavailable": {
|
||||
"description": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.apimachinery.pkg.util.intstr.IntOrString": {
|
||||
"type": "string",
|
||||
"format": "int-or-string"
|
||||
},
|
||||
"io.k8s.api.core.v1.PodTemplateSpec": {
|
||||
"description": "PodTemplateSpec describes the data a pod should have when created from a template",
|
||||
"properties": {
|
||||
"metadata": {
|
||||
"description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.PodSpec"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.core.v1.PodSpec": {
|
||||
"description": "PodSpec is a description of a pod.",
|
||||
"required": [
|
||||
"containers"
|
||||
],
|
||||
"properties": {
|
||||
"activeDeadlineSeconds": {
|
||||
"description": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"affinity": {
|
||||
"description": "If specified, the pod's scheduling constraints",
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.Affinity"
|
||||
},
|
||||
"automountServiceAccountToken": {
|
||||
"description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"containers": {
|
||||
"description": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.Container"
|
||||
},
|
||||
"x-kubernetes-patch-merge-key": "name",
|
||||
"x-kubernetes-patch-strategy": "merge"
|
||||
},
|
||||
"dnsPolicy": {
|
||||
"description": "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
|
||||
"type": "string"
|
||||
},
|
||||
"hostAliases": {
|
||||
"description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.HostAlias"
|
||||
},
|
||||
"x-kubernetes-patch-merge-key": "ip",
|
||||
"x-kubernetes-patch-strategy": "merge"
|
||||
},
|
||||
"hostIPC": {
|
||||
"description": "Use the host's ipc namespace. Optional: Default to false.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"hostNetwork": {
|
||||
"description": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"hostPID": {
|
||||
"description": "Use the host's pid namespace. Optional: Default to false.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"hostname": {
|
||||
"description": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
|
||||
"type": "string"
|
||||
},
|
||||
"imagePullSecrets": {
|
||||
"description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference"
|
||||
},
|
||||
"x-kubernetes-patch-merge-key": "name",
|
||||
"x-kubernetes-patch-strategy": "merge"
|
||||
},
|
||||
"initContainers": {
|
||||
"description": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.Container"
|
||||
},
|
||||
"x-kubernetes-patch-merge-key": "name",
|
||||
"x-kubernetes-patch-strategy": "merge"
|
||||
},
|
||||
"nodeName": {
|
||||
"description": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
|
||||
"type": "string"
|
||||
},
|
||||
"nodeSelector": {
|
||||
"description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"priority": {
|
||||
"description": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"priorityClassName": {
|
||||
"description": "If specified, indicates the pod's priority. \"SYSTEM\" is a special keyword which indicates the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
|
||||
"type": "string"
|
||||
},
|
||||
"restartPolicy": {
|
||||
"description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
|
||||
"type": "string"
|
||||
},
|
||||
"schedulerName": {
|
||||
"description": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
|
||||
"type": "string"
|
||||
},
|
||||
"securityContext": {
|
||||
"description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext"
|
||||
},
|
||||
"serviceAccount": {
|
||||
"description": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
|
||||
"type": "string"
|
||||
},
|
||||
"serviceAccountName": {
|
||||
"description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
|
||||
"type": "string"
|
||||
},
|
||||
"subdomain": {
|
||||
"description": "If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all.",
|
||||
"type": "string"
|
||||
},
|
||||
"terminationGracePeriodSeconds": {
|
||||
"description": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"tolerations": {
|
||||
"description": "If specified, the pod's tolerations.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.Toleration"
|
||||
}
|
||||
},
|
||||
"volumes": {
|
||||
"description": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.Volume"
|
||||
},
|
||||
"x-kubernetes-patch-merge-key": "name",
|
||||
"x-kubernetes-patch-strategy": "merge,retainKeys"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.core.v1.Affinity": {
|
||||
"description": "Affinity is a group of affinity scheduling rules.",
|
||||
"properties": {
|
||||
"nodeAffinity": {
|
||||
"description": "Describes node affinity scheduling rules for the pod.",
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.NodeAffinity"
|
||||
},
|
||||
"podAffinity": {
|
||||
"description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).",
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.PodAffinity"
|
||||
},
|
||||
"podAntiAffinity": {
|
||||
"description": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).",
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.PodAntiAffinity"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.core.v1.NodeAffinity": {
|
||||
"description": "Node affinity is a group of node affinity scheduling rules.",
|
||||
"properties": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": {
|
||||
"description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.PreferredSchedulingTerm"
|
||||
}
|
||||
},
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.",
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.core.v1.NodeSelector": {
|
||||
"description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.",
|
||||
"required": [
|
||||
"nodeSelectorTerms"
|
||||
],
|
||||
"properties": {
|
||||
"nodeSelectorTerms": {
|
||||
"description": "Required. A list of node selector terms. The terms are ORed.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorTerm"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.core.v1.PodAffinity": {
|
||||
"description": "Pod affinity is a group of inter pod affinity scheduling rules.",
|
||||
"properties": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": {
|
||||
"description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm"
|
||||
}
|
||||
},
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.core.v1.PodAntiAffinity": {
|
||||
"description": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
|
||||
"properties": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": {
|
||||
"description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm"
|
||||
}
|
||||
},
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.core.v1.PodSecurityContext": {
|
||||
"description": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.",
|
||||
"properties": {
|
||||
"fsGroup": {
|
||||
"description": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"runAsNonRoot": {
|
||||
"description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"runAsUser": {
|
||||
"description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"seLinuxOptions": {
|
||||
"description": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
|
||||
"$ref": "#/definitions/io.k8s.api.core.v1.SELinuxOptions"
|
||||
},
|
||||
"supplementalGroups": {
|
||||
"description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.core.v1.SELinuxOptions": {
|
||||
"description": "SELinuxOptions are the labels to be applied to the container",
|
||||
"properties": {
|
||||
"level": {
|
||||
"description": "Level is SELinux level label that applies to the container.",
|
||||
"type": "string"
|
||||
},
|
||||
"role": {
|
||||
"description": "Role is a SELinux role label that applies to the container.",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"description": "Type is a SELinux type label that applies to the container.",
|
||||
"type": "string"
|
||||
},
|
||||
"user": {
|
||||
"description": "User is a SELinux user label that applies to the container.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.apps.v1beta2.DeploymentStatus": {
|
||||
"description": "DeploymentStatus is the most recently observed status of the Deployment.",
|
||||
"properties": {
|
||||
"availableReplicas": {
|
||||
"description": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"collisionCount": {
|
||||
"description": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"conditions": {
|
||||
"description": "Represents the latest available observations of a deployment's current state.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.apps.v1beta2.DeploymentCondition"
|
||||
},
|
||||
"x-kubernetes-patch-merge-key": "type",
|
||||
"x-kubernetes-patch-strategy": "merge"
|
||||
},
|
||||
"observedGeneration": {
|
||||
"description": "The generation observed by the deployment controller.",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"readyReplicas": {
|
||||
"description": "Total number of ready pods targeted by this deployment.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"replicas": {
|
||||
"description": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"unavailableReplicas": {
|
||||
"description": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"updatedReplicas": {
|
||||
"description": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/testdata/invalid.json
generated
vendored
Normal file
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec/testdata/invalid.json
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
////// invalid
|
103
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubeversion/blacklist.jq
generated
vendored
Executable file
103
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubeversion/blacklist.jq
generated
vendored
Executable file
|
@ -0,0 +1,103 @@
|
|||
#!/usr/bin/env jq -S -f
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# USAGE NOTES.
|
||||
#
|
||||
# This `jq` script will generate a list of top-level Kubernetes API
|
||||
# objects that contain either (or both of):
|
||||
#
|
||||
# 1. a property with the name `"status"`, or
|
||||
# 2. a property whose type is `meta.v1.ListMeta`.
|
||||
#
|
||||
# For example:
|
||||
#
|
||||
# {
|
||||
# "io.k8s.apimachinery.pkg.apis.meta.v1.Status": [
|
||||
# "status", "metadata"
|
||||
# ]
|
||||
# }
|
||||
#
|
||||
# This would indicate that the fields `metadata` and `status` are to
|
||||
# be blacklisted in the object `meta.v1.Status`.
|
||||
#
|
||||
#
|
||||
# Usage:
|
||||
# cat swagger.json | jq -S -f blacklist.jq
|
||||
#
|
||||
# Or, if you are on an OS with jq > v1.4
|
||||
# cat swagger.json | ./blacklist.jq
|
||||
#
|
||||
# NOTE: It is very important to pass the -S flag here, because sorting
|
||||
# the object keys makes the output diffable.
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
# has_status_prop takes an Kubernetes API object definition from the
|
||||
# swagger spec, and outputs a boolean indicating whether that API
|
||||
# object has a property called `status`.
|
||||
#
|
||||
# For example, the input might be a
|
||||
# `io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment` object, which
|
||||
# does indeed have a `status` field.
|
||||
def has_status_prop:
|
||||
. as $definition
|
||||
| if $definition.properties.status != null then true else false end;
|
||||
|
||||
# property_has_listmeta_type takes the property of a Kubernetes API
|
||||
# object definition, and returns a bool indicating whether its type is
|
||||
# a `$ref` of `meta.v1.ListMeta`.
|
||||
#
|
||||
# For example, `io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment`
|
||||
# does not have a property with a type that is a `$ref` to
|
||||
# `meta.v1.ListMeta`.
|
||||
def property_has_listmeta_type:
|
||||
. as $property
|
||||
| $property["$ref"] != null and
|
||||
$property["$ref"] == "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta";
|
||||
|
||||
# props_with_listmeta_type returns the names of all properties in some
|
||||
# Kubernetes API object definition whose type is `meta.v1.ListMeta`.
|
||||
#
|
||||
# For example, `io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment`
|
||||
# does not contain any properties with this type, so we would return
|
||||
# an empty array, while another object might return a list of names.
|
||||
def props_with_listmeta_type: [
|
||||
. as $definition
|
||||
| select($definition.properties != null)
|
||||
| $definition.properties
|
||||
| to_entries[]
|
||||
| select(.value | property_has_listmeta_type)
|
||||
| .key
|
||||
];
|
||||
|
||||
# entry_blacklist_props takes a key/value pair representing a
|
||||
# Kubernetes API object and its name, and returns a list of properties
|
||||
# that are blacklisted.
|
||||
#
|
||||
# For example, `.key` might be
|
||||
# `io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment`, while `.value`
|
||||
# woudl be the actual swagger specification of the `Deployment`
|
||||
# object.
|
||||
def entry_blacklist_props:
|
||||
.value as $definition
|
||||
| ($definition | has_status_prop) as $has_status_prop
|
||||
| ($definition | props_with_listmeta_type) as $props_with_listmeta_type
|
||||
| ($props_with_listmeta_type | length > 0) as $has_listmeta_type_props
|
||||
| if $has_status_prop and $has_listmeta_type_props
|
||||
then {(.key): (["status"] | .+ $props_with_listmeta_type)}
|
||||
elif $has_status_prop
|
||||
then {(.key): ["status"]}
|
||||
elif $has_listmeta_type_props
|
||||
then {(.key): $props_with_listmeta_type}
|
||||
else {(.key): []}
|
||||
end;
|
||||
|
||||
def create_blacklist:
|
||||
[ .definitions | to_entries[] | entry_blacklist_props ]
|
||||
| add
|
||||
| with_entries(select(.value | length > 0));
|
||||
|
||||
|
||||
# Execute.
|
||||
create_blacklist
|
951
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubeversion/data.go
generated
vendored
Normal file
951
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubeversion/data.go
generated
vendored
Normal file
|
@ -0,0 +1,951 @@
|
|||
package kubeversion
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Kubernetes version-specific data for customizing code that's
|
||||
// emitted.
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
var versions = map[string]versionData{
|
||||
"v1.7.0": versionData{
|
||||
beta: false,
|
||||
idAliases: map[string]string{
|
||||
// Properties of objects. Stuff like `cinder.volumeId`.
|
||||
"hostIPC": "hostIpc",
|
||||
"hostPID": "hostPid",
|
||||
"targetCPUUtilizationPercentage": "targetCpuUtilizationPercentage",
|
||||
"externalID": "externalId",
|
||||
"podCIDR": "podCidr",
|
||||
"providerID": "providerId",
|
||||
"bootID": "bootId",
|
||||
"machineID": "machineId",
|
||||
"systemUUID": "systemUuid",
|
||||
"volumeID": "volumeId",
|
||||
"diskURI": "diskUri",
|
||||
"targetWWNs": "targetWwns",
|
||||
"datasetUUID": "datasetUuid",
|
||||
"pdID": "pdId",
|
||||
"scaleIO": "scaleIo",
|
||||
"podIP": "podIp",
|
||||
"hostIP": "hostIp",
|
||||
"clusterIP": "clusterIp",
|
||||
"externalIPs": "externalIps",
|
||||
"loadBalancerIP": "loadBalancerIp",
|
||||
"containerID": "containerId",
|
||||
"imageID": "imageId",
|
||||
"serverAddressByClientCIDRs": "serverAddressByClientCidrs",
|
||||
"clientCIDR": "clientCidr",
|
||||
"nonResourceURLs": "nonResourceUrls",
|
||||
"currentCPUUtilizationPercentage": "currentCpuUtilizationPercentage",
|
||||
"downwardAPI": "downwardApi",
|
||||
|
||||
// Types. These have capitalized first letters, and exist in
|
||||
// places like `core.v1.AWSElasticBlockStoreVolumeSource`.
|
||||
"AWSElasticBlockStoreVolumeSource": "awsElasticBlockStoreVolumeSource",
|
||||
"CephFSVolumeSource": "cephFsVolumeSource",
|
||||
"DownwardAPIProjection": "downwardApiProjection",
|
||||
"DownwardAPIVolumeFile": "downwardApiVolumeFile",
|
||||
"DownwardAPIVolumeSource": "downwardApiVolumeSource",
|
||||
"FCVolumeSource": "fcVolumeSource",
|
||||
"GCEPersistentDiskVolumeSource": "gcePersistentDiskVolumeSource",
|
||||
"HTTPGetAction": "httpGetAction",
|
||||
"HTTPHeader": "httpHeader",
|
||||
"ISCSIVolumeSource": "iscsiVolumeSource",
|
||||
"NFSVolumeSource": "nfsVolumeSource",
|
||||
"RBDVolumeSource": "rbdVolumeSource",
|
||||
"SELinuxOptions": "seLinuxOptions",
|
||||
"ScaleIOVolumeSource": "scaleIoVolumeSource",
|
||||
"TCPSocketAction": "tcpSocketAction",
|
||||
"APIVersion": "apiVersion",
|
||||
"FSGroupStrategyOptions": "fsGroupStrategyOptions",
|
||||
"HTTPIngressPath": "httpIngressPath",
|
||||
"HTTPIngressRuleValue": "httpIngressRuleValue",
|
||||
"IDRange": "idRange",
|
||||
"IngressTLS": "ingressTls",
|
||||
"SELinuxStrategyOptions": "seLinuxStrategyOptions",
|
||||
"APIGroup": "apiGroup",
|
||||
"APIGroupList": "apiGroupList",
|
||||
"APIResource": "apiResource",
|
||||
"APIResourceList": "apiResourceList",
|
||||
"APIVersions": "apiVersions",
|
||||
"ServerAddressByClientCIDR": "serverAddressByClientCidr",
|
||||
|
||||
// Collisions with Jsonnet keywords.
|
||||
"local": "localStorage",
|
||||
},
|
||||
constructorSpecs: map[string][]CustomConstructorSpec{
|
||||
//
|
||||
// Apps namespace.
|
||||
//
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment": deploymentCtor,
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentList": objectList,
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentRollback": deploymentRollbackCtor,
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.Scale": scaleCtor,
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSet": statefulSetCtor,
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetList": objectList,
|
||||
|
||||
//
|
||||
// Extensions namespace.
|
||||
//
|
||||
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Deployment": deploymentCtor,
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentList": objectList,
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentRollback": deploymentRollbackCtor,
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Scale": scaleCtor,
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.StatefulSet": statefulSetCtor,
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.StatefulSetList": objectList,
|
||||
|
||||
//
|
||||
// Authentication namespace.
|
||||
//
|
||||
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReview": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("token", "mixin.spec.withToken")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReview": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("token", "mixin.spec.withToken")),
|
||||
},
|
||||
|
||||
//
|
||||
// Autoscaling namespace.
|
||||
//
|
||||
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList": objectList,
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.Scale": scaleCtor,
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerList": objectList,
|
||||
|
||||
//
|
||||
// Batch namespace.
|
||||
//
|
||||
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v1.JobList": objectList,
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobList": objectList,
|
||||
|
||||
//
|
||||
// Certificates namespace.
|
||||
//
|
||||
|
||||
"io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestList": objectList,
|
||||
|
||||
//
|
||||
// Core namespace.
|
||||
//
|
||||
|
||||
"io.k8s.kubernetes.pkg.api.v1.ConfigMap": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName"),
|
||||
newParam("data")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.ConfigMapList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.Container": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("name"), newParam("image")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.ContainerPort": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("containerPort")),
|
||||
newConstructor("newNamed", newParam("name"), newParam("containerPort")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.EndpointsList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.EnvVar": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("name"), newParam("value")),
|
||||
newConstructor(
|
||||
"fromSecretRef",
|
||||
newParam("name"),
|
||||
newParamNestedRef("secretRefName", "mixin.valueFrom.secretKeyRef.withName"),
|
||||
newParamNestedRef("secretRefKey", "mixin.valueFrom.secretKeyRef.withKey")),
|
||||
newConstructor(
|
||||
"fromFieldPath",
|
||||
newParam("name"),
|
||||
newParamNestedRef("fieldPath", "mixin.valueFrom.fieldRef.withFieldPath")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.EventList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.KeyToPath": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("key"), newParam("path")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.LimitRangeList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.Namespace": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.NamespaceList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodTemplateList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.ReplicationControllerList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.ResourceQuotaList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.Secret": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName"),
|
||||
newParam("data"),
|
||||
newParamWithDefault("type", "\"Opaque\"")),
|
||||
newConstructor(
|
||||
"fromString",
|
||||
newParamNestedRef("name", "mixin.metadata.withName"),
|
||||
newParam("stringData"),
|
||||
newParamWithDefault("type", "\"Opaque\"")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.SecretList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.Service": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName"),
|
||||
newParamNestedRef("selector", "mixin.spec.withSelector"),
|
||||
newParamNestedRef("ports", "mixin.spec.withPorts")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServiceAccount": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServiceAccountList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServiceList": objectList,
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServicePort": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("port"), newParam("targetPort")),
|
||||
newConstructor("newNamed", newParam("name"), newParam("port"), newParam("targetPort")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.Volume": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"fromConfigMap",
|
||||
newParam("name"),
|
||||
newParamNestedRef("configMapName", "mixin.configMap.withName"),
|
||||
newParamNestedRef("configMapItems", "mixin.configMap.withItems")),
|
||||
newConstructor(
|
||||
"fromEmptyDir",
|
||||
newParam("name"),
|
||||
newParamNestedRefDefault("emptyDir", "mixin.emptyDir.mixinInstance", "{}")),
|
||||
newConstructor(
|
||||
"fromPersistentVolumeClaim",
|
||||
newParam("name"),
|
||||
newParamNestedRef("claimName", "mixin.persistentVolumeClaim.withClaimName")),
|
||||
newConstructor(
|
||||
"fromHostPath",
|
||||
newParam("name"),
|
||||
newParamNestedRef("hostPath", "mixin.hostPath.withPath")),
|
||||
newConstructor(
|
||||
"fromSecret",
|
||||
newParam("name"),
|
||||
newParamNestedRef("secretName", "mixin.secret.withSecretName")),
|
||||
},
|
||||
"io.k8s.kubernetes.pkg.api.v1.VolumeMount": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("name"), newParam("mountPath"), newParamWithDefault("readOnly", "false")),
|
||||
},
|
||||
},
|
||||
|
||||
propertyBlacklist: map[string]propertySet{
|
||||
// Metadata fields.
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": newPropertySet(
|
||||
"creationTimestamp", "deletionTimestamp", "generation",
|
||||
"ownerReferences", "resourceVersion", "selfLink", "uid",
|
||||
),
|
||||
|
||||
// Fields whose types are
|
||||
// `io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta`.
|
||||
"io.k8s.kubernetes.pkg.api.v1.ComponentStatusList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.ConfigMapList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.EndpointsList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.EventList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.LimitRangeList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.NamespaceList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodTemplateList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.ReplicationControllerList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.ResourceQuotaList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.SecretList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServiceAccountList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.ServiceList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v1.JobList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicyList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudgetList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleBindingList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleBindingList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleBindingList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.settings.v1alpha1.PodPresetList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.storage.v1.StorageClassList": newPropertySet("metadata"),
|
||||
"io.k8s.kubernetes.pkg.apis.storage.v1beta1.StorageClassList": newPropertySet("metadata"),
|
||||
|
||||
// Status fields.
|
||||
"io.k8s.kubernetes.pkg.api.v1.Namespace": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.Node": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.NodeCondition": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolume": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaim": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.Pod": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.PodCondition": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.ReplicationController": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.ReplicationControllerCondition": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.ResourceQuota": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.api.v1.Service": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentCondition": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.Scale": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSet": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReview": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReview": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.LocalSubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.SelfSubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1.SubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.LocalSubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SelfSubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v1.Scale": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscaler": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v1.Job": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v1.JobCondition": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJob": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequest": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSet": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Deployment": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentCondition": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Ingress": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSet": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetCondition": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Scale": newPropertySet("status"),
|
||||
"io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudget": newPropertySet("status"),
|
||||
|
||||
// TODO: Find a more principled way to omit "status" types.
|
||||
// Currently we emit these in the `local hidden` in the `root`,
|
||||
// so that we can type aliases. To get around the fact that some
|
||||
// of their function names collide with Jsonnet keywords, we
|
||||
// simply choose not to emit them. Eventually we will approach
|
||||
// this problem in a more principled manner.
|
||||
"io.k8s.kubernetes.pkg.api.v1.ComponentCondition": newPropertySet("error", "status"),
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReviewStatus": newPropertySet("error"),
|
||||
"io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReviewStatus": newPropertySet("error"),
|
||||
|
||||
// Has both status and a property with type
|
||||
// `io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta`.
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Status": newPropertySet("status", "metadata"),
|
||||
|
||||
// Misc.
|
||||
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetSpec": newPropertySet("templateGeneration"),
|
||||
},
|
||||
kSource: `local k8s = import "k8s.libsonnet";
|
||||
|
||||
local apps = k8s.apps;
|
||||
local core = k8s.core;
|
||||
local extensions = k8s.extensions;
|
||||
|
||||
local hidden = {
|
||||
mapContainers(f):: {
|
||||
local podContainers = super.spec.template.spec.containers,
|
||||
spec+: {
|
||||
template+: {
|
||||
spec+: {
|
||||
// IMPORTANT: This overwrites the 'containers' field
|
||||
// for this deployment.
|
||||
containers: std.map(f, podContainers),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
mapContainersWithName(names, f) ::
|
||||
local nameSet =
|
||||
if std.type(names) == "array"
|
||||
then std.set(names)
|
||||
else std.set([names]);
|
||||
local inNameSet(name) = std.length(std.setInter(nameSet, std.set([name]))) > 0;
|
||||
self.mapContainers(
|
||||
function(c)
|
||||
if std.objectHas(c, "name") && inNameSet(c.name)
|
||||
then f(c)
|
||||
else c
|
||||
),
|
||||
};
|
||||
|
||||
k8s + {
|
||||
apps:: apps + {
|
||||
v1beta1:: apps.v1beta1 + {
|
||||
local v1beta1 = apps.v1beta1,
|
||||
|
||||
daemonSet:: v1beta1.daemonSet + {
|
||||
mapContainers(f):: hidden.mapContainers(f),
|
||||
mapContainersWithName(names, f):: hidden.mapContainersWithName(names, f),
|
||||
},
|
||||
|
||||
deployment:: v1beta1.deployment + {
|
||||
mapContainers(f):: hidden.mapContainers(f),
|
||||
mapContainersWithName(names, f):: hidden.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
core:: core + {
|
||||
v1:: core.v1 + {
|
||||
list:: {
|
||||
new(items)::
|
||||
{apiVersion: "v1"} +
|
||||
{kind: "List"} +
|
||||
self.items(items),
|
||||
|
||||
items(items):: if std.type(items) == "array" then {items+: items} else {items+: [items]},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
extensions:: extensions + {
|
||||
v1beta1:: extensions.v1beta1 + {
|
||||
local v1beta1 = extensions.v1beta1,
|
||||
|
||||
daemonSet:: v1beta1.daemonSet + {
|
||||
mapContainers(f):: hidden.mapContainers(f),
|
||||
mapContainersWithName(names, f):: hidden.mapContainersWithName(names, f),
|
||||
},
|
||||
|
||||
deployment:: v1beta1.deployment + {
|
||||
mapContainers(f):: hidden.mapContainers(f),
|
||||
mapContainersWithName(names, f):: hidden.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
`,
|
||||
},
|
||||
"v1.8.0": versionData{
|
||||
beta: true,
|
||||
idAliases: map[string]string{
|
||||
// Properties of objects. Stuff like `cinder.volumeId`.
|
||||
"hostIPC": "hostIpc",
|
||||
"hostPID": "hostPid",
|
||||
"targetCPUUtilizationPercentage": "targetCpuUtilizationPercentage",
|
||||
"externalID": "externalId",
|
||||
"podCIDR": "podCidr",
|
||||
"providerID": "providerId",
|
||||
"bootID": "bootId",
|
||||
"machineID": "machineId",
|
||||
"systemUUID": "systemUuid",
|
||||
"volumeID": "volumeId",
|
||||
"diskURI": "diskUri",
|
||||
"targetWWNs": "targetWwns",
|
||||
"datasetUUID": "datasetUuid",
|
||||
"pdID": "pdId",
|
||||
"scaleIO": "scaleIo",
|
||||
"podIP": "podIp",
|
||||
"hostIP": "hostIp",
|
||||
"clusterIP": "clusterIp",
|
||||
"externalIPs": "externalIps",
|
||||
"loadBalancerIP": "loadBalancerIp",
|
||||
"containerID": "containerId",
|
||||
"imageID": "imageId",
|
||||
"serverAddressByClientCIDRs": "serverAddressByClientCidrs",
|
||||
"clientCIDR": "clientCidr",
|
||||
"nonResourceURLs": "nonResourceUrls",
|
||||
"currentCPUUtilizationPercentage": "currentCpuUtilizationPercentage",
|
||||
"downwardAPI": "downwardApi",
|
||||
"storagePolicyID": "storagePolicyId",
|
||||
"clientIP": "clientIp",
|
||||
"insecureSkipTLSVerify": "insecureSkipTlsVerify",
|
||||
"cephFSPersistentVolumeSource": "cephFsPersistentVolumeSource",
|
||||
"clientIPConfig": "clientIpConfig",
|
||||
"storageOSPersistentVolumeSource": "storageOsPersistentVolumeSource",
|
||||
"storageOSVolumeSource": "storageOsVolumeSource",
|
||||
|
||||
// Types. These have capitalized first letters, and exist in
|
||||
// places like `core.v1.AWSElasticBlockStoreVolumeSource`.
|
||||
"AWSElasticBlockStoreVolumeSource": "awsElasticBlockStoreVolumeSource",
|
||||
"CephFSVolumeSource": "cephFsVolumeSource",
|
||||
"DownwardAPIProjection": "downwardApiProjection",
|
||||
"DownwardAPIVolumeFile": "downwardApiVolumeFile",
|
||||
"DownwardAPIVolumeSource": "downwardApiVolumeSource",
|
||||
"FCVolumeSource": "fcVolumeSource",
|
||||
"GCEPersistentDiskVolumeSource": "gcePersistentDiskVolumeSource",
|
||||
"HTTPGetAction": "httpGetAction",
|
||||
"HTTPHeader": "httpHeader",
|
||||
"ISCSIVolumeSource": "iscsiVolumeSource",
|
||||
"NFSVolumeSource": "nfsVolumeSource",
|
||||
"RBDVolumeSource": "rbdVolumeSource",
|
||||
"SELinuxOptions": "seLinuxOptions",
|
||||
"ScaleIOVolumeSource": "scaleIoVolumeSource",
|
||||
"TCPSocketAction": "tcpSocketAction",
|
||||
"APIVersion": "apiVersion",
|
||||
"FSGroupStrategyOptions": "fsGroupStrategyOptions",
|
||||
"HTTPIngressPath": "httpIngressPath",
|
||||
"HTTPIngressRuleValue": "httpIngressRuleValue",
|
||||
"IDRange": "idRange",
|
||||
"IngressTLS": "ingressTls",
|
||||
"SELinuxStrategyOptions": "seLinuxStrategyOptions",
|
||||
"APIGroup": "apiGroup",
|
||||
"APIGroupList": "apiGroupList",
|
||||
"APIResource": "apiResource",
|
||||
"APIResourceList": "apiResourceList",
|
||||
"APIVersions": "apiVersions",
|
||||
"ServerAddressByClientCIDR": "serverAddressByClientCidr",
|
||||
"APIServiceCondition": "apiServiceCondition",
|
||||
"APIServiceList": "apiServiceList",
|
||||
"APIServiceSpec": "apiServiceSpec",
|
||||
"APIServiceStatus": "apiServiceStatus",
|
||||
"IPBlock": "ipBlock",
|
||||
"JSON": "json",
|
||||
"APIService": "apiService",
|
||||
|
||||
// Collisions with Jsonnet keywords.
|
||||
"local": "localStorage",
|
||||
},
|
||||
constructorSpecs: map[string][]CustomConstructorSpec{
|
||||
"io.k8s.api.admissionregistration.v1alpha1.ExternalAdmissionHookConfigurationList": objectList,
|
||||
"io.k8s.api.admissionregistration.v1alpha1.InitializerConfigurationList": objectList,
|
||||
|
||||
"io.k8s.api.apps.v1beta1.ControllerRevisionList": objectList,
|
||||
"io.k8s.api.apps.v1beta1.Deployment": deploymentCtor,
|
||||
"io.k8s.api.apps.v1beta1.DeploymentList": objectList,
|
||||
"io.k8s.api.apps.v1beta1.DeploymentRollback": deploymentRollbackCtor,
|
||||
"io.k8s.api.apps.v1beta1.Scale": scaleCtor,
|
||||
"io.k8s.api.apps.v1beta1.StatefulSet": statefulSetCtor,
|
||||
"io.k8s.api.apps.v1beta1.StatefulSetList": objectList,
|
||||
|
||||
"io.k8s.api.apps.v1beta2.ControllerRevisionList": objectList,
|
||||
"io.k8s.api.apps.v1beta2.DaemonSetList": objectList,
|
||||
"io.k8s.api.apps.v1beta2.Deployment": deploymentCtor,
|
||||
"io.k8s.api.apps.v1beta2.DeploymentList": objectList,
|
||||
"io.k8s.api.apps.v1beta2.ReplicaSetList": objectList,
|
||||
"io.k8s.api.apps.v1beta2.Scale": scaleCtor,
|
||||
"io.k8s.api.apps.v1beta2.StatefulSet": statefulSetCtor,
|
||||
"io.k8s.api.apps.v1beta2.StatefulSetList": objectList,
|
||||
|
||||
"io.k8s.api.authentication.v1.TokenReview": tokenReviewCtor,
|
||||
"io.k8s.api.authentication.v1beta1.TokenReview": tokenReviewCtor,
|
||||
|
||||
"io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList": objectList,
|
||||
"io.k8s.api.autoscaling.v1.Scale": scaleCtor,
|
||||
"io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerList": objectList,
|
||||
|
||||
"io.k8s.api.batch.v1.JobList": objectList,
|
||||
"io.k8s.api.batch.v1beta1.CronJobList": objectList,
|
||||
"io.k8s.api.batch.v2alpha1.CronJobList": objectList,
|
||||
|
||||
"io.k8s.api.certificates.v1beta1.CertificateSigningRequestList": objectList,
|
||||
|
||||
"io.k8s.api.extensions.v1beta1.DaemonSetList": objectList,
|
||||
"io.k8s.api.extensions.v1beta1.Deployment": deploymentCtor,
|
||||
"io.k8s.api.extensions.v1beta1.DeploymentList": objectList,
|
||||
"io.k8s.api.extensions.v1beta1.DeploymentRollback": deploymentRollbackCtor,
|
||||
"io.k8s.api.extensions.v1beta1.IngressList": objectList,
|
||||
"io.k8s.api.extensions.v1beta1.NetworkPolicyList": objectList,
|
||||
"io.k8s.api.extensions.v1beta1.PodSecurityPolicyList": objectList,
|
||||
"io.k8s.api.extensions.v1beta1.ReplicaSetList": objectList,
|
||||
"io.k8s.api.extensions.v1beta1.Scale": scaleCtor,
|
||||
|
||||
"io.k8s.api.networking.v1.NetworkPolicyList": objectList,
|
||||
|
||||
"io.k8s.api.policy.v1beta1.PodDisruptionBudgetList": objectList,
|
||||
|
||||
"io.k8s.api.rbac.v1.ClusterRoleBindingList": objectList,
|
||||
"io.k8s.api.rbac.v1.ClusterRoleList": objectList,
|
||||
"io.k8s.api.rbac.v1.RoleBindingList": objectList,
|
||||
"io.k8s.api.rbac.v1.RoleList": objectList,
|
||||
"io.k8s.api.rbac.v1beta1.ClusterRoleBindingList": objectList,
|
||||
"io.k8s.api.rbac.v1beta1.ClusterRoleList": objectList,
|
||||
"io.k8s.api.rbac.v1beta1.RoleBindingList": objectList,
|
||||
"io.k8s.api.rbac.v1beta1.RoleList": objectList,
|
||||
|
||||
"io.k8s.api.scheduling.v1alpha1.PriorityClassList": objectList,
|
||||
|
||||
"io.k8s.api.settings.v1alpha1.PodPresetList": objectList,
|
||||
|
||||
"io.k8s.api.storage.v1.StorageClassList": objectList,
|
||||
"io.k8s.api.storage.v1beta1.StorageClassList": objectList,
|
||||
|
||||
//
|
||||
// Core.
|
||||
//
|
||||
|
||||
"io.k8s.api.core.v1.ConfigMap": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName"),
|
||||
newParam("data")),
|
||||
},
|
||||
"io.k8s.api.core.v1.ConfigMapList": objectList,
|
||||
"io.k8s.api.core.v1.Container": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("name"), newParam("image")),
|
||||
},
|
||||
"io.k8s.api.core.v1.ContainerPort": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("containerPort")),
|
||||
newConstructor("newNamed", newParam("name"), newParam("containerPort")),
|
||||
},
|
||||
"io.k8s.api.core.v1.EndpointsList": objectList,
|
||||
"io.k8s.api.core.v1.EnvVar": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("name"), newParam("value")),
|
||||
newConstructor(
|
||||
"fromSecretRef",
|
||||
newParam("name"),
|
||||
newParamNestedRef("secretRefName", "mixin.valueFrom.secretKeyRef.withName"),
|
||||
newParamNestedRef("secretRefKey", "mixin.valueFrom.secretKeyRef.withKey")),
|
||||
newConstructor(
|
||||
"fromFieldPath",
|
||||
newParam("name"),
|
||||
newParamNestedRef("fieldPath", "mixin.valueFrom.fieldRef.withFieldPath")),
|
||||
},
|
||||
"io.k8s.api.core.v1.EventList": objectList,
|
||||
"io.k8s.api.core.v1.KeyToPath": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("key"), newParam("path")),
|
||||
},
|
||||
"io.k8s.api.core.v1.LimitRangeList": objectList,
|
||||
"io.k8s.api.core.v1.Namespace": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName")),
|
||||
},
|
||||
"io.k8s.api.core.v1.NamespaceList": objectList,
|
||||
"io.k8s.api.core.v1.NodeList": objectList,
|
||||
"io.k8s.api.core.v1.PersistentVolumeClaimList": objectList,
|
||||
"io.k8s.api.core.v1.PersistentVolumeList": objectList,
|
||||
"io.k8s.api.core.v1.PodList": objectList,
|
||||
"io.k8s.api.core.v1.PodTemplateList": objectList,
|
||||
"io.k8s.api.core.v1.ReplicationControllerList": objectList,
|
||||
"io.k8s.api.core.v1.ResourceQuotaList": objectList,
|
||||
"io.k8s.api.core.v1.Secret": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName"),
|
||||
newParam("data"),
|
||||
newParamWithDefault("type", "\"Opaque\"")),
|
||||
newConstructor(
|
||||
"fromString",
|
||||
newParamNestedRef("name", "mixin.metadata.withName"),
|
||||
newParam("stringData"),
|
||||
newParamWithDefault("type", "\"Opaque\"")),
|
||||
},
|
||||
"io.k8s.api.core.v1.SecretList": objectList,
|
||||
"io.k8s.api.core.v1.Service": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName"),
|
||||
newParamNestedRef("selector", "mixin.spec.withSelector"),
|
||||
newParamNestedRef("ports", "mixin.spec.withPorts")),
|
||||
},
|
||||
"io.k8s.api.core.v1.ServiceAccount": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName")),
|
||||
},
|
||||
"io.k8s.api.core.v1.ServiceAccountList": objectList,
|
||||
"io.k8s.api.core.v1.ServiceList": objectList,
|
||||
"io.k8s.api.core.v1.ServicePort": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("port"), newParam("targetPort")),
|
||||
newConstructor("newNamed", newParam("name"), newParam("port"), newParam("targetPort")),
|
||||
},
|
||||
"io.k8s.api.core.v1.Volume": []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"fromConfigMap",
|
||||
newParam("name"),
|
||||
newParamNestedRef("configMapName", "mixin.configMap.withName"),
|
||||
newParamNestedRef("configMapItems", "mixin.configMap.withItems")),
|
||||
newConstructor(
|
||||
"fromEmptyDir",
|
||||
newParam("name"),
|
||||
newParamNestedRefDefault("emptyDir", "mixin.emptyDir.mixinInstance", "{}")),
|
||||
newConstructor(
|
||||
"fromPersistentVolumeClaim",
|
||||
newParam("name"),
|
||||
newParamNestedRef("claimName", "mixin.persistentVolumeClaim.withClaimName")),
|
||||
newConstructor(
|
||||
"fromHostPath",
|
||||
newParam("name"),
|
||||
newParamNestedRef("hostPath", "mixin.hostPath.withPath")),
|
||||
newConstructor(
|
||||
"fromSecret",
|
||||
newParam("name"),
|
||||
newParamNestedRef("secretName", "mixin.secret.withSecretName")),
|
||||
},
|
||||
"io.k8s.api.core.v1.VolumeMount": []CustomConstructorSpec{
|
||||
newConstructor("new", newParam("name"), newParam("mountPath"), newParamWithDefault("readOnly", "false")),
|
||||
},
|
||||
},
|
||||
idBlacklist: map[string]interface{}{
|
||||
"io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionSpec": false,
|
||||
"io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinition": false,
|
||||
"io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceValidation": false,
|
||||
},
|
||||
propertyBlacklist: map[string]propertySet{
|
||||
// Metadata fields.
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": newPropertySet(
|
||||
"creationTimestamp", "deletionTimestamp", "generation",
|
||||
"ownerReferences", "resourceVersion", "selfLink", "uid",
|
||||
),
|
||||
|
||||
// Fields whose types are
|
||||
// `io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta`.
|
||||
"io.k8s.api.admissionregistration.v1alpha1.ExternalAdmissionHookConfigurationList": newPropertySet("metadata"),
|
||||
"io.k8s.api.admissionregistration.v1alpha1.InitializerConfigurationList": newPropertySet("metadata"),
|
||||
"io.k8s.api.apps.v1beta1.ControllerRevisionList": newPropertySet("metadata"),
|
||||
"io.k8s.api.apps.v1beta1.DeploymentList": newPropertySet("metadata"),
|
||||
"io.k8s.api.apps.v1beta1.StatefulSetList": newPropertySet("metadata"),
|
||||
"io.k8s.api.apps.v1beta2.ControllerRevisionList": newPropertySet("metadata"),
|
||||
"io.k8s.api.apps.v1beta2.DaemonSetList": newPropertySet("metadata"),
|
||||
"io.k8s.api.apps.v1beta2.DeploymentList": newPropertySet("metadata"),
|
||||
"io.k8s.api.apps.v1beta2.ReplicaSetList": newPropertySet("metadata"),
|
||||
"io.k8s.api.apps.v1beta2.StatefulSetList": newPropertySet("metadata"),
|
||||
"io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList": newPropertySet("metadata"),
|
||||
"io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerList": newPropertySet("metadata"),
|
||||
"io.k8s.api.batch.v1.JobList": newPropertySet("metadata"),
|
||||
"io.k8s.api.batch.v1beta1.CronJobList": newPropertySet("metadata"),
|
||||
"io.k8s.api.batch.v2alpha1.CronJobList": newPropertySet("metadata"),
|
||||
"io.k8s.api.certificates.v1beta1.CertificateSigningRequestList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.ComponentStatusList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.ConfigMapList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.EndpointsList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.EventList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.LimitRangeList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.NamespaceList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.NodeList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.PersistentVolumeClaimList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.PersistentVolumeList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.PodList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.PodTemplateList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.ReplicationControllerList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.ResourceQuotaList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.SecretList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.ServiceAccountList": newPropertySet("metadata"),
|
||||
"io.k8s.api.core.v1.ServiceList": newPropertySet("metadata"),
|
||||
"io.k8s.api.extensions.v1beta1.DaemonSetList": newPropertySet("metadata"),
|
||||
"io.k8s.api.extensions.v1beta1.DeploymentList": newPropertySet("metadata"),
|
||||
"io.k8s.api.extensions.v1beta1.IngressList": newPropertySet("metadata"),
|
||||
"io.k8s.api.extensions.v1beta1.NetworkPolicyList": newPropertySet("metadata"),
|
||||
"io.k8s.api.extensions.v1beta1.PodSecurityPolicyList": newPropertySet("metadata"),
|
||||
"io.k8s.api.extensions.v1beta1.ReplicaSetList": newPropertySet("metadata"),
|
||||
"io.k8s.api.networking.v1.NetworkPolicyList": newPropertySet("metadata"),
|
||||
"io.k8s.api.policy.v1beta1.PodDisruptionBudgetList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1.ClusterRoleBindingList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1.ClusterRoleList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1.RoleBindingList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1.RoleList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1alpha1.ClusterRoleBindingList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1alpha1.ClusterRoleList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1alpha1.RoleBindingList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1alpha1.RoleList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1beta1.ClusterRoleBindingList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1beta1.ClusterRoleList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1beta1.RoleBindingList": newPropertySet("metadata"),
|
||||
"io.k8s.api.rbac.v1beta1.RoleList": newPropertySet("metadata"),
|
||||
"io.k8s.api.scheduling.v1alpha1.PriorityClassList": newPropertySet("metadata"),
|
||||
"io.k8s.api.settings.v1alpha1.PodPresetList": newPropertySet("metadata"),
|
||||
"io.k8s.api.storage.v1.StorageClassList": newPropertySet("metadata"),
|
||||
"io.k8s.api.storage.v1beta1.StorageClassList": newPropertySet("metadata"),
|
||||
"io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionList": newPropertySet("metadata"),
|
||||
"io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceList": newPropertySet("metadata"),
|
||||
|
||||
// Status fields.
|
||||
"io.k8s.api.apps.v1beta1.Deployment": newPropertySet("status"),
|
||||
"io.k8s.api.apps.v1beta1.DeploymentCondition": newPropertySet("status"),
|
||||
"io.k8s.api.apps.v1beta1.Scale": newPropertySet("status"),
|
||||
"io.k8s.api.apps.v1beta1.StatefulSet": newPropertySet("status"),
|
||||
"io.k8s.api.apps.v1beta2.DaemonSet": newPropertySet("status"),
|
||||
"io.k8s.api.apps.v1beta2.Deployment": newPropertySet("status"),
|
||||
"io.k8s.api.apps.v1beta2.DeploymentCondition": newPropertySet("status"),
|
||||
"io.k8s.api.apps.v1beta2.ReplicaSet": newPropertySet("status"),
|
||||
"io.k8s.api.apps.v1beta2.ReplicaSetCondition": newPropertySet("status"),
|
||||
"io.k8s.api.apps.v1beta2.Scale": newPropertySet("status"),
|
||||
"io.k8s.api.apps.v1beta2.StatefulSet": newPropertySet("status"),
|
||||
"io.k8s.api.authentication.v1.TokenReview": newPropertySet("status"),
|
||||
"io.k8s.api.authentication.v1beta1.TokenReview": newPropertySet("status"),
|
||||
"io.k8s.api.authorization.v1.LocalSubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.api.authorization.v1.SelfSubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.api.authorization.v1.SelfSubjectRulesReview": newPropertySet("status"),
|
||||
"io.k8s.api.authorization.v1.SubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.api.authorization.v1beta1.LocalSubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.api.authorization.v1beta1.SelfSubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.api.authorization.v1beta1.SelfSubjectRulesReview": newPropertySet("status"),
|
||||
"io.k8s.api.authorization.v1beta1.SubjectAccessReview": newPropertySet("status"),
|
||||
"io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler": newPropertySet("status"),
|
||||
"io.k8s.api.autoscaling.v1.Scale": newPropertySet("status"),
|
||||
"io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler": newPropertySet("status"),
|
||||
"io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition": newPropertySet("status"),
|
||||
"io.k8s.api.batch.v1.Job": newPropertySet("status"),
|
||||
"io.k8s.api.batch.v1.JobCondition": newPropertySet("status"),
|
||||
"io.k8s.api.batch.v1beta1.CronJob": newPropertySet("status"),
|
||||
"io.k8s.api.batch.v2alpha1.CronJob": newPropertySet("status"),
|
||||
"io.k8s.api.certificates.v1beta1.CertificateSigningRequest": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.ComponentCondition": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.Namespace": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.Node": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.NodeCondition": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.PersistentVolume": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.PersistentVolumeClaim": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.PersistentVolumeClaimCondition": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.Pod": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.PodCondition": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.ReplicationController": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.ReplicationControllerCondition": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.ResourceQuota": newPropertySet("status"),
|
||||
"io.k8s.api.core.v1.Service": newPropertySet("status"),
|
||||
"io.k8s.api.extensions.v1beta1.DaemonSet": newPropertySet("status"),
|
||||
"io.k8s.api.extensions.v1beta1.Deployment": newPropertySet("status"),
|
||||
"io.k8s.api.extensions.v1beta1.DeploymentCondition": newPropertySet("status"),
|
||||
"io.k8s.api.extensions.v1beta1.Ingress": newPropertySet("status"),
|
||||
"io.k8s.api.extensions.v1beta1.ReplicaSet": newPropertySet("status"),
|
||||
"io.k8s.api.extensions.v1beta1.ReplicaSetCondition": newPropertySet("status"),
|
||||
"io.k8s.api.extensions.v1beta1.Scale": newPropertySet("status"),
|
||||
"io.k8s.api.policy.v1beta1.PodDisruptionBudget": newPropertySet("status"),
|
||||
"io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinition": newPropertySet("status"),
|
||||
"io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionCondition": newPropertySet("status"),
|
||||
"io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService": newPropertySet("status"),
|
||||
"io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceCondition": newPropertySet("status"),
|
||||
|
||||
// Has both status and a property with type
|
||||
// `io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta`.
|
||||
"io.k8s.apimachinery.pkg.apis.meta.v1.Status": newPropertySet("status", "metadata"),
|
||||
|
||||
// Misc.
|
||||
"io.k8s.api.extensions.v1beta1.DaemonSetSpec": newPropertySet("templateGeneration"),
|
||||
},
|
||||
kSource: `local k8s = import "k8s.libsonnet";
|
||||
|
||||
local apps = k8s.apps;
|
||||
local core = k8s.core;
|
||||
local extensions = k8s.extensions;
|
||||
|
||||
local hidden = {
|
||||
mapContainers(f):: {
|
||||
local podContainers = super.spec.template.spec.containers,
|
||||
spec+: {
|
||||
template+: {
|
||||
spec+: {
|
||||
// IMPORTANT: This overwrites the 'containers' field
|
||||
// for this deployment.
|
||||
containers: std.map(f, podContainers),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
mapContainersWithName(names, f) ::
|
||||
local nameSet =
|
||||
if std.type(names) == "array"
|
||||
then std.set(names)
|
||||
else std.set([names]);
|
||||
local inNameSet(name) = std.length(std.setInter(nameSet, std.set([name]))) > 0;
|
||||
self.mapContainers(
|
||||
function(c)
|
||||
if std.objectHas(c, "name") && inNameSet(c.name)
|
||||
then f(c)
|
||||
else c
|
||||
),
|
||||
};
|
||||
|
||||
k8s + {
|
||||
apps:: apps + {
|
||||
v1beta1:: apps.v1beta1 + {
|
||||
local v1beta1 = apps.v1beta1,
|
||||
|
||||
daemonSet:: v1beta1.daemonSet + {
|
||||
mapContainers(f):: hidden.mapContainers(f),
|
||||
mapContainersWithName(names, f):: hidden.mapContainersWithName(names, f),
|
||||
},
|
||||
|
||||
deployment:: v1beta1.deployment + {
|
||||
mapContainers(f):: hidden.mapContainers(f),
|
||||
mapContainersWithName(names, f):: hidden.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
core:: core + {
|
||||
v1:: core.v1 + {
|
||||
list:: {
|
||||
new(items)::
|
||||
{apiVersion: "v1"} +
|
||||
{kind: "List"} +
|
||||
self.items(items),
|
||||
|
||||
items(items):: if std.type(items) == "array" then {items+: items} else {items+: [items]},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
extensions:: extensions + {
|
||||
v1beta1:: extensions.v1beta1 + {
|
||||
local v1beta1 = extensions.v1beta1,
|
||||
|
||||
daemonSet:: v1beta1.daemonSet + {
|
||||
mapContainers(f):: hidden.mapContainers(f),
|
||||
mapContainersWithName(names, f):: hidden.mapContainersWithName(names, f),
|
||||
},
|
||||
|
||||
deployment:: v1beta1.deployment + {
|
||||
mapContainers(f):: hidden.mapContainers(f),
|
||||
mapContainersWithName(names, f):: hidden.mapContainersWithName(names, f),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Utility specs, for duplicated objects.
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
var objectList = []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParam("items")),
|
||||
}
|
||||
var deploymentCtor = []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName"),
|
||||
newParamNestedRef("replicas", "mixin.spec.withReplicas"),
|
||||
newParamNestedRef("containers", "mixin.spec.template.spec.withContainers"),
|
||||
newParamNestedRefDefault(
|
||||
"podLabels",
|
||||
"mixin.spec.template.metadata.withLabels",
|
||||
"{app: name}")),
|
||||
}
|
||||
var deploymentRollbackCtor = []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParam("name")),
|
||||
}
|
||||
var scaleCtor = []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("replicas", "mixin.spec.withReplicas")),
|
||||
}
|
||||
var statefulSetCtor = []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("name", "mixin.metadata.withName"),
|
||||
newParamNestedRef("replicas", "mixin.spec.withReplicas"),
|
||||
newParamNestedRef("containers", "mixin.spec.template.spec.withContainers"),
|
||||
newParamNestedRef("volumeClaims", "mixin.spec.withVolumeClaimTemplates"),
|
||||
newParamNestedRefDefault(
|
||||
"podLabels",
|
||||
"mixin.spec.template.metadata.withLabels",
|
||||
"{app: name}")),
|
||||
}
|
||||
|
||||
var tokenReviewCtor = []CustomConstructorSpec{
|
||||
newConstructor(
|
||||
"new",
|
||||
newParamNestedRef("token", "mixin.spec.withToken")),
|
||||
}
|
242
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubeversion/version.go
generated
vendored
Normal file
242
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubeversion/version.go
generated
vendored
Normal file
|
@ -0,0 +1,242 @@
|
|||
// Package kubeversion contains a collection of helper methods that
|
||||
// help to customize the code generated for ksonnet-lib to suit
|
||||
// different Kubernetes versions.
|
||||
//
|
||||
// For example, we may choose not to emit certain properties for some
|
||||
// objects in Kubernetes v1.7.0; or, we might want to rename a
|
||||
// property method. This package contains both the helper methods that
|
||||
// perform such transformations, as well as the data for the
|
||||
// transformations we use for each version.
|
||||
package kubeversion
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec"
|
||||
)
|
||||
|
||||
// KSource returns the source of `k.libsonnet` for a specific version
|
||||
// of Kubernetes.
|
||||
func KSource(k8sVersion string) string {
|
||||
var verStrs []string
|
||||
for k := range versions {
|
||||
verStrs = append(verStrs, k)
|
||||
}
|
||||
|
||||
verData, ok := versions[k8sVersion]
|
||||
if !ok {
|
||||
log.Fatalf("Unrecognized Kubernetes version %q. Currently accepts %q",
|
||||
k8sVersion, strings.Join(verStrs, ", "))
|
||||
}
|
||||
|
||||
return verData.kSource
|
||||
}
|
||||
|
||||
// Beta returns the beta status of the version.
|
||||
func Beta(k8sVersion string) bool {
|
||||
k8sVersion = strings.TrimLeft(k8sVersion, "v")
|
||||
ver := strings.Split(k8sVersion, ".")
|
||||
if len(ver) >= 2 {
|
||||
if ver[0] == "1" {
|
||||
if ver[1] == "8" {
|
||||
k8sVersion = "v1.8.0"
|
||||
} else if ver[1] == "7" {
|
||||
k8sVersion = "v1.7.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
verData, ok := versions[k8sVersion]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return verData.beta
|
||||
}
|
||||
|
||||
// MapIdentifier takes a text identifier and maps it to a
|
||||
// Jsonnet-appropriate identifier, for some version of Kubernetes. For
|
||||
// example, in Kubernetes v1.7.0, we might map `clusterIP` ->
|
||||
// `clusterIp`.
|
||||
func MapIdentifier(k8sVersion, id string) string {
|
||||
verData, ok := versions[k8sVersion]
|
||||
if !ok {
|
||||
log.Fatalf("Unrecognized Kubernetes version '%s'", k8sVersion)
|
||||
}
|
||||
|
||||
if alias, ok := verData.idAliases[id]; ok {
|
||||
return alias
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// IsBlacklistedProperty taks a definition name (e.g.,
|
||||
// `io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment`), a property
|
||||
// name (e.g., `status`), and reports whether it is blacklisted for
|
||||
// some Kubernetes version. This is particularly useful when deciding
|
||||
// whether or not to generate mixins and property methods for a given
|
||||
// property (as we likely wouldn't in the case of, say, `status`).
|
||||
func IsBlacklistedID(k8sVersion string, path kubespec.DefinitionName) bool {
|
||||
verData, ok := versions[k8sVersion]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
_, ok = verData.idBlacklist[string(path)]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsBlacklistedProperty taks a definition name (e.g.,
|
||||
// `io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment`), a property
|
||||
// name (e.g., `status`), and reports whether it is blacklisted for
|
||||
// some Kubernetes version. This is particularly useful when deciding
|
||||
// whether or not to generate mixins and property methods for a given
|
||||
// property (as we likely wouldn't in the case of, say, `status`).
|
||||
func IsBlacklistedProperty(
|
||||
k8sVersion string, path kubespec.DefinitionName,
|
||||
propertyName kubespec.PropertyName,
|
||||
) bool {
|
||||
verData, ok := versions[k8sVersion]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
bl, ok := verData.propertyBlacklist[string(path)]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
_, ok = bl[string(propertyName)]
|
||||
return ok
|
||||
}
|
||||
|
||||
func ConstructorSpec(
|
||||
k8sVersion string, path kubespec.DefinitionName,
|
||||
) ([]CustomConstructorSpec, bool) {
|
||||
verData, ok := versions[k8sVersion]
|
||||
if !ok {
|
||||
log.Fatalf("Unrecognized Kubernetes version '%s'", k8sVersion)
|
||||
}
|
||||
|
||||
spec, ok := verData.constructorSpecs[string(path)]
|
||||
return spec, ok
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Core data structures for specifying version information.
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
type versionData struct {
|
||||
idAliases map[string]string
|
||||
constructorSpecs map[string][]CustomConstructorSpec
|
||||
idBlacklist map[string]interface{}
|
||||
propertyBlacklist map[string]propertySet
|
||||
kSource string
|
||||
beta bool
|
||||
}
|
||||
|
||||
type propertySet map[string]bool
|
||||
|
||||
func newPropertySet(strings ...string) propertySet {
|
||||
ps := make(propertySet)
|
||||
for _, s := range strings {
|
||||
ps[s] = true
|
||||
}
|
||||
|
||||
return ps
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Public Data structures for specifying custom constructors for API
|
||||
// objects.
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// CustomConstructorSpec specifies a custom constructor for
|
||||
// `ksonnet-gen` to emit as part of ksonnet-lib. In particular, this
|
||||
// specifies a constructor of the form:
|
||||
//
|
||||
// foo(bar, baz):: self.bar(bar) + self.baz(baz)
|
||||
//
|
||||
// The parameter list and the body are all generated from the `Params`
|
||||
// field.
|
||||
//
|
||||
// DESIGN NOTES:
|
||||
//
|
||||
// * If the user specifies a custom constructor, we will not emit the
|
||||
// default zero-argument constructor, `new()`. This is a purposeful
|
||||
// decision which we make because we are typically customizing the
|
||||
// constructors precisely because the zero-argument constructor is
|
||||
// not meaninful for a given API object.
|
||||
// * We currently do not check that parameter names are unique.
|
||||
// Duplicate identifiers in a parameter list results in a Jsonnet
|
||||
// compiler error, though, so this should be caught by review and
|
||||
// CI, and it is hence not important for this case to be covered by
|
||||
// this code.
|
||||
type CustomConstructorSpec struct {
|
||||
ID string
|
||||
Params []CustomConstructorParam
|
||||
}
|
||||
|
||||
func newConstructor(
|
||||
id string, params ...CustomConstructorParam,
|
||||
) CustomConstructorSpec {
|
||||
return CustomConstructorSpec{
|
||||
ID: id,
|
||||
Params: params,
|
||||
}
|
||||
}
|
||||
|
||||
// CustomConstructorParam specifies a parameter for a
|
||||
// `CustomConstructorSpec`. This class allows users to specify
|
||||
// constructors of various forms, including:
|
||||
//
|
||||
// * The "normal" form, e.g., `foo(bar):: self.bar(bar)`,
|
||||
// * Parameters with default values, e.g., `foo(bar="baz")::
|
||||
// self.bar(bar)`, and
|
||||
// * Parameters that are nested inside the object, e.g., `foo(bar)::
|
||||
// self.baz.bat.bar(bar)`
|
||||
//
|
||||
// DESIGN NOTES:
|
||||
//
|
||||
// * For constructors that use nested paths, we do not currently check
|
||||
// that the path is valid. So for example, `self.baz.bat.bar` in the
|
||||
// example above may not correspond to a real property. We make this
|
||||
// decision because it complicates the code, and it doesn't seem
|
||||
// worth it since this feature is used relatively rarely.
|
||||
type CustomConstructorParam struct {
|
||||
ID string
|
||||
DefaultValue *string
|
||||
RelativePath *string
|
||||
}
|
||||
|
||||
func newParam(name string) CustomConstructorParam {
|
||||
return CustomConstructorParam{
|
||||
ID: name,
|
||||
DefaultValue: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func newParamWithDefault(name, def string) CustomConstructorParam {
|
||||
return CustomConstructorParam{
|
||||
ID: name,
|
||||
DefaultValue: &def,
|
||||
}
|
||||
}
|
||||
|
||||
func newParamNestedRef(name, relativePath string) CustomConstructorParam {
|
||||
return CustomConstructorParam{
|
||||
ID: name,
|
||||
RelativePath: &relativePath,
|
||||
}
|
||||
}
|
||||
|
||||
func newParamNestedRefDefault(
|
||||
name, relativePath, def string,
|
||||
) CustomConstructorParam {
|
||||
return CustomConstructorParam{
|
||||
ID: name,
|
||||
RelativePath: &relativePath,
|
||||
DefaultValue: &def,
|
||||
}
|
||||
}
|
29
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubeversion/version_test.go
generated
vendored
Normal file
29
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubeversion/version_test.go
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
|||
package kubeversion
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestBeta(t *testing.T) {
|
||||
cases := []struct {
|
||||
expected bool
|
||||
in string
|
||||
}{
|
||||
{expected: true, in: "1.8.0"},
|
||||
{expected: true, in: "v1.8.0"},
|
||||
{expected: true, in: "1.8"},
|
||||
{expected: true, in: "1.8.4"},
|
||||
{expected: true, in: "1.8.5"},
|
||||
{expected: false, in: "1.7"},
|
||||
{expected: false, in: "1.9.0"},
|
||||
{expected: false, in: "1.6.0"},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.in, func(t *testing.T) {
|
||||
b := Beta(tc.in)
|
||||
if b != tc.expected {
|
||||
t.Errorf("Beta() got %v; expected %v", b, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
88
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/main.go
generated
vendored
Normal file
88
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/main.go
generated
vendored
Normal file
|
@ -0,0 +1,88 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/ksonnet"
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/kubespec"
|
||||
)
|
||||
|
||||
var usage = "Usage: ksonnet-gen [path to k8s OpenAPI swagger.json] [output dir]"
|
||||
|
||||
func main() {
|
||||
if len(os.Args) != 3 {
|
||||
log.Fatal(usage)
|
||||
}
|
||||
|
||||
swaggerPath := os.Args[1]
|
||||
text, err := ioutil.ReadFile(swaggerPath)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not read file at '%s':\n%v", swaggerPath, err)
|
||||
}
|
||||
|
||||
// Deserialize the API object.
|
||||
s := kubespec.APISpec{}
|
||||
err = json.Unmarshal(text, &s)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not deserialize schema:\n%v", err)
|
||||
}
|
||||
s.Text = text
|
||||
s.FilePath = filepath.Dir(swaggerPath)
|
||||
|
||||
// Emit Jsonnet code.
|
||||
ksonnetLibSHA := getSHARevision(".")
|
||||
k8sSHA := getSHARevision(s.FilePath)
|
||||
kBytes, k8sBytes, err := ksonnet.Emit(&s, &ksonnetLibSHA, &k8sSHA)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not write ksonnet library:\n%v", err)
|
||||
}
|
||||
|
||||
// Write out.
|
||||
k8sOutfile := fmt.Sprintf("%s/%s", os.Args[2], "k8s.libsonnet")
|
||||
err = ioutil.WriteFile(k8sOutfile, k8sBytes, 0644)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not write `k8s.libsonnet`:\n%v", err)
|
||||
}
|
||||
|
||||
kOutfile := fmt.Sprintf("%s/%s", os.Args[2], "k.libsonnet")
|
||||
err = ioutil.WriteFile(kOutfile, kBytes, 0644)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not write `k.libsonnet`:\n%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func getSHARevision(dir string) string {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatalf("Could get working directory:\n%v", err)
|
||||
}
|
||||
|
||||
err = os.Chdir(dir)
|
||||
if err != nil {
|
||||
log.Fatalf("Could cd to directory of repository at '%s':\n%v", dir, err)
|
||||
}
|
||||
|
||||
sha, err := exec.Command("sh", "-c", "git rev-parse HEAD").Output()
|
||||
if err != nil {
|
||||
log.Fatalf("Could not find SHA of HEAD:\n%v", err)
|
||||
}
|
||||
|
||||
err = os.Chdir(cwd)
|
||||
if err != nil {
|
||||
log.Fatalf("Could cd back to current directory '%s':\n%v", cwd, err)
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(sha))
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Get rid of time in logs.
|
||||
log.SetFlags(0)
|
||||
}
|
974
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker/nodemaker.go
generated
vendored
Normal file
974
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker/nodemaker.go
generated
vendored
Normal file
|
@ -0,0 +1,974 @@
|
|||
package nodemaker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-jsonnet/ast"
|
||||
"github.com/ksonnet/ksonnet-lib/ksonnet-gen/astext"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Noder is an entity that can be converted to a jsonnet node.
|
||||
type Noder interface {
|
||||
Node() ast.Node
|
||||
}
|
||||
|
||||
type field struct {
|
||||
key Key
|
||||
value Noder
|
||||
}
|
||||
|
||||
// ObjectOptOneline is a functional option which sets the object's oneline status.
|
||||
func ObjectOptOneline(oneline bool) ObjectOpt {
|
||||
return func(o *Object) {
|
||||
o.Oneline = oneline
|
||||
}
|
||||
}
|
||||
|
||||
// ObjectOpt is a functional option for Object.
|
||||
type ObjectOpt func(*Object)
|
||||
|
||||
// Object is an item that can have multiple keys with values.
|
||||
type Object struct {
|
||||
Oneline bool
|
||||
fields map[string]Noder
|
||||
keys map[string]Key
|
||||
keyList []string
|
||||
}
|
||||
|
||||
var _ Noder = (*Object)(nil)
|
||||
|
||||
// KVFromMap creates a object using a map.
|
||||
func KVFromMap(m map[string]interface{}) (*Object, error) {
|
||||
if m == nil {
|
||||
return nil, errors.New("map is nil")
|
||||
}
|
||||
|
||||
var names []string
|
||||
for name := range m {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
o := NewObject()
|
||||
|
||||
for _, name := range names {
|
||||
child, err := ValueToNoder(m[name])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "convert value to noder")
|
||||
}
|
||||
|
||||
o.Set(InheritedKey(name), child)
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// ValueToNoder converts a value to a Noder.
|
||||
func ValueToNoder(v interface{}) (Noder, error) {
|
||||
if v == nil {
|
||||
return nil, errors.New("value is nil")
|
||||
}
|
||||
|
||||
switch t := v.(type) {
|
||||
case string, float64, int, bool:
|
||||
return convertValueToNoder(t)
|
||||
case []interface{}:
|
||||
var elements []Noder
|
||||
for _, val := range t {
|
||||
noder, err := convertValueToNoder(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
elements = append(elements, noder)
|
||||
}
|
||||
array := NewArray(elements)
|
||||
return array, nil
|
||||
case map[interface{}]interface{}:
|
||||
newMap, err := convertMapToStringKey(t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return KVFromMap(newMap)
|
||||
case map[string]interface{}:
|
||||
return KVFromMap(t)
|
||||
default:
|
||||
return nil, errors.Errorf("unsupported type %T", t)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func convertMapToStringKey(m map[interface{}]interface{}) (map[string]interface{}, error) {
|
||||
newMap := make(map[string]interface{})
|
||||
for k := range m {
|
||||
s, ok := k.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("map key is not a string")
|
||||
}
|
||||
|
||||
newMap[s] = m[s]
|
||||
}
|
||||
|
||||
return newMap, nil
|
||||
}
|
||||
|
||||
func convertValueToNoder(val interface{}) (Noder, error) {
|
||||
switch t := val.(type) {
|
||||
case string:
|
||||
return NewStringDouble(t), nil
|
||||
case float64:
|
||||
return NewFloat(t), nil
|
||||
case int:
|
||||
return NewInt(t), nil
|
||||
case bool:
|
||||
return NewBoolean(t), nil
|
||||
case map[string]interface{}:
|
||||
return ValueToNoder(t)
|
||||
default:
|
||||
return nil, errors.Errorf("unsupported type %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
// NewObject creates an Object. ObjectOpt functional arguments can be used to configure the
|
||||
// newly generated key.
|
||||
func NewObject(opts ...ObjectOpt) *Object {
|
||||
o := &Object{
|
||||
fields: make(map[string]Noder),
|
||||
keys: make(map[string]Key),
|
||||
keyList: make([]string, 0),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(o)
|
||||
}
|
||||
|
||||
return o
|
||||
}
|
||||
|
||||
// OnelineObject is a convenience method for creating a online object.
|
||||
func OnelineObject(opts ...ObjectOpt) *Object {
|
||||
opts = append(opts, ObjectOptOneline(true))
|
||||
return NewObject(opts...)
|
||||
}
|
||||
|
||||
// Set sets a field with a value.
|
||||
func (o *Object) Set(key Key, value Noder) error {
|
||||
name := key.name
|
||||
|
||||
if _, ok := o.keys[name]; ok {
|
||||
return errors.Errorf("field %q already exists in the object", name)
|
||||
}
|
||||
|
||||
o.keys[name] = key
|
||||
o.fields[name] = value
|
||||
o.keyList = append(o.keyList, name)
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// Get retrieves a field by name.
|
||||
func (o *Object) Get(keyName string) Noder {
|
||||
return o.fields[keyName]
|
||||
}
|
||||
|
||||
// Keys returns a slice of keys in the object.
|
||||
func (o *Object) Keys() []Key {
|
||||
var keys []Key
|
||||
|
||||
for _, name := range o.keyList {
|
||||
keys = append(keys, o.keys[name])
|
||||
}
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
var (
|
||||
reField = regexp.MustCompile(`^[A-Za-z]+[A-Za-z0-9]*$`)
|
||||
)
|
||||
|
||||
// Node converts the object to a jsonnet node.
|
||||
func (o *Object) Node() ast.Node {
|
||||
ao := &astext.Object{
|
||||
Oneline: o.Oneline,
|
||||
}
|
||||
|
||||
for _, name := range o.keyList {
|
||||
k := o.keys[name]
|
||||
v := o.fields[name]
|
||||
|
||||
of := astext.ObjectField{
|
||||
Comment: o.generateComment(k.comment),
|
||||
}
|
||||
|
||||
if k.category == ast.ObjectLocal {
|
||||
of.Id = newIdentifier(name)
|
||||
of.Kind = k.category
|
||||
} else if stringInSlice(name, jsonnetReservedWords) {
|
||||
of.Expr1 = NewStringDouble(name).Node()
|
||||
of.Kind = ast.ObjectFieldStr
|
||||
} else if reField.MatchString(name) {
|
||||
id := ast.Identifier(name)
|
||||
of.Kind = ast.ObjectFieldID
|
||||
of.Id = &id
|
||||
} else {
|
||||
of.Expr1 = NewStringDouble(name).Node()
|
||||
of.Kind = ast.ObjectFieldStr
|
||||
}
|
||||
|
||||
of.Hide = k.visibility
|
||||
of.Expr2 = v.Node()
|
||||
of.Method = k.Method()
|
||||
of.SuperSugar = k.Mixin()
|
||||
|
||||
ao.Fields = append(ao.Fields, of)
|
||||
}
|
||||
|
||||
return ao
|
||||
}
|
||||
|
||||
func (o *Object) generateComment(text string) *astext.Comment {
|
||||
if text != "" {
|
||||
return &astext.Comment{Text: text}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Boolean is a boolean.
|
||||
type Boolean struct {
|
||||
value bool
|
||||
}
|
||||
|
||||
// NewBoolean creates an instance of Boolean.
|
||||
func NewBoolean(value bool) *Boolean {
|
||||
return &Boolean{
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
// Node converts Boolean to a jsonnet node.
|
||||
func (b *Boolean) Node() ast.Node {
|
||||
return &ast.LiteralBoolean{
|
||||
Value: b.value,
|
||||
}
|
||||
}
|
||||
|
||||
// StringDouble is double quoted string.
|
||||
type StringDouble struct {
|
||||
text string
|
||||
}
|
||||
|
||||
// NewStringDouble creates an instance of StringDouble.
|
||||
func NewStringDouble(text string) *StringDouble {
|
||||
return &StringDouble{
|
||||
text: text,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *StringDouble) node() *ast.LiteralString {
|
||||
return &ast.LiteralString{
|
||||
Kind: ast.StringDouble,
|
||||
Value: t.text,
|
||||
}
|
||||
}
|
||||
|
||||
// Node converts the StringDouble to a jsonnet node.
|
||||
func (t *StringDouble) Node() ast.Node {
|
||||
return t.node()
|
||||
}
|
||||
|
||||
// Number is an a number.
|
||||
type Number struct {
|
||||
number float64
|
||||
value string
|
||||
}
|
||||
|
||||
var _ Noder = (*Number)(nil)
|
||||
|
||||
// NewInt creates an integer number.
|
||||
func NewInt(i int) *Number {
|
||||
return &Number{
|
||||
number: float64(i),
|
||||
value: strconv.Itoa(i),
|
||||
}
|
||||
}
|
||||
|
||||
// NewFloat creates a float instance of a number.
|
||||
func NewFloat(f float64) *Number {
|
||||
return &Number{
|
||||
number: f,
|
||||
value: strconv.FormatFloat(f, 'f', -1, 64),
|
||||
}
|
||||
}
|
||||
|
||||
// Node converts the Number to a jsonnet node.
|
||||
func (t *Number) Node() ast.Node {
|
||||
return &ast.LiteralNumber{
|
||||
Value: t.number,
|
||||
OriginalString: t.value,
|
||||
}
|
||||
}
|
||||
|
||||
// Array is an an array.
|
||||
type Array struct {
|
||||
elements []Noder
|
||||
}
|
||||
|
||||
var _ Noder = (*Array)(nil)
|
||||
|
||||
// NewArray creates an instance of Array.
|
||||
func NewArray(elements []Noder) *Array {
|
||||
return &Array{
|
||||
elements: elements,
|
||||
}
|
||||
}
|
||||
|
||||
// Node converts the Array to a jsonnet node.
|
||||
func (t *Array) Node() ast.Node {
|
||||
var nodes []ast.Node
|
||||
for _, element := range t.elements {
|
||||
nodes = append(nodes, element.Node())
|
||||
}
|
||||
|
||||
return &ast.Array{
|
||||
Elements: nodes,
|
||||
}
|
||||
}
|
||||
|
||||
// KeyOptCategory is a functional option for setting key category
|
||||
func KeyOptCategory(kc ast.ObjectFieldKind) KeyOpt {
|
||||
return func(k *Key) {
|
||||
k.category = kc
|
||||
}
|
||||
}
|
||||
|
||||
// KeyOptVisibility is a functional option for setting key visibility
|
||||
func KeyOptVisibility(kv ast.ObjectFieldHide) KeyOpt {
|
||||
return func(k *Key) {
|
||||
k.visibility = kv
|
||||
}
|
||||
}
|
||||
|
||||
// KeyOptComment is a functional option for setting a comment on a key
|
||||
func KeyOptComment(text string) KeyOpt {
|
||||
return func(k *Key) {
|
||||
k.comment = text
|
||||
}
|
||||
}
|
||||
|
||||
// KeyOptMixin is a functional option for setting this key as a mixin
|
||||
func KeyOptMixin(b bool) KeyOpt {
|
||||
return func(k *Key) {
|
||||
k.mixin = b
|
||||
}
|
||||
}
|
||||
|
||||
// KeyOptParams is functional option for setting params for a key. If there are no required
|
||||
// parameters, pass an empty []string.
|
||||
func KeyOptParams(params []string) KeyOpt {
|
||||
return func(k *Key) {
|
||||
k.params = params
|
||||
}
|
||||
}
|
||||
|
||||
// KeyOptNamedParams is a functional option for setting named params for a key.
|
||||
func KeyOptNamedParams(params ...OptionalArg) KeyOpt {
|
||||
return func(k *Key) {
|
||||
k.namedParams = params
|
||||
}
|
||||
}
|
||||
|
||||
// KeyOpt is a functional option for configuring Key.
|
||||
type KeyOpt func(k *Key)
|
||||
|
||||
var (
|
||||
jsonnetReservedWords = []string{"assert", "else", "error", "false", "for", "function", "if",
|
||||
"import", "importstr", "in", "local", "null", "tailstrict", "then", "self", "super", "true"}
|
||||
)
|
||||
|
||||
// Key names a fields in an object.
|
||||
type Key struct {
|
||||
name string
|
||||
category ast.ObjectFieldKind
|
||||
visibility ast.ObjectFieldHide
|
||||
comment string
|
||||
params []string
|
||||
namedParams []OptionalArg
|
||||
mixin bool
|
||||
}
|
||||
|
||||
var (
|
||||
reStartsWithNonAlpha = regexp.MustCompile(`^[^A-Za-z]`)
|
||||
)
|
||||
|
||||
// NewKey creates an instance of Key. KeyOpt functional options can be used to configure the
|
||||
// newly generated key.
|
||||
func NewKey(name string, opts ...KeyOpt) Key {
|
||||
|
||||
category := ast.ObjectFieldID
|
||||
for _, s := range jsonnetReservedWords {
|
||||
if s == name {
|
||||
category = ast.ObjectFieldStr
|
||||
}
|
||||
}
|
||||
|
||||
if reStartsWithNonAlpha.Match([]byte(name)) {
|
||||
category = ast.ObjectFieldStr
|
||||
}
|
||||
|
||||
k := Key{
|
||||
name: name,
|
||||
category: category,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(&k)
|
||||
}
|
||||
|
||||
return k
|
||||
}
|
||||
|
||||
// InheritedKey is a convenience method for creating an inherited key.
|
||||
func InheritedKey(name string, opts ...KeyOpt) Key {
|
||||
opts = append(opts, KeyOptVisibility(ast.ObjectFieldInherit))
|
||||
return NewKey(name, opts...)
|
||||
}
|
||||
|
||||
// LocalKey is a convenience method for creating a local key.
|
||||
func LocalKey(name string, opts ...KeyOpt) Key {
|
||||
opts = append(opts, KeyOptCategory(ast.ObjectLocal))
|
||||
return NewKey(name, opts...)
|
||||
}
|
||||
|
||||
// FunctionKey is a convenience method for creating a function key.
|
||||
func FunctionKey(name string, args []string, opts ...KeyOpt) Key {
|
||||
opts = append(opts, KeyOptParams(args), KeyOptCategory(ast.ObjectFieldID))
|
||||
return NewKey(name, opts...)
|
||||
}
|
||||
|
||||
// Method returns the jsonnet AST object file method parameter.
|
||||
func (k *Key) Method() *ast.Function {
|
||||
if k.params == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
f := &ast.Function{
|
||||
Parameters: ast.Parameters{
|
||||
Required: ast.Identifiers{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, p := range k.params {
|
||||
f.Parameters.Required = append(f.Parameters.Required, *newIdentifier(p))
|
||||
}
|
||||
|
||||
for _, p := range k.namedParams {
|
||||
f.Parameters.Optional = append(f.Parameters.Optional, p.NamedParameter())
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// Mixin returns true if the jsonnet object should be super sugared.
|
||||
func (k Key) Mixin() bool {
|
||||
return k.mixin
|
||||
}
|
||||
|
||||
// BinaryOp is a binary operation.
|
||||
type BinaryOp string
|
||||
|
||||
const (
|
||||
// BopPlus is +
|
||||
BopPlus BinaryOp = "+"
|
||||
// BopEqual is ==
|
||||
BopEqual = "=="
|
||||
// BopGreater is >
|
||||
BopGreater = ">"
|
||||
// BopAnd is &&
|
||||
BopAnd = "&&"
|
||||
)
|
||||
|
||||
// Binary represents a binary operation
|
||||
type Binary struct {
|
||||
Left Noder
|
||||
Right Noder
|
||||
Op BinaryOp
|
||||
Chainer
|
||||
}
|
||||
|
||||
var _ Noder = (*Binary)(nil)
|
||||
|
||||
// NewBinary creates an instance of Binary.
|
||||
func NewBinary(left, right Noder, op BinaryOp) *Binary {
|
||||
return &Binary{
|
||||
Left: left,
|
||||
Right: right,
|
||||
Op: op,
|
||||
}
|
||||
}
|
||||
|
||||
// Node converts a BinaryOp into an ast node. This will panic if the binary operator
|
||||
// is unknown.
|
||||
func (b *Binary) Node() ast.Node {
|
||||
op, ok := ast.BopMap[string(b.Op)]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("%q is an invalid binary operation", b.Op))
|
||||
}
|
||||
|
||||
return &ast.Binary{
|
||||
Left: b.Left.Node(),
|
||||
Right: b.Right.Node(),
|
||||
Op: op,
|
||||
}
|
||||
}
|
||||
|
||||
// Var represents a variable.
|
||||
type Var struct {
|
||||
ID string
|
||||
Chainer
|
||||
}
|
||||
|
||||
var _ Noder = (*Binary)(nil)
|
||||
|
||||
// NewVar creates an instance of Var.
|
||||
func NewVar(id string) *Var {
|
||||
return &Var{
|
||||
ID: id,
|
||||
}
|
||||
}
|
||||
|
||||
// Node converts the var to a jsonnet ast node.
|
||||
func (v *Var) Node() ast.Node {
|
||||
return &ast.Var{
|
||||
Id: *newIdentifier(v.ID),
|
||||
}
|
||||
}
|
||||
|
||||
// Self represents self.
|
||||
type Self struct{}
|
||||
|
||||
var _ Noder = (*Self)(nil)
|
||||
|
||||
// Node converts self to a jsonnet self node.
|
||||
func (s *Self) Node() ast.Node {
|
||||
return &ast.Self{}
|
||||
}
|
||||
|
||||
// Conditional represents a conditional
|
||||
type Conditional struct {
|
||||
Cond Noder
|
||||
BranchTrue Noder
|
||||
BranchFalse Noder
|
||||
Chainer
|
||||
}
|
||||
|
||||
var _ Noder = (*Conditional)(nil)
|
||||
|
||||
// NewConditional creates an instance of Conditional.
|
||||
func NewConditional(cond, tbranch, fbranch Noder) *Conditional {
|
||||
return &Conditional{
|
||||
Cond: cond,
|
||||
BranchTrue: tbranch,
|
||||
BranchFalse: fbranch,
|
||||
}
|
||||
}
|
||||
|
||||
// Node converts the Conditional to a jsonnet ast node.
|
||||
func (c *Conditional) Node() ast.Node {
|
||||
cond := &ast.Conditional{
|
||||
Cond: c.Cond.Node(),
|
||||
BranchTrue: c.BranchTrue.Node(),
|
||||
}
|
||||
|
||||
if c.BranchFalse != nil {
|
||||
cond.BranchFalse = c.BranchFalse.Node()
|
||||
}
|
||||
|
||||
return cond
|
||||
}
|
||||
|
||||
// OptionalArg is an optional argument.
|
||||
type OptionalArg struct {
|
||||
Name string
|
||||
Default Noder
|
||||
}
|
||||
|
||||
// NamedArgument converts the OptionalArgument to a jsonnet NamedArgument.
|
||||
func (oa *OptionalArg) NamedArgument() ast.NamedArgument {
|
||||
na := ast.NamedArgument{
|
||||
Name: *newIdentifier(oa.Name),
|
||||
}
|
||||
|
||||
if oa.Default == nil {
|
||||
na.Arg = NewStringDouble("").Node()
|
||||
} else {
|
||||
na.Arg = oa.Default.Node()
|
||||
}
|
||||
|
||||
return na
|
||||
}
|
||||
|
||||
// NamedParameter converts the OptionalArgument to a jsonnet NamedParameter.
|
||||
func (oa *OptionalArg) NamedParameter() ast.NamedParameter {
|
||||
np := ast.NamedParameter{
|
||||
Name: *newIdentifier(oa.Name),
|
||||
}
|
||||
|
||||
if oa.Default != nil {
|
||||
np.DefaultArg = oa.Default.Node()
|
||||
}
|
||||
|
||||
return np
|
||||
}
|
||||
|
||||
// Apply represents an application of a function.
|
||||
type Apply struct {
|
||||
target Chainable
|
||||
positionalArgs []Noder
|
||||
optionalArgs []OptionalArg
|
||||
Chainer
|
||||
}
|
||||
|
||||
var _ Targetable = (*Apply)(nil)
|
||||
|
||||
// NewApply creates an instance of Apply.
|
||||
func NewApply(target Chainable, positionalArgs []Noder, optionalArgs []OptionalArg) *Apply {
|
||||
return &Apply{
|
||||
target: target,
|
||||
positionalArgs: positionalArgs,
|
||||
optionalArgs: optionalArgs,
|
||||
}
|
||||
}
|
||||
|
||||
// ApplyCall creates an Apply using a method string.
|
||||
func ApplyCall(method string, args ...Noder) *Apply {
|
||||
return NewApply(NewCall(method), args, nil)
|
||||
}
|
||||
|
||||
// SetTarget sets the target of this Apply.
|
||||
func (a *Apply) SetTarget(c Chainable) {
|
||||
a.target = c
|
||||
}
|
||||
|
||||
// Node converts the Apply to a jsonnet ast node.
|
||||
func (a *Apply) Node() ast.Node {
|
||||
apply := &ast.Apply{
|
||||
Target: a.target.Node(),
|
||||
}
|
||||
|
||||
for _, arg := range a.positionalArgs {
|
||||
apply.Arguments.Positional = append(apply.Arguments.Positional, arg.Node())
|
||||
}
|
||||
|
||||
for _, arg := range a.optionalArgs {
|
||||
apply.Arguments.Named = append(apply.Arguments.Named, arg.NamedArgument())
|
||||
}
|
||||
|
||||
return apply
|
||||
}
|
||||
|
||||
// Call is a function call.
|
||||
type Call struct {
|
||||
parts []string
|
||||
target Chainable
|
||||
Chainer
|
||||
}
|
||||
|
||||
var _ Targetable = (*Call)(nil)
|
||||
|
||||
// NewCall creates an instance of Call.
|
||||
func NewCall(method string) *Call {
|
||||
parts := strings.Split(method, ".")
|
||||
|
||||
return &Call{
|
||||
parts: parts,
|
||||
}
|
||||
}
|
||||
|
||||
// SetTarget sets the target of this Call.
|
||||
func (c *Call) SetTarget(chainable Chainable) {
|
||||
c.target = chainable
|
||||
}
|
||||
|
||||
// Node converts the Call to a jsonnet ast node.
|
||||
func (c *Call) Node() ast.Node {
|
||||
parts := c.parts
|
||||
|
||||
if len(parts) == 1 {
|
||||
return NewVar(parts[0]).Node()
|
||||
}
|
||||
|
||||
var theVar *Var
|
||||
var cur *Index
|
||||
|
||||
switch t := c.target.(type) {
|
||||
case *Var:
|
||||
theVar = t
|
||||
case *Index:
|
||||
cur = t
|
||||
}
|
||||
|
||||
for i := range parts {
|
||||
part := parts[i]
|
||||
if i == 0 && theVar == nil {
|
||||
v := NewVar(part)
|
||||
theVar = v
|
||||
continue
|
||||
}
|
||||
idx := NewIndex(part)
|
||||
if theVar != nil {
|
||||
idx.SetTarget(theVar)
|
||||
theVar = nil
|
||||
} else if cur != nil {
|
||||
idx.SetTarget(cur)
|
||||
}
|
||||
|
||||
cur = idx
|
||||
}
|
||||
|
||||
if theVar != nil {
|
||||
return theVar.Node()
|
||||
}
|
||||
|
||||
return cur.Node()
|
||||
}
|
||||
|
||||
// Index is an index type.
|
||||
type Index struct {
|
||||
ID string
|
||||
Target Chainable
|
||||
Chainer
|
||||
}
|
||||
|
||||
var _ Targetable = (*Index)(nil)
|
||||
|
||||
// NewIndex creates an instance of Index.
|
||||
func NewIndex(id string) *Index {
|
||||
return &Index{
|
||||
ID: id,
|
||||
}
|
||||
}
|
||||
|
||||
// SetTarget sets the target for this Index.
|
||||
func (i *Index) SetTarget(c Chainable) {
|
||||
i.Target = c
|
||||
}
|
||||
|
||||
// Node converts the Index to a Jsonnet AST node.
|
||||
func (i *Index) Node() ast.Node {
|
||||
astIndex := &ast.Index{Id: newIdentifier(i.ID)}
|
||||
|
||||
if i.Target != nil {
|
||||
astIndex.Target = i.Target.Node()
|
||||
}
|
||||
|
||||
return astIndex
|
||||
}
|
||||
|
||||
// Chainable is an interface that signifies this object can be
|
||||
// used in CallChain.
|
||||
type Chainable interface {
|
||||
Chainable()
|
||||
Node() ast.Node
|
||||
}
|
||||
|
||||
// Targetable is a Chainable that allows you to set a target.
|
||||
// Can be used with Calls, Indexes, and Applies.
|
||||
type Targetable interface {
|
||||
Chainable
|
||||
SetTarget(Chainable)
|
||||
}
|
||||
|
||||
// Chainer is an extension struct to bring the Chainable
|
||||
// function into a type.
|
||||
type Chainer struct{}
|
||||
|
||||
// Chainable implements the Chainable interface.
|
||||
func (c *Chainer) Chainable() {}
|
||||
|
||||
// CallChain creates a call chain. It allows you to string
|
||||
// an arbitrary amount of Chainables together.
|
||||
type CallChain struct {
|
||||
links []Chainable
|
||||
}
|
||||
|
||||
var _ Noder = (*CallChain)(nil)
|
||||
|
||||
// NewCallChain creates an instance of CallChain.
|
||||
func NewCallChain(links ...Chainable) *CallChain {
|
||||
|
||||
return &CallChain{
|
||||
links: links,
|
||||
}
|
||||
}
|
||||
|
||||
// Node converts the CallChain to a Jsonnet AST node.
|
||||
// nolint: gocyclo
|
||||
func (cc *CallChain) Node() ast.Node {
|
||||
if len(cc.links) == 1 {
|
||||
return cc.links[0].Node()
|
||||
}
|
||||
|
||||
var previous Chainable
|
||||
|
||||
for i := range cc.links {
|
||||
switch t := cc.links[i].(type) {
|
||||
default:
|
||||
panic(fmt.Sprintf("unhandled node type %T", t))
|
||||
case *Var:
|
||||
previous = t
|
||||
case *Index:
|
||||
if previous != nil {
|
||||
t.SetTarget(previous)
|
||||
}
|
||||
|
||||
previous = t
|
||||
case *Apply:
|
||||
if previous != nil {
|
||||
if targetable, ok := t.target.(Targetable); ok {
|
||||
targetable.SetTarget(previous)
|
||||
}
|
||||
}
|
||||
|
||||
previous = t
|
||||
case *Call:
|
||||
if previous != nil {
|
||||
t.SetTarget(previous)
|
||||
}
|
||||
|
||||
previous = t
|
||||
}
|
||||
}
|
||||
|
||||
return previous.Node()
|
||||
}
|
||||
|
||||
// Local is a local declaration.
|
||||
type Local struct {
|
||||
name string
|
||||
value Noder
|
||||
Body Noder
|
||||
}
|
||||
|
||||
var _ Noder = (*Local)(nil)
|
||||
|
||||
// NewLocal creates an instance of Local.
|
||||
func NewLocal(name string, value, body Noder) *Local {
|
||||
return &Local{name: name, value: value, Body: body}
|
||||
}
|
||||
|
||||
// Node converts the Local to a jsonnet ast node.
|
||||
func (l *Local) Node() ast.Node {
|
||||
id := *newIdentifier(l.name)
|
||||
|
||||
local := &ast.Local{
|
||||
Binds: ast.LocalBinds{
|
||||
{
|
||||
Variable: id,
|
||||
Body: l.value.Node(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if l.Body != nil {
|
||||
local.Body = l.Body.Node()
|
||||
}
|
||||
|
||||
return local
|
||||
}
|
||||
|
||||
// Import is an import declaration.
|
||||
type Import struct {
|
||||
name string
|
||||
}
|
||||
|
||||
var _ Noder = (*Import)(nil)
|
||||
|
||||
// NewImport creates an instance of Import.
|
||||
func NewImport(name string) *Import {
|
||||
return &Import{name: name}
|
||||
}
|
||||
|
||||
// Node converts the Import to a jsonnet ast node.
|
||||
func (i *Import) Node() ast.Node {
|
||||
file := NewStringDouble(i.name)
|
||||
|
||||
return &ast.Import{
|
||||
File: file.node(),
|
||||
}
|
||||
}
|
||||
|
||||
// Function is a function.
|
||||
type Function struct {
|
||||
req []string
|
||||
body Noder
|
||||
}
|
||||
|
||||
var _ Noder = (*Function)(nil)
|
||||
|
||||
// NewFunction creates an instance of Function.
|
||||
func NewFunction(req []string, body Noder) *Function {
|
||||
return &Function{
|
||||
req: req,
|
||||
body: body,
|
||||
}
|
||||
}
|
||||
|
||||
// Node converts the Function to a jsonnet ast node.
|
||||
func (f *Function) Node() ast.Node {
|
||||
fun := &ast.Function{
|
||||
Parameters: ast.Parameters{},
|
||||
Body: f.body.Node(),
|
||||
}
|
||||
|
||||
var ids ast.Identifiers
|
||||
for _, param := range f.req {
|
||||
ids = append(ids, *newIdentifier(param))
|
||||
}
|
||||
fun.Parameters.Required = ids
|
||||
|
||||
return fun
|
||||
}
|
||||
|
||||
// Combine combines multiple nodes into a single node. If one argument is passed,
|
||||
// it is returned. If two or more arguments are passed, they are combined using a
|
||||
// Binary.
|
||||
func Combine(nodes ...Noder) Noder {
|
||||
l := len(nodes)
|
||||
|
||||
switch {
|
||||
case l == 1:
|
||||
return nodes[0]
|
||||
case l >= 2:
|
||||
sum := NewBinary(nodes[0], nodes[1], BopPlus)
|
||||
|
||||
for i := 2; i < l; i++ {
|
||||
sum = NewBinary(sum, nodes[i], BopPlus)
|
||||
}
|
||||
|
||||
return sum
|
||||
}
|
||||
|
||||
return NewObject()
|
||||
}
|
||||
|
||||
// newIdentifier creates an identifier.
|
||||
func newIdentifier(value string) *ast.Identifier {
|
||||
id := ast.Identifier(value)
|
||||
return &id
|
||||
}
|
||||
|
||||
func stringInSlice(s string, sl []string) bool {
|
||||
for i := range sl {
|
||||
if sl[i] == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
1345
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker/nodemaker_test.go
generated
vendored
Normal file
1345
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/nodemaker/nodemaker_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
2
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/doc.go
generated
vendored
Normal file
2
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
// Package printer implements printing of jsonnet AST nodes.
|
||||
package printer
|
1075
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/printer.go
generated
vendored
Normal file
1075
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/printer.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
1273
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/printer_test.go
generated
vendored
Normal file
1273
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/printer_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/apply_brace
generated
vendored
Normal file
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/apply_brace
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
params {}
|
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/apply_with_index
generated
vendored
Normal file
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/apply_with_index
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
alpha.beta.charlie('arg1')
|
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/apply_with_multiple_arguments
generated
vendored
Normal file
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/apply_with_multiple_arguments
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
self(a, b)
|
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/apply_with_number
generated
vendored
Normal file
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/apply_with_number
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
function() 1
|
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/array
generated
vendored
Normal file
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/array
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
[foo, self, 'string']
|
7
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/array_comp
generated
vendored
Normal file
7
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/array_comp
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
|||
[
|
||||
{
|
||||
kind: kind,
|
||||
qty: 4 / 3,
|
||||
}
|
||||
for kind in ['Honey Syrup', 'Lemon Juice', 'Farmers Gin']
|
||||
]
|
3
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/block_string
generated
vendored
Normal file
3
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/block_string
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
|||
|
||||
text
|
||||
|||
|
4
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/boolean
generated
vendored
Normal file
4
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/boolean
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
isTrue: true,
|
||||
isFalse: false,
|
||||
}
|
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/chained_apply
generated
vendored
Normal file
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/chained_apply
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
di.withFoo(foo).withBar(bar)
|
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/conditional
generated
vendored
Normal file
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/conditional
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
if std.type(foo) == 'array' then { foo: foo } else { foo: [foo] }
|
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/conditional_no_false
generated
vendored
Normal file
1
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/conditional_no_false
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
if std.type(foo) == 'array' then { foo: foo }
|
5
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/declarations
generated
vendored
Normal file
5
system/monitoring/vendor/github.com/ksonnet/ksonnet-lib/ksonnet-gen/printer/testdata/declarations
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
|||
local a = import 'a';
|
||||
local b = 'b';
|
||||
local c = deployment.new();
|
||||
|
||||
{}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue