Compare commits
118 Commits
SHA1:
d058b7bc93, a0f5a11ff1, 05b9431ecb, e179616db4, b92fbdc50d, f26df57bfd,
8892ea9936, 1bad9b4fa8, 2b21e14c2c, 8caf755914, 27f671e41a, 74a0e2cf6e,
f3403cd0f5, 8d8b9a157e, 4a58676647, 7b914d31d6, c001626b85, 355f9d4cf7,
40ef427336, e16b009a7f, 25671d42a5, dd0aee081a, cfbd037f79, 3b56c827f9,
870b44fdbc, 79a10dd445, 57afeec94f, b118cd5812, 7863e5902e, 0a9991de96,
28c786c2a6, 1c899b55eb, 7af88388bb, 51df2cbbaa, b48de9f87e, 85065005d4,
0696ecb037, 05d32ae705, fd955544af, 0a77c3d81e, 66e90428b5, 310cda1f18,
daaf72962f, 93dee74459, 1064696c96, 7df5da38bf, e4e0a63e17, bf2a21ddea,
6a07527632, 0f7675c792, f3bbfdc34c, a4186c1031, 809570f4bc, 135199e597,
8a92640d10, 788804627e, 150f35ea3b, 1ce99cf02f, 5e92352b0a, 8664e42233,
7942e9edbe, 01c4137dc9, c083baf640, d1d71e1256, 6c762a7755, 946a503a25,
6a8ffc1f02, ca2b875ab9, 29185fb9fd, df7241ac5c, 52d67f6b60, 34ce5be006,
f2fd7dbbe7, 08cda559a3, 02a5171e37, 552676cc9d, caaa68234c, eddc308055,
0e2c50c374, c790ce5953, cf7aef31e2, feb810ab27, 3f2fe54bfd, 6568dff8e1,
773ec630b4, 8d71d2f973, f9662a1e4b, f7b526eacd, e32bb272b2, e3f632d77f,
63174f85ae, eb0f704ebe, b69a38015c, 41f6a80c91, 79f0670f7e, 313d1a3aae,
8428b9cdbd, de1e3676eb, de1e5edd02, ef989284c2, e060f86b9a, 0a38220497,
fb60841047, 0f4a872082, 26b1f59d66, 064908397e, 42642c94c3, 123fd981e9,
20109c3392, f3b9c34e3b, ab4b8720d1, 052b53aa5c, fadac8c5c3, d0c9dfe2e5,
d0ebb34432, db04deca0d, c148d3ceb9, 42826ae133
@@ -1,4 +1,11 @@
+.git
 .github
 .travis.yml
+*.md
+env
 build*
-*.env
+docker-compose.override.yml
+.netbox/.git*
+.netbox/.travis.yml
+.netbox/docs
+.netbox/scripts
@@ -1,11 +1,21 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
 <!--
 
 Before raising an issue here, answer the following questions for yourself, please:
 
-* Did you read through the troubleshooting section? (https://github.com/netbox-community/netbox-docker/#troubleshooting)
+* Did you read through the troubleshooting section? (https://github.com/netbox-community/netbox-docker/wiki/Troubleshooting)
+* Have you had a look at the rest of the wiki? (https://github.com/netbox-community/netbox-docker/wiki)
 * Have you updated to the latest version and tried again? (i.e. `git pull` and `docker-compose pull`)
 * Have you reset the project and tried again? (i.e. `docker-compose down -v`)
-* Are you confident that your problem is related to the Docker or Docker Compose setup this project provides?
+* Are you confident that your problem is related to the Docker image or Docker Compose file this project provides?
   (Otherwise ask on the Netbox mailing list, please: https://groups.google.com/d/forum/netbox-discuss)
 * Have you looked through the issues already resolved?
 
@@ -15,6 +25,8 @@ Please try this means to get help before opening an issue here:
 * On the networktocode Slack in the #netbox channel: http://slack.networktocode.com/
 * On the Netbox mailing list: https://groups.google.com/d/forum/netbox-discuss
 
+Please don't open an issue when you have a PR ready. Just submit the PR, that's good enough.
+
 -->
 
 ## Current Behavior
@@ -35,12 +47,22 @@ The output of `docker version`: `XXXXX`
 The output of `git rev-parse HEAD`: `XXXXX`
 The command you used to start the project: `XXXXX`
 
+<!-- adjust the `latest` tag to the version you're using -->
+The output of `docker inspect netboxcommunity/netbox:latest --format "{{json .ContainerConfig.Labels}}"`:
+
+```json
+{
+  "JSON JSON JSON":
+  "--> Please paste formatted json. (Use e.g. `jq` or https://jsonformatter.curiousconcept.com/)"
+}
+```
+
 The output of `docker-compose logs netbox`:
 <!--
 If your log is very long, create a Gist instead (and post the link to it): https://gist.github.com
 -->
 
-```
+```text
 LOG LOG LOG
 ```
 
@@ -50,6 +72,6 @@ Only if you have gotten a 5xx http error, else delete this section.
 If your log is very long, create a Gist instead (and post the link to it): https://gist.github.com
 -->
 
-```
+```text
 LOG LOG LOG
 ```
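The template above asks reporters to paste the image labels as formatted JSON. One way to produce exactly that output (a sketch; adjust the tag to the version actually in use):

```bash
# Print the image labels as formatted JSON, as requested by the bug report template.
docker inspect netboxcommunity/netbox:latest \
  --format '{{json .ContainerConfig.Labels}}' | jq .
```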
.github/ISSUE_TEMPLATE/feature_request.md (new file, 54 lines, vendored)

@@ -0,0 +1,54 @@
+---
+name: Feature or Change Request
+about: Request a new feature or a change of the current behavior
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+<!--
+
+Before raising an issue here, answer the following questions for yourself, please:
+
+* Did you read through the troubleshooting section? (https://github.com/netbox-community/netbox-docker/wiki/Troubleshooting)
+* Have you had a look at the rest of the wiki? (https://github.com/netbox-community/netbox-docker/wiki)
+* Have you read the release notes recently (https://github.com/netbox-community/netbox-docker/releases)
+* Are you confident that your feature/change request is related to the Docker image or Docker Compose file this project provides?
+  (Otherwise ask on the Netbox mailing list, please: https://groups.google.com/d/forum/netbox-discuss)
+* Have you looked through the issues already resolved?
+
+Please try this means to get help before opening an issue here:
+
+* On the networktocode Slack in the #netbox-docker channel: http://slack.networktocode.com/
+* On the networktocode Slack in the #netbox channel: http://slack.networktocode.com/
+* On the Netbox mailing list: https://groups.google.com/d/forum/netbox-discuss
+
+Please don't open an issue when you have a PR ready. Just submit the PR, that's good enough.
+
+-->
+
+## Desired Behavior
+
+<!-- please describe the behavior you desire -->
+...
+
+## Contrast to Current Behavior
+
+<!-- please describe how the desired behavior is different from the current behavior -->
+...
+
+## Changes Required
+
+<!-- if you can, please elaborate what changes would exactly be required -->
+...
+
+## Discussion: Benefits and Drawbacks
+
+<!--
+Please make your case here:
+- Why do you think this project and the community will benefit from your suggestion?
+- What are the drawbacks of this change? Is it backwards-compatible?
+- Anything else that you think is relevant to the discussion of this feature/change request.
+-->
+...
.github/pull_request_template.md (new file, 85 lines, vendored)

@@ -0,0 +1,85 @@
+<!--
+###############################################################################
+
+Thank you for sharing your work and for opening a PR.
+
+(!) IMPORTANT (!):
+First make sure that you point your PR to the `develop` branch!
+
+Now please read the comments carefully and try to provide information
+on all relevant titles.
+
+###############################################################################
+-->
+
+<!--
+Please don't open an extra issue when submiting a PR.
+
+But if there is already a related issue, please put it's number here.
+
+E.g. #123 or N/A
+-->
+
+Related Issue:
+
+## New Behavior
+
+<!--
+Please describe in a few words the intentions of your PR.
+-->
+
+...
+
+## Contrast to Current Behavior
+
+<!--
+Please describe in a few words how the new behavior is different
+from the current behavior.
+-->
+
+...
+
+## Discussion: Benefits and Drawbacks
+
+<!--
+Please make your case here:
+
+- Why do you think this project and the community will benefit from your
+  proposed change?
+- What are the drawbacks of this change?
+- Is it backwards-compatible?
+- Anything else that you think is relevant to the discussion of this PR.
+
+(No need to write a huge article here. Just a few sentences that give some
+additional context about the motivations for the change.)
+-->
+
+...
+
+## Changes to the Wiki
+
+<!--
+If the README.md must be updated, please include the changes in the PR.
+If the Wiki must be updated, please make a suggestion below.
+-->
+
+...
+
+## Proposed Release Note Entry
+
+<!--
+Please provide a short summary of your PR that we can copy & paste
+into the release notes.
+-->
+
+...
+
+## Double Check
+
+<!--
+Please put an x into the brackets (like `[x]`) if you've completed that task.
+-->
+
+* [ ] I have read the comments and followed the PR template.
+* [ ] I have provided and explained my PR according to the information in the comments.
+* [ ] My PR targets the `develop` branch.
.github/workflows/push.yml (new file, 35 lines, vendored)

@@ -0,0 +1,35 @@
+on:
+  push:
+    branches-ignore:
+      - release
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        build_cmd:
+          - ./build-latest.sh
+          - PRERELEASE=true ./build-latest.sh
+          - ./build-branches.sh
+        docker_from:
+          - '' # use the default of the DOCKERFILE
+          - python:3.7-alpine
+          - python:3.8-alpine
+          - python:3.9-rc-alpine
+      fail-fast: false
+    runs-on: ubuntu-latest
+    name: Builds new Netbox Docker Images
+    steps:
+      - id: git-checkout
+        name: Checkout
+        uses: actions/checkout@v1
+      - id: docker-build
+        name: Build the image from '${{ matrix.docker_from }}' with '${{ matrix.build_cmd }}'
+        run: ${{ matrix.build_cmd }}
+        env:
+          DOCKER_FROM: ${{ matrix.docker_from }}
+          GH_ACTION: enable
+      - id: docker-test
+        name: Test the image
+        run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
+        if: steps.docker-build.outputs.skipped != 'true'
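Each matrix cell of the workflow above boils down to running one build script with a base image override and then testing the result. A rough local equivalent of a single cell (illustrative only; in CI the build script communicates `FINAL_DOCKER_TAG` back to the workflow, locally you may need to export it yourself):

```bash
# Build with an overridden base image, then test the resulting image (values taken from the matrix above).
DOCKER_FROM=python:3.8-alpine GH_ACTION=enable ./build-latest.sh
IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
```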
.github/workflows/release.yml (new file, 49 lines, vendored)

@@ -0,0 +1,49 @@
+on:
+  push:
+    branches:
+      - release
+  schedule:
+    - cron: '45 5 * * *'
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        build_cmd:
+          - ./build-latest.sh
+          - PRERELEASE=true ./build-latest.sh
+          - ./build-branches.sh
+      fail-fast: false
+    runs-on: ubuntu-latest
+    name: Builds new Netbox Docker Images
+    steps:
+      - id: git-checkout
+        name: Checkout
+        uses: actions/checkout@v1
+      - id: docker-build
+        name: Build the image with '${{ matrix.build_cmd }}'
+        run: ${{ matrix.build_cmd }}
+        env:
+          GH_ACTION: enable
+      - id: docker-test
+        name: Test the image
+        run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
+        if: steps.docker-build.outputs.skipped != 'true'
+      - id: registry-login
+        name: Login to the Docker Registry
+        run: |
+          echo "::add-mask::$DOCKERHUB_USERNAME"
+          echo "::add-mask::$DOCKERHUB_PASSWORD"
+          docker login -u "$DOCKERHUB_USERNAME" --password "${DOCKERHUB_PASSWORD}" "${DOCKER_REGISTRY}"
+        env:
+          DOCKERHUB_USERNAME: ${{ secrets.dockerhub_username }}
+          DOCKERHUB_PASSWORD: ${{ secrets.dockerhub_password }}
+        if: steps.docker-build.outputs.skipped != 'true'
+      - id: registry-push
+        name: Push the image
+        run: ${{ matrix.build_cmd }} --push-only
+        if: steps.docker-build.outputs.skipped != 'true'
+      - id: registry-logout
+        name: Logout of the Docker Registry
+        run: docker logout "${DOCKER_REGISTRY}"
+        if: steps.docker-build.outputs.skipped != 'true'
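The release workflow's push steps amount to logging in, re-running the build script in push-only mode, and logging out. Roughly, under the assumption that the credentials are already set in the environment (registry and variable names here are illustrative):

```bash
# Push a previously built image without rebuilding it (mirrors the registry-login/push/logout steps above).
docker login -u "$DOCKERHUB_USERNAME" --password "$DOCKERHUB_PASSWORD" docker.io
./build-latest.sh --push-only
docker logout docker.io
```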
.gitignore (3 lines changed, vendored)

@@ -1 +1,4 @@
 *.sql.gz
+.netbox
+.initializers
+docker-compose.override.yml
@@ -11,29 +11,26 @@ Autotest: Internal and External Pull Requests
 Repository Links: Enable for Base Image
 Build Rules:
 - Source Type: Branch
-  Source: master
-  Docker Tag: branches-main
+  Source: release
+  Docker Tag: branches
   Dockerfile location: Dockerfile
+  Build Context: /
+  Autobuild: on
+  Build Caching: on
 - Source Type: Branch
-  Source: master
-  Docker Tag: branches-ldap
-  Dockerfile location: Dockerfile.ldap
-- Source Type: Branch
-  Source: master
-  Docker Tag: prerelease-main
+  Source: release
+  Docker Tag: prerelease
   Dockerfile location: Dockerfile
+  Build Context: /
+  Autobuild: on
+  Build Caching: on
 - Source Type: Branch
-  Source: master
-  Docker Tag: prerelease-ldap
-  Dockerfile location: Dockerfile.ldap
-- Source Type: Branch
-  Source: master
-  Docker Tag: release-main
+  Source: release
+  Docker Tag: release
   Dockerfile location: Dockerfile
-- Source Type: Branch
-  Source: master
-  Docker Tag: release-ldap
-  Dockerfile location: Dockerfile.ldap
+  Build Context: /
+  Autobuild: on
+  Build Caching: on
 Build Environment Variables:
 # Create an app on Github and use it's OATH credentials here
 - Key: GITHUB_OAUTH_CLIENT_ID
@@ -42,6 +39,7 @@ Build Environment Variables:
   Value: <secret>
 Build Triggers:
 - Name: Cron Trigger
+  Trigger URL: <generated>
 # Use this trigger in combination with e.g. https://cron-job.org in order to regularly schedule builds
 ```
 
@@ -51,16 +49,15 @@ The build system of cloud.docker.com is not made for this kind of project.
 But we found a way to make it work, and this is how:
 
 1. The docker hub build system [allows to overwrite the scripts that get executed
-   for `build`, `test` and `push`](overwrite). See `hooks/*`.
-2. Shared functionality of the scripts `build`, `test` and `push` is extracted to `hooks/common`.
-3. The `build` script runs `run_build()` from `hooks/common`.
-   This triggers either `build-branches.sh`, `build-latest.sh` or directly `build.sh`.
+   for `build`, `test` and `push`](overwrite). See `/hooks/*`.
+2. Shared functionality of the scripts `build`, `test` and `push` is extracted to `/hooks/common`.
+3. The `build` script runs `run_build()` from `/hooks/common`.
+   This triggers either `/build-branches.sh`, `/build-latest.sh` or directly `/build.sh`.
 4. The `test` script just invokes `docker-compose` commands.
 5. The `push` script runs `run_build()` from `hooks/common` with a `--push-only` flag.
    This causes the `build.sh` script to not re-build the Docker image, but just the just built image.
 
-The _Docker Tag_ configuration setting is misused to select the type (_release_, _prerelease_, _branches_) of the build as well as the variant (_main_, _ldap_).
-The _Dockerfile location_ configuration setting is completely ignored by the build scripts.
+The _Docker Tag_ configuration setting (`$DOCKER_TAG`) is only used to select the type (_release_, _prerelease_, _branches_) of the build in `hooks/common`.
+Because it has a different meaning in all the other build scripts, it is `unset` after it has served it's purpose.
 
 [overwrite]: https://docs.docker.com/docker-hub/builds/advanced/#override-build-test-or-push-commands
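For context, a Docker Hub hook override is just an executable script under `hooks/` that the autobuilder runs instead of its default command. A minimal sketch of what such an override can look like, assuming a `run_build()` function as described above (the actual `hooks/build` in this repository may differ):

```bash
#!/bin/bash
# Hypothetical hooks/build override for the Docker Hub autobuilder.
# shellcheck disable=SC1091
source hooks/common   # assumed to define run_build() and to consume/unset $DOCKER_TAG
run_build             # dispatches to build-latest.sh, build-branches.sh or build.sh
```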
Dockerfile (116 lines changed)

@@ -1,4 +1,5 @@
-FROM python:3.7-alpine3.10
+ARG FROM=python:3.7-alpine
+FROM ${FROM} as builder
 
 RUN apk add --no-cache \
     bash \
@@ -7,51 +8,57 @@ RUN apk add --no-cache \
     cyrus-sasl-dev \
     graphviz \
     jpeg-dev \
+    libevent-dev \
     libffi-dev \
-    libxml2-dev \
     libxslt-dev \
     openldap-dev \
-    postgresql-dev \
-    ttf-ubuntu-font-family \
-    wget
+    postgresql-dev
 
-RUN pip install \
+WORKDIR /install
+
+RUN pip install --prefix="/install" --no-warn-script-location \
     # gunicorn is used for launching netbox
     gunicorn \
+    greenlet \
+    eventlet \
     # napalm is used for gathering information from network devices
     napalm \
    # ruamel is used in startup_scripts
    'ruamel.yaml>=0.15,<0.16' \
-    # pinning django to the version required by netbox
-    # adding it here, to install the correct version of
-    # django-rq
-    'Django>=2.2,<2.3' \
-    # django-rq is used for webhooks
-    django-rq
+    # django_auth_ldap is required for ldap
+    django_auth_ldap \
+    # django-storages was introduced in 2.7 and is optional
+    django-storages
 
-ARG BRANCH=master
+ARG NETBOX_PATH
+COPY ${NETBOX_PATH}/requirements.txt /
+RUN pip install --prefix="/install" --no-warn-script-location -r /requirements.txt
 
-WORKDIR /tmp
+###
+# Main stage
+###
 
-# As the requirements don't change very often,
-# and as they take some time to compile,
-# we try to cache them very agressively.
-ARG REQUIREMENTS_URL=https://raw.githubusercontent.com/netbox-community/netbox/$BRANCH/requirements.txt
-ADD ${REQUIREMENTS_URL} requirements.txt
-RUN pip install -r requirements.txt
+ARG FROM
+FROM ${FROM} as main
 
-# Cache bust when the upstream branch changes:
-# ADD will fetch the file and check if it has changed
-# If not, Docker will use the existing build cache.
-# If yes, Docker will bust the cache and run every build step from here on.
-ARG REF_URL=https://api.github.com/repos/netbox-community/netbox/contents?ref=$BRANCH
-ADD ${REF_URL} version.json
+RUN apk add --no-cache \
+    bash \
+    ca-certificates \
+    graphviz \
+    libevent \
+    libffi \
+    libjpeg-turbo \
+    libressl \
+    libxslt \
+    postgresql-libs \
+    ttf-ubuntu-font-family
 
 WORKDIR /opt
 
-ARG URL=https://github.com/netbox-community/netbox/archive/$BRANCH.tar.gz
-RUN wget -q -O - "${URL}" | tar xz \
-  && mv netbox* netbox
+COPY --from=builder /install /usr/local
+
+ARG NETBOX_PATH
+COPY ${NETBOX_PATH} /opt/netbox
 
 COPY docker/configuration.docker.py /opt/netbox/netbox/netbox/configuration.py
 COPY configuration/gunicorn_config.py /etc/netbox/config/
@@ -63,11 +70,56 @@ COPY configuration/configuration.py /etc/netbox/config/configuration.py
 
 WORKDIR /opt/netbox/netbox
 
+# Must set permissions for '/opt/netbox/netbox/static' directory
+# to g+w so that `./manage.py collectstatic` can be executed during
+# container startup.
+# Must set permissions for '/opt/netbox/netbox/media' directory
+# to g+w so that pictures can be uploaded to netbox.
+RUN mkdir static && chmod g+w static media
+
 ENTRYPOINT [ "/opt/netbox/docker-entrypoint.sh" ]
 
 CMD ["gunicorn", "-c /etc/netbox/config/gunicorn_config.py", "netbox.wsgi"]
 
-LABEL SRC_URL="$URL"
+LABEL ORIGINAL_TAG="" \
+      NETBOX_GIT_BRANCH="" \
+      NETBOX_GIT_REF="" \
+      NETBOX_GIT_URL="" \
+      # See http://label-schema.org/rc1/#build-time-labels
+      # Also https://microbadger.com/labels
+      org.label-schema.schema-version="1.0" \
+      org.label-schema.build-date="" \
+      org.label-schema.name="Netbox Docker" \
+      org.label-schema.description="A container based distribution of Netbox, the free and open IPAM and DCIM solution." \
+      org.label-schema.vendor="The netbox-docker contributors." \
+      org.label-schema.url="https://github.com/netbox-community/netbox-docker" \
+      org.label-schema.usage="https://github.com/netbox-community/netbox-docker/wiki" \
+      org.label-schema.vcs-url="https://github.com/netbox-community/netbox-docker.git" \
+      org.label-schema.vcs-ref="" \
+      org.label-schema.version="snapshot" \
+      # See https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+      org.opencontainers.image.created="" \
+      org.opencontainers.image.title="Netbox Docker" \
+      org.opencontainers.image.description="A container based distribution of Netbox, the free and open IPAM and DCIM solution." \
+      org.opencontainers.image.licenses="Apache-2.0" \
+      org.opencontainers.image.authors="The netbox-docker contributors." \
+      org.opencontainers.image.vendor="The netbox-docker contributors." \
+      org.opencontainers.image.url="https://github.com/netbox-community/netbox-docker" \
+      org.opencontainers.image.documentation="https://github.com/netbox-community/netbox-docker/wiki" \
+      org.opencontainers.image.source="https://github.com/netbox-community/netbox-docker.git" \
+      org.opencontainers.image.revision="" \
+      org.opencontainers.image.version="snapshot"
 
-ARG NETBOX_DOCKER_PROJECT_VERSION=snapshot
-LABEL NETBOX_DOCKER_PROJECT_VERSION="$NETBOX_DOCKER_PROJECT_VERSION"
+#####
+## LDAP specific configuration
+#####
+
+FROM main as ldap
+
+RUN apk add --no-cache \
+    libsasl \
+    libldap \
+    util-linux
+
+COPY docker/ldap_config.docker.py /opt/netbox/netbox/netbox/ldap_config.py
+COPY configuration/ldap_config.py /etc/netbox/config/ldap_config.py
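With the multi-stage Dockerfile above, the LDAP image becomes just another build target of the same file. A hedged example of building it directly, assuming the Netbox source has already been checked out to `.netbox` (which is what `build.sh` normally takes care of); all tag and argument values are examples:

```bash
# Build the LDAP variant from the multi-stage Dockerfile.
docker build \
  --build-arg FROM=python:3.7-alpine \
  --build-arg NETBOX_PATH=.netbox \
  --target ldap \
  -t netboxcommunity/netbox:snapshot-ldap .
```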
@@ -1,9 +0,0 @@
-ARG DOCKER_ORG=netboxcommunity
-ARG DOCKER_REPO=netbox
-ARG FROM_TAG=latest
-FROM $DOCKER_ORG/$DOCKER_REPO:$FROM_TAG
-
-RUN pip install django_auth_ldap
-
-COPY docker/ldap_config.docker.py /opt/netbox/netbox/netbox/ldap_config.py
-COPY configuration/ldap_config.py /etc/netbox/config/ldap_config.py
README.md (73 lines changed)

@@ -1,21 +1,52 @@
 # netbox-docker
 
+[][github-release]
+[][github-stargazers]
+[][netbox-docker-microbadger]
+[][netbox-docker-microbadger]
+[][netbox-docker-license]
+
 [The Github repository](netbox-docker-github) houses the components needed to build Netbox as a Docker container.
 Images are built using this code and are released to [Docker Hub][netbox-dockerhub] once a day.
 
 Do you have any questions? Before opening an issue on Github, please join the [Network To Code][ntc-slack] Slack and ask for help in our [`#netbox-docker`][netbox-docker-slack] channel.
 
+[github-stargazers]: https://github.com/netbox-community/netbox-docker/stargazers
+[github-release]: https://github.com/netbox-community/netbox-docker/releases
+[netbox-docker-microbadger]: https://microbadger.com/images/netboxcommunity/netbox
 [netbox-dockerhub]: https://hub.docker.com/r/netboxcommunity/netbox/tags/
 [netbox-docker-github]: https://github.com/netbox-community/netbox-docker/
 [ntc-slack]: http://slack.networktocode.com/
 [netbox-docker-slack]: https://slack.com/app_redirect?channel=netbox-docker&team=T09LQ7E9E
+[netbox-docker-license]: https://github.com/netbox-community/netbox-docker/blob/master/LICENSE
+
+## Docker Tags
+
+* `vX.Y.Z`: Release builds, built from [releases of Netbox][netbox-releases].
+* `latest`: Release builds, built from [`master` branch of Netbox][netbox-master].
+* `snapshot`: Pre-release builds, built from the [`develop` branch of Netbox][netbox-develop].
+* `develop-X.Y`: Pre-release builds, built from the corresponding [branch of Netbox][netbox-branches].
+
+Then there is currently one extra tags for each of the above labels:
+
+* `-ldap`: Contains additional dependencies and configurations for connecting Netbox to an LDAP directroy.
+  [Learn more about that in our wiki][netbox-docker-ldap].
+
+[netbox-releases]: https://github.com/netbox-community/netbox/releases
+[netbox-master]: https://github.com/netbox-community/netbox/tree/master
+[netbox-develop]: https://github.com/netbox-community/netbox/tree/develop
+[netbox-branches]: https://github.com/netbox-community/netbox/branches
+[netbox-docker-ldap]: https://github.com/netbox-community/netbox-docker/wiki/LDAP
+
 ## Quickstart
 
-To get Netbox up and running:
+To get Netbox up and running in Docker:
 
 ```bash
-git clone -b master https://github.com/netbox-community/netbox-docker.git
+git clone -b release https://github.com/netbox-community/netbox-docker.git
 cd netbox-docker
 docker-compose pull
 docker-compose up -d
@@ -37,29 +68,32 @@ $ xdg-open "http://$(docker-compose port nginx 8080)/" &>/dev/null &
 
 Alternatively, use something like [Reception][docker-reception] to connect to _docker-compose_ projects.
 
-Default credentials:
+The default credentials are:
 
 * Username: **admin**
 * Password: **admin**
 * API Token: **0123456789abcdef0123456789abcdef01234567**
 
+There is a more complete [Getting Started guide on our Wiki][wiki-getting-started].
+
+[wiki-getting-started]: https://github.com/netbox-community/netbox-docker/wiki/Getting-Started
 [docker-reception]: https://github.com/nxt-engineering/reception
 
 ## Dependencies
 
-This project relies only on *Docker* and *docker-compose* meeting this requirements:
+This project relies only on *Docker* and *docker-compose* meeting these requirements:
 
-* The *Docker version* must be at least `1.13.0`.
-* The *docker-compose version* must be at least `1.10.0`.
+* The *Docker version* must be at least `17.05`.
+* The *docker-compose version* must be at least `1.17.0`.
 
-To ensure this, compare the output of `docker --version` and `docker-compose --version` with the requirements above.
+To check the version installed on your system run `docker --version` and `docker-compose --version`.
 
-## Reference Documentation
+## Documentation
 
-Please refer [to the wiki][wiki] for further information on how to use this Netbox Docker image properly.
+Please refer [to our wiki on Github][netbox-docker-wiki] for further information on how to use this Netbox Docker image properly.
 It covers advanced topics such as using secret files, deployment to Kubernetes as well as NAPALM and LDAP configuration.
 
-[wiki]: https://github.com/netbox-community/netbox-docker/wiki/
+[netbox-docker-wiki]: https://github.com/netbox-community/netbox-docker/wiki/
 
 ## Netbox Version
 
@@ -69,24 +103,20 @@ To use this feature, set the environment-variable `VERSION` before launching `do
 [any tag of the `netboxcommunity/netbox` Docker image on Docker Hub][netbox-dockerhub].
 
 ```bash
-export VERSION=v2.2.6
+export VERSION=v2.7.1
 docker-compose pull netbox
 docker-compose up -d
 ```
 
-You can also build a specific version of the Netbox image. This time, `VERSION` indicates any valid
-[Git Reference][git-ref] declared on [the `netbox-community/netbox` Github repository][netbox-github].
-Most commonly you will specify a tag or branch name.
+You can also build a specific version of the Netbox Docker image yourself.
+`VERSION` can be any valid [git ref][git-ref] in that case.
 
 ```bash
-export VERSION=develop
-docker-compose build --no-cache netbox
+export VERSION=v2.7.1
+./build.sh $VERSION
 docker-compose up -d
 ```
 
-Hint: If you're building a specific version by tag name, the `--no-cache` argument is not strictly necessary.
-This can increase the build speed if you're just adjusting the config, for example.
-
 [git-ref]: https://git-scm.com/book/en/v2/Git-Internals-Git-References
 [netbox-github]: https://github.com/netbox-community/netbox/releases
 
@@ -94,8 +124,9 @@ This can increase the build speed if you're just adjusting the config, for examp
 
 From time to time it might become necessary to re-engineer the structure of this setup.
 Things like the `docker-compose.yml` file or your Kubernetes or OpenShift configurations have to be adjusted as a consequence.
-Since April 2018 each image built from this repo contains a `NETBOX_DOCKER_PROJECT_VERSION` label.
-You can check the label of your local image by running `docker inspect netboxcommunity/netbox:v2.3.1 --format "{{json .ContainerConfig.Labels}}"`.
+Since November 2019 each image built from this repo contains a `org.opencontainers.image.version` label.
+(The images contained labels since April 2018, although in November 2019 the labels' names changed.)
+You can check the label of your local image by running `docker inspect netboxcommunity/netbox:v2.7.1 --format "{{json .ContainerConfig.Labels}}"`.
 
 Please read [the release notes][releases] carefully when updating to a new image version.
 
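Since the README now points at the `org.opencontainers.image.version` label, here is one way to read just that label from a local image (the tag is an example; `index` is the standard Go template function that `docker inspect --format` supports):

```bash
# Show only the version label of a local image.
docker inspect netboxcommunity/netbox:latest \
  --format '{{ index .ContainerConfig.Labels "org.opencontainers.image.version" }}'
```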
build-all.sh (35 lines changed)

@@ -12,39 +12,13 @@ BUILDS=("${BUILD:-"${ALL_BUILDS[@]}"}")
 
 echo "⚙️ Configured builds: ${BUILDS[*]}"
 
-VARIANTS=("main" "ldap")
-
 if [ -n "${DEBUG}" ]; then
   export DEBUG
 fi
 
 ERROR=0
 
-# Don't build if not on `master` and don't build if on a pull request,
-# but build when DEBUG is not empty
-for VARIANT in "${VARIANTS[@]}"; do
-  export VARIANT
-
-  # Checking which VARIANT to build
-  if [ "${VARIANT}" == "main" ]; then
-    DOCKERFILE="${DOCKERFILE_PATH-Dockerfile}"
-  else
-    DOCKERFILE="${DOCKERFILE_PATH-Dockerfile}.${VARIANT}"
-
-    # Fail fast
-    if [ ! -f "${DOCKERFILE}" ]; then
-      echo "🚨 The Dockerfile '${DOCKERFILE}' for variant '${VARIANT}' doesn't exist."
-      ERROR=1
-
-      if [ -z "$DEBUG" ]; then
-        continue
-      else
-        echo "⚠️ Would skip this, but DEBUG is enabled."
-      fi
-    fi
-  fi
-
-  for BUILD in "${BUILDS[@]}"; do
+for BUILD in "${BUILDS[@]}"; do
   echo "🛠 Building '$BUILD' from '$DOCKERFILE'"
   case $BUILD in
     release)
@@ -62,12 +36,6 @@ for VARIANT in "${VARIANTS[@]}"; do
       # shellcheck disable=SC2068
       ./build-branches.sh $@ || ERROR=1
       ;;
-    special)
-      # special build
-      # shellcheck disable=SC2068
-      #SRC_ORG=lampwins TAG=webhooks-backend ./build.sh "feature/webhooks-backend" $@ || ERROR=1
-      echo "✅ No special builds today."
-      ;;
     *)
       echo "🚨 Unrecognized build '$BUILD'."
 
@@ -78,7 +46,6 @@ for VARIANT in "${VARIANTS[@]}"; do
       fi
       ;;
   esac
-  done
 done
 
 exit $ERROR
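Given the `BUILDS=("${BUILD:-"${ALL_BUILDS[@]}"}")` line at the top of the script, a single build type can be selected via the `BUILD` variable, and any extra arguments are passed through to the underlying build scripts. For example (hedged; `--push` and `DEBUG` behave as documented in `build.sh`):

```bash
# Run only the release build and push it; then re-run everything in DEBUG mode without pushing.
BUILD=release ./build-all.sh --push
DEBUG=1 ./build-all.sh
```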
@@ -1,8 +1,12 @@
 #!/bin/bash
-# Builds develop, develop-* and master branches
+# Builds develop, develop-* and master branches of Netbox
 
 echo "▶️ $0 $*"
 
+###
+# Checking for the presence of GITHUB_OAUTH_CLIENT_ID
+# and GITHUB_OAUTH_CLIENT_SECRET
+###
 if [ -n "${GITHUB_OAUTH_CLIENT_ID}" ] && [ -n "${GITHUB_OAUTH_CLIENT_SECRET}" ]; then
   echo "🗝 Performing authenticated Github API calls."
   GITHUB_OAUTH_PARAMS="client_id=${GITHUB_OAUTH_CLIENT_ID}&client_secret=${GITHUB_OAUTH_CLIENT_SECRET}"
@@ -11,18 +15,33 @@ else
   GITHUB_OAUTH_PARAMS=""
 fi
 
+###
+# Calling Github to get the all branches
+###
 ORIGINAL_GITHUB_REPO="${SRC_ORG-netbox-community}/${SRC_REPO-netbox}"
 GITHUB_REPO="${GITHUB_REPO-$ORIGINAL_GITHUB_REPO}"
 URL_RELEASES="https://api.github.com/repos/${GITHUB_REPO}/branches?${GITHUB_OAUTH_PARAMS}"
 
+# Composing the JQ commans to extract the most recent version number
+JQ_BRANCHES='map(.name) | .[] | scan("^[^v].+") | match("^(master|develop).*") | .string'
+
 CURL="curl -sS"
 
-BRANCHES=$($CURL "${URL_RELEASES}" | jq -r 'map(.name) | .[] | scan("^[^v].+") | match("^(master|develop).*") | .string')
+# Querying the Github API to fetch all branches
+BRANCHES=$($CURL "${URL_RELEASES}" | jq -r "$JQ_BRANCHES")
+
+###
+# Building each branch
+###
 
+# keeping track whether an error occured
 ERROR=0
 
+# calling build.sh for each branch
 for BRANCH in $BRANCHES; do
   # shellcheck disable=SC2068
   ./build.sh "${BRANCH}" $@ || ERROR=1
 done
 
+# returning whether an error occured
 exit $ERROR
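The `JQ_BRANCHES` filter above keeps only `master` and `develop*` branch names and drops tag-like names. A self-contained way to see it in action on sample data:

```bash
# Sample input mimicking the GitHub "list branches" response; only master/develop* names survive the filter.
echo '[{"name":"master"},{"name":"develop"},{"name":"develop-2.7"},{"name":"v2.6.7"}]' \
  | jq -r 'map(.name) | .[] | scan("^[^v].+") | match("^(master|develop).*") | .string'
# prints master, develop and develop-2.7, one per line
```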
@@ -3,6 +3,10 @@
 
 echo "▶️ $0 $*"
 
+###
+# Checking for the presence of GITHUB_OAUTH_CLIENT_ID
+# and GITHUB_OAUTH_CLIENT_SECRET
+###
 if [ -n "${GITHUB_OAUTH_CLIENT_ID}" ] && [ -n "${GITHUB_OAUTH_CLIENT_SECRET}" ]; then
   echo "🗝 Performing authenticated Github API calls."
   GITHUB_OAUTH_PARAMS="client_id=${GITHUB_OAUTH_CLIENT_ID}&client_secret=${GITHUB_OAUTH_CLIENT_SECRET}"
@@ -11,17 +15,38 @@ else
   GITHUB_OAUTH_PARAMS=""
 fi
 
+###
+# Checking if PRERELEASE is either unset, 'true' or 'false'
+###
+if [ -n "${PRERELEASE}" ] &&
+   { [ "${PRERELEASE}" != "true" ] && [ "${PRERELEASE}" != "false" ]; }; then
+
+  if [ -z "${DEBUG}" ]; then
+    echo "⚠️ PRERELEASE must be either unset, 'true' or 'false', but was '${PRERELEASE}'!"
+    exit 1
+  else
+    echo "⚠️ Would exit here with code '1', but DEBUG is enabled."
+  fi
+fi
+
+###
+# Calling Github to get the latest version
+###
 ORIGINAL_GITHUB_REPO="netbox-community/netbox"
 GITHUB_REPO="${GITHUB_REPO-$ORIGINAL_GITHUB_REPO}"
 URL_RELEASES="https://api.github.com/repos/${GITHUB_REPO}/releases?${GITHUB_OAUTH_PARAMS}"
 
+# Composing the JQ commans to extract the most recent version number
 JQ_LATEST="group_by(.prerelease) | .[] | sort_by(.published_at) | reverse | .[0] | select(.prerelease==${PRERELEASE-false}) | .tag_name"
 
 CURL="curl -sS"
 
+# Querying the Github API to fetch the most recent version number
 VERSION=$($CURL "${URL_RELEASES}" | jq -r "${JQ_LATEST}")
 
+###
 # Check if the prerelease version is actually higher than stable version
+###
 if [ "${PRERELEASE}" == "true" ]; then
   JQ_STABLE="group_by(.prerelease) | .[] | sort_by(.published_at) | reverse | .[0] | select(.prerelease==false) | .tag_name"
   STABLE_VERSION=$($CURL "${URL_RELEASES}" | jq -r "${JQ_STABLE}")
@@ -35,10 +60,16 @@ if [ "${PRERELEASE}" == "true" ]; then
   # shellcheck disable=SC2003
   MINOR_UNSTABLE=$(expr match "${VERSION}" 'v[0-9]\+\.\([0-9]\+\)')
 
-  if ( [ "$MAJOR_STABLE" -eq "$MAJOR_UNSTABLE" ] && [ "$MINOR_STABLE" -ge "$MINOR_UNSTABLE" ] ) \
-    || [ "$MAJOR_STABLE" -gt "$MAJOR_UNSTABLE" ]; then
-    echo "❎ Latest unstable version ('$VERSION') is not higher than the latest stable version ('$STABLE_VERSION')."
+  if { [ "${MAJOR_STABLE}" -eq "${MAJOR_UNSTABLE}" ] \
+       && [ "${MINOR_STABLE}" -ge "${MINOR_UNSTABLE}" ];
+     } || [ "${MAJOR_STABLE}" -gt "${MAJOR_UNSTABLE}" ]; then
+
+    echo "❎ Latest unstable version '${VERSION}' is not higher than the latest stable version '$STABLE_VERSION'."
     if [ -z "$DEBUG" ]; then
+      if [ -n "${GH_ACTION}" ]; then
+        echo "::set-output name=skipped::true"
+      fi
+
       exit 0
     else
       echo "⚠️ Would exit here with code '0', but DEBUG is enabled."
@@ -46,32 +77,6 @@ if [ "${PRERELEASE}" == "true" ]; then
     fi
   fi
 fi
 
-# Check if that version is not already available on docker hub:
-ORIGINAL_DOCKERHUB_REPO="${DOCKER_ORG-netboxcommunity}/${DOCKER_REPO-netbox}"
-DOCKERHUB_REPO="${DOCKERHUB_REPO-$ORIGINAL_DOCKERHUB_REPO}"
-URL_DOCKERHUB_TOKEN="https://auth.docker.io/token?service=registry.docker.io&scope=repository:${DOCKERHUB_REPO}:pull"
-BEARER_TOKEN="$($CURL "${URL_DOCKERHUB_TOKEN}" | jq -r .token)"
-
-URL_DOCKERHUB_TAG="https://registry.hub.docker.com/v2/${DOCKERHUB_REPO}/tags/list"
-AUTHORIZATION_HEADER="Authorization: Bearer ${BEARER_TOKEN}"
-
-if [ -z "$VARIANT" ] || [ "$VARIANT" == "main" ]; then
-  DOCKER_TAG="${VERSION}"
-else
-  DOCKER_TAG="${VERSION}-${VARIANT}"
-fi
-
-ALREADY_BUILT="$($CURL -H "${AUTHORIZATION_HEADER}" "${URL_DOCKERHUB_TAG}" | jq -e ".tags | any(.==\"${DOCKER_TAG}\")")"
-
-if [ -n "$DEBUG" ] || [ "$ALREADY_BUILT" == "false" ]; then
-  if [ -n "$DEBUG" ]; then
-    echo "⚠️ Would not build, because ${DOCKER_TAG} already exists on https://hub.docker.com/r/${DOCKERHUB_REPO}, but DEBUG is enabled."
-  fi
-
-  # shellcheck disable=SC2068
-  ./build.sh "${VERSION}" $@
-  exit $?
-else
-  echo "✅ ${DOCKER_TAG} already exists on https://hub.docker.com/r/${DOCKERHUB_REPO}"
-  exit 0
-fi
+# shellcheck disable=SC2068
+./build.sh "${VERSION}" $@
+exit $?
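In short, the script resolves the newest (pre-)release tag via the GitHub API and hands it to `build.sh`. Typical invocations (hedged; `PRERELEASE` and `GH_ACTION` behave as shown in the script above, and extra flags are passed through to `build.sh`):

```bash
./build-latest.sh                    # build the latest stable Netbox release
PRERELEASE=true ./build-latest.sh    # build the latest pre-release, only if it is newer than stable
GH_ACTION=enable ./build-latest.sh   # additionally emit "::set-output name=skipped::true" when nothing is built
```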
build.sh (380 lines changed)

@@ -1,5 +1,5 @@
 #!/bin/bash
-# Builds the Dockerfile[.variant] and injects tgz'ed Netbox code from Github
+# Clones the Netbox repository with git from Github and builds the Dockerfile
 
 echo "▶️ $0 $*"
 
@@ -8,66 +8,85 @@ set -e
 if [ "${1}x" == "x" ] || [ "${1}" == "--help" ] || [ "${1}" == "-h" ]; then
   echo "Usage: ${0} <branch> [--push|--push-only]"
   echo "  branch      The branch or tag to build. Required."
-  echo "  --push      Pushes built the Docker image to the registry."
-  echo "  --push-only Does not build. Only pushes the Docker image to the registry."
+  echo "  --push      Pushes the built Docker image to the registry."
+  echo "  --push-only Only pushes the Docker image to the registry, but does not build it."
   echo ""
   echo "You can use the following ENV variables to customize the build:"
-  echo "  DEBUG        If defined, the script does not stop when certain checks are unsatisfied."
-  echo "  DRY_RUN      Prints all build statements instead of running them."
-  echo "  DOCKER_OPTS  Add parameters to Docker."
-  echo "               Default:"
-  echo "                 When <TAG> starts with 'v': \"\""
-  echo "                 Else: \"--no-cache\""
-  echo "  BRANCH       The branch to build."
-  echo "               Also used for tagging the image."
+  echo "  SRC_ORG      Which fork of netbox to use (i.e. github.com/\${SRC_ORG}/\${SRC_REPO})."
+  echo "               Default: netbox-community"
+  echo "  SRC_REPO     The name of the repository to use (i.e. github.com/\${SRC_ORG}/\${SRC_REPO})."
+  echo "               Default: netbox"
+  echo "  URL          Where to fetch the code from."
+  echo "               Must be a git repository. Can be private."
+  echo "               Default: https://github.com/\${SRC_ORG}/\${SRC_REPO}.git"
+  echo "  NETBOX_PATH  The path where netbox will be checkout out."
+  echo "               Must not be outside of the netbox-docker repository (because of Docker)!"
+  echo "               Default: .netbox"
+  echo "  SKIP_GIT     If defined, git is not invoked and \${NETBOX_PATH} will not be altered."
+  echo "               This may be useful, if you are manually managing the NETBOX_PATH."
+  echo "               Default: undefined"
   echo "  TAG          The version part of the docker tag."
   echo "               Default:"
-  echo "                 When <BRANCH>=master: latest"
-  echo "                 When <BRANCH>=develop: snapshot"
-  echo "                 Else: same as <BRANCH>"
-  echo "  DOCKER_ORG   The Docker registry (i.e. hub.docker.com/r/<DOCKER_ORG>/<DOCKER_REPO>) "
-  echo "               Also used for tagging the image."
+  echo "                 When <branch>=master: latest"
+  echo "                 When <branch>=develop: snapshot"
+  echo "                 Else: same as <branch>"
+  echo "  DOCKER_REGISTRY The Docker repository's registry (i.e. '\${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}'')"
+  echo "               Used for tagging the image."
+  echo "               Default: docker.io"
+  echo "  DOCKER_ORG   The Docker repository's organisation (i.e. '\${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}'')"
+  echo "               Used for tagging the image."
   echo "               Default: netboxcommunity"
-  echo "  DOCKER_REPO  The Docker registry (i.e. hub.docker.com/r/<DOCKER_ORG>/<DOCKER_REPO>) "
-  echo "               Also used for tagging the image."
+  echo "  DOCKER_REPO  The Docker repository's name (i.e. '\${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}'')"
+  echo "               Used for tagging the image."
   echo "               Default: netbox"
   echo "  DOCKER_TAG   The name of the tag which is applied to the image."
   echo "               Useful for pushing into another registry than hub.docker.com."
-  echo "               Default: <DOCKER_ORG>/<DOCKER_REPO>:<BRANCH>"
-  echo "  DOCKER_SHORT_TAG The name of the short tag which is applied to the image."
-  echo "               This is used to tag all patch releases to their containing version e.g. v2.5.1 -> v2.5"
-  echo "               Default: <DOCKER_ORG>/<DOCKER_REPO>:\$MAJOR.\$MINOR"
-  echo "  SRC_ORG      Which fork of netbox to use (i.e. github.com/<SRC_ORG>/<SRC_REPO>)."
-  echo "               Default: netbox-community"
-  echo "  SRC_REPO     The name of the netbox for to use (i.e. github.com/<SRC_ORG>/<SRC_REPO>)."
-  echo "               Default: netbox"
-  echo "  URL          Where to fetch the package from."
-  echo "               Must be a tar.gz file of the source code."
-  echo "               Default: https://github.com/<SRC_ORG>/<SRC_REPO>/archive/\$BRANCH.tar.gz"
-  echo "  VARIANT      The variant to build."
-  echo "               The value will be used as a suffix to the \$TAG and for the Dockerfile"
-  echo "               selection. The TAG being build must exist for the base variant and"
-  echo "               corresponding Dockerfile must start with the following lines:"
-  echo "                 ARG DOCKER_ORG=netboxcommunity"
-  echo "                 ARG DOCKER_REPO=netbox"
-  echo "                 ARG FROM_TAG=latest"
-  echo "                 FROM \$DOCKER_ORG/\$DOCKER_REPO:\$FROM_TAG"
-  echo "               Example: VARIANT=ldap will result in the tag 'latest-ldap' and the"
-  echo "               Dockerfile './Dockerfile.ldap' being used."
-  echo "               Exception: VARIANT=main will use the './Dockerfile' Dockerfile"
-  echo "               Default: main"
+  echo "               Default: \${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}:\${TAG}"
+  echo "  DOCKER_SHORT_TAG The name of the short tag which is applied to the"
+  echo "               image. This is used to tag all patch releases to their"
+  echo "               containing version e.g. v2.5.1 -> v2.5"
+  echo "               Default: \${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}:<MAJOR>.<MINOR>"
+  echo "  DOCKERFILE   The name of Dockerfile to use."
+  echo "               Default: Dockerfile"
+  echo "  DOCKER_FROM  The base image to use."
+  echo "               Default: Whatever is defined as default in the Dockerfile."
+  echo "  DOCKER_TARGET A specific target to build."
+  echo "               It's currently not possible to pass multiple targets."
+  echo "               Default: main ldap"
   echo "  HTTP_PROXY   The proxy to use for http requests."
   echo "               Example: http://proxy.domain.tld:3128"
-  echo "               Default: empty"
-  echo "  HTTPS_PROXY  The proxy to use for https requests."
-  echo "               Example: http://proxy.domain.tld:3128"
-  echo "               Default: empty"
-  echo "  FTP_PROXY    The proxy to use for ftp requests."
-  echo "               Example: http://proxy.domain.tld:3128"
-  echo "               Default: empty"
+  echo "               Default: undefined"
   echo "  NO_PROXY     Comma-separated list of domain extensions proxy should not be used for."
   echo "               Example: .domain1.tld,.domain2.tld"
-  echo "               Default: empty"
+  echo "               Default: undefined"
+  echo "  DEBUG        If defined, the script does not stop when certain checks are unsatisfied."
+  echo "               Default: undefined"
+  echo "  DRY_RUN      Prints all build statements instead of running them."
+  echo "               Default: undefined"
+  echo "  GH_ACTION    If defined, special 'echo' statements are enabled that set the"
+  echo "               following environment variables in Github Actions:"
+  echo "               - FINAL_DOCKER_TAG: The final value of the DOCKER_TAG env variable"
+  echo "               Default: undefined"
+  echo ""
+  echo "Examples:"
+  echo "  ${0} master"
+  echo "    This will fetch the latest 'master' branch, build a Docker Image and tag it"
+  echo "    'netboxcommunity/netbox:latest'."
+  echo "  ${0} develop"
+  echo "    This will fetch the latest 'develop' branch, build a Docker Image and tag it"
+  echo "    'netboxcommunity/netbox:snapshot'."
+  echo "  ${0} v2.6.6"
+  echo "    This will fetch the 'v2.6.6' tag, build a Docker Image and tag it"
+  echo "    'netboxcommunity/netbox:v2.6.6' and 'netboxcommunity/netbox:v2.6'."
+  echo "  ${0} develop-2.7"
+  echo "    This will fetch the 'develop-2.7' branch, build a Docker Image and tag it"
+  echo "    'netboxcommunity/netbox:develop-2.7'."
+  echo "  SRC_ORG=cimnine ${0} feature-x"
+  echo "    This will fetch the 'feature-x' branch from https://github.com/cimnine/netbox.git,"
+  echo "    build a Docker Image and tag it 'netboxcommunity/netbox:feature-x'."
+  echo "  SRC_ORG=cimnine DOCKER_ORG=cimnine ${0} feature-x"
+  echo "    This will fetch the 'feature-x' branch from https://github.com/cimnine/netbox.git,"
+  echo "    build a Docker Image and tag it 'cimnine/netbox:feature-x'."
 
   if [ "${1}x" == "x" ]; then
     exit 1
@@ -76,122 +95,217 @@ if [ "${1}x" == "x" ] || [ "${1}" == "--help" ] || [ "${1}" == "-h" ]; then
   fi
 fi
 
-# read the project version and trim it
-# see https://stackoverflow.com/a/3232433/172132
-NETBOX_DOCKER_PROJECT_VERSION="${NETBOX_DOCKER_PROJECT_VERSION-$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' VERSION)}"
-
-# variables for fetching the source
-SRC_ORG="${SRC_ORG-netbox-community}"
-SRC_REPO="${SRC_REPO-netbox}"
-BRANCH="${1}"
-URL="${URL-https://github.com/${SRC_ORG}/${SRC_REPO}/archive/$BRANCH.tar.gz}"
-
-# Checking which VARIANT to build
-VARIANT="${VARIANT-main}"
-if [ "$VARIANT" == "main" ]; then
-  DOCKERFILE="Dockerfile"
+###
+# Enabling dry-run mode
+###
+if [ -z "${DRY_RUN}" ]; then
+  DRY=""
 else
-  DOCKERFILE="Dockerfile.${VARIANT}"
+  echo "⚠️ DRY_RUN MODE ON ⚠️"
+  DRY="echo"
 fi
 
-# Fail fast
-if [ ! -f "${DOCKERFILE}" ]; then
-  echo "🚨 The Dockerfile ${DOCKERFILE} for variant '${VARIANT}' doesn't exist."
+###
+# Variables for fetching the source
+###
+SRC_ORG="${SRC_ORG-netbox-community}"
+SRC_REPO="${SRC_REPO-netbox}"
+NETBOX_BRANCH="${1}"
+URL="${URL-https://github.com/${SRC_ORG}/${SRC_REPO}.git}"
+NETBOX_PATH="${NETBOX_PATH-.netbox}"
 
-  if [ -z "$DEBUG" ]; then
+###
+# Fetching the source
+###
+if [ "${2}" != "--push-only" ] && [ -z "${SKIP_GIT}" ] ; then
+  echo "🌐 Checking out '${NETBOX_BRANCH}' of netbox from the url '${URL}' into '${NETBOX_PATH}'"
+  if [ ! -d "${NETBOX_PATH}" ]; then
+    $DRY git clone -q --depth 10 -b "${NETBOX_BRANCH}" "${URL}" "${NETBOX_PATH}"
+  fi
+
+  (
+    $DRY cd "${NETBOX_PATH}"
+
+    if [ -n "${HTTP_PROXY}" ]; then
+      git config http.proxy "${HTTP_PROXY}"
+    fi
+
+    $DRY git remote set-url origin "${URL}"
+    $DRY git fetch -qp --depth 10 origin "${NETBOX_BRANCH}"
+    $DRY git checkout -qf FETCH_HEAD
+    $DRY git prune
+  )
+  echo "✅ Checked out netbox"
+fi
+
+###
+# Determining the value for DOCKERFILE
+# and checking whether it exists
+###
+DOCKERFILE="${DOCKERFILE-Dockerfile}"
+if [ ! -f "${DOCKERFILE}" ]; then
+  echo "🚨 The Dockerfile ${DOCKERFILE} doesn't exist."
+
+  if [ -z "${DEBUG}" ]; then
     exit 1
   else
    echo "⚠️ Would exit here with code '1', but DEBUG is enabled."
  fi
 fi
 
-# variables for tagging the docker image
+###
+# Variables for labelling the docker image
+###
+BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M+00:00')"
+
+if [ -d ".git" ]; then
+  GIT_REF="$(git rev-parse HEAD)"
+fi
+
+# Read the project version from the `VERSION` file and trim it, see https://stackoverflow.com/a/3232433/172132
|
||||||
|
PROJECT_VERSION="${PROJECT_VERSION-$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' VERSION)}"
|
||||||
|
|
||||||
|
# Get the Git information from the netbox directory
|
||||||
|
if [ -d "${NETBOX_PATH}/.git" ]; then
|
||||||
|
NETBOX_GIT_REF=$(cd ${NETBOX_PATH}; git rev-parse HEAD)
|
||||||
|
NETBOX_GIT_BRANCH=$(cd ${NETBOX_PATH}; git rev-parse --abbrev-ref HEAD)
|
||||||
|
NETBOX_GIT_URL=$(cd ${NETBOX_PATH}; git remote get-url origin)
|
||||||
|
fi
|
||||||
|
|
||||||
|
###
|
||||||
|
# Variables for tagging the docker image
|
||||||
|
###
|
||||||
|
DOCKER_REGISTRY="${DOCKER_REGISTRY-docker.io}"
|
||||||
DOCKER_ORG="${DOCKER_ORG-netboxcommunity}"
|
DOCKER_ORG="${DOCKER_ORG-netboxcommunity}"
|
||||||
DOCKER_REPO="${DOCKER_REPO-netbox}"
|
DOCKER_REPO="${DOCKER_REPO-netbox}"
|
||||||
case "${BRANCH}" in
|
case "${NETBOX_BRANCH}" in
|
||||||
master)
|
master)
|
||||||
TAG="${TAG-latest}";;
|
TAG="${TAG-latest}";;
|
||||||
develop)
|
develop)
|
||||||
TAG="${TAG-snapshot}";;
|
TAG="${TAG-snapshot}";;
|
||||||
*)
|
*)
|
||||||
TAG="${TAG-$BRANCH}";;
|
TAG="${TAG-$NETBOX_BRANCH}";;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
DOCKER_TAG="${DOCKER_TAG-${DOCKER_ORG}/${DOCKER_REPO}:${TAG}}"
|
###
|
||||||
if [ "$VARIANT" != "main" ]; then
|
# Determine targets to build
|
||||||
DOCKER_TAG="${DOCKER_TAG}-${VARIANT}"
|
###
|
||||||
fi
|
DEFAULT_DOCKER_TARGETS=("main" "ldap")
|
||||||
|
DOCKER_TARGETS=( "${DOCKER_TARGET:-"${DEFAULT_DOCKER_TARGETS[@]}"}")
|
||||||
|
echo "🏭 Building the following targets:" "${DOCKER_TARGETS[@]}"
|
||||||
|
|
||||||
if [[ "${TAG}" =~ ^v([0-9]+)\.([0-9]+)\.[0-9]+$ ]]; then
|
###
|
||||||
|
# Build each target
|
||||||
|
###
|
||||||
|
export DOCKER_BUILDKIT=${DOCKER_BUILDKIT-1}
|
||||||
|
for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
|
||||||
|
echo "🏗 Building the target '${DOCKER_TARGET}'"
|
||||||
|
|
||||||
|
###
|
||||||
|
# composing the final TARGET_DOCKER_TAG
|
||||||
|
###
|
||||||
|
TARGET_DOCKER_TAG="${DOCKER_TAG-${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_REPO}:${TAG}}"
|
||||||
|
if [ "${DOCKER_TARGET}" != "main" ]; then
|
||||||
|
TARGET_DOCKER_TAG="${TARGET_DOCKER_TAG}-${DOCKER_TARGET}"
|
||||||
|
fi
|
||||||
|
if [ -n "${GH_ACTION}" ]; then
|
||||||
|
echo "::set-env name=FINAL_DOCKER_TAG::${TARGET_DOCKER_TAG}"
|
||||||
|
echo "::set-output name=skipped::false"
|
||||||
|
fi
|
||||||
|
|
||||||
|
###
|
||||||
|
# composing the additional DOCKER_SHORT_TAG,
|
||||||
|
# i.e. "v2.6.1" becomes "v2.6",
|
||||||
|
# which is only relevant for version tags
|
||||||
|
###
|
||||||
|
if [[ "${TAG}" =~ ^v([0-9]+)\.([0-9]+)\.[0-9]+$ ]]; then
|
||||||
MAJOR=${BASH_REMATCH[1]}
|
MAJOR=${BASH_REMATCH[1]}
|
||||||
MINOR=${BASH_REMATCH[2]}
|
MINOR=${BASH_REMATCH[2]}
|
||||||
|
|
||||||
DOCKER_SHORT_TAG="${DOCKER_SHORT_TAG-${DOCKER_ORG}/${DOCKER_REPO}:v${MAJOR}.${MINOR}}"
|
TARGET_DOCKER_SHORT_TAG="${DOCKER_SHORT_TAG-${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_REPO}:v${MAJOR}.${MINOR}}"
|
||||||
|
|
||||||
if [ "$VARIANT" != "main" ]; then
|
if [ "${DOCKER_TARGET}" != "main" ]; then
|
||||||
DOCKER_SHORT_TAG="${DOCKER_SHORT_TAG}-${VARIANT}"
|
TARGET_DOCKER_SHORT_TAG="${TARGET_DOCKER_SHORT_TAG}-${DOCKER_TARGET}"
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
|
||||||
|
|
||||||
DOCKER_OPTS=("${DOCKER_OPTS[@]}")
|
###
|
||||||
|
# Proceeding to buils stage, except if `--push-only` is passed
|
||||||
|
###
|
||||||
|
if [ "${2}" != "--push-only" ] ; then
|
||||||
|
###
|
||||||
|
# Composing all arguments for `docker build`
|
||||||
|
###
|
||||||
|
DOCKER_BUILD_ARGS=(
|
||||||
|
--pull
|
||||||
|
--target "${DOCKER_TARGET}"
|
||||||
|
-f "${DOCKERFILE}"
|
||||||
|
-t "${TARGET_DOCKER_TAG}"
|
||||||
|
)
|
||||||
|
if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
|
||||||
|
DOCKER_BUILD_ARGS+=( -t "${TARGET_DOCKER_SHORT_TAG}" )
|
||||||
|
fi
|
||||||
|
|
||||||
# caching is only ok for version tags
|
# --label
|
||||||
case "${TAG}" in
|
if [ "${DOCKER_TARGET}" == "main" ]; then
|
||||||
v*) ;;
|
DOCKER_BUILD_ARGS+=(
|
||||||
*) DOCKER_OPTS+=( "--no-cache" ) ;;
|
--label "ORIGINAL_TAG=${TARGET_DOCKER_TAG}"
|
||||||
esac
|
|
||||||
|
|
||||||
DOCKER_OPTS+=( "--pull" )
|
--label "org.label-schema.build-date=${BUILD_DATE}"
|
||||||
|
--label "org.opencontainers.image.created=${BUILD_DATE}"
|
||||||
|
|
||||||
# Build args
|
--label "org.label-schema.version=${PROJECT_VERSION}"
|
||||||
DOCKER_BUILD_ARGS=(
|
--label "org.opencontainers.image.version=${PROJECT_VERSION}"
|
||||||
--build-arg "NETBOX_DOCKER_PROJECT_VERSION=${NETBOX_DOCKER_PROJECT_VERSION}"
|
)
|
||||||
--build-arg "FROM_TAG=${TAG}"
|
if [ -d ".git" ]; then
|
||||||
--build-arg "BRANCH=${BRANCH}"
|
DOCKER_BUILD_ARGS+=(
|
||||||
--build-arg "URL=${URL}"
|
--label "org.label-schema.vcs-ref=${GIT_REF}"
|
||||||
--build-arg "DOCKER_ORG=${DOCKER_ORG}"
|
--label "org.opencontainers.image.revision=${GIT_REF}"
|
||||||
--build-arg "DOCKER_REPO=${DOCKER_REPO}"
|
)
|
||||||
)
|
fi
|
||||||
if [ -n "$HTTP_PROXY" ]; then
|
if [ -d "${NETBOX_PATH}/.git" ]; then
|
||||||
|
DOCKER_BUILD_ARGS+=(
|
||||||
|
--label "NETBOX_GIT_BRANCH=${NETBOX_GIT_BRANCH}"
|
||||||
|
--label "NETBOX_GIT_REF=${NETBOX_GIT_REF}"
|
||||||
|
--label "NETBOX_GIT_URL=${NETBOX_GIT_URL}"
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# --build-arg
|
||||||
|
DOCKER_BUILD_ARGS+=( --build-arg "NETBOX_PATH=${NETBOX_PATH}" )
|
||||||
|
|
||||||
|
if [ -n "${DOCKER_FROM}" ]; then
|
||||||
|
DOCKER_BUILD_ARGS+=( --build-arg "FROM=${DOCKER_FROM}" )
|
||||||
|
fi
|
||||||
|
if [ -n "${HTTP_PROXY}" ]; then
|
||||||
DOCKER_BUILD_ARGS+=( --build-arg "http_proxy=${HTTP_PROXY}" )
|
DOCKER_BUILD_ARGS+=( --build-arg "http_proxy=${HTTP_PROXY}" )
|
||||||
fi
|
|
||||||
if [ -n "$HTTPS_PROXY" ]; then
|
|
||||||
DOCKER_BUILD_ARGS+=( --build-arg "https_proxy=${HTTPS_PROXY}" )
|
DOCKER_BUILD_ARGS+=( --build-arg "https_proxy=${HTTPS_PROXY}" )
|
||||||
fi
|
fi
|
||||||
if [ -n "$FTP_PROXY" ]; then
|
if [ -n "${NO_PROXY}" ]; then
|
||||||
DOCKER_BUILD_ARGS+=( --build-arg "ftp_proxy=${FTP_PROXY}" )
|
|
||||||
fi
|
|
||||||
if [ -n "$NO_PROXY" ]; then
|
|
||||||
DOCKER_BUILD_ARGS+=( --build-arg "no_proxy=${NO_PROXY}" )
|
DOCKER_BUILD_ARGS+=( --build-arg "no_proxy=${NO_PROXY}" )
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "$DRY_RUN" ]; then
|
|
||||||
DOCKER_CMD="docker"
|
|
||||||
else
|
|
||||||
echo "⚠️ DRY_RUN MODE ON ⚠️"
|
|
||||||
DOCKER_CMD="echo docker"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "${2}" != "--push-only" ] ; then
|
|
||||||
echo "🐳 Building the Docker image '${DOCKER_TAG}' from the url '${URL}'."
|
|
||||||
$DOCKER_CMD build -t "${DOCKER_TAG}" "${DOCKER_BUILD_ARGS[@]}" "${DOCKER_OPTS[@]}" -f "${DOCKERFILE}" .
|
|
||||||
echo "✅ Finished building the Docker images '${DOCKER_TAG}'"
|
|
||||||
|
|
||||||
if [ -n "$DOCKER_SHORT_TAG" ]; then
|
|
||||||
echo "🐳 Tagging image '${DOCKER_SHORT_TAG}'."
|
|
||||||
$DOCKER_CMD tag "${DOCKER_TAG}" "${DOCKER_SHORT_TAG}"
|
|
||||||
echo "✅ Tagged image '${DOCKER_SHORT_TAG}'"
|
|
||||||
fi
|
fi
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "${2}" == "--push" ] || [ "${2}" == "--push-only" ] ; then
|
###
|
||||||
echo "⏫ Pushing '${DOCKER_TAG}"
|
# Building the docker image
|
||||||
$DOCKER_CMD push "${DOCKER_TAG}"
|
###
|
||||||
echo "✅ Finished pushing the Docker image '${DOCKER_TAG}'."
|
echo "🐳 Building the Docker image '${TARGET_DOCKER_TAG}'."
|
||||||
|
$DRY docker build "${DOCKER_BUILD_ARGS[@]}" .
|
||||||
if [ -n "$DOCKER_SHORT_TAG" ]; then
|
echo "✅ Finished building the Docker images '${TARGET_DOCKER_TAG}'"
|
||||||
echo "⏫ Pushing '${DOCKER_SHORT_TAG}'"
|
|
||||||
$DOCKER_CMD push "${DOCKER_SHORT_TAG}"
|
|
||||||
echo "✅ Finished pushing the Docker image '${DOCKER_SHORT_TAG}'."
|
|
||||||
fi
|
fi
|
||||||
fi
|
|
||||||
|
###
|
||||||
|
# Pushing the docker images if either `--push` or `--push-only` are passed
|
||||||
|
###
|
||||||
|
if [ "${2}" == "--push" ] || [ "${2}" == "--push-only" ] ; then
|
||||||
|
echo "⏫ Pushing '${TARGET_DOCKER_TAG}"
|
||||||
|
$DRY docker push "${TARGET_DOCKER_TAG}"
|
||||||
|
echo "✅ Finished pushing the Docker image '${TARGET_DOCKER_TAG}'."
|
||||||
|
|
||||||
|
if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
|
||||||
|
echo "⏫ Pushing '${TARGET_DOCKER_SHORT_TAG}'"
|
||||||
|
$DRY docker push "${TARGET_DOCKER_SHORT_TAG}"
|
||||||
|
echo "✅ Finished pushing the Docker image '${TARGET_DOCKER_SHORT_TAG}'."
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
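For orientation, a minimal invocation sketch based only on the variables documented in the help text above; the branch, tag and target values are illustrative, not prescriptive:

  # build and push the v2.6.6 release for the default targets (main and ldap)
  ./build.sh v2.6.6 --push

  # build only the ldap target and print the docker commands instead of running them
  DRY_RUN=true DOCKER_TARGET=ldap ./build.sh develop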
@@ -37,6 +37,10 @@ DATABASE = {
     # PostgreSQL password
     'HOST': os.environ.get('DB_HOST', 'localhost'),     # Database server
     'PORT': os.environ.get('DB_PORT', ''),              # Database port (leave blank for default)
+    'OPTIONS': {'sslmode': os.environ.get('DB_SSLMODE', 'prefer')},
+                                                        # Database connection SSLMODE
+    'CONN_MAX_AGE': int(os.environ.get('DB_CONN_MAX_AGE', '300')),
+                                                        # Database connection persistence
 }

 # This key is used for secure generation of random numbers and strings. It must never be exposed outside of this file.
@@ -47,13 +51,22 @@ SECRET_KEY = os.environ.get('SECRET_KEY', read_secret('secret_key'))

 # Redis database settings. The Redis database is used for caching and background processing such as webhooks
 REDIS = {
+    'webhooks': {
         'HOST': os.environ.get('REDIS_HOST', 'localhost'),
         'PORT': int(os.environ.get('REDIS_PORT', 6379)),
         'PASSWORD': os.environ.get('REDIS_PASSWORD', read_secret('redis_password')),
-        'DATABASE': os.environ.get('REDIS_DATABASE', '0'),
-        'CACHE_DATABASE': os.environ.get('REDIS_CACHE_DATABASE', '1'),
-        'DEFAULT_TIMEOUT': os.environ.get('REDIS_TIMEOUT', '300'),
+        'DATABASE': int(os.environ.get('REDIS_DATABASE', 0)),
+        'DEFAULT_TIMEOUT': int(os.environ.get('REDIS_TIMEOUT', 300)),
         'SSL': os.environ.get('REDIS_SSL', 'False').lower() == 'true',
+    },
+    'caching': {
+        'HOST': os.environ.get('REDIS_CACHE_HOST', os.environ.get('REDIS_HOST', 'localhost')),
+        'PORT': int(os.environ.get('REDIS_CACHE_PORT', os.environ.get('REDIS_PORT', 6379))),
+        'PASSWORD': os.environ.get('REDIS_CACHE_PASSWORD', os.environ.get('REDIS_PASSWORD', read_secret('redis_cache_password'))),
+        'DATABASE': int(os.environ.get('REDIS_CACHE_DATABASE', 1)),
+        'DEFAULT_TIMEOUT': int(os.environ.get('REDIS_CACHE_TIMEOUT', os.environ.get('REDIS_TIMEOUT', 300))),
+        'SSL': os.environ.get('REDIS_CACHE_SSL', os.environ.get('REDIS_SSL', 'False')).lower() == 'true',
+    },
 }

 #########################
@@ -161,13 +174,13 @@ PREFER_IPV4 = os.environ.get('PREFER_IPV4', 'False').lower() == 'true'
 # this setting is derived from the installed location.
 REPORTS_ROOT = os.environ.get('REPORTS_ROOT', '/etc/netbox/reports')

+# The file path where custom scripts will be stored. A trailing slash is not needed. Note that the default value of
+# this setting is derived from the installed location.
+SCRIPTS_ROOT = os.environ.get('SCRIPTS_ROOT', '/etc/netbox/scripts')
+
 # Time zone (default: UTC)
 TIME_ZONE = os.environ.get('TIME_ZONE', 'UTC')

-# The Webhook event backend is disabled by default. Set this to True to enable it. Note that this requires a Redis
-# database be configured and accessible by NetBox (see `REDIS` below).
-WEBHOOKS_ENABLED = os.environ.get('WEBHOOKS_ENABLED', 'False').lower() == 'true'
-
 # Date/time formatting. See the following link for supported formats:
 # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
 DATE_FORMAT = os.environ.get('DATE_FORMAT', 'N j, Y')
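A sketch of the environment variables that feed the new two-pool Redis configuration, using the names that appear in env/netbox.env further down; the values are placeholders. When the REDIS_CACHE_* variables are left unset, the 'caching' pool falls back to the plain REDIS_* values, as the os.environ.get() fallbacks above show:

  # example values only
  export REDIS_HOST=redis
  export REDIS_PASSWORD=changeme
  export REDIS_DATABASE=0
  export REDIS_CACHE_HOST=redis-cache
  export REDIS_CACHE_PASSWORD=changeme-too
  export REDIS_CACHE_DATABASE=0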
docker-compose.test.yml (new file, 45 lines):

version: '3.4'
services:
  netbox:
    image: ${IMAGE-netboxcommunity/netbox:latest}
    depends_on:
    - postgres
    - redis
    env_file: env/netbox.env
    user: '101'
    volumes:
    - ./startup_scripts:/opt/netbox/startup_scripts:z,ro
    - ./${INITIALIZERS_DIR-initializers}:/opt/netbox/initializers:z,ro
    - ./configuration:/etc/netbox/config:z,ro
    - ./reports:/etc/netbox/reports:z,ro
    - ./scripts:/etc/netbox/scripts:z,ro
    - netbox-nginx-config:/etc/netbox-nginx:z
    - netbox-static-files:/opt/netbox/netbox/static:z
    - netbox-media-files:/opt/netbox/netbox/media:z
  nginx:
    command: nginx -c /etc/netbox-nginx/nginx.conf
    image: nginx:1.17-alpine
    depends_on:
    - netbox
    ports:
    - 8080
    volumes:
    - netbox-static-files:/opt/netbox/netbox/static:ro
    - netbox-nginx-config:/etc/netbox-nginx/:ro
  postgres:
    image: postgres:11-alpine
    env_file: env/postgres.env
  redis:
    image: redis:5-alpine
    command:
    - sh
    - -c # this is to evaluate the $REDIS_PASSWORD from the env
    - redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
    env_file: env/redis.env
volumes:
  netbox-static-files:
    driver: local
  netbox-nginx-config:
    driver: local
  netbox-media-files:
    driver: local
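A hedged usage sketch: this compose file is driven by the IMAGE and INITIALIZERS_DIR variables it references, so a one-off test run could look like the following (the image tag is only an example; test.sh further down wraps the same command):

  IMAGE=netboxcommunity/netbox:latest docker-compose -f docker-compose.test.yml run --rm netbox ./manage.py test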
@@ -1,21 +1,20 @@
-version: '3'
+version: '3.4'
 services:
   netbox: &netbox
-    build:
-      context: .
-      args:
-        - BRANCH=${VERSION-master}
     image: netboxcommunity/netbox:${VERSION-latest}
     depends_on:
     - postgres
     - redis
+    - redis-cache
     - netbox-worker
     env_file: env/netbox.env
+    user: '101'
     volumes:
     - ./startup_scripts:/opt/netbox/startup_scripts:z,ro
     - ./initializers:/opt/netbox/initializers:z,ro
     - ./configuration:/etc/netbox/config:z,ro
     - ./reports:/etc/netbox/reports:z,ro
+    - ./scripts:/etc/netbox/scripts:z,ro
     - netbox-nginx-config:/etc/netbox-nginx:z
     - netbox-static-files:/opt/netbox/netbox/static:z
     - netbox-media-files:/opt/netbox/netbox/media:z
@@ -30,7 +29,7 @@ services:
     - rqworker
   nginx:
     command: nginx -c /etc/netbox-nginx/nginx.conf
-    image: nginx:1.15-alpine
+    image: nginx:1.17-alpine
     depends_on:
     - netbox
     ports:
@@ -39,12 +38,12 @@ services:
     - netbox-static-files:/opt/netbox/netbox/static:ro
     - netbox-nginx-config:/etc/netbox-nginx/:ro
   postgres:
-    image: postgres:10.4-alpine
+    image: postgres:11-alpine
     env_file: env/postgres.env
     volumes:
     - netbox-postgres-data:/var/lib/postgresql/data
   redis:
-    image: redis:4-alpine
+    image: redis:5-alpine
     command:
     - sh
     - -c # this is to evaluate the $REDIS_PASSWORD from the env
@@ -52,6 +51,13 @@ services:
     env_file: env/redis.env
     volumes:
     - netbox-redis-data:/data
+  redis-cache:
+    image: redis:5-alpine
+    command:
+    - sh
+    - -c # this is to evaluate the $REDIS_PASSWORD from the env
+    - redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
+    env_file: env/redis.env
 volumes:
   netbox-static-files:
     driver: local
@@ -59,8 +65,6 @@ volumes:
     driver: local
   netbox-media-files:
     driver: local
-  netbox-report-files:
-    driver: local
   netbox-postgres-data:
     driver: local
   netbox-redis-data:
@@ -1,12 +1,27 @@
 #!/bin/bash
+# Runs on every start of the Netbox Docker container
+
+# Stop when an error occures
 set -e

-# wait shortly and then run db migrations (retry on error)
-while ! ./manage.py migrate 2>&1; do
-  echo "⏳ Waiting on DB..."
-  sleep 3
-done
+# Allows Netbox to be run as non-root users
+umask 002
+
+# Try to connect to the DB
+DB_WAIT_TIMEOUT=${DB_WAIT_TIMEOUT-3}
+MAX_DB_WAIT_TIME=${MAX_DB_WAIT_TIME-30}
+CUR_DB_WAIT_TIME=0
+while ! ./manage.py migrate 2>&1 && [ "${CUR_DB_WAIT_TIME}" -lt "${MAX_DB_WAIT_TIME}" ]; do
+  echo "⏳ Waiting on DB... (${CUR_DB_WAIT_TIME}s / ${MAX_DB_WAIT_TIME}s)"
+  sleep "${DB_WAIT_TIMEOUT}"
+  CUR_DB_WAIT_TIME=$(( CUR_DB_WAIT_TIME + DB_WAIT_TIMEOUT ))
+done
+if [ "${CUR_DB_WAIT_TIME}" -ge "${MAX_DB_WAIT_TIME}" ]; then
+  echo "❌ Waited ${MAX_DB_WAIT_TIME}s or more for the DB to become ready."
+  exit 1
+fi

+# Create Superuser if required
 if [ "$SKIP_SUPERUSER" == "true" ]; then
   echo "↩️ Skip creating the superuser"
 else
@@ -42,21 +57,19 @@ END
   echo "💡 Superuser Username: ${SUPERUSER_NAME}, E-Mail: ${SUPERUSER_EMAIL}"
 fi

+# Run the startup scripts (and initializers)
 if [ "$SKIP_STARTUP_SCRIPTS" == "true" ]; then
   echo "↩️ Skipping startup scripts"
 else
-  for script in /opt/netbox/startup_scripts/*.py; do
-    echo "⚙️ Executing '$script'"
-    ./manage.py shell --interface python < "${script}"
-  done
+  echo "import runpy; runpy.run_path('../startup_scripts')" | ./manage.py shell --interface python
 fi

-# copy static files
+# Copy static files
 ./manage.py collectstatic --no-input

 echo "✅ Initialisation is done."

-# launch whatever is passed by docker
+# Launch whatever is passed by docker
 # (i.e. the RUN instruction in the Dockerfile)
 #
 # shellcheck disable=SC2068
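A sketch of how the new wait loop can be tuned: both variables are optional and the values below are only examples, set where the container picks up its environment (for instance in env/netbox.env, which the compose files pass in via env_file):

  # poll the database every 5 seconds, give up after 60 seconds
  DB_WAIT_TIMEOUT=5
  MAX_DB_WAIT_TIME=60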
env/netbox.env (vendored, 9 lines changed):

@@ -17,12 +17,15 @@ MAX_PAGE_SIZE=1000
 REDIS_HOST=redis
 REDIS_PASSWORD=H733Kdjndks81
 REDIS_DATABASE=0
-REDIS_CACHE_DATABASE=1
 REDIS_SSL=false
+REDIS_CACHE_HOST=redis-cache
+REDIS_CACHE_PASSWORD=t4Ph722qJ5QHeQ1qfu36
+REDIS_CACHE_DATABASE=0
+REDIS_CACHE_SSL=false
 SECRET_KEY=r8OwDznj!!dci#P9ghmRfdu1Ysxm0AiPeDCQhKE+N_rClfWNj
 SKIP_STARTUP_SCRIPTS=false
-SKIP_SUPERUSER=true
-SUPERUSER_NAME=admin2
+SKIP_SUPERUSER=false
+SUPERUSER_NAME=admin
 SUPERUSER_EMAIL=admin@example.com
 SUPERUSER_PASSWORD=admin
 SUPERUSER_API_TOKEN=0123456789abcdef0123456789abcdef01234567
env/redis-cache.env (new vendored file, 1 line):

REDIS_PASSWORD=t4Ph722qJ5QHeQ1qfu36
The Docker Hub build hooks are removed: hooks/common (82 lines), hooks/test (12 lines), and two 5-line wrapper hooks (one running run_build, the other run_build --push-only).

Deleted hook wrapper (5 lines):

#!/bin/bash

. hooks/common

run_build

hooks/common (82 lines, deleted):

#!/bin/bash

ensure_jq() {
  echo "🛠🛠🛠 Installing JQ via apt-get"
  [ -x "$(command -v jq)" ] || ( apt-get update && apt-get install -y jq )
}

ensure_dockerfile_present() {
  if [ "${VARIANT}" == "main" ]; then
    DOCKERFILE="Dockerfile"
  else
    DOCKERFILE="Dockerfile.${VARIANT}"

    # Fail fast
    if [ ! -f "${DOCKERFILE}" ]; then
      echo "🚨 The Dockerfile '${DOCKERFILE}' for variant '${VARIANT}' doesn't exist."

      if [ -z "$DEBUG" ]; then
        exit 1
      else
        echo "⚠️ Would skip this, but DEBUG is enabled."
      fi
    fi

    if [ "${DOCKERFILE}" != "${DOCKERFILE_PATH}" ]; then
      echo "⚠️ The specified Dockerfile '${DOCKERFILE_PATH}' does not match the expected Dockerfile '${DOCKERFILE}'."
      echo "   This script will use '${DOCKERFILE}' and ignore '${DOCKERFILE_PATH}'."
    fi
  fi
}

# Passes args to the scripts
run_build() {
  echo "🐳🐳🐳 Building '${BUILD}' images, the '${VARIANT:-main}' variant"
  case $BUILD in
    release)
      # build the latest release
      # shellcheck disable=SC2068
      ./build-latest.sh $@
      ;;
    prerelease)
      # build the latest pre-release
      # shellcheck disable=SC2068
      PRERELEASE=true ./build-latest.sh $@
      ;;
    branches)
      # build all branches
      # shellcheck disable=SC2068
      ./build-branches.sh $@
      ;;
    special)
      # special build
      # shellcheck disable=SC2068
      #SRC_ORG=lampwins TAG=webhooks-backend ./build.sh "feature/webhooks-backend" $@
      echo "✅ No special builds today."
      ;;
    *)
      echo "🚨 Unrecognized build '$BUILD'."

      if [ -z "$DEBUG" ]; then
        exit 1
      else
        echo "⚠️ Would exit here with code '1', but DEBUG is enabled."
      fi
      ;;
  esac
}

echo "🤖🤖🤖 Preparing build"
export DOCKER_ORG="index.docker.io/netboxcommunity"
export DOCKER_REPO=netbox
export DOCKERHUB_REPO=netboxcommunity/netbox

# mis-using the "${DOCKER_TAG}" variable as "branch to build"
export BUILD="${DOCKER_TAG%-*}"
export VARIANT="${DOCKER_TAG#*-}"

unset DOCKER_TAG

ensure_dockerfile_present

ensure_jq

Deleted hook wrapper (5 lines):

#!/bin/bash

. hooks/common

run_build --push-only

hooks/test (12 lines, deleted):

#!/bin/bash

. hooks/common

if [ "${VARIANT}" == "main" ] && [ "${BUILD}" == "BRANCHES" ]; then
  echo "🐳🐳🐳 Testing"
  docker-compose pull --parallel
  docker-compose build
  docker-compose run netbox ./manage.py test
else
  echo "🐳🐳🐳 No tests are implemented for build '${BUILD}' with variant '${VARIANT}'."
fi
@@ -1,3 +1,18 @@
+## Possible Choices:
+##   type:
+##   - text
+##   - integer
+##   - boolean
+##   - date
+##   - url
+##   - select
+##   filter_logic:
+##   - disabled
+##   - loose
+##   - exact
+##
+## Examples:
+
 # text_field:
 #   type: text
 #   label: Custom Text
@@ -22,8 +37,8 @@
 #   weight: 10
 #   on_objects:
 #   - tenancy.models.Tenant
-# selection_field:
-#   type: selection
+# select_field:
+#   type: select
 #   label: Choose between items
 #   required: false
 #   filter_logic: exact
@@ -41,8 +56,8 @@
 #       weight: 50
 #     - value: Fourth Item
 #       weight: 40
-# selection_field_auto_weight:
-#   type: selection
+# select_field_auto_weight:
+#   type: select
 #   label: Choose between items
 #   required: false
 #   filter_logic: loose
@@ -1,8 +1,18 @@
+## Possible Choices:
+##   type:
+##   - virtual
+##   - lag
+##   - 1000base-t
+##   - ... and many more. See for yourself:
+##     https://github.com/netbox-community/netbox/blob/295d4f0394b431351c0cb2c3ecc791df68c6c2fb/netbox/dcim/choices.py#L510
+##
+## Examples:
+
 # - device: server01
 #   enabled: true
-#   type: Virtual
+#   type: virtual
 #   name: to-server02
 # - device: server02
 #   enabled: true
-#   type: Virtual
+#   type: virtual
 #   name: to-server01
@@ -1,9 +1,24 @@
+## Possible Choices:
+##   face:
+##   - front
+##   - rear
+##   status:
+##   - offline
+##   - active
+##   - planned
+##   - staged
+##   - failed
+##   - inventory
+##   - decommissioning
+##
+## Examples:
+
 # - name: server01
 #   device_role: server
 #   device_type: Other
 #   site: AMS 1
 #   rack: rack-01
-#   face: Front
+#   face: front
 #   position: 1
 #   custom_fields:
 #     text_field: Description
@@ -12,7 +27,7 @@
 #   device_type: Other
 #   site: AMS 2
 #   rack: rack-02
-#   face: Front
+#   face: front
 #   position: 2
 #   custom_fields:
 #     text_field: Description
@@ -21,7 +36,7 @@
 #   device_type: Other
 #   site: SING 1
 #   rack: rack-03
-#   face: Front
+#   face: front
 #   position: 3
 #   custom_fields:
 #     text_field: Description
@@ -1,26 +1,44 @@
+## Possible Choices:
+##   status:
+##   - active
+##   - reserved
+##   - deprecated
+##   - dhcp
+##   role:
+##   - loopback
+##   - secondary
+##   - anycast
+##   - vip
+##   - vrrp
+##   - hsrp
+##   - glbp
+##   - carp
+##
+## Examples:
+
 # - address: 10.1.1.1/24
 #   device: server01
 #   interface: to-server02
-#   status: Active
+#   status: active
 #   vrf: vrf1
 # - address: 2001:db8:a000:1::1/64
 #   device: server01
 #   interface: to-server02
-#   status: Active
+#   status: active
 #   vrf: vrf1
 # - address: 10.1.1.2/24
 #   device: server02
 #   interface: to-server01
-#   status: Active
+#   status: active
 # - address: 2001:db8:a000:1::2/64
 #   device: server02
 #   interface: to-server01
-#   status: Active
+#   status: active
 # - address: 10.1.1.10/24
 #   description: reserved IP
-#   status: Reserved
+#   status: reserved
 #   tenant: tenant1
 # - address: 2001:db8:a000:1::10/64
 #   description: reserved IP
-#   status: Reserved
+#   status: reserved
 #   tenant: tenant1
@@ -1,13 +1,22 @@
+## Possible Choices:
+##   status:
+##   - container
+##   - active
+##   - reserved
+##   - deprecated
+##
+## Examples:
+
 # - description: prefix1
 #   prefix: 10.1.1.0/24
 #   site: AMS 1
-#   status: Active
+#   status: active
 #   tenant: tenant1
 #   vlan: vlan1
 # - description: prefix2
 #   prefix: 10.1.2.0/24
 #   site: AMS 2
-#   status: Active
+#   status: active
 #   tenant: tenant2
 #   vlan: vlan2
 #   is_pool: true
@@ -15,6 +24,6 @@
 # - description: ipv6 prefix1
 #   prefix: 2001:db8:a000:1::/64
 #   site: AMS 2
-#   status: Active
+#   status: active
 #   tenant: tenant2
 #   vlan: vlan2
initializers/rack_groups.yml (new file, 3 lines):

# - name: cage 101
#   slug: cage-101
#   site: SING 1
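Like the other initializers, this file ships commented out; test.sh further down activates the examples by stripping the leading '# '. A minimal sketch of the same idea for just this file (a dry run that only prints the uncommented YAML):

  sed -E 's/^# //' initializers/rack_groups.yml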
@@ -1,24 +1,41 @@
+## Possible Choices:
+##   width:
+##   - 19
+##   - 23
+##   types:
+##   - 2-post-frame
+##   - 4-post-frame
+##   - 4-post-cabinet
+##   - wall-frame
+##   - wall-cabinet
+##   outer_unit:
+##   - mm
+##   - in
+##
+## Examples:
+
 # - site: AMS 1
 #   name: rack-01
 #   role: Role 1
-#   type: 4-post cabinet
-#   width: 19 inches
+#   type: 4-post-cabinet
+#   width: 19
 #   u_height: 47
 #   custom_fields:
 #     text_field: Description
 # - site: AMS 2
 #   name: rack-02
 #   role: Role 2
-#   type: 4-post cabinet
-#   width: 19 inches
+#   type: 4-post-cabinet
+#   width: 19
 #   u_height: 47
 #   custom_fields:
 #     text_field: Description
 # - site: SING 1
 #   name: rack-03
+#   group: cage 101
 #   role: Role 3
-#   type: 4-post cabinet
-#   width: 19 inches
+#   type: 4-post-cabinet
+#   width: 19
 #   u_height: 47
 #   custom_fields:
 #     text_field: Description
@@ -1,10 +1,18 @@
+## Possible Choices:
+##   status:
+##   - active
+##   - offline
+##   - staged
+##
+## Examples:
+
 # - cluster: cluster1
 #   comments: VM1
 #   disk: 200
 #   memory: 4096
 #   name: virtual machine 1
 #   platform: Platform 2
-#   status: Active
+#   status: active
 #   tenant: tenant1
 #   vcpus: 8
 # - cluster: cluster1
@@ -13,6 +21,6 @@
 #   memory: 2048
 #   name: virtual machine 2
 #   platform: Platform 2
-#   status: Active
+#   status: active
 #   tenant: tenant1
 #   vcpus: 8
@@ -1,11 +1,19 @@
+## Possible Choices:
+##   status:
+##   - active
+##   - reserved
+##   - deprecated
+##
+## Examples:
+
 # - name: vlan1
 #   site: AMS 1
-#   status: Active
+#   status: active
 #   vid: 5
 #   role: Main Management
 #   description: VLAN 5 for MGMT
 # - group: VLAN group 2
 #   name: vlan2
 #   site: AMS 1
-#   status: Active
+#   status: active
 #   vid: 1300
scripts/__init__.py (new, empty file).
@@ -1,19 +1,9 @@
-from extras.constants import CF_TYPE_TEXT, CF_TYPE_INTEGER, CF_TYPE_BOOLEAN, CF_TYPE_DATE, CF_TYPE_URL, CF_TYPE_SELECT, CF_FILTER_CHOICES
 from extras.models import CustomField, CustomFieldChoice

 from ruamel.yaml import YAML
 from pathlib import Path
 import sys

-text_to_fields = {
-  'boolean': CF_TYPE_BOOLEAN,
-  'date': CF_TYPE_DATE,
-  'integer': CF_TYPE_INTEGER,
-  'selection': CF_TYPE_SELECT,
-  'text': CF_TYPE_TEXT,
-  'url': CF_TYPE_URL,
-}
-
 def get_class_for_class_path(class_path):
   import importlib
   from django.contrib.contenttypes.models import ContentType
@@ -42,12 +32,6 @@ with file.open('r') as stream:
       if cf_details.get('description', 0):
         custom_field.description = cf_details['description']

-      # If no filter_logic is specified then it will default to 'Loose'
-      if cf_details.get('filter_logic', 0):
-        for choice_id, choice_text in CF_FILTER_CHOICES:
-          if choice_text.lower() == cf_details['filter_logic']:
-            custom_field.filter_logic = choice_id
-
       if cf_details.get('label', 0):
         custom_field.label = cf_details['label']

@@ -58,7 +42,7 @@ with file.open('r') as stream:
       custom_field.required = cf_details['required']

       if cf_details.get('type', 0):
-        custom_field.type = text_to_fields[cf_details['type']]
+        custom_field.type = cf_details['type']

       if cf_details.get('weight', 0):
         custom_field.weight = cf_details['weight']
startup_scripts/075_rack_groups.py (new file, 31 lines):

from dcim.models import Site,RackGroup
from ruamel.yaml import YAML

from pathlib import Path
import sys

file = Path('/opt/netbox/initializers/rack_groups.yml')
if not file.is_file():
  sys.exit()

with file.open('r') as stream:
  yaml=YAML(typ='safe')
  rack_groups= yaml.load(stream)

  required_assocs = {
    'site': (Site, 'name')
  }

  if rack_groups is not None:
    for params in rack_groups:

      for assoc, details in required_assocs.items():
        model, field = details
        query = { field: params.pop(assoc) }
        params[assoc] = model.objects.get(**query)

      rack_group, created = RackGroup.objects.get_or_create(**params)

      if created:
        print("🎨 Created rack group", rack_group.name)
@@ -1,7 +1,6 @@
 from dcim.models import Site, RackRole, Rack, RackGroup
 from tenancy.models import Tenant
 from extras.models import CustomField, CustomFieldValue
-from dcim.constants import RACK_TYPE_CHOICES, RACK_WIDTH_CHOICES
 from ruamel.yaml import YAML
 from pathlib import Path
 import sys
@@ -41,14 +40,6 @@ with file.open('r') as stream:
       params[assoc] = model.objects.get(**query)

-    for rack_type in RACK_TYPE_CHOICES:
-      if params['type'] in rack_type:
-        params['type'] = rack_type[0]
-
-    for rack_width in RACK_WIDTH_CHOICES:
-      if params['width'] in rack_width:
-        params['width'] = rack_width[0]
-
     rack, created = Rack.objects.get_or_create(**params)

     if created:
@@ -1,5 +1,4 @@
 from dcim.models import Site, Rack, DeviceRole, DeviceType, Device, Platform
-from dcim.constants import RACK_FACE_CHOICES
 from ipam.models import IPAddress
 from virtualization.models import Cluster
 from tenancy.models import Tenant
@@ -49,12 +48,6 @@ with file.open('r') as stream:
       params[assoc] = model.objects.get(**query)

-    if 'face' in params:
-      for rack_face in RACK_FACE_CHOICES:
-        if params['face'] in rack_face:
-          params['face'] = rack_face[0]
-          break
-
     device, created = Device.objects.get_or_create(**params)

     if created:
@@ -1,6 +1,5 @@
 from dcim.models import Site
 from ipam.models import VLAN, VLANGroup, Role
-from ipam.constants import VLAN_STATUS_CHOICES
 from tenancy.models import Tenant, TenantGroup
 from extras.models import CustomField, CustomFieldValue
 from ruamel.yaml import YAML
@@ -35,12 +34,6 @@ with file.open('r') as stream:
       params[assoc] = model.objects.get(**query)

-    if 'status' in params:
-      for vlan_status in VLAN_STATUS_CHOICES:
-        if params['status'] in vlan_status:
-          params['status'] = vlan_status[0]
-          break
-
     vlan, created = VLAN.objects.get_or_create(**params)

     if created:
@@ -1,6 +1,5 @@
 from dcim.models import Site
 from ipam.models import Prefix, VLAN, Role, VRF
-from ipam.constants import PREFIX_STATUS_CHOICES
 from tenancy.models import Tenant, TenantGroup
 from extras.models import CustomField, CustomFieldValue
 from ruamel.yaml import YAML
@@ -38,12 +37,6 @@ with file.open('r') as stream:
       params[assoc] = model.objects.get(**query)

-    if 'status' in params:
-      for prefix_status in PREFIX_STATUS_CHOICES:
-        if params['status'] in prefix_status:
-          params['status'] = prefix_status[0]
-          break
-
     prefix, created = Prefix.objects.get_or_create(**params)

     if created:
@@ -1,6 +1,5 @@
 from dcim.models import Site, Platform, DeviceRole
 from virtualization.models import Cluster, VirtualMachine
-from virtualization.constants import VM_STATUS_CHOICES
 from tenancy.models import Tenant
 from extras.models import CustomField, CustomFieldValue
 from ruamel.yaml import YAML
@@ -43,12 +42,6 @@ with file.open('r') as stream:
       params[assoc] = model.objects.get(**query)

-    if 'status' in params:
-      for vm_status in VM_STATUS_CHOICES:
-        if params['status'] in vm_status:
-          params['status'] = vm_status[0]
-          break
-
     virtual_machine, created = VirtualMachine.objects.get_or_create(**params)

     if created:
@@ -1,5 +1,4 @@
 from dcim.models import Interface, Device
-from dcim.constants import IFACE_TYPE_CHOICES
 from extras.models import CustomField, CustomFieldValue
 from ruamel.yaml import YAML

@@ -28,16 +27,6 @@ with file.open('r') as stream:
       params[assoc] = model.objects.get(**query)

-    if 'type' in params:
-      for outer_list in IFACE_TYPE_CHOICES:
-        for type_choices in outer_list[1]:
-          if params['type'] in type_choices:
-            params['type'] = type_choices[0]
-            break
-        else:
-          continue
-        break
-
     interface, created = Interface.objects.get_or_create(**params)

     if created:
@@ -1,5 +1,4 @@
 from ipam.models import IPAddress, VRF
-from ipam.constants import IPADDRESS_STATUS_CHOICES
 from dcim.models import Device, Interface
 from virtualization.models import VirtualMachine
 from tenancy.models import Tenant
@@ -49,12 +48,6 @@ with file.open('r') as stream:
       query = { field: params.pop(assoc) }
       params[assoc] = model.objects.get(**query)

-    if 'status' in params:
-      for ip_status in IPADDRESS_STATUS_CHOICES:
-        if params['status'] in ip_status:
-          params['status'] = ip_status[0]
-          break
-
     ip_address, created = IPAddress.objects.get_or_create(**params)

     if created:
startup_scripts/__main__.py (new file, 18 lines):

#!/usr/bin/env python3

import runpy
from os import scandir
from os.path import dirname, abspath

this_dir = dirname(abspath(__file__))

def filename(f):
  return f.name

with scandir(dirname(abspath(__file__))) as it:
  for f in sorted(it, key = filename):
    if f.name.startswith('__') or not f.is_file():
      continue

    print(f"Running {f.path}")
    runpy.run_path(f.path)
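This module is what lets the entrypoint run the whole startup_scripts directory inside a single Django shell instead of looping over the files. A hedged sketch of a manual invocation, mirroring the docker-entrypoint.sh change above (the relative path assumes the working directory used there):

  # run all startup scripts once, by hand, from the netbox project directory
  echo "import runpy; runpy.run_path('../startup_scripts')" | ./manage.py shell --interface python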
test.sh (new executable file, 69 lines):

#!/bin/bash

# exit when a command exits with an exit code != 0
set -e

# version is used by `docker-compose.yml` do determine the tag
# of the Docker Image that is to be used
export IMAGE="${IMAGE-netboxcommunity/netbox:latest}"

if [ -z "${IMAGE}" ]; then
  echo "⚠️ No image defined"

  if [ -z "${DEBUG}" ]; then
    exit 1;
  else
    echo "⚠️ Would 'exit 1' here, but DEBUG is '${DEBUG}'."
  fi
fi

# The docker compose command to use
doco="docker-compose -f docker-compose.test.yml"

INITIALIZERS_DIR=".initializers"

test_setup() {
  echo "🏗 Setup up test environment"
  if [ -d "${INITIALIZERS_DIR}" ]; then
    rm -rf "${INITIALIZERS_DIR}"
  fi

  mkdir "${INITIALIZERS_DIR}"
  (
    cd initializers
    for script in *.yml; do
      sed -E 's/^# //' "${script}" > "../${INITIALIZERS_DIR}/${script}"
    done
  )
}

test_netbox_unit_tests() {
  echo "⏱ Running Netbox Unit Tests"
  $doco run --rm netbox ./manage.py test
}

test_initializers() {
  echo "🏭 Testing Initializers"
  export INITIALIZERS_DIR
  $doco run --rm netbox ./manage.py check
}

test_cleanup() {
  echo "💣 Cleaning Up"
  $doco down -v

  if [ -d "${INITIALIZERS_DIR}" ]; then
    rm -rf "${INITIALIZERS_DIR}"
  fi
}

echo "🐳🐳🐳 Start testing '${IMAGE}'"

# Make sure the cleanup script is executed
trap test_cleanup EXIT ERR
test_setup

test_netbox_unit_tests
test_initializers

echo "🐳🐳🐳 Done testing '${IMAGE}'"
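A hedged usage sketch for the new test harness; the image tag is only an example, and DEBUG is optional:

  # run the unit tests and initializer checks against a locally built snapshot image
  IMAGE=netboxcommunity/netbox:snapshot ./test.sh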