Compare commits
101 Commits
| SHA1 |
|---|
| cb5ffa0354 |
| ff1eed3cf0 |
| 53d593de25 |
| 8111da308b |
| 0125ab4849 |
| ad0c04985b |
| 793393fe0f |
| 59995a8719 |
| 6cd156f7d5 |
| 95976f4cf4 |
| 009eb1fc6d |
| 989c250a82 |
| 3ee231bdd8 |
| 3338cdc9d7 |
| c80fb19507 |
| 74543ec272 |
| 9ccf0ac921 |
| 8aed79363a |
| f7337ed1e9 |
| 4c8435e874 |
| a37f1592fc |
| 04ac3d5f4b |
| c6df6a040a |
| 28e4ae44fb |
| 493fc60401 |
| 0d25aff744 |
| f33c647f24 |
| 16ae063321 |
| e4e2c788a9 |
| 2e5d84612d |
| 744f0e57ad |
| 52c51b5f99 |
| f88f4e1579 |
| 95f4d7856a |
| 07a0b1d7ef |
| fe811f37bd |
| 8321449cc0 |
| 618feff63a |
| a3cf645dc5 |
| d0c786e831 |
| 7112a88359 |
| 187ae4b2a1 |
| 3a0b3fe133 |
| 5343eaae65 |
| 83a0d5d12f |
| ee40e339c8 |
| 04e0b3d3ca |
| fa70e0b761 |
| 88b35eb48e |
| 5644254113 |
| 4dd7a51c7d |
| c7e259e116 |
| 3cbe07cb0e |
| 70b38d52b9 |
| a21d146b60 |
| 6e7a64bd81 |
| f8360ba6aa |
| bab8373f66 |
| ad93c99f46 |
| ed6256172f |
| 5109e340ca |
| 62d31fda58 |
| ed141c8a4e |
| 4d8d02e35a |
| 96132e1dcc |
| 896651ed97 |
| f810d0342d |
| 323e18278a |
| a0f7737916 |
| 7f8cc76af6 |
| 21bd7f426c |
| 3758bc805a |
| 72859ca71a |
| 0022392f03 |
| 385c66e30d |
| 65023a7dd4 |
| 426adb2333 |
| 584566b0f0 |
| 5399f8c890 |
| 2372c1eeff |
| 788aeacd9b |
| 00fa1793d0 |
| 4260e9b864 |
| 02713e1465 |
| 728a16c93d |
| 742560c571 |
| d273391773 |
| 380cb77080 |
| 92b6608403 |
| 94509f86d7 |
| 818266ace1 |
| dfb0327340 |
| e3946af27c |
| 8d8c58df54 |
| cbaaffc589 |
| 0f1cc9eea0 |
| 90018fc6d7 |
| 621fa12934 |
| 3094665092 |
| 31f52041f8 |
| 6ab38472be |
.ecrc (new file, 23 lines)

@@ -0,0 +1,23 @@
+{
+  "Verbose": false,
+  "Debug": false,
+  "IgnoreDefaults": false,
+  "SpacesAftertabs": false,
+  "NoColor": false,
+  "Exclude": [
+    "LICENSE",
+    "\\.initializers",
+    "\\.vscode"
+  ],
+  "AllowedContentTypes": [],
+  "PassedFiles": [],
+  "Disable": {
+    // set these options to true to disable specific checks
+    "EndOfLine": false,
+    "Indentation": false,
+    "InsertFinalNewline": false,
+    "TrimTrailingWhitespace": false,
+    "IndentSize": true,
+    "MaxLineLength": false
+  }
+}
.editorconfig (new file, 11 lines)

@@ -0,0 +1,11 @@
+root = true
+
+[*]
+end_of_line = lf
+insert_final_newline = true
+charset = utf-8
+indent_style = space
+indent_size = 2
+
+[*.py]
+indent_size = 4
.flake8 (new file, 7 lines)

@@ -0,0 +1,7 @@
+[flake8]
+max-line-length = 100
+extend-ignore = E203, W503
+per-file-ignores =
+    configuration/*:E131,E251,E266,E302,E305,E501,E722
+    startup_scripts/startup_script_utils/__init__.py:F401
+    docker/*:E266,E722
.github/ISSUE_TEMPLATE/bug_report.md (51 changed lines)

@@ -1,6 +1,6 @@
 ---
 name: Bug report
-about: Create a report to help us improve
+about: Create a report about a malfunction of the Docker setup
 title: ''
 labels: ''
 assignees: ''
@@ -9,39 +9,49 @@ assignees: ''

 <!--

-Before raising an issue here, answer the following questions for yourself, please:
+Please only raise an issue if you're certain that you've found a bug.
+Else, see these other means to get help:

-* Did you read through the troubleshooting section? (https://github.com/netbox-community/netbox-docker/wiki/Troubleshooting)
-* Have you had a look at the rest of the wiki? (https://github.com/netbox-community/netbox-docker/wiki)
-* Have you updated to the latest version and tried again? (i.e. `git pull` and `docker-compose pull`)
-* Have you reset the project and tried again? (i.e. `docker-compose down -v`)
-* Are you confident that your problem is related to the Docker image or Docker Compose file this project provides?
-  (Otherwise ask on the Netbox mailing list, please: https://groups.google.com/d/forum/netbox-discuss)
-* Have you looked through the issues already resolved?
+* See our troubleshooting section:
+  https://github.com/netbox-community/netbox-docker/wiki/Troubleshooting
+* Have a look at the rest of the wiki:
+  https://github.com/netbox-community/netbox-docker/wiki
+* Check the release notes:
+  https://github.com/netbox-community/netbox-docker/releases
+* Look through the issues already resolved:
+  https://github.com/netbox-community/netbox-docker/issues?q=is%3Aclosed

-Please try this means to get help before opening an issue here:
+If you did not find what you're looking for,
+try the help of our community:

-* On the networktocode Slack in the #netbox-docker channel: http://slack.networktocode.com/
-* On the networktocode Slack in the #netbox channel: http://slack.networktocode.com/
-* On the Netbox mailing list: https://groups.google.com/d/forum/netbox-discuss
+* Post to Github Discussions:
+  https://github.com/netbox-community/netbox-docker/discussions
+* Join the `#netbox-docker` channel on our Slack:
+  https://join.slack.com/t/netdev-community/shared_invite/zt-mtts8g0n-Sm6Wutn62q_M4OdsaIycrQ
+* Ask on the NetBox mailing list:
+  https://groups.google.com/d/forum/netbox-discuss

-Please don't open an issue when you have a PR ready. Just submit the PR, that's good enough.
+Please don't open an issue to open a PR.
+Just submit the PR, that's good enough.

 -->

 ## Current Behavior

 <!-- describe what you did and how it misbehaved -->
-...

 ## Expected Behavior

 <!-- describe what you expected instead -->
-...

 ## Debug Information

-<!-- please fill in the following information that might helps us debug your problem more quickly -->
+<!-- please fill in the following information that helps us debug your problem more quickly -->

 The output of `docker-compose version`: `XXXXX`
 The output of `docker version`: `XXXXX`
 The output of `git rev-parse HEAD`: `XXXXX`
@@ -59,17 +69,16 @@ The output of `docker inspect netboxcommunity/netbox:latest --format "{{json .Co

 The output of `docker-compose logs netbox`:
 <!--
-If your log is very long, create a Gist instead (and post the link to it): https://gist.github.com
+If your log is very long, create a Gist instead and post the link to it: https://gist.github.com
 -->

 ```text
 LOG LOG LOG
 ```

-The output of `docker-compose logs nginx`:
+The output of `cat docker-compose.override.yml`:
 <!--
-Only if you have gotten a 5xx http error, else delete this section.
-If your log is very long, create a Gist instead (and post the link to it): https://gist.github.com
+If this file is very long, create a Gist instead and post the link to it: https://gist.github.com
 -->

 ```text
.github/ISSUE_TEMPLATE/config.yml (new file, 15 lines)

@@ -0,0 +1,15 @@
+blank_issues_enabled: false
+contact_links:
+  - name: Question
+    url: https://github.com/netbox-community/netbox-docker/discussions
+    about: The Github Discussions are the right place to ask questions about how to use or do certain things with NetBox Docker.
+
+  - name: Chat
+    url: https://join.slack.com/t/netdev-community/shared_invite/zt-mtts8g0n-Sm6Wutn62q_M4OdsaIycrQ
+    about: 'Usually the quickest way to seek help with small issues is to join our #netbox-docker Slack channel.'
+
+  - name: Community Wiki
+    url: https://github.com/netbox-community/netbox-docker/wiki
+    about: |
+      Our wiki contains information for common problems and tips for operating NetBox Docker in production.
+      It's maintained by our excellent community.
.github/ISSUE_TEMPLATE/feature_request.md (46 changed lines)

@@ -9,39 +9,52 @@ assignees: ''

 <!--

-Before raising an issue here, answer the following questions for yourself, please:
+This issue type is to propose new features for the Docker setup.
+To just spin an idea, see the Github Discussions section, please.

-* Did you read through the troubleshooting section? (https://github.com/netbox-community/netbox-docker/wiki/Troubleshooting)
-* Have you had a look at the rest of the wiki? (https://github.com/netbox-community/netbox-docker/wiki)
-* Have you read the release notes recently (https://github.com/netbox-community/netbox-docker/releases)
-* Are you confident that your feature/change request is related to the Docker image or Docker Compose file this project provides?
-  (Otherwise ask on the Netbox mailing list, please: https://groups.google.com/d/forum/netbox-discuss)
-* Have you looked through the issues already resolved?
+Before asking for help, see these links first:

-Please try this means to get help before opening an issue here:
+* See our troubleshooting section:
+  https://github.com/netbox-community/netbox-docker/wiki/Troubleshooting
+* Have a look at the rest of the wiki:
+  https://github.com/netbox-community/netbox-docker/wiki
+* Check the release notes:
+  https://github.com/netbox-community/netbox-docker/releases
+* Look through the issues already resolved:
+  https://github.com/netbox-community/netbox-docker/issues?q=is%3Aclosed

-* On the networktocode Slack in the #netbox-docker channel: http://slack.networktocode.com/
-* On the networktocode Slack in the #netbox channel: http://slack.networktocode.com/
-* On the Netbox mailing list: https://groups.google.com/d/forum/netbox-discuss
+If you did not find what you're looking for,
+try the help of our community:

-Please don't open an issue when you have a PR ready. Just submit the PR, that's good enough.
+* Post to Github Discussions:
+  https://github.com/netbox-community/netbox-docker/discussions
+* Join the `#netbox-docker` channel on our Slack:
+  https://join.slack.com/t/netdev-community/shared_invite/zt-mtts8g0n-Sm6Wutn62q_M4OdsaIycrQ
+* Ask on the NetBox mailing list:
+  https://groups.google.com/d/forum/netbox-discuss
+
+Please don't open an issue to open a PR.
+Just submit the PR, that's good enough.

 -->

 ## Desired Behavior

 <!-- please describe the behavior you desire -->
-...

 ## Contrast to Current Behavior

 <!-- please describe how the desired behavior is different from the current behavior -->
-...

 ## Changes Required

 <!-- if you can, please elaborate what changes would exactly be required -->
-...

 ## Discussion: Benefits and Drawbacks

@@ -51,4 +64,5 @@ Please make your case here:
 - What are the drawbacks of this change? Is it backwards-compatible?
 - Anything else that you think is relevant to the discussion of this feature/change request.
 -->
-...

.github/workflows/push.yml (30 changed lines)

@@ -9,7 +9,31 @@ on:
       - release

 jobs:
+  lint:
+    runs-on: ubuntu-latest
+    name: Checks syntax of our code
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - name: Lint Code Base
+        uses: github/super-linter@v3
+        env:
+          DEFAULT_BRANCH: develop
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          SUPPRESS_POSSUM: true
+          LINTER_RULES_PATH: /
+          VALIDATE_ALL_CODEBASE: false
+          VALIDATE_DOCKERFILE: false
+          FILTER_REGEX_EXCLUDE: (.*/)?(LICENSE|configuration/.*)
+
+          EDITORCONFIG_FILE_NAME: .ecrc
+          DOCKERFILE_HADOLINT_FILE_NAME: .hadolint.yaml
+          MARKDOWN_CONFIG_FILE: .markdown-lint.yml
+          PYTHON_BLACK_CONFIG_FILE: pyproject.toml
+          PYTHON_FLAKE8_CONFIG_FILE: .flake8
+          PYTHON_ISORT_CONFIG_FILE: pyproject.toml
   build:
+    continue-on-error: ${{ matrix.docker_from == 'alpine:edge' }}
     strategy:
       matrix:
         build_cmd:
@@ -19,14 +43,14 @@ jobs:
           - ./build.sh develop
         docker_from:
           - '' # use the default of the build script
-          # - python:3.10-rc-alpine # disable until dependencies work
+          - alpine:edge
       fail-fast: false
     runs-on: ubuntu-latest
-    name: Builds new Netbox Docker Images
+    name: Builds new NetBox Docker Images
     steps:
       - id: git-checkout
         name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@v2
       - id: docker-build
         name: Build the image from '${{ matrix.docker_from }}' with '${{ matrix.build_cmd }}'
         run: ${{ matrix.build_cmd }}
.github/workflows/release.yml (2 changed lines)

@@ -18,7 +18,7 @@ jobs:
           - ./build.sh develop
       fail-fast: false
     runs-on: ubuntu-latest
-    name: Builds new Netbox Docker Images
+    name: Builds new NetBox Docker Images
     steps:
       - id: git-checkout
         name: Checkout
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -9,3 +9,4 @@ configuration/*
|
|||||||
configuration/ldap/*
|
configuration/ldap/*
|
||||||
!configuration/ldap/ldap_config.py
|
!configuration/ldap/ldap_config.py
|
||||||
prometheus.yml
|
prometheus.yml
|
||||||
|
super-linter.log
|
||||||
|
.hadolint.yaml (new file, 3 lines)

@@ -0,0 +1,3 @@
+ignored:
+  - DL3006
+  - DL3018
.markdown-lint.yml (new file, 2 lines)

@@ -0,0 +1,2 @@
+MD013: false
+MD041: false
Dockerfile (64 changed lines)

@@ -4,35 +4,31 @@ FROM ${FROM} as builder
 RUN apk add --no-cache \
       bash \
       build-base \
+      cargo \
       ca-certificates \
       cyrus-sasl-dev \
       graphviz \
       jpeg-dev \
       libevent-dev \
       libffi-dev \
+      libressl-dev \
       libxslt-dev \
+      musl-dev \
       openldap-dev \
-      postgresql-dev
-
-WORKDIR /install
-
-RUN pip install --prefix="/install" --no-warn-script-location \
-# gunicorn is used for launching netbox
-      gunicorn \
-      greenlet \
-      eventlet \
-# napalm is used for gathering information from network devices
-      napalm \
-# ruamel is used in startup_scripts
-      'ruamel.yaml>=0.15,<0.16' \
-# django_auth_ldap is required for ldap
-      django_auth_ldap \
-# django-storages was introduced in 2.7 and is optional
-      django-storages
+      postgresql-dev \
+      py3-pip \
+      python3-dev \
+  && python3 -m venv /opt/netbox/venv \
+  && /opt/netbox/venv/bin/python3 -m pip install --upgrade \
+      pip \
+      setuptools \
+      wheel

 ARG NETBOX_PATH
-COPY ${NETBOX_PATH}/requirements.txt /
-RUN pip install --prefix="/install" --no-warn-script-location -r /requirements.txt
+COPY ${NETBOX_PATH}/requirements.txt requirements-container.txt /
+RUN /opt/netbox/venv/bin/pip install \
+      -r /requirements.txt \
+      -r /requirements-container.txt

 ###
 # Main stage
@@ -44,6 +40,7 @@ FROM ${FROM} as main
 RUN apk add --no-cache \
       bash \
       ca-certificates \
+      curl \
       graphviz \
       libevent \
       libffi \
@@ -51,35 +48,38 @@ RUN apk add --no-cache \
       libressl \
       libxslt \
       postgresql-libs \
-      ttf-ubuntu-font-family
+      python3 \
+      py3-pip \
+      ttf-ubuntu-font-family \
+      unit \
+      unit-python3

 WORKDIR /opt

-COPY --from=builder /install /usr/local
+COPY --from=builder /opt/netbox/venv /opt/netbox/venv

 ARG NETBOX_PATH
 COPY ${NETBOX_PATH} /opt/netbox

 COPY docker/configuration.docker.py /opt/netbox/netbox/netbox/configuration.py
-COPY docker/gunicorn_config.py /etc/netbox/
-COPY docker/nginx.conf /etc/netbox-nginx/nginx.conf
 COPY docker/docker-entrypoint.sh /opt/netbox/docker-entrypoint.sh
+COPY docker/launch-netbox.sh /opt/netbox/launch-netbox.sh
 COPY startup_scripts/ /opt/netbox/startup_scripts/
 COPY initializers/ /opt/netbox/initializers/
 COPY configuration/ /etc/netbox/config/
+COPY docker/nginx-unit.json /etc/unit/

 WORKDIR /opt/netbox/netbox

-# Must set permissions for '/opt/netbox/netbox/static' directory
-# to g+w so that `./manage.py collectstatic` can be executed during
-# container startup.
 # Must set permissions for '/opt/netbox/netbox/media' directory
 # to g+w so that pictures can be uploaded to netbox.
-RUN mkdir static && chmod -R g+w static media
+RUN mkdir -p static /opt/unit/state/ /opt/unit/tmp/ \
+  && chmod -R g+w media /opt/unit/ \
+  && SECRET_KEY="dummy" /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py collectstatic --no-input

 ENTRYPOINT [ "/opt/netbox/docker-entrypoint.sh" ]

-CMD ["gunicorn", "-c /etc/netbox/gunicorn_config.py", "netbox.wsgi"]
+CMD [ "/opt/netbox/launch-netbox.sh" ]

 LABEL ORIGINAL_TAG="" \
       NETBOX_GIT_BRANCH="" \
@@ -89,8 +89,8 @@ LABEL ORIGINAL_TAG="" \
       # Also https://microbadger.com/labels
       org.label-schema.schema-version="1.0" \
       org.label-schema.build-date="" \
-      org.label-schema.name="Netbox Docker" \
-      org.label-schema.description="A container based distribution of Netbox, the free and open IPAM and DCIM solution." \
+      org.label-schema.name="NetBox Docker" \
+      org.label-schema.description="A container based distribution of NetBox, the free and open IPAM and DCIM solution." \
       org.label-schema.vendor="The netbox-docker contributors." \
       org.label-schema.url="https://github.com/netbox-community/netbox-docker" \
       org.label-schema.usage="https://github.com/netbox-community/netbox-docker/wiki" \
@@ -99,8 +99,8 @@ LABEL ORIGINAL_TAG="" \
       org.label-schema.version="snapshot" \
       # See https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
       org.opencontainers.image.created="" \
-      org.opencontainers.image.title="Netbox Docker" \
-      org.opencontainers.image.description="A container based distribution of Netbox, the free and open IPAM and DCIM solution." \
+      org.opencontainers.image.title="NetBox Docker" \
+      org.opencontainers.image.description="A container based distribution of NetBox, the free and open IPAM and DCIM solution." \
       org.opencontainers.image.licenses="Apache-2.0" \
       org.opencontainers.image.authors="The netbox-docker contributors." \
       org.opencontainers.image.vendor="The netbox-docker contributors." \
README.md (67 changed lines)

@@ -5,36 +5,36 @@
 
 
 
-[![Docker Pulls](https://img.shields.io/docker/pulls/netboxcommunity/netbox.svg)][netbox-docker-microbadger]
-[![Microbadger Layers](https://img.shields.io/microbadger/layers/netboxcommunity/netbox.svg)][netbox-docker-microbadger]
 [![GitHub license](https://img.shields.io/github/license/netbox-community/netbox-docker.svg)][netbox-docker-license]

-[The Github repository](netbox-docker-github) houses the components needed to build Netbox as a Docker container.
+[The Github repository](netbox-docker-github) houses the components needed to build NetBox as a Docker container.
 Images are built using this code and are released to [Docker Hub][netbox-dockerhub] and [Quay.io][netbox-quayio] once a day.

 Do you have any questions?
-Before opening an issue on Github, please join the [Network To Code][ntc-slack] Slack and ask for help in our [`#netbox-docker`][netbox-docker-slack] channel.
+Before opening an issue on Github,
+please join the [our Slack][netbox-docker-slack] and ask for help in the [`#netbox-docker`][netbox-docker-slack-channel] channel.

 [github-stargazers]: https://github.com/netbox-community/netbox-docker/stargazers
 [github-release]: https://github.com/netbox-community/netbox-docker/releases
 [netbox-docker-microbadger]: https://microbadger.com/images/netboxcommunity/netbox
 [netbox-dockerhub]: https://hub.docker.com/r/netboxcommunity/netbox/
 [netbox-docker-github]: https://github.com/netbox-community/netbox-docker/
-[ntc-slack]: http://slack.networktocode.com/
-[netbox-docker-slack]: https://slack.com/app_redirect?channel=netbox-docker&team=T09LQ7E9E
+[netbox-docker-slack]: https://join.slack.com/t/netdev-community/shared_invite/zt-mtts8g0n-Sm6Wutn62q_M4OdsaIycrQ
+[netbox-docker-slack-channel]: https://netdev-community.slack.com/archives/C01P0GEVBU7
+[netbox-slack-channel]: https://netdev-community.slack.com/archives/C01P0FRSXRV
 [netbox-docker-license]: https://github.com/netbox-community/netbox-docker/blob/release/LICENSE
 [netbox-quayio]: https://quay.io/repository/netboxcommunity/netbox

 ## Docker Tags

-* `vX.Y.Z`: These are release builds, automatically built from [the corresponding releases of Netbox][netbox-releases].
-* `latest`: These are release builds, automatically built from [the `master` branch of Netbox][netbox-master].
-* `snapshot`: These are pre-release builds, automatically built from the [`develop` branch of Netbox][netbox-develop].
-* `develop-X.Y`: These are pre-release builds, automatically built from the corresponding [branch of Netbox][netbox-branches].
+* `vX.Y.Z`: These are release builds, automatically built from [the corresponding releases of NetBox][netbox-releases].
+* `latest`: These are release builds, automatically built from [the `master` branch of NetBox][netbox-master].
+* `snapshot`: These are pre-release builds, automatically built from the [`develop` branch of NetBox][netbox-develop].
+* `develop-X.Y`: These are pre-release builds, automatically built from the corresponding [branch of NetBox][netbox-branches].

 Then there is currently one extra tags for each of the above tags:

-* `-ldap`: Contains additional dependencies and configurations for connecting Netbox to an LDAP directory.
+* `-ldap`: Contains additional dependencies and configurations for connecting NetBox to an LDAP directory.
   [Learn more about that in our wiki][netbox-docker-ldap].

 New images are built and published automatically every ~24h.
@@ -47,7 +47,7 @@ New images are built and published automatically every ~24h.

 ## Quickstart

-To get Netbox Docker up and running run the following commands.
+To get NetBox Docker up and running run the following commands.
 There is a more complete [_Getting Started_ guide on our wiki][wiki-getting-started] which explains every step.

 ```bash
@@ -56,7 +56,7 @@ cd netbox-docker
 tee docker-compose.override.yml <<EOF
 version: '3.4'
 services:
-  nginx:
+  netbox:
     ports:
       - 8000:8080
 EOF
@@ -66,7 +66,7 @@ docker-compose up

 The whole application will be available after a few minutes.
 Open the URL `http://0.0.0.0:8000/` in a web-browser.
-You should see the Netbox homepage.
+You should see the NetBox homepage.
 In the top-right corner you can login.
 The default credentials are:

@@ -79,17 +79,19 @@ The default credentials are:

 ## Documentation

-Please refer [to our wiki on Github][netbox-docker-wiki] for further information on how to use this Netbox Docker image properly.
+Please refer [to our wiki on Github][netbox-docker-wiki] for further information on how to use this NetBox Docker image properly.
 It covers advanced topics such as using files for secrets, deployment to Kubernetes, monitoring and configuring NAPALM or LDAP.

 [netbox-docker-wiki]: https://github.com/netbox-community/netbox-docker/wiki/

 ## Getting Help

-Feel free to ask questions in our [Github Community][netbox-community] or join [our Slack channel `#netbox-docker`][netbox-docker-slack] on the [Network To Code Slack][ntc-slack],
+Feel free to ask questions in our [Github Community][netbox-community]
+or [join our Slack][netbox-docker-slack] and ask [in our channel `#netbox-docker`][netbox-docker-slack-channel],
 which is free to use and where there are almost always people online that can help you in the Slack channel.

-If you need help with using Netbox or developing for it or against it's API you may find the `#netbox` channel on the same Slack instance very helpful.
+If you need help with using NetBox or developing for it or against it's API
+you may find [the `#netbox` channel][netbox-slack-channel] on the same Slack instance very helpful.

 [netbox-community]: https://github.com/netbox-community/netbox-docker/discussions

@@ -97,36 +99,11 @@ If you need help with using Netbox or developing for it or against it's API you

 This project relies only on *Docker* and *docker-compose* meeting these requirements:

-* The *Docker version* must be at least `17.05`.
-* The *docker-compose version* must be at least `1.17.0`.
+* The *Docker version* must be at least `19.03`.
+* The *docker-compose version* must be at least `1.28.0`.

 To check the version installed on your system run `docker --version` and `docker-compose --version`.

-## Use a Specific Netbox Version
-
-The `docker-compose.yml` file is prepared to run a specific version of Netbox, instead of `latest`.
-To use this feature, set and export the environment-variable `VERSION` before launching `docker-compose`, as shown below.
-`VERSION` may be set to the name of
-[any tag of the `netboxcommunity/netbox` Docker image on Docker Hub][netbox-dockerhub] or [Quay.io][netbox-quayio].
-
-```bash
-export VERSION=v2.7.1
-docker-compose pull netbox
-docker-compose up -d
-```
-
-You can also build a specific version of the Netbox Docker image yourself.
-`VERSION` can be any valid [git ref][git-ref] in that case.
-
-```bash
-export VERSION=v2.7.1
-./build.sh $VERSION
-docker-compose up -d
-```
-
-[git-ref]: https://git-scm.com/book/en/v2/Git-Internals-Git-References
-[netbox-github]: https://github.com/netbox-community/netbox/releases
-
 ## Breaking Changes

 From time to time it might become necessary to re-engineer the structure of this setup.
@@ -151,7 +128,7 @@ For more details on custom builds [consult our wiki][netbox-docker-wiki-build].
 ## Tests

 We have a test script.
-It runs Netbox's own unit tests and ensures that all initializers work:
+It runs NetBox's own unit tests and ensures that all initializers work:

 ```bash
 IMAGE=netboxcommunity/netbox:latest ./test.sh
@@ -5,4 +5,4 @@ push_image_to_registry() {
   echo "⏫ Pushing '${target_tag}'"
   $DRY docker push "${target_tag}"
   echo "✅ Finished pushing the Docker image '${target_tag}'."
 }
@@ -45,16 +45,16 @@ _get_image_configuration() {
     --silent \
     --location \
     --header "Authorization: Bearer $token" \
-    "https://registry-1.docker.io/v2/$image/blobs/$digest" \
-    | jq -r ".config.Labels.\"$label\""
+    "https://registry-1.docker.io/v2/$image/blobs/$digest" |
+    jq -r ".config.Labels.\"$label\""
 }

 _get_token() {
   local image=$1
   curl \
     --silent \
-    "https://auth.docker.io/token?scope=repository:$image:pull&service=registry.docker.io" \
-    | jq -r '.token'
+    "https://auth.docker.io/token?scope=repository:$image:pull&service=registry.docker.io" |
+    jq -r '.token'
 }

 _get_digest() {
@@ -65,8 +65,8 @@ _get_digest() {
     --silent \
     --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
     --header "Authorization: Bearer $token" \
-    "https://registry-1.docker.io/v2/$image/manifests/$tag" \
-    | jq -r '.config.digest'
+    "https://registry-1.docker.io/v2/$image/manifests/$tag" |
+    jq -r '.config.digest'
 }

 _get_layers() {
@@ -77,6 +77,6 @@ _get_layers() {
     --silent \
     --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
     --header "Authorization: Bearer $token" \
-    "https://registry-1.docker.io/v2/$image/manifests/$tag" \
-    | jq -r '.layers[].digest'
+    "https://registry-1.docker.io/v2/$image/manifests/$tag" |
+    jq -r '.layers[].digest'
 }
@@ -19,7 +19,7 @@ fi
 # Checking if PRERELEASE is either unset, 'true' or 'false'
 ###
 if [ -n "${PRERELEASE}" ] &&
   { [ "${PRERELEASE}" != "true" ] && [ "${PRERELEASE}" != "false" ]; }; then

   if [ -z "${DEBUG}" ]; then
     echo "⚠️ PRERELEASE must be either unset, 'true' or 'false', but was '${PRERELEASE}'!"
@@ -60,9 +60,10 @@ if [ "${PRERELEASE}" == "true" ]; then
   # shellcheck disable=SC2003
   MINOR_UNSTABLE=$(expr match "${VERSION}" 'v[0-9]\+\.\([0-9]\+\)')

-  if { [ "${MAJOR_STABLE}" -eq "${MAJOR_UNSTABLE}" ] \
-    && [ "${MINOR_STABLE}" -ge "${MINOR_UNSTABLE}" ];
-  } || [ "${MAJOR_STABLE}" -gt "${MAJOR_UNSTABLE}" ]; then
+  if {
+    [ "${MAJOR_STABLE}" -eq "${MAJOR_UNSTABLE}" ] &&
+      [ "${MINOR_STABLE}" -ge "${MINOR_UNSTABLE}" ]
+  } || [ "${MAJOR_STABLE}" -gt "${MAJOR_UNSTABLE}" ]; then

     echo "❎ Latest unstable version '${VERSION}' is not higher than the latest stable version '$STABLE_VERSION'."
     if [ -z "$DEBUG" ]; then
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Builds develop, develop-* and master branches of Netbox
+# Builds develop, develop-* and master branches of NetBox

 echo "▶️ $0 $*"

build.sh (76 changed lines)

@@ -1,5 +1,5 @@
 #!/bin/bash
-# Clones the Netbox repository with git from Github and builds the Dockerfile
+# Clones the NetBox repository with git from Github and builds the Dockerfile

 echo "▶️ $0 $*"

@@ -49,7 +49,7 @@ if [ "${1}x" == "x" ] || [ "${1}" == "--help" ] || [ "${1}" == "-h" ]; then
   echo "  DOCKERFILE The name of Dockerfile to use."
   echo "             Default: Dockerfile"
   echo "  DOCKER_FROM The base image to use."
-  echo "             Default: 'python:3.9-alpine'"
+  echo "             Default: 'alpine:3.13'"
   echo "  DOCKER_TARGET A specific target to build."
   echo "           It's currently not possible to pass multiple targets."
   echo "           Default: main ldap"
@@ -106,7 +106,7 @@ else
 fi

 ###
-# Variables for fetching the source
+# Variables for fetching the NetBox source
 ###
 SRC_ORG="${SRC_ORG-netbox-community}"
 SRC_REPO="${SRC_REPO-netbox}"
@@ -115,10 +115,10 @@ URL="${URL-https://github.com/${SRC_ORG}/${SRC_REPO}.git}"
 NETBOX_PATH="${NETBOX_PATH-.netbox}"

 ###
-# Fetching the source
+# Fetching the NetBox source
 ###
-if [ "${2}" != "--push-only" ] && [ -z "${SKIP_GIT}" ] ; then
-  echo "🌐 Checking out '${NETBOX_BRANCH}' of netbox from the url '${URL}' into '${NETBOX_PATH}'"
+if [ "${2}" != "--push-only" ] && [ -z "${SKIP_GIT}" ]; then
+  echo "🌐 Checking out '${NETBOX_BRANCH}' of NetBox from the url '${URL}' into '${NETBOX_PATH}'"
   if [ ! -d "${NETBOX_PATH}" ]; then
     $DRY git clone -q --depth 10 -b "${NETBOX_BRANCH}" "${URL}" "${NETBOX_PATH}"
   fi
@@ -135,7 +135,7 @@ if [ "${2}" != "--push-only" ] && [ -z "${SKIP_GIT}" ] ; then
     $DRY git checkout -qf FETCH_HEAD
     $DRY git prune
   )
-  echo "✅ Checked out netbox"
+  echo "✅ Checked out NetBox"
 fi

 ###
@@ -157,7 +157,7 @@ fi
 # Determining the value for DOCKER_FROM
 ###
 if [ -z "$DOCKER_FROM" ]; then
-  DOCKER_FROM="python:3.9-alpine"
+  DOCKER_FROM="alpine:3.13"
 fi

 ###
@@ -174,9 +174,18 @@ PROJECT_VERSION="${PROJECT_VERSION-$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]

 # Get the Git information from the netbox directory
 if [ -d "${NETBOX_PATH}/.git" ]; then
-  NETBOX_GIT_REF=$(cd "${NETBOX_PATH}"; git rev-parse HEAD)
-  NETBOX_GIT_BRANCH=$(cd "${NETBOX_PATH}"; git rev-parse --abbrev-ref HEAD)
-  NETBOX_GIT_URL=$(cd "${NETBOX_PATH}"; git remote get-url origin)
+  NETBOX_GIT_REF=$(
+    cd "${NETBOX_PATH}"
+    git rev-parse HEAD
+  )
+  NETBOX_GIT_BRANCH=$(
+    cd "${NETBOX_PATH}"
+    git rev-parse --abbrev-ref HEAD
+  )
+  NETBOX_GIT_URL=$(
+    cd "${NETBOX_PATH}"
+    git remote get-url origin
+  )
 fi

 ###
@@ -186,19 +195,22 @@ DOCKER_REGISTRY="${DOCKER_REGISTRY-docker.io}"
 DOCKER_ORG="${DOCKER_ORG-netboxcommunity}"
 DOCKER_REPO="${DOCKER_REPO-netbox}"
 case "${NETBOX_BRANCH}" in
   master)
-    TAG="${TAG-latest}";;
-  develop)
-    TAG="${TAG-snapshot}";;
-  *)
-    TAG="${TAG-$NETBOX_BRANCH}";;
+    TAG="${TAG-latest}"
+    ;;
+  develop)
+    TAG="${TAG-snapshot}"
+    ;;
+  *)
+    TAG="${TAG-$NETBOX_BRANCH}"
+    ;;
 esac

 ###
 # Determine targets to build
 ###
 DEFAULT_DOCKER_TARGETS=("main" "ldap")
-DOCKER_TARGETS=( "${DOCKER_TARGET:-"${DEFAULT_DOCKER_TARGETS[@]}"}")
+DOCKER_TARGETS=("${DOCKER_TARGET:-"${DEFAULT_DOCKER_TARGETS[@]}"}")
 echo "🏭 Building the following targets:" "${DOCKER_TARGETS[@]}"

 ###
@@ -216,7 +228,7 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
     TARGET_DOCKER_TAG="${TARGET_DOCKER_TAG}-${DOCKER_TARGET}"
   fi
   if [ -n "${GH_ACTION}" ]; then
-    echo "FINAL_DOCKER_TAG=${TARGET_DOCKER_TAG}" >> $GITHUB_ENV
+    echo "FINAL_DOCKER_TAG=${TARGET_DOCKER_TAG}" >>"$GITHUB_ENV"
     echo "::set-output name=skipped::false"
   fi

@@ -242,7 +254,7 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
   ###
   # Proceeding to buils stage, except if `--push-only` is passed
   ###
-  if [ "${2}" != "--push-only" ] ; then
+  if [ "${2}" != "--push-only" ]; then
     ###
     # Checking if the build is necessary,
     # meaning build only if one of those values changed:
@@ -259,7 +271,7 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
       BUILD_REASON="${BUILD_REASON} interactive"
     elif [ "$DOCKER_REGISTRY" = "docker.io" ]; then
       source ./build-functions/get-public-image-config.sh
-      IFS=':' read -ra DOCKER_FROM_SPLIT <<< "${DOCKER_FROM}"
+      IFS=':' read -ra DOCKER_FROM_SPLIT <<<"${DOCKER_FROM}"
      if ! [[ ${DOCKER_FROM_SPLIT[0]} =~ .*/.* ]]; then
        # Need to use "library/..." for images the have no two part name
        DOCKER_FROM_SPLIT[0]="library/${DOCKER_FROM_SPLIT[0]}"
@@ -271,7 +283,7 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do

     if ! printf '%s\n' "${IMAGES_LAYERS_OLD[@]}" | grep -q -P "^${PYTHON_LAST_LAYER}\$"; then
       SHOULD_BUILD="true"
-      BUILD_REASON="${BUILD_REASON} python"
+      BUILD_REASON="${BUILD_REASON} alpine"
     fi
     if [ "${NETBOX_GIT_REF}" != "${NETBOX_GIT_REF_OLD}" ]; then
       SHOULD_BUILD="true"
@@ -295,8 +307,8 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
     -t "${TARGET_DOCKER_TAG}"
   )
   if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
-    DOCKER_BUILD_ARGS+=( -t "${TARGET_DOCKER_SHORT_TAG}" )
-    DOCKER_BUILD_ARGS+=( -t "${TARGET_DOCKER_LATEST_TAG}" )
+    DOCKER_BUILD_ARGS+=(-t "${TARGET_DOCKER_SHORT_TAG}")
+    DOCKER_BUILD_ARGS+=(-t "${TARGET_DOCKER_LATEST_TAG}")
   fi

   # --label
@@ -323,22 +335,22 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
     )
   fi
   if [ -n "${BUILD_REASON}" ]; then
-    BUILD_REASON=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<< "$BUILD_REASON")
-    DOCKER_BUILD_ARGS+=( --label "BUILD_REASON=${BUILD_REASON}" )
+    BUILD_REASON=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<"$BUILD_REASON")
+    DOCKER_BUILD_ARGS+=(--label "BUILD_REASON=${BUILD_REASON}")
   fi

   # --build-arg
-  DOCKER_BUILD_ARGS+=( --build-arg "NETBOX_PATH=${NETBOX_PATH}" )
+  DOCKER_BUILD_ARGS+=(--build-arg "NETBOX_PATH=${NETBOX_PATH}")

   if [ -n "${DOCKER_FROM}" ]; then
-    DOCKER_BUILD_ARGS+=( --build-arg "FROM=${DOCKER_FROM}" )
+    DOCKER_BUILD_ARGS+=(--build-arg "FROM=${DOCKER_FROM}")
   fi
   if [ -n "${HTTP_PROXY}" ]; then
-    DOCKER_BUILD_ARGS+=( --build-arg "http_proxy=${HTTP_PROXY}" )
-    DOCKER_BUILD_ARGS+=( --build-arg "https_proxy=${HTTPS_PROXY}" )
+    DOCKER_BUILD_ARGS+=(--build-arg "http_proxy=${HTTP_PROXY}")
+    DOCKER_BUILD_ARGS+=(--build-arg "https_proxy=${HTTPS_PROXY}")
   fi
   if [ -n "${NO_PROXY}" ]; then
-    DOCKER_BUILD_ARGS+=( --build-arg "no_proxy=${NO_PROXY}" )
+    DOCKER_BUILD_ARGS+=(--build-arg "no_proxy=${NO_PROXY}")
   fi

   ###
@@ -360,7 +372,7 @@ for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
   ###
   # Pushing the docker images if either `--push` or `--push-only` are passed
   ###
-  if [ "${2}" == "--push" ] || [ "${2}" == "--push-only" ] ; then
+  if [ "${2}" == "--push" ] || [ "${2}" == "--push-only" ]; then
     source ./build-functions/docker-functions.sh
     push_image_to_registry "${TARGET_DOCKER_TAG}"

@@ -5,9 +5,8 @@
 ####

 import re
-
-from os.path import dirname, abspath, join
 from os import environ
+from os.path import abspath, dirname, join

 # For reference see https://netbox.readthedocs.io/en/stable/configuration/
 # Based on https://github.com/netbox-community/netbox/blob/master/netbox/netbox/configuration.example.py
@@ -39,16 +38,16 @@ ALLOWED_HOSTS = environ.get('ALLOWED_HOSTS', '*').split(' ')
 # PostgreSQL database configuration. See the Django documentation for a complete list of available parameters:
 # https://docs.djangoproject.com/en/stable/ref/settings/#databases
 DATABASE = {
     'NAME': environ.get('DB_NAME', 'netbox'),  # Database name
     'USER': environ.get('DB_USER', ''),  # PostgreSQL username
     'PASSWORD': _read_secret('db_password', environ.get('DB_PASSWORD', '')),
     # PostgreSQL password
     'HOST': environ.get('DB_HOST', 'localhost'),  # Database server
     'PORT': environ.get('DB_PORT', ''),  # Database port (leave blank for default)
     'OPTIONS': {'sslmode': environ.get('DB_SSLMODE', 'prefer')},
     # Database connection SSLMODE
     'CONN_MAX_AGE': int(environ.get('DB_CONN_MAX_AGE', '300')),
     # Max database connection age
 }

 # Redis database settings. Redis is used for caching and for queuing background tasks such as webhook events. A separate
@@ -1,9 +1,10 @@
-import ldap
-
-from django_auth_ldap.config import LDAPSearch
 from importlib import import_module
 from os import environ

+import ldap
+from django_auth_ldap.config import LDAPSearch
+
+
 # Read secret from file
 def _read_secret(secret_name, default=None):
     try:
@@ -47,9 +48,11 @@ LDAP_IGNORE_CERT_ERRORS = environ.get('LDAP_IGNORE_CERT_ERRORS', 'False').lower(

 AUTH_LDAP_USER_SEARCH_BASEDN = environ.get('AUTH_LDAP_USER_SEARCH_BASEDN', '')
 AUTH_LDAP_USER_SEARCH_ATTR = environ.get('AUTH_LDAP_USER_SEARCH_ATTR', 'sAMAccountName')
-AUTH_LDAP_USER_SEARCH = LDAPSearch(AUTH_LDAP_USER_SEARCH_BASEDN,
-                                   ldap.SCOPE_SUBTREE,
-                                   "(" + AUTH_LDAP_USER_SEARCH_ATTR + "=%(user)s)")
+AUTH_LDAP_USER_SEARCH = LDAPSearch(
+    AUTH_LDAP_USER_SEARCH_BASEDN,
+    ldap.SCOPE_SUBTREE,
+    "(" + AUTH_LDAP_USER_SEARCH_ATTR + "=%(user)s)"
+)

 # This search ought to return all groups to which the user belongs. django_auth_ldap uses this to determine group
 # heirarchy.
@@ -7,6 +7,8 @@ services:
       - redis
       - redis-cache
     env_file: env/netbox.env
+    environment:
+      SKIP_STARTUP_SCRIPTS: ${SKIP_STARTUP_SCRIPTS-false}
     user: '101'
     volumes:
       - ./startup_scripts:/opt/netbox/startup_scripts:z,ro
@@ -14,19 +16,9 @@ services:
       - ./configuration:/etc/netbox/config:z,ro
       - ./reports:/etc/netbox/reports:z,ro
       - ./scripts:/etc/netbox/scripts:z,ro
-      - netbox-nginx-config:/etc/netbox-nginx:z
-      - netbox-static-files:/opt/netbox/netbox/static:z
       - netbox-media-files:/opt/netbox/netbox/media:z
-  nginx:
-    command: nginx -c /etc/netbox-nginx/nginx.conf
-    image: nginx:1.19-alpine
-    depends_on:
-      - netbox
     ports:
       - 8080
-    volumes:
-      - netbox-static-files:/opt/netbox/netbox/static:ro
-      - netbox-nginx-config:/etc/netbox-nginx/:ro
   postgres:
     image: postgres:12-alpine
     env_file: env/postgres.env
@@ -45,9 +37,5 @@ services:
       - redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
     env_file: env/redis-cache.env
 volumes:
-  netbox-static-files:
-    driver: local
-  netbox-nginx-config:
-    driver: local
   netbox-media-files:
     driver: local
@@ -15,30 +15,19 @@ services:
       - ./configuration:/etc/netbox/config:z,ro
       - ./reports:/etc/netbox/reports:z,ro
       - ./scripts:/etc/netbox/scripts:z,ro
-      - netbox-nginx-config:/etc/netbox-nginx:z
-      - netbox-static-files:/opt/netbox/netbox/static:z
       - netbox-media-files:/opt/netbox/netbox/media:z
+    ports:
+      - "8080"
   netbox-worker:
     <<: *netbox
     depends_on:
       - redis
     entrypoint:
-      - python3
+      - /opt/netbox/venv/bin/python
       - /opt/netbox/netbox/manage.py
     command:
       - rqworker
+    ports: []
-  # nginx
-  nginx:
-    command: nginx -c /etc/netbox-nginx/nginx.conf
-    image: nginx:1.19-alpine
-    depends_on:
-      - netbox
-    ports:
-      - 8080
-    volumes:
-      - netbox-static-files:/opt/netbox/netbox/static:ro
-      - netbox-nginx-config:/etc/netbox-nginx/:ro

   # postgres
   postgres:
@@ -66,10 +55,6 @@ services:
     env_file: env/redis-cache.env

 volumes:
-  netbox-static-files:
-    driver: local
-  netbox-nginx-config:
-    driver: local
   netbox-media-files:
     driver: local
   netbox-postgres-data:
@@ -4,57 +4,61 @@
 #
 # They can be imported by other code (see `ldap_config.py` for an example).
-from os.path import abspath, isfile
-from os import scandir
 import importlib.util
 import sys
+
+from os import scandir
+from os.path import abspath, isfile


 def _filename(f):
     return f.name


 def _import(module_name, path, loaded_configurations):
-    spec = importlib.util.spec_from_file_location('', path)
+    spec = importlib.util.spec_from_file_location("", path)
     module = importlib.util.module_from_spec(spec)
     spec.loader.exec_module(module)
     sys.modules[module_name] = module

     loaded_configurations.insert(0, module)

     print(f"🧬 loaded config '{path}'")


 def read_configurations(config_module, config_dir, main_config):
     loaded_configurations = []

-    main_config_path = abspath(f'{config_dir}/{main_config}.py')
+    main_config_path = abspath(f"{config_dir}/{main_config}.py")
     if isfile(main_config_path):
-        _import(f'{config_module}.{main_config}', main_config_path, loaded_configurations)
+        _import(f"{config_module}.{main_config}", main_config_path, loaded_configurations)
     else:
         print(f"⚠️ Main configuration '{main_config_path}' not found.")

     with scandir(config_dir) as it:
         for f in sorted(it, key=_filename):
             if not f.is_file():
                 continue

-            if f.name.startswith('__'):
+            if f.name.startswith("__"):
                 continue

-            if not f.name.endswith('.py'):
+            if not f.name.endswith(".py"):
                 continue

-            if f.name == f'{config_dir}.py':
+            if f.name == f"{main_config}.py":
+                continue
+
+            if f.name == f"{config_dir}.py":
                 continue

             module_name = f"{config_module}.{f.name[:-len('.py')]}".replace(".", "_")
             _import(module_name, f.path, loaded_configurations)

     if len(loaded_configurations) == 0:
         print(f"‼️ No configuration files found in '{config_dir}'.")
         raise ImportError(f"No configuration files found in '{config_dir}'.")

     return loaded_configurations


 ## Specific Parts
@@ -65,15 +69,16 @@ def read_configurations(config_module, config_dir, main_config):


 _loaded_configurations = read_configurations(
-    config_dir = '/etc/netbox/config/',
-    config_module = 'netbox.configuration',
-    main_config = 'configuration')
+    config_dir="/etc/netbox/config/",
+    config_module="netbox.configuration",
+    main_config="configuration",
+)


 def __getattr__(name):
     for config in _loaded_configurations:
         try:
             return getattr(config, name)
         except:
             pass
     raise AttributeError
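The module-level __getattr__ shown above is what lets Django treat this package as a single settings source: attribute lookups fall through the loaded configuration files in order, and because extra files are inserted at position 0 they take precedence over the main configuration. A minimal sketch of that resolution; the two namespace objects below are hypothetical stand-ins for loaded config modules:

from types import SimpleNamespace

_loaded = [
    SimpleNamespace(ALLOWED_HOSTS=["*"]),                       # extra config file, inserted first
    SimpleNamespace(ALLOWED_HOSTS=["localhost"], DEBUG=False),  # main configuration, loaded first
]

def resolve(name):
    # Same fallback chain as __getattr__ above: first module that defines the name wins.
    for config in _loaded:
        try:
            return getattr(config, name)
        except AttributeError:
            pass
    raise AttributeError(name)

print(resolve("ALLOWED_HOSTS"))  # ['*']: the extra config overrides the main one
print(resolve("DEBUG"))          # False: falls through to the main configuration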
@@ -1,12 +1,16 @@
 #!/bin/bash
-# Runs on every start of the Netbox Docker container
+# Runs on every start of the NetBox Docker container

 # Stop when an error occures
 set -e

-# Allows Netbox to be run as non-root users
+# Allows NetBox to be run as non-root users
 umask 002

+# Load correct Python3 env
+# shellcheck disable=SC1091
+source /opt/netbox/venv/bin/activate
+
 # Try to connect to the DB
 DB_WAIT_TIMEOUT=${DB_WAIT_TIMEOUT-3}
 MAX_DB_WAIT_TIME=${MAX_DB_WAIT_TIME-30}
@@ -14,7 +18,7 @@ CUR_DB_WAIT_TIME=0
 while ! ./manage.py migrate 2>&1 && [ "${CUR_DB_WAIT_TIME}" -lt "${MAX_DB_WAIT_TIME}" ]; do
   echo "⏳ Waiting on DB... (${CUR_DB_WAIT_TIME}s / ${MAX_DB_WAIT_TIME}s)"
   sleep "${DB_WAIT_TIMEOUT}"
-  CUR_DB_WAIT_TIME=$(( CUR_DB_WAIT_TIME + DB_WAIT_TIMEOUT ))
+  CUR_DB_WAIT_TIME=$((CUR_DB_WAIT_TIME + DB_WAIT_TIMEOUT))
 done
 if [ "${CUR_DB_WAIT_TIME}" -ge "${MAX_DB_WAIT_TIME}" ]; then
   echo "❌ Waited ${MAX_DB_WAIT_TIME}s or more for the DB to become ready."
@@ -32,17 +36,17 @@ else
   SUPERUSER_EMAIL='admin@example.com'
 fi
 if [ -f "/run/secrets/superuser_password" ]; then
-  SUPERUSER_PASSWORD="$(< /run/secrets/superuser_password)"
+  SUPERUSER_PASSWORD="$(</run/secrets/superuser_password)"
 elif [ -z ${SUPERUSER_PASSWORD+x} ]; then
   SUPERUSER_PASSWORD='admin'
 fi
 if [ -f "/run/secrets/superuser_api_token" ]; then
-  SUPERUSER_API_TOKEN="$(< /run/secrets/superuser_api_token)"
+  SUPERUSER_API_TOKEN="$(</run/secrets/superuser_api_token)"
 elif [ -z ${SUPERUSER_API_TOKEN+x} ]; then
   SUPERUSER_API_TOKEN='0123456789abcdef0123456789abcdef01234567'
 fi

-./manage.py shell --interface python << END
+./manage.py shell --interface python <<END
 from django.contrib.auth.models import User
 from users.models import Token
 if not User.objects.filter(username='${SUPERUSER_NAME}'):
@@ -60,9 +64,6 @@ else
   echo "import runpy; runpy.run_path('../startup_scripts')" | ./manage.py shell --interface python
 fi

-# Copy static files
-./manage.py collectstatic --no-input
-
 echo "✅ Initialisation is done."

 # Launch whatever is passed by docker
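The migrate loop above is a bounded retry: it keeps calling ./manage.py migrate until it succeeds or the accumulated wait reaches MAX_DB_WAIT_TIME. A small Python sketch of the same pattern, with a hypothetical try_once callable standing in for the migrate call:

import time

def wait_for(try_once, step=3, max_wait=30):
    """Retry try_once() until it returns True or max_wait seconds have accumulated."""
    waited = 0
    while not try_once():
        if waited >= max_wait:
            return False
        print(f"⏳ Waiting on DB... ({waited}s / {max_wait}s)")
        time.sleep(step)
        waited += step
    return True

# Hypothetical stand-in that "succeeds" on the third attempt (step=0 so the demo does not sleep).
attempts = iter([False, False, True])
if not wait_for(lambda: next(attempts), step=0, max_wait=30):
    raise SystemExit("❌ Waited too long for the DB to become ready.")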
@@ -1,8 +0,0 @@ (file removed)
-command = '/usr/bin/gunicorn'
-pythonpath = '/opt/netbox/netbox'
-bind = '0.0.0.0:8001'
-workers = 3
-errorlog = '-'
-accesslog = '-'
-capture_output = False
-loglevel = 'info'
docker/launch-netbox.sh (new executable file, 54 lines)
@@ -0,0 +1,54 @@
#!/bin/bash

UNIT_CONFIG="${UNIT_CONFIG-/etc/unit/nginx-unit.json}"
UNIT_SOCKET="/opt/unit/unit.sock"

load_configuration() {
  MAX_WAIT=10
  WAIT_COUNT=0
  while [ ! -S $UNIT_SOCKET ]; do
    if [ $WAIT_COUNT -ge $MAX_WAIT ]; then
      echo "⚠️ No control socket found; configuration will not be loaded."
      return 1
    fi

    WAIT_COUNT=$((WAIT_COUNT + 1))
    echo "⏳ Waiting for control socket to be created... (${WAIT_COUNT}/${MAX_WAIT})"

    sleep 1
  done

  # even when the control socket exists, it does not mean unit has finished initialisation
  # this curl call will get a reply once unit is fully launched
  curl --silent --output /dev/null --request GET --unix-socket $UNIT_SOCKET http://localhost/

  echo "⚙️ Applying configuration from $UNIT_CONFIG"

  RESP_CODE=$(
    curl \
      --silent \
      --output /dev/null \
      --write-out '%{http_code}' \
      --request PUT \
      --data-binary "@${UNIT_CONFIG}" \
      --unix-socket $UNIT_SOCKET \
      http://localhost/config
  )
  if [ "$RESP_CODE" != "200" ]; then
    echo "⚠️ Could not load Unit configuration"
    kill "$(cat /opt/unit/unit.pid)"
    return 1
  fi

  echo "✅ Unit configuration loaded successfully"
}

load_configuration &

exec unitd \
  --no-daemon \
  --control unix:$UNIT_SOCKET \
  --pid /opt/unit/unit.pid \
  --log /dev/stdout \
  --state /opt/unit/state/ \
  --tmp /opt/unit/tmp/
@@ -1,21 +1,23 @@
 from .configuration import read_configurations

 _loaded_configurations = read_configurations(
-    config_dir = '/etc/netbox/config/ldap/',
-    config_module = 'netbox.configuration.ldap',
-    main_config = 'ldap_config')
+    config_dir="/etc/netbox/config/ldap/",
+    config_module="netbox.configuration.ldap",
+    main_config="ldap_config",
+)


 def __getattr__(name):
     for config in _loaded_configurations:
         try:
             return getattr(config, name)
         except:
             pass
     raise AttributeError


 def __dir__():
     names = []
     for config in _loaded_configurations:
         names.extend(config.__dir__())
     return names
docker/nginx-unit.json (new file, 40 lines)
@@ -0,0 +1,40 @@
{
  "listeners": {
    "*:8080": {
      "pass": "routes"
    }
  },

  "routes": [
    {
      "match": {
        "uri": "/static/*"
      },
      "action": {
        "share": "/opt/netbox/netbox"
      }
    },

    {
      "action": {
        "pass": "applications/netbox"
      }
    }
  ],

  "applications": {
    "netbox": {
      "type": "python 3",
      "path": "/opt/netbox/netbox/",
      "module": "netbox.wsgi",
      "home": "/opt/netbox/venv",
      "processes": {
        "max": 4,
        "spare": 1,
        "idle_timeout": 120
      }
    }
  },

  "access_log": "/dev/stdout"
}
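The Unit configuration above is plain JSON that launch-netbox.sh PUTs to the control socket, so it can be sanity-checked before a container is ever started. A hedged sketch of such a check; the relative path is an assumption about where the repository is checked out:

import json

# Path assumed relative to a local checkout of the repository.
with open("docker/nginx-unit.json") as f:
    unit_config = json.load(f)

# The listener on *:8080 must pass to "routes", and every route action must either
# serve a static share or point at an application defined under "applications".
assert unit_config["listeners"]["*:8080"]["pass"] == "routes"
for route in unit_config["routes"]:
    action = route["action"]
    assert "share" in action or action["pass"].split("/")[1] in unit_config["applications"]

print("nginx-unit.json looks structurally sound")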
@@ -1,44 +0,0 @@ (file removed)
-daemon off;
-worker_processes 1;
-
-error_log /dev/stderr info;
-
-events {
-  worker_connections 1024;
-}
-
-http {
-  include /etc/nginx/mime.types;
-  default_type application/octet-stream;
-  sendfile on;
-  tcp_nopush on;
-  keepalive_timeout 65;
-  gzip on;
-  server_tokens off;
-  client_max_body_size 10M;
-
-  server {
-    listen 8080;
-    access_log off;
-
-    location /static/ {
-      alias /opt/netbox/netbox/static/;
-    }
-
-    location / {
-      proxy_pass http://netbox:8001;
-      proxy_set_header X-Forwarded-Host $http_host;
-      proxy_set_header X-Real-IP $remote_addr;
-      add_header P3P 'CP="ALL DSP COR PSAa PSDa OUR NOR ONL UNI COM NAV"';
-    }
-  }
-
-  server {
-    listen 8081;
-    access_log off;
-
-    location = /stub_status {
-      stub_status;
-    }
-  }
-}
env/netbox.env (vendored)
@@ -1,39 +1,39 @@ (the variables are re-sorted alphabetically; no values change)
CORS_ORIGIN_ALLOW_ALL=True
DB_HOST=postgres
DB_NAME=netbox
DB_PASSWORD=J5brHrAXFLQSif0K
DB_USER=netbox
EMAIL_FROM=netbox@bar.com
EMAIL_PASSWORD=
EMAIL_PORT=25
EMAIL_SERVER=localhost
EMAIL_SSL_CERTFILE=
EMAIL_SSL_KEYFILE=
EMAIL_TIMEOUT=5
EMAIL_USERNAME=netbox
# EMAIL_USE_SSL and EMAIL_USE_TLS are mutually exclusive, i.e. they can't both be `true`!
EMAIL_USE_SSL=false
EMAIL_USE_TLS=false
MAX_PAGE_SIZE=1000
MEDIA_ROOT=/opt/netbox/netbox/media
METRICS_ENABLED=false
NAPALM_PASSWORD=
NAPALM_TIMEOUT=10
NAPALM_USERNAME=
REDIS_CACHE_DATABASE=1
REDIS_CACHE_HOST=redis-cache
REDIS_CACHE_PASSWORD=t4Ph722qJ5QHeQ1qfu36
REDIS_CACHE_SSL=false
REDIS_DATABASE=0
REDIS_HOST=redis
REDIS_PASSWORD=H733Kdjndks81
REDIS_SSL=false
RELEASE_CHECK_URL=https://api.github.com/repos/netbox-community/netbox/releases
SECRET_KEY=r8OwDznj!!dci#P9ghmRfdu1Ysxm0AiPeDCQhKE+N_rClfWNj
SKIP_STARTUP_SCRIPTS=false
SKIP_SUPERUSER=false
SUPERUSER_API_TOKEN=0123456789abcdef0123456789abcdef01234567
SUPERUSER_EMAIL=admin@example.com
SUPERUSER_NAME=admin
SUPERUSER_PASSWORD=admin
WEBHOOKS_ENABLED=true
env/postgres.env (vendored)
@@ -1,3 +1,3 @@ (re-sorted alphabetically; no values change)
POSTGRES_DB=netbox
POSTGRES_PASSWORD=J5brHrAXFLQSif0K
POSTGRES_USER=netbox
|
|||||||
# - prefix: 10.0.0.0/16
|
# - prefix: 10.0.0.0/16
|
||||||
# rir: RFC1918
|
# rir: RFC1918
|
||||||
|
# tenant: tenant1
|
||||||
# - prefix: fd00:ccdd::/32
|
# - prefix: fd00:ccdd::/32
|
||||||
# rir: RFC4193 ULA
|
# rir: RFC4193 ULA
|
||||||
# - prefix: 2001:db8::/32
|
# - prefix: 2001:db8::/32
|
||||||
|
initializers/circuit_types.yml (new file, 6 lines)
# - name: VPLS
#   slug: vpls
# - name: MPLS
#   slug: mpls
# - name: Internet
#   slug: internet
initializers/circuits.yml (new file, 7 lines)
# - cid: Circuit_ID-1
#   provider: Provider1
#   type: Internet
#   tenant: tenant1
# - cid: Circuit_ID-2
#   provider: Provider2
#   type: MPLS
initializers/cluster_groups.yml (new file, 4 lines)
# - name: Group 1
#   slug: group-1
# - name: Group 2
#   slug: group-2
@@ -1,5 +1,7 @@
 # - name: cluster1
 #   type: Hyper-V
+#   group: Group 1
+#   tenant: tenant1
 # - name: cluster2
 #   type: Hyper-V
 #   site: SING 1
@@ -53,20 +53,20 @@
 #   - Fifth Item
 #   - Fourth Item
 # select_field_legacy_format:
 #   type: select
 #   label: Choose between items
 #   required: false
 #   filter_logic: loose
 #   weight: 30
 #   on_objects:
 #   - dcim.models.Device
 #   choices:
 #   - value: A # this is the deprecated format.
 #   - value: B # we only use it for the tests.
 #   - value: C # please see above for the new format.
 #   - value: "D like deprecated"
 #     weight: 999
 #   - value: E
 # boolean_field:
 #   type: boolean
 #   label: Yes Or No?
initializers/custom_links.yml (new file, 21 lines)
## Possible Choices:
## new_window:
##   - True
##   - False
## content_type:
##   - device
##   - site
##   - any-other-content-type
##
## Examples:

# - name: link_to_repo
#   text: 'Link to Netbox Docker'
#   url: 'https://github.com/netbox-community/netbox-docker'
#   new_window: False
#   content_type: device
# - name: link_to_localhost
#   text: 'Link to localhost'
#   url: 'http://localhost'
#   new_window: True
#   content_type: device
initializers/power_feeds.yml (new file, 14 lines)
# - name: power feed 1
#   power_panel: power panel AMS 1
#   voltage: 208
#   amperage: 50
#   max_utilization: 80
#   phase: Single phase
#   rack: rack-01
# - name: power feed 2
#   power_panel: power panel SING 1
#   voltage: 208
#   amperage: 50
#   max_utilization: 80
#   phase: Three-phase
#   rack: rack-03
initializers/power_panels.yml (new file, 5 lines)
# - name: power panel AMS 1
#   site: AMS 1
# - name: power panel SING 1
#   site: SING 1
#   rack_group: cage 101
initializers/providers.yml (new file, 6 lines)
# - name: Provider1
#   slug: provider1
#   asn: 121
# - name: Provider2
#   slug: provider2
#   asn: 122
initializers/route_targets.yml (new file, 3 lines)
# - name: 65000:1001
#   tenant: tenant1
# - name: 65000:1002
initializers/secret_roles.yml (new file, 4 lines)
# - name: Super Secret Passwords
#   slug: super-secret
# - name: SNMP Communities
#   slug: snmp
initializers/services.yml (new file, 15 lines)
# - name: DNS
#   protocol: TCP
#   ports:
#   - 53
#   virtual_machine: virtual machine 1
# - name: DNS
#   protocol: UDP
#   ports:
#   - 53
#   virtual_machine: virtual machine 1
# - name: MISC
#   protocol: UDP
#   ports:
#   - 4000
#   device: server01
initializers/tags.yml (new file, 12 lines)
# - name: Tag 1
#   slug: tag-1
#   color: Pink
# - name: Tag 2
#   slug: tag-2
#   color: Cyan
# - name: Tag 3
#   slug: tag-3
#   color: Grey
# - name: Tag 4
#   slug: tag-4
#   color: Teal
initializers/webhooks.yml (new file, 27 lines)
## Possible Choices:
##   object_types:
##     - device
##     - site
##     - any-other-content-type
##   types:
##     - type_create
##     - type_update
##     - type_delete
## Examples:

# - name: device_creation
#   payload_url: 'http://localhost:8080'
#   object_types:
#     - device
#     - cable
#   type_create: True
# - name: device_update
#   payload_url: 'http://localhost:8080'
#   object_types:
#     - device
#   type_update: True
# - name: device_delete
#   payload_url: 'http://localhost:8080'
#   object_types:
#     - device
#   type_delete: True
pyproject.toml (new file, 26 lines)
[tool.black]
line_length = 100
target-version = ['py38']
include = '\.pyi?$'
exclude = '''
(
  /(
      \.git
    | \.venv
    | \.netbox
    | \.vscode
    | configuration
  )/
)
'''

[tool.isort]
profile = "black"
multi_line_output = 3
line_length = 100

[tool.pylint.messages_control]
disable = "C0330, C0326"

[tool.pylint.format]
max-line-length = "100"
release.sh (new executable file, 188 lines)
#!/bin/bash

DEFAULT_REPO=netbox-community/netbox-docker
REPO="${REPO-${DEFAULT_REPO}}"

echomoji() {
  EMOJI=${1}
  TEXT=${2}
  shift 2
  if [ -z "$DISABLE_EMOJI" ]; then
    echo "${EMOJI}" "${@}"
  else
    echo "${TEXT}" "${@}"
  fi
}

echo_nok() {
  echomoji "❌" "!" "${@}"
}
echo_ok() {
  echomoji "✅" "-" "${@}"
}
echo_hint() {
  echomoji "👉" ">" "${@}"
}

# check errors shall exit with code 1

check_clean_repo() {
  changes=$(git status --porcelain 2>/dev/null)
  if [ ${?} ] && [ -n "$changes" ]; then
    echo_nok "There are git changes pending:"
    echo "$changes"
    echo_hint "Please clean the repository before continuing: git stash --include-untracked"
    exit 1
  fi
  echo_ok "Repository has no pending changes."
}

check_branch() {
  expected_branch="${1}"
  actual_branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null)
  if [ ${?} ] && [ "${actual_branch}" != "${expected_branch}" ]; then
    echo_nok "Current branch should be '${expected_branch}', but is '${actual_branch}'."
    echo_hint "Please change to the '${expected_branch}' branch: git checkout ${expected_branch}"
    exit 1
  fi
  echo_ok "The current branch is '${actual_branch}'."
}

check_upstream() {
  expected_upstream_branch="origin/${1}"
  actual_upstream_branch=$(git rev-parse --abbrev-ref '@{upstream}' 2>/dev/null)
  if [ ${?} ] && [ "${actual_upstream_branch}" != "${expected_upstream_branch}" ]; then
    echo_nok "Current upstream branch should be '${expected_upstream_branch}', but is '${actual_upstream_branch}'."
    echo_hint "Please set '${expected_upstream_branch}' as the upstream branch: git branch --set-upstream-to=${expected_upstream_branch}"
    exit 1
  fi
  echo_ok "The current upstream branch is '${actual_upstream_branch}'."
}

check_origin() {
  expected_origin="git@github.com:${REPO}.git"
  actual_origin=$(git remote get-url origin 2>/dev/null)
  if [ ${?} ] && [ "${actual_origin}" != "${expected_origin}" ]; then
    echo_nok "The url of origin is '${actual_origin}', but '${expected_origin}' is expected."
    echo_hint "Please set '${expected_origin}' as the url for origin: git origin set-url '${expected_origin}'"
    exit 1
  fi
  echo_ok "The current origin url is '${actual_origin}'."
}

check_latest() {
  git fetch --tags origin

  local_head_commit=$(git rev-parse HEAD 2>/dev/null)
  remote_head_commit=$(git rev-parse FETCH_HEAD 2>/dev/null)
  if [ "${local_head_commit}" != "${remote_head_commit}" ]; then
    echo_nok "HEAD is at '${local_head_commit}', but FETCH_HEAD is at '${remote_head_commit}'."
    echo_hint "Please ensure that you have pushed and pulled all the latest changes: git pull --prune --rebase origin; git push origin"
    exit 1
  fi
  echo_ok "HEAD and FETCH_HEAD both point to '${local_head_commit}'."
}

check_tag() {
  local tag

  tag=$(<VERSION)
  if git rev-parse "${tag}" 2>/dev/null >/dev/null; then
    echo_nok "The tag '${tag}' already points to '$(git rev-parse "${tag}" 2>/dev/null)'."
    echo_hint "Please ensure that the 'VERSION' file has been updated before trying to release: echo X.Y.Z > VERSION"
    exit 1
  fi
  echo_ok "The tag '${tag}' does not exist yet."
}

check_develop() {
  echomoji 📋 "?" "Checking 'develop' branch"

  check_branch develop
  check_upstream develop
  check_clean_repo
  check_latest
}

check_release() {
  echomoji 📋 "?" "Checking 'release' branch"

  check_upstream release
  check_clean_repo
  check_latest
}

# git errors shall exit with code 2

git_switch() {
  echomoji 🔀 "≈" "Switching to '${1}' branch…"
  if ! git checkout "${1}" >/dev/null; then
    echo_nok "It was not possible to switch to the branch '${1}'."
    exit 2
  fi
  echo_ok "The branch is now '${1}'."
}

git_tag() {
  echomoji 🏷 "X" "Tagging version '${1}'…"
  if ! git tag "${1}"; then
    echo_nok "The tag '${1}' was not created because of an error."
    exit 2
  fi
  echo_ok "The tag '$(<VERSION)' was created."
}

git_push() {
  echomoji ⏩ "»" "Pushing the tag '${2}' to '${1}'…"
  if ! git push "${1}" "${2}"; then
    echo_nok "The tag '${2}' could not be pushed to '${1}'."
    exit 2
  fi
  echo_ok "The tag '${2}' was pushed."
}

git_merge() {
  echomoji ⏩ "»" "Merging '${1}'…"
  if ! git merge --no-ff "${1}"; then
    echo_nok "The branch '${1}' could not be merged."
    exit 2
  fi
  echo_ok "The branch '${2}' was merged."
}

git_rebase() {
  echomoji ⏩ "»" "Rebasing onto '${1}'…"
  if ! git rebase "${1}"; then
    echo_nok "Could not rebase onto '${1}'."
    exit 2
  fi
  echo_ok "Rebased onto '${2}'."
}

###
# MAIN
###

echomoji 📋 "▶︎" "Checking pre-requisites for releasing '$(<VERSION)'"

check_origin

check_develop
check_tag

git_switch release
check_release

echomoji 📋 "▶︎" "Releasing '$(<VERSION)'"

git_merge develop
check_tag
git_tag "$(<VERSION)"

git_push "origin" release
git_push "origin" "$(<VERSION)"

git_switch develop
git_rebase release

echomoji ✅ "◼︎" "The release of '$(<VERSION)' is complete."
renovate.json (new file, 13 lines)
{
  "extends": [
    "config:base"
  ],
  "enabled": true,
  "labels": ["maintenance"],
  "baseBranches": ["develop"],
  "pip_requirements": {
    "fileMatch": [
      "requirements-container.txt"
    ]
  }
}
requirements-container.txt (new file, 4 lines)
napalm==3.2.0
ruamel.yaml==0.16.13
django-auth-ldap==2.3.0
django-storages[azure,boto3,dropbox,google,libcloud,sftp]==1.11.1
@@ -4,20 +4,21 @@ from django.contrib.auth.models import User
(formatting only: strings double-quoted, keyword spacing normalised; the hunk now reads)
from startup_script_utils import load_yaml, set_permissions
from users.models import Token

users = load_yaml("/opt/netbox/initializers/users.yml")
if users is None:
    sys.exit()

for username, user_details in users.items():
    if not User.objects.filter(username=username):
        user = User.objects.create_user(
            username=username,
            password=user_details.get("password", 0) or User.objects.make_random_password(),
        )

        print("👤 Created user", username)

        if user_details.get("api_token", 0):
            Token.objects.create(user=user, key=user_details["api_token"])

        yaml_permissions = user_details.get("permissions", [])
        set_permissions(user.user_permissions, yaml_permissions)
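The loop above expects users.yml to map each username to an optional password, api_token and permissions list. An illustrative sketch of the shape it consumes; every value below is made up:

# Illustrative only: the usernames, password and token are invented for this sketch.
users = {
    "writer": {
        "password": "writer",
        "api_token": "0123456789abcdef0123456789abcdef01234568",
        "permissions": ["dcim.add_device", "dcim.change_device"],
    },
    "reader": {},  # password is generated, no token, no extra permissions
}

for username, user_details in users.items():
    has_token = bool(user_details.get("api_token", 0))
    print(username, "has a token" if has_token else "no token")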
@@ -3,21 +3,21 @@ import sys
(formatting only: strings double-quoted; the hunk now reads)
from django.contrib.auth.models import Group, User
from startup_script_utils import load_yaml, set_permissions

groups = load_yaml("/opt/netbox/initializers/groups.yml")
if groups is None:
    sys.exit()

for groupname, group_details in groups.items():
    group, created = Group.objects.get_or_create(name=groupname)

    if created:
        print("👥 Created group", groupname)

    for username in group_details.get("users", []):
        user = User.objects.get(username=username)

        if user:
            user.groups.add(group)

    yaml_permissions = group_details.get("permissions", [])
    set_permissions(group.permissions, yaml_permissions)
@@ -3,56 +3,62 @@ import sys
(formatting only: strings double-quoted, imports inside the helper separated, the deprecation warning split across lines; the hunk now reads)
from extras.models import CustomField
from startup_script_utils import load_yaml


def get_class_for_class_path(class_path):
    import importlib

    from django.contrib.contenttypes.models import ContentType

    module_name, class_name = class_path.rsplit(".", 1)
    module = importlib.import_module(module_name)
    clazz = getattr(module, class_name)
    return ContentType.objects.get_for_model(clazz)


customfields = load_yaml("/opt/netbox/initializers/custom_fields.yml")

if customfields is None:
    sys.exit()

for cf_name, cf_details in customfields.items():
    custom_field, created = CustomField.objects.get_or_create(name=cf_name)

    if created:
        if cf_details.get("default", False):
            custom_field.default = cf_details["default"]

        if cf_details.get("description", False):
            custom_field.description = cf_details["description"]

        if cf_details.get("label", False):
            custom_field.label = cf_details["label"]

        for object_type in cf_details.get("on_objects", []):
            custom_field.content_types.add(get_class_for_class_path(object_type))

        if cf_details.get("required", False):
            custom_field.required = cf_details["required"]

        if cf_details.get("type", False):
            custom_field.type = cf_details["type"]

        if cf_details.get("weight", -1) >= 0:
            custom_field.weight = cf_details["weight"]

        if cf_details.get("choices", False):
            custom_field.choices = []

            for choice_detail in cf_details.get("choices", []):
                if isinstance(choice_detail, dict) and "value" in choice_detail:
                    # legacy mode
                    print(
                        f"⚠️ Please migrate the choice '{choice_detail['value']}' of '{cf_name}'"
                        + " to the new format, as 'weight' is no longer supported!"
                    )
                    custom_field.choices.append(choice_detail["value"])
                else:
                    custom_field.choices.append(choice_detail)

        custom_field.save()

        print("🔧 Created custom field", cf_name)
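The choices handling above accepts two shapes: plain strings (the current format) and the deprecated dicts with a 'value' key that custom_fields.yml still documents for the tests. A small standalone sketch of that normalisation:

# The input list mirrors the two formats documented in initializers/custom_fields.yml.
choices_from_yaml = ["A", "B", {"value": "D like deprecated", "weight": 999}]

normalized = []
for choice_detail in choices_from_yaml:
    if isinstance(choice_detail, dict) and "value" in choice_detail:
        # legacy mode: only the value survives, the weight is dropped
        normalized.append(choice_detail["value"])
    else:
        normalized.append(choice_detail)

print(normalized)  # ['A', 'B', 'D like deprecated']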
startup_scripts/020_tags.py (new file, 23 lines)
import sys

from extras.models import Tag
from startup_script_utils import load_yaml
from utilities.choices import ColorChoices

tags = load_yaml("/opt/netbox/initializers/tags.yml")

if tags is None:
    sys.exit()

for params in tags:
    if "color" in params:
        color = params.pop("color")

        for color_tpl in ColorChoices:
            if color in color_tpl:
                params["color"] = color_tpl[0]

    tag, created = Tag.objects.get_or_create(**params)

    if created:
        print("🎨 Created Tag", tag.name)
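020_tags.py resolves a human-readable colour name by scanning the tuples of ColorChoices and keeping the first element of any tuple that contains the name. A sketch of that lookup with a hypothetical subset of (value, label) pairs; the hex values are assumptions, not taken from NetBox:

# Hypothetical subset standing in for utilities.choices.ColorChoices.
COLOR_CHOICES = [
    ("ff66cc", "Pink"),
    ("00ffff", "Cyan"),
    ("9e9e9e", "Grey"),
]

params = {"name": "Tag 1", "slug": "tag-1", "color": "Pink"}

if "color" in params:
    color = params.pop("color")
    for color_tpl in COLOR_CHOICES:
        if color in color_tpl:
            params["color"] = color_tpl[0]

print(params)  # {'name': 'Tag 1', 'slug': 'tag-1', 'color': 'ff66cc'}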
@@ -1,26 +1,25 @@
(formatting only: imports sorted, strings double-quoted, dict literal condensed; the hunk now reads)
import sys

from dcim.models import Region
from startup_script_utils import load_yaml

regions = load_yaml("/opt/netbox/initializers/regions.yml")

if regions is None:
    sys.exit()

optional_assocs = {"parent": (Region, "name")}

for params in regions:

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    region, created = Region.objects.get_or_create(**params)

    if created:
        print("🌐 Created region", region.name)
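The optional_assocs pattern used by this and the following startup scripts swaps a name taken from YAML for the model instance it refers to before calling get_or_create. A standalone sketch, with fake_lookup standing in for Django's Model.objects.get:

def fake_lookup(model, **query):
    # Stand-in for Model.objects.get(**query); just echoes what would be looked up.
    return f"<{model} {query}>"

optional_assocs = {"parent": ("Region", "name")}
params = {"name": "Downtown", "slug": "downtown", "parent": "Amsterdam"}

for assoc, (model, field) in optional_assocs.items():
    if assoc in params:
        query = {field: params.pop(assoc)}
        params[assoc] = fake_lookup(model, **query)

print(params)
# {'name': 'Downtown', 'slug': 'downtown', 'parent': "<Region {'name': 'Amsterdam'}>"}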
@@ -1,32 +1,29 @@
(formatting only: the star import from startup_script_utils is replaced with explicit names, strings double-quoted, dict literal condensed; the hunk now reads)
import sys

from dcim.models import Region, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant

sites = load_yaml("/opt/netbox/initializers/sites.yml")

if sites is None:
    sys.exit()

optional_assocs = {"region": (Region, "name"), "tenant": (Tenant, "name")}

for params in sites:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    site, created = Site.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(site, custom_field_data)

        print("📍 Created site", site.name)
@@ -1,14 +1,15 @@
(formatting only: imports sorted, strings double-quoted; the hunk now reads)
import sys

from dcim.models import Manufacturer
from startup_script_utils import load_yaml

manufacturers = load_yaml("/opt/netbox/initializers/manufacturers.yml")

if manufacturers is None:
    sys.exit()

for params in manufacturers:
    manufacturer, created = Manufacturer.objects.get_or_create(**params)

    if created:
        print("🏭 Created Manufacturer", manufacturer.name)
@@ -1,42 +1,37 @@
(formatting only: the star import is replaced with explicit names, strings double-quoted, dict literals condensed; the hunk now reads)
import sys

from dcim.models import DeviceType, Manufacturer, Region
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant

device_types = load_yaml("/opt/netbox/initializers/device_types.yml")

if device_types is None:
    sys.exit()

required_assocs = {"manufacturer": (Manufacturer, "name")}

optional_assocs = {"region": (Region, "name"), "tenant": (Tenant, "name")}

for params in device_types:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    device_type, created = DeviceType.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(device_type, custom_field_data)

        print("🔡 Created device type", device_type.manufacturer, device_type.model)
@@ -1,23 +1,23 @@
(formatting only: imports sorted, strings double-quoted; the hunk now reads)
import sys

from dcim.models import RackRole
from startup_script_utils import load_yaml
from utilities.choices import ColorChoices

rack_roles = load_yaml("/opt/netbox/initializers/rack_roles.yml")

if rack_roles is None:
    sys.exit()

for params in rack_roles:
    if "color" in params:
        color = params.pop("color")

        for color_tpl in ColorChoices:
            if color in color_tpl:
                params["color"] = color_tpl[0]

    rack_role, created = RackRole.objects.get_or_create(**params)

    if created:
        print("🎨 Created rack role", rack_role.name)
@@ -1,25 +1,23 @@
(formatting only: imports sorted, strings double-quoted, dict literal condensed; the hunk now reads)
import sys

from dcim.models import RackGroup, Site
from startup_script_utils import load_yaml

rack_groups = load_yaml("/opt/netbox/initializers/rack_groups.yml")

if rack_groups is None:
    sys.exit()

required_assocs = {"site": (Site, "name")}

for params in rack_groups:

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}
        params[assoc] = model.objects.get(**query)

    rack_group, created = RackGroup.objects.get_or_create(**params)

    if created:
        print("🎨 Created rack group", rack_group.name)
@@ -1,43 +1,41 @@
(formatting only: the star import is replaced with explicit names, strings double-quoted; the hunk now reads)
import sys

from dcim.models import Rack, RackGroup, RackRole, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant

racks = load_yaml("/opt/netbox/initializers/racks.yml")

if racks is None:
    sys.exit()

required_assocs = {"site": (Site, "name")}

optional_assocs = {
    "role": (RackRole, "name"),
    "tenant": (Tenant, "name"),
    "group": (RackGroup, "name"),
}

for params in racks:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    rack, created = Rack.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(rack, custom_field_data)

        print("🔳 Created rack", rack.site, rack.name)
@@ -1,24 +1,24 @@
(formatting only: imports sorted, strings double-quoted; the hunk now reads)
import sys

from dcim.models import DeviceRole
from startup_script_utils import load_yaml
from utilities.choices import ColorChoices

device_roles = load_yaml("/opt/netbox/initializers/device_roles.yml")

if device_roles is None:
    sys.exit()

for params in device_roles:

    if "color" in params:
        color = params.pop("color")

        for color_tpl in ColorChoices:
            if color in color_tpl:
                params["color"] = color_tpl[0]

    device_role, created = DeviceRole.objects.get_or_create(**params)

    if created:
        print("🎨 Created device role", device_role.name)
@@ -1,26 +1,27 @@
(formatting only: imports sorted, strings double-quoted; the hunk now reads)
import sys

from dcim.models import Manufacturer, Platform
from startup_script_utils import load_yaml

platforms = load_yaml("/opt/netbox/initializers/platforms.yml")

if platforms is None:
    sys.exit()

optional_assocs = {
    "manufacturer": (Manufacturer, "name"),
}

for params in platforms:

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    platform, created = Platform.objects.get_or_create(**params)

    if created:
        print("💾 Created platform", platform.name)
@@ -1,14 +1,15 @@
(formatting only: imports sorted, strings double-quoted; the hunk now reads)
import sys

from startup_script_utils import load_yaml
from tenancy.models import TenantGroup

tenant_groups = load_yaml("/opt/netbox/initializers/tenant_groups.yml")

if tenant_groups is None:
    sys.exit()

for params in tenant_groups:
    tenant_group, created = TenantGroup.objects.get_or_create(**params)

    if created:
        print("🔳 Created Tenant Group", tenant_group.name)
@@ -1,30 +1,28 @@  (tenant initializer — updated version below; wildcard import replaced with explicit names, strings double-quoted)

import sys

from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant, TenantGroup

tenants = load_yaml("/opt/netbox/initializers/tenants.yml")

if tenants is None:
    sys.exit()

optional_assocs = {"group": (TenantGroup, "name")}

for params in tenants:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    tenant, created = Tenant.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(tenant, custom_field_data)

        print("👩‍💻 Created Tenant", tenant.name)
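All of the initializers above and below follow the same shape: load a YAML list, swap plain name references for model instances, then call get_or_create(). The short, self-contained sketch below imitates only the association-resolution step with plain Python objects so it runs outside of NetBox; the sample data and the FakeTenantGroup class are invented for illustration and are not part of the repository.

# Illustration only: mimics the optional-association lookup the startup
# scripts perform with Django's model.objects.get(**query).
sample_tenants = [
    {"name": "Acme Corp.", "slug": "acme", "group": "Customers"},  # invented data
    {"name": "Internal", "slug": "internal"},
]


class FakeTenantGroup:
    """Stand-in for tenancy.models.TenantGroup in this sketch."""

    _registry = {"Customers": "<TenantGroup: Customers>"}

    class objects:
        @staticmethod
        def get(**query):
            return FakeTenantGroup._registry[query["name"]]


optional_assocs = {"group": (FakeTenantGroup, "name")}

for params in sample_tenants:
    for assoc, (model, field) in optional_assocs.items():
        if assoc in params:
            # Replace the plain string from the YAML file with the object it names.
            params[assoc] = model.objects.get(**{field: params.pop(assoc)})
    print(params)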
startup_scripts/130_cluster_types.py (new file)
@@ -0,0 +1,15 @@

import sys

from startup_script_utils import load_yaml
from virtualization.models import ClusterType

cluster_types = load_yaml("/opt/netbox/initializers/cluster_types.yml")

if cluster_types is None:
    sys.exit()

for params in cluster_types:
    cluster_type, created = ClusterType.objects.get_or_create(**params)

    if created:
        print("🧰 Created Cluster Type", cluster_type.name)
@@ -1,51 +0,0 @@  (file removed)
Removed: the previous devices initializer (51 lines). Its logic matches startup_scripts/140_devices.py shown below; the removed version differed only in formatting (single-quoted strings, a wildcard `from startup_script_utils import *`, and unsorted imports).
startup_scripts/135_cluster_groups.py (new file)
@@ -0,0 +1,15 @@

import sys

from startup_script_utils import load_yaml
from virtualization.models import ClusterGroup

cluster_groups = load_yaml("/opt/netbox/initializers/cluster_groups.yml")

if cluster_groups is None:
    sys.exit()

for params in cluster_groups:
    cluster_group, created = ClusterGroup.objects.get_or_create(**params)

    if created:
        print("🗄️ Created Cluster Group", cluster_group.name)
startup_scripts/135_clusters.py (new file)
@@ -0,0 +1,42 @@

import sys

from dcim.models import Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
from virtualization.models import Cluster, ClusterGroup, ClusterType

clusters = load_yaml("/opt/netbox/initializers/clusters.yml")

if clusters is None:
    sys.exit()

required_assocs = {"type": (ClusterType, "name")}

optional_assocs = {
    "site": (Site, "name"),
    "group": (ClusterGroup, "name"),
    "tenant": (Tenant, "name"),
}

for params in clusters:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    cluster, created = Cluster.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(cluster, custom_field_data)

        print("🗄️ Created cluster", cluster.name)
@@ -1,14 +0,0 @@  (file removed)
Removed: the previous cluster type initializer (14 lines). It is superseded by startup_scripts/130_cluster_types.py above, which carries the same logic with sorted imports and double-quoted strings.
startup_scripts/140_clusters.py (new file)
@@ -0,0 +1,42 @@
Identical in content to startup_scripts/135_clusters.py shown above.
startup_scripts/140_devices.py (new file)
@@ -0,0 +1,51 @@

import sys

from dcim.models import Device, DeviceRole, DeviceType, Platform, Rack, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
from virtualization.models import Cluster

devices = load_yaml("/opt/netbox/initializers/devices.yml")

if devices is None:
    sys.exit()

required_assocs = {
    "device_role": (DeviceRole, "name"),
    "device_type": (DeviceType, "model"),
    "site": (Site, "name"),
}

optional_assocs = {
    "tenant": (Tenant, "name"),
    "platform": (Platform, "name"),
    "rack": (Rack, "name"),
    "cluster": (Cluster, "name"),
}

for params in devices:
    custom_field_data = pop_custom_fields(params)

    # primary ips are handled later in `270_primary_ips.py`
    params.pop("primary_ip4", None)
    params.pop("primary_ip6", None)

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    device, created = Device.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(device, custom_field_data)

        print("🖥️ Created device", device.name)
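For reference, one entry from devices.yml would reach the script above as a plain dict like the sketch below. The key names mirror the associations the script resolves; every value here is invented and is not taken from the repository's example initializers.

# Hypothetical devices.yml entry, as load_yaml() would return it (values invented).
example_device = {
    "name": "core-switch-01",       # used for the creation message
    "device_role": "Core Switch",   # required: resolved via DeviceRole (by name)
    "device_type": "Example-48T",   # required: resolved via DeviceType (by model)
    "site": "AMS 1",                # required: resolved via Site (by name)
    "rack": "rack-01",              # optional: resolved via Rack (by name)
    "primary_ip4": "10.0.0.1/24",   # popped here, handled later by 270_primary_ips.py
}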
startup_scripts/145_devices.py (new file)
@@ -0,0 +1,51 @@
Identical in content to startup_scripts/140_devices.py shown above.
@@ -1,14 +1,15 @@  (RIR initializer — updated version below; imports re-sorted, strings double-quoted)

import sys

from ipam.models import RIR
from startup_script_utils import load_yaml

rirs = load_yaml("/opt/netbox/initializers/rirs.yml")

if rirs is None:
    sys.exit()

for params in rirs:
    rir, created = RIR.objects.get_or_create(**params)

    if created:
        print("🗺️ Created RIR", rir.name)
@@ -1,32 +1,42 @@  (aggregate initializer — updated version below; explicit imports, double-quoted strings, and a new optional "tenant" association)

import sys

from ipam.models import RIR, Aggregate
from netaddr import IPNetwork
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant

aggregates = load_yaml("/opt/netbox/initializers/aggregates.yml")

if aggregates is None:
    sys.exit()

required_assocs = {"rir": (RIR, "name")}

optional_assocs = {
    "tenant": (Tenant, "name"),
}

for params in aggregates:
    custom_field_data = pop_custom_fields(params)

    params["prefix"] = IPNetwork(params["prefix"])

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    aggregate, created = Aggregate.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(aggregate, custom_field_data)

        print("🗞️ Created Aggregate", aggregate.prefix)
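The aggregate and prefix initializers convert the YAML prefix string into a netaddr.IPNetwork object before handing it to Django. A quick standalone check of what that object carries (the example network is arbitrary):

# Standalone netaddr check; not part of the repository scripts.
from netaddr import IPNetwork

net = IPNetwork("10.0.0.0/16")
print(net.network, net.prefixlen, net.size)  # 10.0.0.0 16 65536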
startup_scripts/165_cluster_groups.py (new file)
@@ -0,0 +1,15 @@
Identical in content to startup_scripts/135_cluster_groups.py shown above.
@@ -1,42 +0,0 @@  (file removed)
Removed: the previous clusters initializer (42 lines). It is superseded by startup_scripts/135_clusters.py above; the removed version used a wildcard import and single-quoted strings and did not yet resolve the optional "tenant" association.
startup_scripts/175_route_targets.py (new file)
@@ -0,0 +1,29 @@

import sys

from ipam.models import RouteTarget
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant

route_targets = load_yaml("/opt/netbox/initializers/route_targets.yml")

if route_targets is None:
    sys.exit()

optional_assocs = {"tenant": (Tenant, "name")}

for params in route_targets:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    route_target, created = RouteTarget.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(route_target, custom_field_data)

        print("🎯 Created Route Target", route_target.name)
@@ -1,31 +1,29 @@  (VRF initializer — updated version below; explicit imports, double-quoted strings)

import sys

from ipam.models import VRF
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant

vrfs = load_yaml("/opt/netbox/initializers/vrfs.yml")

if vrfs is None:
    sys.exit()

optional_assocs = {"tenant": (Tenant, "name")}

for params in vrfs:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    vrf, created = VRF.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(vrf, custom_field_data)

        print("📦 Created VRF", vrf.name)
@@ -1,14 +1,15 @@  (prefix/VLAN role initializer — updated version below; imports re-sorted, strings double-quoted)

import sys

from ipam.models import Role
from startup_script_utils import load_yaml

roles = load_yaml("/opt/netbox/initializers/prefix_vlan_roles.yml")

if roles is None:
    sys.exit()

for params in roles:
    role, created = Role.objects.get_or_create(**params)

    if created:
        print("⛹️ Created Prefix/VLAN Role", role.name)
@@ -2,30 +2,28 @@ import sys  (VLAN group initializer — updated version below; explicit imports, double-quoted strings)

from dcim.models import Site
from ipam.models import VLANGroup
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values

vlan_groups = load_yaml("/opt/netbox/initializers/vlan_groups.yml")

if vlan_groups is None:
    sys.exit()

optional_assocs = {"site": (Site, "name")}

for params in vlan_groups:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    vlan_group, created = VLANGroup.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(vlan_group, custom_field_data)

        print("🏘️ Created VLAN Group", vlan_group.name)
@@ -1,36 +1,36 @@  (VLAN initializer — updated version below; explicit imports, double-quoted strings)

import sys

from dcim.models import Site
from ipam.models import VLAN, Role, VLANGroup
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant, TenantGroup

vlans = load_yaml("/opt/netbox/initializers/vlans.yml")

if vlans is None:
    sys.exit()

optional_assocs = {
    "site": (Site, "name"),
    "tenant": (Tenant, "name"),
    "tenant_group": (TenantGroup, "name"),
    "group": (VLANGroup, "name"),
    "role": (Role, "name"),
}

for params in vlans:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    vlan, created = VLAN.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(vlan, custom_field_data)

        print("🏠 Created VLAN", vlan.name)
@@ -1,39 +1,39 @@  (prefix initializer — updated version below; explicit imports, double-quoted strings)

import sys

from dcim.models import Site
from ipam.models import VLAN, VRF, Prefix, Role
from netaddr import IPNetwork
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant, TenantGroup

prefixes = load_yaml("/opt/netbox/initializers/prefixes.yml")

if prefixes is None:
    sys.exit()

optional_assocs = {
    "site": (Site, "name"),
    "tenant": (Tenant, "name"),
    "tenant_group": (TenantGroup, "name"),
    "vlan": (VLAN, "name"),
    "role": (Role, "name"),
    "vrf": (VRF, "name"),
}

for params in prefixes:
    custom_field_data = pop_custom_fields(params)

    params["prefix"] = IPNetwork(params["prefix"])

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}
            params[assoc] = model.objects.get(**query)

    prefix, created = Prefix.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(prefix, custom_field_data)

        print("📌 Created Prefix", prefix.prefix)
@@ -1,48 +1,46 @@  (virtual machine initializer — updated version below; explicit imports, double-quoted strings)

import sys

from dcim.models import DeviceRole, Platform
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
from virtualization.models import Cluster, VirtualMachine

virtual_machines = load_yaml("/opt/netbox/initializers/virtual_machines.yml")

if virtual_machines is None:
    sys.exit()

required_assocs = {"cluster": (Cluster, "name")}

optional_assocs = {
    "tenant": (Tenant, "name"),
    "platform": (Platform, "name"),
    "role": (DeviceRole, "name"),
}

for params in virtual_machines:
    custom_field_data = pop_custom_fields(params)

    # primary ips are handled later in `270_primary_ips.py`
    params.pop("primary_ip4", None)
    params.pop("primary_ip6", None)

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    virtual_machine, created = VirtualMachine.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(virtual_machine, custom_field_data)

        print("🖥️ Created virtual machine", virtual_machine.name)
@@ -1,29 +1,27 @@  (virtual machine interface initializer — updated version below; explicit imports, double-quoted strings)

import sys

from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from virtualization.models import VirtualMachine, VMInterface

interfaces = load_yaml("/opt/netbox/initializers/virtualization_interfaces.yml")

if interfaces is None:
    sys.exit()

required_assocs = {"virtual_machine": (VirtualMachine, "name")}

for params in interfaces:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    interface, created = VMInterface.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(interface, custom_field_data)

        print("🧷 Created interface", interface.name, interface.virtual_machine.name)
@@ -1,29 +1,27 @@  (device interface initializer — updated version below; explicit imports, double-quoted strings)

import sys

from dcim.models import Device, Interface
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values

interfaces = load_yaml("/opt/netbox/initializers/dcim_interfaces.yml")

if interfaces is None:
    sys.exit()

required_assocs = {"device": (Device, "name")}

for params in interfaces:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    interface, created = Interface.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(interface, custom_field_data)

        print("🧷 Created interface", interface.name, interface.device.name)
@@ -5,56 +5,58 @@ from django.contrib.contenttypes.models import ContentType  (IP address initializer — updated version below; explicit imports, double-quoted strings, long ContentType lookup wrapped)

from django.db.models import Q
from ipam.models import VRF, IPAddress
from netaddr import IPNetwork
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
from virtualization.models import VirtualMachine, VMInterface

ip_addresses = load_yaml("/opt/netbox/initializers/ip_addresses.yml")

if ip_addresses is None:
    sys.exit()

optional_assocs = {
    "tenant": (Tenant, "name"),
    "vrf": (VRF, "name"),
    "interface": (None, None),
}

vm_interface_ct = ContentType.objects.filter(
    Q(app_label="virtualization", model="vminterface")
).first()
interface_ct = ContentType.objects.filter(Q(app_label="dcim", model="interface")).first()

for params in ip_addresses:
    custom_field_data = pop_custom_fields(params)

    vm = params.pop("virtual_machine", None)
    device = params.pop("device", None)
    params["address"] = IPNetwork(params["address"])

    if vm and device:
        print("IP Address can only specify one of the following: virtual_machine or device.")
        sys.exit()

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            if assoc == "interface":
                if vm:
                    vm_id = VirtualMachine.objects.get(name=vm).id
                    query = {"name": params.pop(assoc), "virtual_machine_id": vm_id}
                    params["assigned_object_type"] = vm_interface_ct
                    params["assigned_object_id"] = VMInterface.objects.get(**query).id
                elif device:
                    dev_id = Device.objects.get(name=device).id
                    query = {"name": params.pop(assoc), "device_id": dev_id}
                    params["assigned_object_type"] = interface_ct
                    params["assigned_object_id"] = Interface.objects.get(**query).id
            else:
                query = {field: params.pop(assoc)}
                params[assoc] = model.objects.get(**query)

    ip_address, created = IPAddress.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(ip_address, custom_field_data)

        print("🧬 Created IP Address", ip_address.address)
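The entries this script consumes can bind an address either to a VM interface or to a device interface, never both (the script exits otherwise). The two examples below show those shapes as load_yaml() would return them; the dicts and all field values are invented for illustration.

# Invented ip_addresses.yml entries (Python form), one per supported shape.
ip_on_vm = {
    "address": "10.0.0.10/24",
    "virtual_machine": "example-vm-01",  # selects the VMInterface owner
    "interface": "eth0",
}

ip_on_device = {
    "address": "10.0.0.11/24",
    "device": "core-switch-01",          # selects the Interface owner
    "interface": "GigabitEthernet0/1",
    "vrf": "Example-VRF",                # optional, looked up by name
}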
@@ -1,43 +1,47 @@  (primary IP initializer — updated version below; imports re-sorted, strings double-quoted, and the link_primary_ip() calls are now guarded by None checks)

import sys

from dcim.models import Device
from ipam.models import IPAddress
from startup_script_utils import load_yaml
from virtualization.models import VirtualMachine


def link_primary_ip(assets, asset_model):
    for params in assets:
        primary_ip_fields = set(params) & {"primary_ip4", "primary_ip6"}
        if not primary_ip_fields:
            continue

        for assoc, details in optional_assocs.items():
            if assoc in params:
                model, field = details
                query = {field: params.pop(assoc)}

                try:
                    params[assoc] = model.objects.get(**query)
                except model.DoesNotExist:
                    primary_ip_fields -= {assoc}
                    print(f"⚠️ IP Address '{query[field]}' not found")

        asset = asset_model.objects.get(name=params["name"])
        for field in primary_ip_fields:
            if getattr(asset, field) != params[field]:
                setattr(asset, field, params[field])
                print(f"🔗 Define primary IP '{params[field].address}' on '{asset.name}'")
        asset.save()


devices = load_yaml("/opt/netbox/initializers/devices.yml")
virtual_machines = load_yaml("/opt/netbox/initializers/virtual_machines.yml")

optional_assocs = {
    "primary_ip4": (IPAddress, "address"),
    "primary_ip6": (IPAddress, "address"),
}

if devices is None and virtual_machines is None:
    sys.exit()
if devices is not None:
    link_primary_ip(devices, Device)
if virtual_machines is not None:
    link_primary_ip(virtual_machines, VirtualMachine)
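A small standalone illustration of the set-intersection idiom link_primary_ip() uses to find which primary-IP keys an entry actually defines; the sample dict is invented and the snippet is not part of the repository.

params = {"name": "core-switch-01", "primary_ip4": "10.0.0.11/24"}  # invented entry
primary_ip_fields = set(params) & {"primary_ip4", "primary_ip6"}
print(primary_ip_fields)  # {'primary_ip4'}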
startup_scripts/280_custom_links.py (new file)
@@ -0,0 +1,33 @@

import sys

from django.contrib.contenttypes.models import ContentType
from extras.models import CustomLink
from startup_script_utils import load_yaml

custom_links = load_yaml("/opt/netbox/initializers/custom_links.yml")

if custom_links is None:
    sys.exit()


def get_content_type_id(content_type):
    try:
        return ContentType.objects.get(model=content_type).id
    except ContentType.DoesNotExist:
        pass


for link in custom_links:
    content_type = link.pop("content_type")
    link["content_type_id"] = get_content_type_id(content_type)
    if link["content_type_id"] is None:
        print(
            "⚠️ Unable to create Custom Link '{0}': The content_type '{1}' is unknown".format(
                link.name, content_type
            )
        )
        continue

    custom_link, created = CustomLink.objects.get_or_create(**link)
    if created:
        print("🔗 Created Custom Link '{0}'".format(custom_link.name))
startup_scripts/280_providers.py (new file)
@@ -0,0 +1,19 @@

import sys

from circuits.models import Provider
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values

providers = load_yaml("/opt/netbox/initializers/providers.yml")

if providers is None:
    sys.exit()

for params in providers:
    custom_field_data = pop_custom_fields(params)

    provider, created = Provider.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(provider, custom_field_data)

        print("📡 Created provider", provider.name)
startup_scripts/290_circuit_types.py (new file)
@@ -0,0 +1,19 @@

import sys

from circuits.models import CircuitType
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values

circuit_types = load_yaml("/opt/netbox/initializers/circuit_types.yml")

if circuit_types is None:
    sys.exit()

for params in circuit_types:
    custom_field_data = pop_custom_fields(params)

    circuit_type, created = CircuitType.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(circuit_type, custom_field_data)

        print("⚡ Created Circuit Type", circuit_type.name)
startup_scripts/290_webhooks.py (new file)
@@ -0,0 +1,34 @@

import sys

from django.contrib.contenttypes.models import ContentType
from extras.models import Webhook
from startup_script_utils import load_yaml

webhooks = load_yaml("/opt/netbox/initializers/webhooks.yml")

if webhooks is None:
    sys.exit()


def get_content_type_id(hook_name, content_type):
    try:
        return ContentType.objects.get(model=content_type).id
    except ContentType.DoesNotExist as ex:
        print("⚠️ Webhook '{0}': The object_type '{1}' is unknown.".format(hook_name, content_type))
        raise ex


for hook in webhooks:
    obj_types = hook.pop("object_types")

    try:
        obj_type_ids = [get_content_type_id(hook["name"], obj) for obj in obj_types]
    except ContentType.DoesNotExist:
        continue

    webhook, created = Webhook.objects.get_or_create(**hook)
    if created:
        webhook.content_types.set(obj_type_ids)
        webhook.save()

        print("🪝 Created Webhook {0}".format(webhook.name))
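A webhooks.yml entry reaches the loop above as a dict whose "object_types" list is popped and translated into ContentType ids before get_or_create() runs. The example below is invented: only "name" and "object_types" are keys the script itself touches, and the remaining fields are assumptions about what a Webhook record might carry rather than values taken from the repository.

# Hypothetical webhooks.yml entry (values and the extra fields are assumptions).
example_webhook = {
    "name": "device-changes",
    "object_types": ["device", "virtualmachine"],   # mapped to ContentType ids
    "payload_url": "https://example.com/netbox-hook",
    "type_create": True,
}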
startup_scripts/300_circuits.py (new file)
@@ -0,0 +1,37 @@

import sys

from circuits.models import Circuit, CircuitType, Provider
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant

circuits = load_yaml("/opt/netbox/initializers/circuits.yml")

if circuits is None:
    sys.exit()

required_assocs = {"provider": (Provider, "name"), "type": (CircuitType, "name")}

optional_assocs = {"tenant": (Tenant, "name")}

for params in circuits:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    circuit, created = Circuit.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(circuit, custom_field_data)

        print("⚡ Created Circuit", circuit.cid)
startup_scripts/310_secret_roles.py (new file)
@@ -0,0 +1,15 @@

import sys
from secrets.models import SecretRole

from startup_script_utils import load_yaml

secret_roles = load_yaml("/opt/netbox/initializers/secret_roles.yml")

if secret_roles is None:
    sys.exit()

for params in secret_roles:
    secret_role, created = SecretRole.objects.get_or_create(**params)

    if created:
        print("🔑 Created Secret Role", secret_role.name)
startup_scripts/320_services.py (new file)
@@ -0,0 +1,30 @@

import sys

from dcim.models import Device
from ipam.models import Service
from startup_script_utils import load_yaml
from virtualization.models import VirtualMachine

services = load_yaml("/opt/netbox/initializers/services.yml")

if services is None:
    sys.exit()

optional_assocs = {
    "device": (Device, "name"),
    "virtual_machine": (VirtualMachine, "name"),
}

for params in services:

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    service, created = Service.objects.get_or_create(**params)

    if created:
        print("🧰 Created Service", service.name)
startup_scripts/330_power_panels.py (new file)
@@ -0,0 +1,36 @@

import sys

from dcim.models import PowerPanel, RackGroup, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values

power_panels = load_yaml("/opt/netbox/initializers/power_panels.yml")

if power_panels is None:
    sys.exit()

required_assocs = {"site": (Site, "name")}

optional_assocs = {"rack_group": (RackGroup, "name")}

for params in power_panels:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    power_panel, created = PowerPanel.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(power_panel, custom_field_data)

        print("⚡ Created Power Panel", power_panel.site, power_panel.name)
startup_scripts/340_power_feeds.py (new file)
@@ -0,0 +1,36 @@

import sys

from dcim.models import PowerFeed, PowerPanel, Rack
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values

power_feeds = load_yaml("/opt/netbox/initializers/power_feeds.yml")

if power_feeds is None:
    sys.exit()

required_assocs = {"power_panel": (PowerPanel, "name")}

optional_assocs = {"rack": (Rack, "name")}

for params in power_feeds:
    custom_field_data = pop_custom_fields(params)

    for assoc, details in required_assocs.items():
        model, field = details
        query = {field: params.pop(assoc)}

        params[assoc] = model.objects.get(**query)

    for assoc, details in optional_assocs.items():
        if assoc in params:
            model, field = details
            query = {field: params.pop(assoc)}

            params[assoc] = model.objects.get(**query)

    power_feed, created = PowerFeed.objects.get_or_create(**params)

    if created:
        set_custom_fields_values(power_feed, custom_field_data)

        print("⚡ Created Power Feed", power_feed.name)
@@ -2,28 +2,30 @@  (startup-script runner — updated version below; imports re-sorted, keyword spacing and quotes normalized, extra blank lines around the helper)

import runpy
from os import scandir
from os.path import abspath, dirname

this_dir = dirname(abspath(__file__))


def filename(f):
    return f.name


with scandir(this_dir) as it:
    for f in sorted(it, key=filename):
        if not f.is_file():
            continue

        if f.name.startswith("__"):
            continue

        if not f.name.endswith(".py"):
            continue

        print(f"▶️ Running the startup script {f.path}")
        try:
            runpy.run_path(f.path)
        except SystemExit as e:
            if e.code is not None and e.code != 0:
                print(f"‼️ The startup script {f.path} returned with code {e.code}, exiting.")
                raise
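The runner executes the scripts in lexical file-name order, which is why every initializer carries a numeric prefix. A standalone illustration of that ordering (two file names taken from this diff, the others invented):

names = ["270_primary_ips.py", "130_cluster_types.py", "000_example.py", "140_devices.py"]
print(sorted(names))
# ['000_example.py', '130_cluster_types.py', '140_devices.py', '270_primary_ips.py']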
@@ -1,3 +1,3 @@  (startup_script_utils package __init__ — imports re-sorted)

from .custom_fields import pop_custom_fields, set_custom_fields_values
from .load_yaml import load_yaml
from .permissions import set_permissions
Some files were not shown because too many files have changed in this diff.