Compare commits

...

80 Commits
1.6.0 ... 2.1.0

Author SHA1 Message Date
ebda4660de Merge pull request #809 from netbox-community/develop
Version 2.1.0
2022-07-25 08:19:22 +02:00
f44e377e29 Merge branch 'release' into develop 2022-07-25 08:14:46 +02:00
0a6d4c998d Preparation for 2.1.0 2022-07-22 10:15:51 +02:00
bd9733d929 Merge pull request #808 from tobiasge/ldap-tls-fix
LDAP TLS connections use the system trusted root store
2022-07-21 11:29:27 +02:00
9ae3282dcf Install libldap-common
This installs the LDAP configuration file which is needed to load the trusted root certificates from the system.
2022-07-21 09:13:11 +02:00
e62fedbd5e Merge pull request #786 from rjmidau/oidc
Add requirements for OIDC SSO
2022-07-19 13:58:48 +02:00
c53ae2afa0 Merge pull request #805 from tobiasge/with-ubuntu
Using Ubuntu 22.04 because Debian has old packages
2022-07-19 11:37:16 +02:00
45e7f6a30c Using Ubuntu 22.04 because Debian has old packages
With Debian, the Quay.io security checker found several issues in the
image. With Ubuntu we have newer versions of all packages and therefore
fewer (or no) issues.
2022-07-19 09:42:56 +02:00
8fbedf2886 Merge pull request #802 from netbox-community/renovate/docker-setup-buildx-action-2.x
Update docker/setup-buildx-action action to v2
2022-07-16 10:24:23 +02:00
c0063a6573 Update docker/setup-buildx-action action to v2 2022-07-16 08:23:01 +00:00
c9795a8213 Merge pull request #803 from netbox-community/renovate/docker-setup-qemu-action-2.x
Update docker/setup-qemu-action action to v2
2022-07-16 10:22:43 +02:00
8aec402ea2 Update docker/setup-qemu-action action to v2 2022-07-15 18:36:01 +00:00
adc2079b17 Merge pull request #797 from tobiasge/arm64-auto-build
Arm64 auto build
2022-07-15 16:56:57 +02:00
cee1b5b079 Build ARM64 images 2022-07-15 15:06:16 +02:00
13d66f2ae7 Merge pull request #778 from tobiasge/path-update
Added our Python venv to the PATH variable
2022-07-14 22:37:14 +02:00
901ac05e99 Added our Python venv to the PATH variable
Now users can run "manage.py" without specifying the full path.
2022-07-13 15:53:56 +02:00
2bdaed1e6f Add requirements for OIDC SSO 2022-07-13 16:55:38 +10:00
b45934cd9e Merge pull request #796 from netbox-community/develop
Version 2.0.0
2022-07-12 18:10:25 +02:00
fceb6e0e13 Removed CSRF_TRUSTED_ORIGINS from extra.py
CSRF_TRUSTED_ORIGINS is already in configuration.py
2022-07-12 17:16:20 +02:00
f05a9c67ae Preparation for 2.0.0 2022-07-12 16:50:32 +02:00
f2d1e62204 Merge pull request #791 from netbox-community/renovate/napalm-4.x
Update dependency napalm to v4
2022-07-12 07:53:50 +02:00
8f704f220a Update dependency napalm to v4 2022-07-11 13:11:13 +00:00
d5093201ee Merge pull request #780 from tobiasge/fixed-comment
Fixed comment and variable name
2022-06-22 12:51:27 +02:00
401777adff Fixed comment and variable name 2022-06-22 12:28:33 +02:00
f80cc70d76 Merge pull request #776 from tobiasge/image-label-update
Updated image labels and build script
2022-06-16 19:21:39 +02:00
5b8bf780df Updated image labels and build script 2022-06-15 16:18:21 +02:00
bce52596a5 Merge pull request #775 from tobiasge/debian-based
Debian based
2022-06-15 11:02:30 +02:00
c3c94b0a63 Used version number and remove explicit dependency 2022-06-15 10:15:15 +02:00
14c30fb81c Changed the ignored warnings 2022-06-15 09:04:00 +02:00
1130ff6c6d Merge branch 'csrf-trusted-origins' into develop 2022-06-12 10:52:10 +02:00
993c93b34a Add CSRF option to extra.py 2022-06-12 10:51:56 +02:00
dcf0bdb950 Added psycopg2 as additional dependency
With psycopg2-binary the image doesn't work on ARM64.
2022-06-10 10:38:21 +02:00
9e2f4313fb First version of Debian based image 2022-06-10 09:31:41 +02:00
df41020cb8 Merge pull request #753 from netbox-community/renovate/redis-7.x
Update dependency redis to v7
2022-06-09 15:35:34 +02:00
1332df4857 Merge pull request #759 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.1.0
2022-06-09 15:35:12 +02:00
3f23419bb7 Merge pull request #768 from FliesLikeABrick/feature/skip-git-no-git-operations-fix-#765
Proposing SKIP_GIT now skips all git operations
2022-06-09 15:34:53 +02:00
184ff72912 Update dependency redis to v7 2022-06-09 06:12:46 +00:00
55ee95df78 Update dependency django-auth-ldap to v4.1.0 2022-06-09 06:12:42 +00:00
78fe47aaba Merge pull request #774 from netbox-community/renovate/actions-setup-python-4.x
Update actions/setup-python action to v4
2022-06-09 08:12:17 +02:00
1370596f27 Fixed missing Python version
The Python version has to be set with v4 of the action. The version is now fixed to what is in Alpine 3.14.
2022-06-09 07:47:48 +02:00
606e56d78f Update actions/setup-python action to v4 2022-06-08 19:42:43 +00:00
51226c8e50 Proposed fix for netbox-docker #765 -- SKIP_GIT will skip other git operations 2022-05-25 10:43:23 -04:00
6c9d4aebac Merge pull request #757 from netbox-community/renovate/docker-login-action-2.x
Update docker/login-action action to v2
2022-05-06 08:53:08 +02:00
ed8b42fbde Update docker/login-action action to v2 2022-05-05 19:01:27 +00:00
aa56f645e9 Merge pull request #754 from netbox-community/renovate/napalm-3.x
Update dependency napalm to v3.4.1
2022-05-05 16:41:23 +02:00
9a1bb788d2 Update dependency napalm to v3.4.1 2022-04-29 17:02:51 +00:00
7d32f79379 Merge pull request #750 from thomas-mc-work/patch-2
Add MAPS_URL to config
2022-04-25 17:04:04 +02:00
0410cf2fd2 Merge pull request #728 from Lon1/contact-startups
Contact startups
2022-04-25 17:03:31 +02:00
c9f5e34c0d Improved contact initializer examples 2022-04-25 16:38:09 +02:00
047f2abdb5 adding contact startups 2022-04-25 15:51:32 +02:00
f13a6573a8 Merge pull request #736 from kr3ator/feature/cable_initializers
Startup script for cables
2022-04-25 15:44:36 +02:00
596bb6953c preserve sort order 2022-04-19 12:59:27 +02:00
d482e623df fix: Template and non-template fields example 2022-04-19 12:54:37 +02:00
bf910dea02 Handle MAPS_URL config value
Regarding https://github.com/netbox-community/netbox/blob/develop/docs/configuration/dynamic-settings.md#maps_url
2022-04-19 12:21:07 +02:00
57da852af6 Cabling script minor updates 2022-04-12 14:47:24 +02:00
4c21344e8b Merge pull request #744 from kr3ator/feature/interfaces_improvements
Add support for bridge, lag, parent in DCIM Interfaces startup script
2022-04-12 14:07:56 +02:00
302c0fed59 Cable startup script 2022-04-12 13:51:57 +02:00
0e7afe466d feat: Add support for bridge, lag, parent 2022-04-12 13:49:44 +02:00
2c757af250 Merge pull request #742 from kr3ator/feature/device_type_components
Add support for DeviceType components
2022-04-12 13:29:28 +02:00
27f28935d7 Merge pull request #738 from RobinBeismann/develop
Added environment variable for CSRF_TRUSTED_ORIGINS
2022-04-08 17:03:28 +02:00
12753dd7d4 Document field name precedence 2022-04-08 15:57:52 +02:00
dd8dce1b49 feat: Add support for DeviceType components 2022-04-08 15:57:46 +02:00
19280c2bb0 Fixed default value to reflect upstream 2022-04-08 15:36:49 +02:00
5c4a1cc082 Merge pull request #729 from kr3ator/feature/separate_default_params
feat: Make startup scripts idempotent
2022-04-08 15:17:07 +02:00
a63af05bec Update initializers/users.yml
Co-authored-by: Christian Mäder <cimnine@users.noreply.github.com>
2022-04-08 14:57:37 +02:00
9be7b0e109 feat: Make startup scripts idempotent 2022-04-07 19:47:19 +02:00
d5b1d9ce39 Added environment variable for CSRF_TRUSTED_ORIGINS 2022-04-07 16:09:27 +02:00
a6eb4fef00 Merge pull request #730 from kr3ator/bugfix/cf_creation
Fix setting custom field data if custom field object is missing
2022-04-07 09:01:04 +02:00
d1c69e8fe5 fix: invalid Interface optional assoc 2022-04-06 16:45:25 +02:00
81d9e4f560 Fix setting CF data if CF object is missing 2022-04-06 14:09:07 +02:00
61a3afbb3b Merge pull request #734 from netbox-community/develop
Version 1.6.1
2022-04-06 09:58:45 +02:00
91ab616cc5 Preparation for 1.6.1 2022-04-06 09:39:22 +02:00
43d62f1284 Merge pull request #733 from tobiasge/remove-tzdata
tzdata is already required in Netbox
2022-04-06 09:03:28 +02:00
d61470d6ef Merge pull request #725 from netbox-community/renovate/napalm-3.x
Update dependency napalm to v3.4.0
2022-04-06 08:50:29 +02:00
091d23d537 tzdata is already required in Netbox 2022-04-06 08:43:10 +02:00
2f24902436 Update dependency napalm to v3.4.0 2022-03-21 20:43:51 +00:00
36d47b9b88 Merge pull request #711 from netbox-community/renovate/actions-checkout-3.x
Update actions/checkout action to v3
2022-03-02 07:46:38 +01:00
2c20771682 Update actions/checkout action to v3 2022-03-01 19:06:16 +00:00
a9cdec6d87 Merge pull request #708 from netbox-community/renovate/actions-setup-python-3.x
Update actions/setup-python action to v3
2022-03-01 09:17:08 +01:00
f1efccea6b Update actions/setup-python action to v3 2022-02-28 13:52:44 +00:00
66 changed files with 1358 additions and 552 deletions

View File

@ -1,3 +1,4 @@
---
name: push
on:
@ -13,53 +14,66 @@ jobs:
runs-on: ubuntu-latest
name: Checks syntax of our code
steps:
- uses: actions/checkout@v2
with:
# Full git history is needed to get a proper list of changed files within `super-linter`
fetch-depth: 0
- uses: actions/setup-python@v2
- name: Lint Code Base
uses: github/super-linter@v4
env:
DEFAULT_BRANCH: develop
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SUPPRESS_POSSUM: true
LINTER_RULES_PATH: /
VALIDATE_ALL_CODEBASE: false
VALIDATE_DOCKERFILE: false
FILTER_REGEX_EXCLUDE: (.*/)?(LICENSE|configuration/.*)
EDITORCONFIG_FILE_NAME: .ecrc
DOCKERFILE_HADOLINT_FILE_NAME: .hadolint.yaml
MARKDOWN_CONFIG_FILE: .markdown-lint.yml
PYTHON_BLACK_CONFIG_FILE: pyproject.toml
PYTHON_FLAKE8_CONFIG_FILE: .flake8
PYTHON_ISORT_CONFIG_FILE: pyproject.toml
- uses: actions/checkout@v3
with:
# Full git history is needed to get a proper
# list of changed files within `super-linter`
fetch-depth: 0
- uses: actions/setup-python@v4
with:
python-version: '3.9'
- name: Lint Code Base
uses: github/super-linter@v4
env:
DEFAULT_BRANCH: develop
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SUPPRESS_POSSUM: true
LINTER_RULES_PATH: /
VALIDATE_ALL_CODEBASE: false
VALIDATE_DOCKERFILE: false
FILTER_REGEX_EXCLUDE: (.*/)?(LICENSE|configuration/.*)
EDITORCONFIG_FILE_NAME: .ecrc
DOCKERFILE_HADOLINT_FILE_NAME: .hadolint.yaml
MARKDOWN_CONFIG_FILE: .markdown-lint.yml
PYTHON_BLACK_CONFIG_FILE: pyproject.toml
PYTHON_FLAKE8_CONFIG_FILE: .flake8
PYTHON_ISORT_CONFIG_FILE: pyproject.toml
YAML_CONFIG_FILE: .yamllint.yaml
build:
continue-on-error: ${{ matrix.docker_from == 'alpine:edge' }}
continue-on-error: ${{ matrix.build_cmd != './build-latest.sh' }}
strategy:
matrix:
build_cmd:
- ./build-latest.sh
- PRERELEASE=true ./build-latest.sh
- ./build.sh feature
- ./build.sh develop
docker_from:
- '' # use the default of the build script
- ./build-latest.sh
- PRERELEASE=true ./build-latest.sh
- ./build.sh feature
- ./build.sh develop
platform:
- linux/amd64
- linux/arm64
fail-fast: false
env:
GH_ACTION: enable
IMAGE_NAMES: docker.io/netboxcommunity/netbox
runs-on: ubuntu-latest
name: Builds new NetBox Docker Images
steps:
- id: git-checkout
name: Checkout
uses: actions/checkout@v2
- id: docker-build
name: Build the image from '${{ matrix.docker_from }}' with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }}
env:
DOCKER_FROM: ${{ matrix.docker_from }}
GH_ACTION: enable
- id: docker-test
name: Test the image
run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
if: steps.docker-build.outputs.skipped != 'true'
- id: git-checkout
name: Checkout
uses: actions/checkout@v3
- id: qemu-setup
name: Set up QEMU
uses: docker/setup-qemu-action@v2
- id: buildx-setup
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- id: docker-build
name: Build the image for '${{ matrix.platform }}' with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }}
env:
BUILDX_PLATFORM: ${{ matrix.platform }}
BUILDX_BUILDER_NAME: ${{ steps.buildx-setup.outputs.name }}
- id: docker-test
name: Test the image
run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
if: steps.docker-build.outputs.skipped != 'true'
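Each cell of the build matrix above corresponds to one invocation of the build script with the platform exported as an environment variable. A roughly equivalent local invocation for a single cell (illustrative sketch assuming a checkout of netbox-docker; the GH_ACTION registry checks are left out):

# build_cmd='./build.sh develop', platform='linux/arm64'
BUILDX_PLATFORM=linux/arm64 ./build.sh develop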

View File

@ -1,3 +1,4 @@
---
name: release
on:
@ -6,82 +7,77 @@ on:
- published
schedule:
- cron: '45 5 * * *'
workflow_dispatch:
jobs:
build:
strategy:
matrix:
build_cmd:
- ./build-latest.sh
- PRERELEASE=true ./build-latest.sh
- ./build.sh feature
- ./build.sh develop
- ./build-latest.sh
- PRERELEASE=true ./build-latest.sh
- ./build.sh feature
- ./build.sh develop
platform:
- linux/amd64,linux/arm64
fail-fast: false
runs-on: ubuntu-latest
name: Builds new NetBox Docker Images
env:
GH_ACTION: enable
IMAGE_NAMES: docker.io/netboxcommunity/netbox quay.io/netboxcommunity/netbox ghcr.io/netbox-community/netbox
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Get Version of NetBox Docker
run: |
echo "::set-output name=version::$(cat VERSION)"
shell: bash
-
id: docker-build
name: Build the image with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }}
-
name: Test the image
run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
if: steps.docker-build.outputs.skipped != 'true'
# docker.io
-
name: Login to docker.io
uses: docker/login-action@v1
with:
registry: docker.io
username: ${{ secrets.dockerhub_username }}
password: ${{ secrets.dockerhub_password }}
if: steps.docker-build.outputs.skipped != 'true'
-
name: Push the image to docker.io
run: ${{ matrix.build_cmd }} --push-only
if: steps.docker-build.outputs.skipped != 'true'
# quay.io
-
name: Login to Quay.io
uses: docker/login-action@v1
with:
registry: quay.io
username: ${{ secrets.quayio_username }}
password: ${{ secrets.quayio_password }}
if: steps.docker-build.outputs.skipped != 'true'
-
name: Build and push the image with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }} --push
env:
DOCKER_REGISTRY: quay.io
if: steps.docker-build.outputs.skipped != 'true'
# ghcr.io
-
name: Login to GitHub Container Registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
if: steps.docker-build.outputs.skipped != 'true'
-
name: Build and push the image with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }} --push
env:
DOCKER_REGISTRY: ghcr.io
DOCKER_ORG: netbox-community
if: steps.docker-build.outputs.skipped != 'true'
- id: source-checkout
name: Checkout
uses: actions/checkout@v3
- id: set-netbox-docker-version
name: Get Version of NetBox Docker
run: echo "::set-output name=version::$(cat VERSION)"
shell: bash
- id: qemu-setup
name: Set up QEMU
uses: docker/setup-qemu-action@v2
- id: buildx-setup
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- id: docker-build
name: Build the image with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }}
- id: test-image
name: Test the image
run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
if: steps.docker-build.outputs.skipped != 'true'
# docker.io
- id: docker-io-login
name: Login to docker.io
uses: docker/login-action@v2
with:
registry: docker.io
username: ${{ secrets.dockerhub_username }}
password: ${{ secrets.dockerhub_password }}
if: steps.docker-build.outputs.skipped != 'true'
# quay.io
- id: quay-io-login
name: Login to Quay.io
uses: docker/login-action@v2
with:
registry: quay.io
username: ${{ secrets.quayio_username }}
password: ${{ secrets.quayio_password }}
if: steps.docker-build.outputs.skipped != 'true'
# ghcr.io
- id: ghcr-io-login
name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
if: steps.docker-build.outputs.skipped != 'true'
- id: build-and-push
name: Push the image
run: ${{ matrix.build_cmd }} --push
if: steps.docker-build.outputs.skipped != 'true'
env:
BUILDX_PLATFORM: ${{ matrix.platform }}
BUILDX_BUILDER_NAME: ${{ steps.buildx-setup.outputs.name }}

View File

@ -1,3 +1,4 @@
ignored:
- DL3006
- DL3018
- DL3008
- DL3003

.yamllint.yaml Normal file
View File

@ -0,0 +1,5 @@
---
rules:
line-length:
max: 120

View File

@ -1,45 +1,27 @@
ARG FROM
FROM ${FROM} as builder
RUN apk add --no-cache \
bash \
build-base \
cargo \
RUN export DEBIAN_FRONTEND=noninteractive \
&& apt-get update -qq \
&& apt-get upgrade \
--yes -qq --no-install-recommends \
&& apt-get install \
--yes -qq --no-install-recommends \
build-essential \
ca-certificates \
cmake \
cyrus-sasl-dev \
git \
graphviz \
jpeg-dev \
libevent-dev \
libffi-dev \
libxslt-dev \
make \
musl-dev \
openldap-dev \
postgresql-dev \
py3-pip \
libldap-dev \
libpq-dev \
libsasl2-dev \
libssl-dev \
python3-dev \
&& python3 -m venv /opt/netbox/venv \
&& /opt/netbox/venv/bin/python3 -m pip install --upgrade \
python3-pip \
python3-venv \
&& python3 -m venv /opt/netbox/venv \
&& /opt/netbox/venv/bin/python3 -m pip install --upgrade \
pip \
setuptools \
wheel
# Build libcrc32c for google-crc32c python module
RUN git clone https://github.com/google/crc32c \
&& cd crc32c \
&& git submodule update --init --recursive \
&& mkdir build \
&& cd build \
&& cmake \
-DCMAKE_BUILD_TYPE=Release \
-DCRC32C_BUILD_TESTS=no \
-DCRC32C_BUILD_BENCHMARKS=no \
-DBUILD_SHARED_LIBS=yes \
.. \
&& make all install
ARG NETBOX_PATH
COPY ${NETBOX_PATH}/requirements.txt requirements-container.txt /
RUN /opt/netbox/venv/bin/pip install \
@ -53,35 +35,38 @@ RUN /opt/netbox/venv/bin/pip install \
ARG FROM
FROM ${FROM} as main
RUN apk add --no-cache \
bash \
RUN export DEBIAN_FRONTEND=noninteractive \
&& apt-get update -qq \
&& apt-get upgrade \
--yes -qq --no-install-recommends \
&& apt-get install \
--yes -qq --no-install-recommends \
ca-certificates \
curl \
graphviz \
libevent \
libffi \
libjpeg-turbo \
libxslt \
libldap-common \
libpq5 \
openssl \
postgresql-client \
postgresql-libs \
py3-pip \
python3 \
python3-distutils \
tini \
unit \
unit-python3
&& curl -sL https://nginx.org/keys/nginx_signing.key \
> /etc/apt/trusted.gpg.d/nginx.asc && \
echo "deb https://packages.nginx.org/unit/ubuntu/ jammy unit" \
> /etc/apt/sources.list.d/unit.list \
&& apt-get update -qq \
&& apt-get install \
--yes -qq --no-install-recommends \
unit=1.27.0-1~jammy \
unit-python3.10=1.27.0-1~jammy \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /opt
COPY --from=builder /usr/local/lib/libcrc32c.* /usr/local/lib/
COPY --from=builder /usr/local/include/crc32c /usr/local/include
COPY --from=builder /usr/local/lib/cmake/Crc32c /usr/local/lib/cmake/
COPY --from=builder /opt/netbox/venv /opt/netbox/venv
ARG NETBOX_PATH
COPY ${NETBOX_PATH} /opt/netbox
COPY docker/configuration.docker.py /opt/netbox/netbox/netbox/configuration.py
COPY docker/ldap_config.docker.py /opt/netbox/netbox/netbox/ldap_config.py
COPY docker/docker-entrypoint.sh /opt/netbox/docker-entrypoint.sh
COPY docker/housekeeping.sh /opt/netbox/housekeeping.sh
COPY docker/launch-netbox.sh /opt/netbox/launch-netbox.sh
@ -101,26 +86,15 @@ RUN mkdir -p static /opt/unit/state/ /opt/unit/tmp/ \
--config-file /opt/netbox/mkdocs.yml --site-dir /opt/netbox/netbox/project-static/docs/ \
&& SECRET_KEY="dummy" /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py collectstatic --no-input
ENTRYPOINT [ "/sbin/tini", "--" ]
ENV LANG=C.UTF-8 PATH=/opt/netbox/venv/bin:$PATH
ENTRYPOINT [ "/usr/bin/tini", "--" ]
CMD [ "/opt/netbox/docker-entrypoint.sh", "/opt/netbox/launch-netbox.sh" ]
LABEL ORIGINAL_TAG="" \
NETBOX_GIT_BRANCH="" \
NETBOX_GIT_REF="" \
NETBOX_GIT_URL="" \
# See http://label-schema.org/rc1/#build-time-labels
# Also https://microbadger.com/labels
org.label-schema.schema-version="1.0" \
org.label-schema.build-date="" \
org.label-schema.name="NetBox Docker" \
org.label-schema.description="A container based distribution of NetBox, the free and open IPAM and DCIM solution." \
org.label-schema.vendor="The netbox-docker contributors." \
org.label-schema.url="https://github.com/netbox-community/netbox-docker" \
org.label-schema.usage="https://github.com/netbox-community/netbox-docker/wiki" \
org.label-schema.vcs-url="https://github.com/netbox-community/netbox-docker.git" \
org.label-schema.vcs-ref="" \
org.label-schema.version="snapshot" \
LABEL netbox.original-tag="" \
netbox.git-branch="" \
netbox.git-ref="" \
netbox.git-url="" \
# See https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
org.opencontainers.image.created="" \
org.opencontainers.image.title="NetBox Docker" \
@ -132,17 +106,4 @@ LABEL ORIGINAL_TAG="" \
org.opencontainers.image.documentation="https://github.com/netbox-community/netbox-docker/wiki" \
org.opencontainers.image.source="https://github.com/netbox-community/netbox-docker.git" \
org.opencontainers.image.revision="" \
org.opencontainers.image.version="snapshot"
#####
## LDAP specific configuration
#####
FROM main as ldap
RUN apk add --no-cache \
libsasl \
libldap \
util-linux
COPY docker/ldap_config.docker.py /opt/netbox/netbox/netbox/ldap_config.py
org.opencontainers.image.version=""
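The new ENV line above puts /opt/netbox/venv/bin on PATH, so management commands no longer need the interpreter's full venv path. A minimal sketch, assuming the compose stack from this repository is running and the service is named netbox; nbshell is just an example command:

# previously the venv interpreter had to be spelled out
docker compose exec netbox /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py nbshell
# with /opt/netbox/venv/bin on PATH, python3 already resolves to the venv interpreter
docker compose exec netbox python3 /opt/netbox/netbox/manage.py nbshell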

View File

@ -1 +1 @@
1.6.0
2.1.0

View File

@ -1,8 +0,0 @@
#!/bin/bash
push_image_to_registry() {
local target_tag=$1
echo "⏫ Pushing '${target_tag}'"
$DRY docker push "${target_tag}"
echo "✅ Finished pushing the Docker image '${target_tag}'."
}

build.sh
View File

@ -6,10 +6,9 @@ echo "▶️ $0 $*"
set -e
if [ "${1}x" == "x" ] || [ "${1}" == "--help" ] || [ "${1}" == "-h" ]; then
echo "Usage: ${0} <branch> [--push|--push-only]"
echo "Usage: ${0} <branch> [--push]"
echo " branch The branch or tag to build. Required."
echo " --push Pushes the built Docker image to the registry."
echo " --push-only Only pushes the Docker image to the registry, but does not build it."
echo ""
echo "You can use the following ENV variables to customize the build:"
echo " SRC_ORG Which fork of netbox to use (i.e. github.com/\${SRC_ORG}/\${SRC_REPO})."
@ -30,15 +29,10 @@ if [ "${1}x" == "x" ] || [ "${1}" == "--help" ] || [ "${1}" == "-h" ]; then
echo " When <branch>=master: latest"
echo " When <branch>=develop: snapshot"
echo " Else: same as <branch>"
echo " DOCKER_REGISTRY The Docker repository's registry (i.e. '\${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}'')"
echo " IMAGE_NAMES The names used for the image including the registry"
echo " Used for tagging the image."
echo " Default: docker.io"
echo " DOCKER_ORG The Docker repository's organisation (i.e. '\${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}'')"
echo " Used for tagging the image."
echo " Default: netboxcommunity"
echo " DOCKER_REPO The Docker repository's name (i.e. '\${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}'')"
echo " Used for tagging the image."
echo " Default: netbox"
echo " Default: docker.io/netboxcommunity/netbox"
echo " Example: 'docker.io/netboxcommunity/netbox quay.io/netboxcommunity/netbox'"
echo " DOCKER_TAG The name of the tag which is applied to the image."
echo " Useful for pushing into another registry than hub.docker.com."
echo " Default: \${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}:\${TAG}"
@ -49,10 +43,25 @@ if [ "${1}x" == "x" ] || [ "${1}" == "--help" ] || [ "${1}" == "-h" ]; then
echo " DOCKERFILE The name of Dockerfile to use."
echo " Default: Dockerfile"
echo " DOCKER_FROM The base image to use."
echo " Default: 'alpine:3.14'"
echo " DOCKER_TARGET A specific target to build."
echo " It's currently not possible to pass multiple targets."
echo " Default: main ldap"
echo " Default: 'ubuntu:22.04'"
echo " BUILDX_PLATFORMS"
echo " Specifies the platform(s) to build the image for."
echo " Example: 'linux/amd64,linux/arm64'"
echo " Default: 'linux/amd64'"
echo " BUILDX_BUILDER_NAME"
echo " If defined, the image build will be assigned to the given builder."
echo " If you specify this variable, make sure that the builder exists."
echo " If this value is not defined, a new builx builder with the directory name of the"
echo " current directory (i.e. '$(basename "${PWD}")') is created."
echo " Example: 'clever_lovelace'"
echo " Default: undefined"
echo " BUILDX_REMOVE_BUILDER"
echo " If defined (and only if BUILDX_BUILDER_NAME is undefined),"
echo " then the buildx builder created by this script will be removed after use."
echo " This is useful if you build NetBox Docker on an automated system that does"
echo " not manage the builders for you."
echo " Example: 'on'"
echo " Default: undefined"
echo " HTTP_PROXY The proxy to use for http requests."
echo " Example: http://proxy.domain.tld:3128"
echo " Default: undefined"
@ -97,6 +106,9 @@ fi
source ./build-functions/gh-functions.sh
IMAGE_NAMES="${IMAGE_NAMES-docker.io/netboxcommunity/netbox}"
IFS=' ' read -ra IMAGE_NAMES <<<"${IMAGE_NAMES}"
###
# Enabling dry-run mode
###
@ -170,7 +182,7 @@ fi
# Determining the value for DOCKER_FROM
###
if [ -z "$DOCKER_FROM" ]; then
DOCKER_FROM="alpine:3.14"
DOCKER_FROM="ubuntu:22.04"
fi
###
@ -178,7 +190,7 @@ fi
###
BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M+00:00')"
if [ -d ".git" ]; then
if [ -d ".git" ] && [ -z "${SKIP_GIT}" ]; then
GIT_REF="$(git rev-parse HEAD)"
fi
@ -186,7 +198,7 @@ fi
PROJECT_VERSION="${PROJECT_VERSION-$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' VERSION)}"
# Get the Git information from the netbox directory
if [ -d "${NETBOX_PATH}/.git" ]; then
if [ -d "${NETBOX_PATH}/.git" ] && [ -z "${SKIP_GIT}" ]; then
NETBOX_GIT_REF=$(
cd "${NETBOX_PATH}"
git rev-parse HEAD
@ -220,193 +232,194 @@ develop)
esac
###
# Determine targets to build
# composing the final TARGET_DOCKER_TAG
###
DEFAULT_DOCKER_TARGETS=("main" "ldap")
DOCKER_TARGETS=("${DOCKER_TARGET:-"${DEFAULT_DOCKER_TARGETS[@]}"}")
echo "🏭 Building the following targets:" "${DOCKER_TARGETS[@]}"
TARGET_DOCKER_TAG="${DOCKER_TAG-${TAG}}"
TARGET_DOCKER_TAG_PROJECT="${TARGET_DOCKER_TAG}-${PROJECT_VERSION}"
###
# composing the additional DOCKER_SHORT_TAG,
# i.e. "v2.6.1" becomes "v2.6",
# which is only relevant for version tags
# Also let "latest" follow the highest version
###
if [[ "${TAG}" =~ ^v([0-9]+)\.([0-9]+)\.[0-9]+$ ]]; then
MAJOR=${BASH_REMATCH[1]}
MINOR=${BASH_REMATCH[2]}
TARGET_DOCKER_SHORT_TAG="${DOCKER_SHORT_TAG-v${MAJOR}.${MINOR}}"
TARGET_DOCKER_LATEST_TAG="latest"
TARGET_DOCKER_SHORT_TAG_PROJECT="${TARGET_DOCKER_SHORT_TAG}-${PROJECT_VERSION}"
TARGET_DOCKER_LATEST_TAG_PROJECT="${TARGET_DOCKER_LATEST_TAG}-${PROJECT_VERSION}"
fi
IMAGE_NAME_TAGS=()
for IMAGE_NAME in "${IMAGE_NAMES[@]}"; do
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_TAG}")
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_TAG_PROJECT}")
done
if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
for IMAGE_NAME in "${IMAGE_NAMES[@]}"; do
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_SHORT_TAG}")
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_SHORT_TAG_PROJECT}")
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_LATEST_TAG}")
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_LATEST_TAG_PROJECT}")
done
fi
gh_env "FINAL_DOCKER_TAG=${IMAGE_NAME_TAGS[0]}"
###
# Checking if the build is necessary,
# meaning build only if one of those values changed:
# - base image digest
# - netbox git ref (Label: netbox.git-ref)
# - netbox-docker git ref (Label: org.opencontainers.image.revision)
###
# Load information from registry (only for docker.io)
SHOULD_BUILD="false"
BUILD_REASON=""
if [ -z "${GH_ACTION}" ]; then
# Assuming non-GitHub builds should always proceed
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} interactive"
elif [[ "${IMAGE_NAME_TAGS[0]}" = docker.io* ]]; then
source ./build-functions/get-public-image-config.sh
IFS=':' read -ra DOCKER_FROM_SPLIT <<<"${DOCKER_FROM}"
if ! [[ ${DOCKER_FROM_SPLIT[0]} =~ .*/.* ]]; then
# Need to use "library/..." for images that have no two-part name
DOCKER_FROM_SPLIT[0]="library/${DOCKER_FROM_SPLIT[0]}"
fi
IFS='/' read -ra ORG_REPO <<<"${IMAGE_NAMES[0]}"
echo "Checking labels for '${ORG_REPO[1]}' and '${ORG_REPO[2]}'"
BASE_LAST_LAYER=$(get_image_last_layer "${DOCKER_FROM_SPLIT[0]}" "${DOCKER_FROM_SPLIT[1]}")
mapfile -t IMAGES_LAYERS_OLD < <(get_image_layers "${ORG_REPO[1]}"/"${ORG_REPO[2]}" "${TAG}")
NETBOX_GIT_REF_OLD=$(get_image_label netbox.git-ref "${ORG_REPO[1]}"/"${ORG_REPO[2]}" "${TAG}")
GIT_REF_OLD=$(get_image_label org.opencontainers.image.revision "${ORG_REPO[1]}"/"${ORG_REPO[2]}" "${TAG}")
if ! printf '%s\n' "${IMAGES_LAYERS_OLD[@]}" | grep -q -P "^${BASE_LAST_LAYER}\$"; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} debian"
fi
if [ "${NETBOX_GIT_REF}" != "${NETBOX_GIT_REF_OLD}" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} netbox"
fi
if [ "${GIT_REF}" != "${GIT_REF_OLD}" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} netbox-docker"
fi
else
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} no-check"
fi
if [ "${SHOULD_BUILD}" != "true" ]; then
echo "Build skipped because sources didn't change"
echo "::set-output name=skipped::true"
exit 0 # Nothing to do -> exit
else
gh_echo "::set-output name=skipped::false"
fi
gh_echo "::endgroup::"
###
# Build each target
# Build the image
###
export DOCKER_BUILDKIT=${DOCKER_BUILDKIT-1}
for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
gh_echo "::group::🏗 Building the target '${DOCKER_TARGET}'"
echo "🏗 Building the target '${DOCKER_TARGET}'"
###
# composing the final TARGET_DOCKER_TAG
###
TARGET_DOCKER_TAG="${DOCKER_TAG-${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_REPO}:${TAG}}"
if [ "${DOCKER_TARGET}" != "main" ]; then
TARGET_DOCKER_TAG="${TARGET_DOCKER_TAG}-${DOCKER_TARGET}"
fi
TARGET_DOCKER_TAG_PROJECT="${TARGET_DOCKER_TAG}-${PROJECT_VERSION}"
gh_env "FINAL_DOCKER_TAG=${TARGET_DOCKER_TAG_PROJECT}"
gh_echo "::set-output name=skipped::false"
###
# composing the additional DOCKER_SHORT_TAG,
# i.e. "v2.6.1" becomes "v2.6",
# which is only relevant for version tags
# Also let "latest" follow the highest version
###
if [[ "${TAG}" =~ ^v([0-9]+)\.([0-9]+)\.[0-9]+$ ]]; then
MAJOR=${BASH_REMATCH[1]}
MINOR=${BASH_REMATCH[2]}
TARGET_DOCKER_SHORT_TAG="${DOCKER_SHORT_TAG-${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_REPO}:v${MAJOR}.${MINOR}}"
TARGET_DOCKER_LATEST_TAG="${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_REPO}:latest"
if [ "${DOCKER_TARGET}" != "main" ]; then
TARGET_DOCKER_SHORT_TAG="${TARGET_DOCKER_SHORT_TAG}-${DOCKER_TARGET}"
TARGET_DOCKER_LATEST_TAG="${TARGET_DOCKER_LATEST_TAG}-${DOCKER_TARGET}"
fi
TARGET_DOCKER_SHORT_TAG_PROJECT="${TARGET_DOCKER_SHORT_TAG}-${PROJECT_VERSION}"
TARGET_DOCKER_LATEST_TAG_PROJECT="${TARGET_DOCKER_LATEST_TAG}-${PROJECT_VERSION}"
fi
###
# Proceeding to build stage, except if `--push-only` is passed
###
if [ "${2}" != "--push-only" ]; then
###
# Checking if the build is necessary,
# meaning build only if one of those values changed:
# - Python base image digest (Label: PYTHON_BASE_DIGEST)
# - netbox git ref (Label: NETBOX_GIT_REF)
# - netbox-docker git ref (Label: org.label-schema.vcs-ref)
###
# Load information from registry (only for docker.io)
SHOULD_BUILD="false"
BUILD_REASON=""
if [ -z "${GH_ACTION}" ]; then
# Assuming non-GitHub builds should always proceed
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} interactive"
elif [ "$DOCKER_REGISTRY" = "docker.io" ]; then
source ./build-functions/get-public-image-config.sh
IFS=':' read -ra DOCKER_FROM_SPLIT <<<"${DOCKER_FROM}"
if ! [[ ${DOCKER_FROM_SPLIT[0]} =~ .*/.* ]]; then
# Need to use "library/..." for images that have no two-part name
DOCKER_FROM_SPLIT[0]="library/${DOCKER_FROM_SPLIT[0]}"
fi
PYTHON_LAST_LAYER=$(get_image_last_layer "${DOCKER_FROM_SPLIT[0]}" "${DOCKER_FROM_SPLIT[1]}")
mapfile -t IMAGES_LAYERS_OLD < <(get_image_layers "${DOCKER_ORG}"/"${DOCKER_REPO}" "${TAG}")
NETBOX_GIT_REF_OLD=$(get_image_label NETBOX_GIT_REF "${DOCKER_ORG}"/"${DOCKER_REPO}" "${TAG}")
GIT_REF_OLD=$(get_image_label org.label-schema.vcs-ref "${DOCKER_ORG}"/"${DOCKER_REPO}" "${TAG}")
if ! printf '%s\n' "${IMAGES_LAYERS_OLD[@]}" | grep -q -P "^${PYTHON_LAST_LAYER}\$"; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} alpine"
fi
if [ "${NETBOX_GIT_REF}" != "${NETBOX_GIT_REF_OLD}" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} netbox"
fi
if [ "${GIT_REF}" != "${GIT_REF_OLD}" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} netbox-docker"
fi
else
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} no-check"
fi
###
# Composing all arguments for `docker build`
###
DOCKER_BUILD_ARGS=(
--pull
--target "${DOCKER_TARGET}"
-f "${DOCKERFILE}"
-t "${TARGET_DOCKER_TAG}"
-t "${TARGET_DOCKER_TAG_PROJECT}"
)
if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
DOCKER_BUILD_ARGS+=(-t "${TARGET_DOCKER_SHORT_TAG}")
DOCKER_BUILD_ARGS+=(-t "${TARGET_DOCKER_SHORT_TAG_PROJECT}")
DOCKER_BUILD_ARGS+=(-t "${TARGET_DOCKER_LATEST_TAG}")
DOCKER_BUILD_ARGS+=(-t "${TARGET_DOCKER_LATEST_TAG_PROJECT}")
fi
# --label
DOCKER_BUILD_ARGS+=(
--label "ORIGINAL_TAG=${TARGET_DOCKER_TAG_PROJECT}"
--label "org.label-schema.build-date=${BUILD_DATE}"
--label "org.opencontainers.image.created=${BUILD_DATE}"
--label "org.label-schema.version=${PROJECT_VERSION}"
--label "org.opencontainers.image.version=${PROJECT_VERSION}"
)
if [ -d ".git" ]; then
DOCKER_BUILD_ARGS+=(
--label "org.label-schema.vcs-ref=${GIT_REF}"
--label "org.opencontainers.image.revision=${GIT_REF}"
)
fi
if [ -d "${NETBOX_PATH}/.git" ]; then
DOCKER_BUILD_ARGS+=(
--label "NETBOX_GIT_BRANCH=${NETBOX_GIT_BRANCH}"
--label "NETBOX_GIT_REF=${NETBOX_GIT_REF}"
--label "NETBOX_GIT_URL=${NETBOX_GIT_URL}"
)
fi
if [ -n "${BUILD_REASON}" ]; then
BUILD_REASON=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<"$BUILD_REASON")
DOCKER_BUILD_ARGS+=(--label "BUILD_REASON=${BUILD_REASON}")
fi
# --build-arg
DOCKER_BUILD_ARGS+=(--build-arg "NETBOX_PATH=${NETBOX_PATH}")
if [ -n "${DOCKER_FROM}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "FROM=${DOCKER_FROM}")
fi
# shellcheck disable=SC2031
if [ -n "${HTTP_PROXY}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "http_proxy=${HTTP_PROXY}")
DOCKER_BUILD_ARGS+=(--build-arg "https_proxy=${HTTPS_PROXY}")
fi
if [ -n "${NO_PROXY}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "no_proxy=${NO_PROXY}")
fi
###
# Building the docker image
###
if [ "${SHOULD_BUILD}" == "true" ]; then
echo "🐳 Building the Docker image '${TARGET_DOCKER_TAG_PROJECT}'."
echo " Build reason set to: ${BUILD_REASON}"
$DRY docker build "${DOCKER_BUILD_ARGS[@]}" .
echo "✅ Finished building the Docker images '${TARGET_DOCKER_TAG_PROJECT}'"
echo "🔎 Inspecting labels on '${TARGET_DOCKER_TAG_PROJECT}'"
$DRY docker inspect "${TARGET_DOCKER_TAG_PROJECT}" --format "{{json .Config.Labels}}"
else
echo "Build skipped because sources didn't change"
echo "::set-output name=skipped::true"
fi
fi
###
# Pushing the docker images if either `--push` or `--push-only` are passed
###
if [ "${2}" == "--push" ] || [ "${2}" == "--push-only" ]; then
source ./build-functions/docker-functions.sh
push_image_to_registry "${TARGET_DOCKER_TAG}"
push_image_to_registry "${TARGET_DOCKER_TAG_PROJECT}"
if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
push_image_to_registry "${TARGET_DOCKER_SHORT_TAG}"
push_image_to_registry "${TARGET_DOCKER_SHORT_TAG_PROJECT}"
push_image_to_registry "${TARGET_DOCKER_LATEST_TAG}"
push_image_to_registry "${TARGET_DOCKER_LATEST_TAG_PROJECT}"
fi
fi
gh_echo "::endgroup::"
gh_echo "::group::🏗 Building the image"
###
# Composing all arguments for `docker build`
###
DOCKER_BUILD_ARGS=(
--pull
--target main
-f "${DOCKERFILE}"
)
for IMAGE_NAME in "${IMAGE_NAME_TAGS[@]}"; do
DOCKER_BUILD_ARGS+=(-t "${IMAGE_NAME}")
done
# --label
DOCKER_BUILD_ARGS+=(
--label "netbox.original-tag=${TARGET_DOCKER_TAG_PROJECT}"
--label "org.opencontainers.image.created=${BUILD_DATE}"
--label "org.opencontainers.image.version=${PROJECT_VERSION}"
)
if [ -d ".git" ] && [ -z "${SKIP_GIT}" ]; then
DOCKER_BUILD_ARGS+=(
--label "org.opencontainers.image.revision=${GIT_REF}"
)
fi
if [ -d "${NETBOX_PATH}/.git" ] && [ -z "${SKIP_GIT}" ]; then
DOCKER_BUILD_ARGS+=(
--label "netbox.git-branch=${NETBOX_GIT_BRANCH}"
--label "netbox.git-ref=${NETBOX_GIT_REF}"
--label "netbox.git-url=${NETBOX_GIT_URL}"
)
fi
if [ -n "${BUILD_REASON}" ]; then
BUILD_REASON=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<"$BUILD_REASON")
DOCKER_BUILD_ARGS+=(--label "netbox.build-reason=${BUILD_REASON}")
fi
# --build-arg
DOCKER_BUILD_ARGS+=(--build-arg "NETBOX_PATH=${NETBOX_PATH}")
if [ -n "${DOCKER_FROM}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "FROM=${DOCKER_FROM}")
fi
# shellcheck disable=SC2031
if [ -n "${HTTP_PROXY}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "http_proxy=${HTTP_PROXY}")
DOCKER_BUILD_ARGS+=(--build-arg "https_proxy=${HTTPS_PROXY}")
fi
if [ -n "${NO_PROXY}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "no_proxy=${NO_PROXY}")
fi
DOCKER_BUILD_ARGS+=(--platform "${BUILDX_PLATFORM-linux/amd64}")
if [ "${2}" == "--push" ]; then
# output type=docker does not work with pushing
DOCKER_BUILD_ARGS+=(
--output=type=image
--push
)
else
DOCKER_BUILD_ARGS+=(
--output=type=docker
)
fi
###
# Building the docker image
###
if [ -z "${BUILDX_BUILDER_NAME}" ]; then
BUILDX_BUILDER_NAME="$(basename "${PWD}")"
fi
if ! docker buildx ls | grep --quiet --word-regexp "${BUILDX_BUILDER_NAME}"; then
echo "👷 Creating new Buildx Builder '${BUILDX_BUILDER_NAME}'"
$DRY docker buildx create --name "${BUILDX_BUILDER_NAME}"
BUILDX_BUILDER_CREATED="yes"
fi
echo "🐳 Building the Docker image '${TARGET_DOCKER_TAG_PROJECT}'."
echo " Build reason set to: ${BUILD_REASON}"
$DRY docker buildx \
--builder "${BUILDX_BUILDER_NAME}" \
build \
"${DOCKER_BUILD_ARGS[@]}" \
.
echo "✅ Finished building the Docker images"
gh_echo "::endgroup::" # End group for Build
gh_echo "::group::🏗 Image Labels"
echo "🔎 Inspecting labels on '${IMAGE_NAME_TAGS[0]}'"
$DRY docker inspect "${IMAGE_NAME_TAGS[0]}" --format "{{json .Config.Labels}}" | jq
gh_echo "::endgroup::"
gh_echo "::group::🏗 Clean up"
if [ -n "${BUILDX_REMOVE_BUILDER}" ] && [ "${BUILDX_BUILDER_CREATED}" == "yes" ]; then
echo "👷 Removing Buildx Builder '${BUILDX_BUILDER_NAME}'"
$DRY docker buildx rm "${BUILDX_BUILDER_NAME}"
fi
gh_echo "::endgroup::"
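Putting the new options of build.sh together, a multi-platform build could be started roughly like this (an illustrative sketch; the branch, platforms and variable values are assumptions, not taken from the repository documentation):

# cross-build for amd64 and arm64, push the result, and remove the
# buildx builder that the script created for this run
BUILDX_PLATFORM=linux/amd64,linux/arm64 BUILDX_REMOVE_BUILDER=on ./build.sh develop --push
# skip all git operations, e.g. when building from an exported source tree
SKIP_GIT=true ./build.sh master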

View File

@ -120,6 +120,11 @@ CORS_ORIGIN_ALLOW_ALL = environ.get('CORS_ORIGIN_ALLOW_ALL', 'False').lower() ==
CORS_ORIGIN_WHITELIST = list(filter(None, environ.get('CORS_ORIGIN_WHITELIST', 'https://localhost').split(' ')))
CORS_ORIGIN_REGEX_WHITELIST = [re.compile(r) for r in list(filter(None, environ.get('CORS_ORIGIN_REGEX_WHITELIST', '').split(' ')))]
# Cross-Site-Request-Forgery-Attack settings. If Netbox is sitting behind a reverse proxy, you might need to set the CSRF_TRUSTED_ORIGINS flag.
# Django 4.0 requires the URL scheme to be specified in this setting. An example environment variable could look like:
# CSRF_TRUSTED_ORIGINS=https://demo.netbox.dev http://demo.netbox.dev
CSRF_TRUSTED_ORIGINS = list(filter(None, environ.get('CSRF_TRUSTED_ORIGINS', '').split(' ')))
# Set to True to enable server debugging. WARNING: Debugging introduces a substantial performance penalty and may reveal
# sensitive information about your installation. Only enable debugging while performing testing. Never enable debugging
# on a production system.
@ -165,6 +170,9 @@ LOGIN_TIMEOUT = int(environ.get('LOGIN_TIMEOUT', 1209600))
# Setting this to True will display a "maintenance mode" banner at the top of every page.
MAINTENANCE_MODE = environ.get('MAINTENANCE_MODE', 'False').lower() == 'true'
# Maps provider
MAPS_URL = environ.get('MAPS_URL', None)
# An API consumer can request an arbitrary number of objects by appending the "limit" parameter to the URL (e.g.
# "?limit=1000"). This setting defines the maximum limit. Setting it to 0 or None will allow an API consumer to request
# all objects by specifying "?limit=0".
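Both settings added above are read from the environment, so in a compose deployment they would typically go into the NetBox env file. A hypothetical env/netbox.env fragment (hostnames and URL are placeholders, not values from this repository):

# space-separated list; Django 4.0 requires the scheme to be included
CSRF_TRUSTED_ORIGINS=https://netbox.example.com http://netbox.example.com
# base URL used to build map links from physical addresses
MAPS_URL=https://maps.google.com/?q=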

View File

@ -21,14 +21,14 @@ services:
image: postgres:14-alpine
env_file: env/postgres.env
redis:
image: redis:6-alpine
image: redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
- redis-server --appendonly yes --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
env_file: env/redis.env
redis-cache:
image: redis:6-alpine
image: redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env

View File

@ -1,7 +1,7 @@
version: '3.4'
services:
netbox: &netbox
image: netboxcommunity/netbox:${VERSION-v3.1-1.6.0}
image: netboxcommunity/netbox:${VERSION-v3.2-2.1.0}
depends_on:
- postgres
- redis
@ -42,7 +42,7 @@ services:
# redis
redis:
image: redis:6-alpine
image: redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
@ -51,7 +51,7 @@ services:
volumes:
- netbox-redis-data:/data
redis-cache:
image: redis:6-alpine
image: redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env

View File

@ -11,7 +11,7 @@
"uri": "/static/*"
},
"action": {
"share": "/opt/netbox/netbox"
"share": "/opt/netbox/netbox${uri}"
}
},

initializers/cables.yml Normal file
View File

@ -0,0 +1,71 @@
# # Required parameters for termination X ('a' or 'b'):
# #
# # ```
# # termination_x_name -> name of interface
# # termination_x_device -> name of the device interface belongs to
# # termination_x_class -> required if different than 'Interface' which is the default
# # ```
# #
# # Supported termination classes: Interface, ConsolePort, ConsoleServerPort, FrontPort, RearPort, PowerPort, PowerOutlet
# #
# #
# # If a termination is a circuit then the required parameter is termination_x_circuit.
# # Required parameters for a circuit termination:
# #
# # ```
# # termination_x_circuit:
# # term_side -> termination side of a circuit. Must be A or B
# # cid -> circuit ID value
# # site OR provider_network -> name of Site or ProviderNetwork respectively. If both provided, Site takes precedence
# # ```
# #
# # If a termination is a power feed then the required parameter is termination_x_feed.
# #
# # ```
# # termination_x_feed:
# # name -> name of the PowerFeed object
# # power_panel:
# # name -> name of the PowerPanel the PowerFeed is attached to
# # site -> name of the Site in which the PowerPanel is present
# # ```
# #
# # Any other Cable parameters supported by Netbox are supported as the top level keys, e.g. 'type', 'status', etc.
# #
# # - termination_a_name: console
# # termination_a_device: spine
# # termination_a_class: ConsolePort
# # termination_b_name: tty9
# # termination_b_device: console-server
# # termination_b_class: ConsoleServerPort
# # type: cat6
# #
# - termination_a_name: to-server02
# termination_a_device: server01
# termination_b_name: to-server01
# termination_b_device: server02
# status: planned
# type: mmf
# - termination_a_name: eth0
# termination_a_device: server02
# termination_b_circuit:
# term_side: A
# cid: Circuit_ID-1
# site: AMS 1
# type: cat6
# - termination_a_name: psu0
# termination_a_device: server04
# termination_a_class: PowerPort
# termination_b_feed:
# name: power feed 1
# power_panel:
# name: power panel AMS 1
# site: AMS 1
# - termination_a_name: outlet1
# termination_a_device: server04
# termination_a_class: PowerOutlet
# termination_b_name: psu1
# termination_b_device: server04
# termination_b_class: PowerPort

View File

@ -0,0 +1,7 @@
# - name: Network-Team
# slug: network-team
# description: This is a new contact group for the Network-Team
# - name: New Contact Group
# slug: new-contact-group
# description: This is a new contact group sub under of Network-Team
# parent: Network-Team

View File

@ -0,0 +1,3 @@
# - name: New Contact Role
# slug: new-contact-role
# description: This is a new contact role description

initializers/contacts.yml Normal file
View File

@ -0,0 +1,20 @@
# - name: Lee Widget
# title: CEO of Widget Corp
# phone: 221-555-1212
# email: widgetCEO@widgetcorp.com
# address: 1200 Nowhere Blvd, Scranton NJ, 555111
# comments: This is a very important contact
# - name: Ali Gator
# group: Network-Team
# title: Consultant for Widget Corp
# phone: 221-555-1213
# email: Consultant@widgetcorp.com
# address: 1200 Nowhere Blvd, Scranton NJ, 555111
# comments: This is a very important contact
# - name: Karlchen Maier
# group: New Contact Group
# title: COO of Widget Corp
# phone: 221-555-1214
# email: Karlchen@widgetcorp.com
# address: 1200 Nowhere Blvd, Scranton NJ, 555111
# comments: This is a very important contact

View File

@ -8,11 +8,28 @@
##
## Examples:
# - device: server01
# name: ath0
# type: 1000base-t
# lag: ae0
# bridge: br0
# - device: server01
# name: ath1
# type: 1000base-t
# parent: ath0
# - device: server01
# enabled: true
# type: virtual
# type: 1000base-x-sfp
# name: to-server02
# - device: server02
# enabled: true
# type: virtual
# type: 1000base-x-sfp
# name: to-server01
# - device: server02
# enabled: true
# type: 1000base-t
# name: eth0
# - device: server02
# enabled: true
# type: virtual
# name: loopback

View File

@ -21,3 +21,37 @@
# slug: other
# custom_field_data:
# text_field: Description
# interfaces:
# - name: eth0
# type: 1000base-t
# mgmt_only: True
# - name: eth1
# type: 1000base-t
# console_server_ports:
# - name_template: ttyS[1-48]
# type: rj-45
# power_ports:
# - name_template: psu[0,1]
# type: iec-60320-c14
# maximum_draw: 35
# allocated_draw: 35
# front_ports:
# - name_template: front[1,2]
# type: 8p8c
# rear_port_template: rear[0,1]
# rear_port_position_template: "[1,2]"
# rear_ports:
# - name_template: rear[0,1]
# type: 8p8c
# positions_template: "[3,2]"
# device_bays:
# - name: bay0 # both non-template and template field specified; non-template field takes precedence
# name_template: bay[0-9]
# label: test0
# label_template: test[0-5,9,6-8]
# description: Test description
# power_outlets:
# - name_template: outlet[0,1]
# type: iec-60320-c5
# power_port: psu0
# feed_leg: B

View File

@ -4,6 +4,7 @@
# password: reader
# writer:
# password: writer
# api_token: "" # a token is generated automatically unless the value is explicitly set to empty
# jdoe:
# first_name: John
# last_name: Doe
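In contrast to the empty value above, which suppresses automatic token creation, a fixed token can presumably be pinned per user. The value below is a made-up 40-character example, not a real credential:

# writer:
#   password: writer
#   api_token: 0123456789abcdef0123456789abcdef01234567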

View File

@ -1,6 +1,6 @@
django-auth-ldap==4.0.0
django-auth-ldap==4.1.0
django-storages[azure,boto3,dropbox,google,libcloud,sftp]==1.12.3
google-crc32c==1.3.0
napalm==3.3.1
napalm==4.0.0
psycopg2==2.9.3
social-auth-core[openidconnect]==4.3.0
ruamel.yaml==0.17.21
tzdata==2021.5

View File

@ -9,13 +9,17 @@ if users is None:
sys.exit()
for username, user_details in users.items():
if not User.objects.filter(username=username):
user = User.objects.create_user(
username=username,
password=user_details.get("password", 0) or User.objects.make_random_password(),
)
api_token = user_details.pop("api_token", Token.generate_key())
password = user_details.pop("password", User.objects.make_random_password())
user, created = User.objects.get_or_create(username=username, defaults=user_details)
if created:
user.set_password(password)
user.save()
if api_token:
Token.objects.get_or_create(user=user, key=api_token)
print("👤 Created user", username)
if user_details.get("api_token", 0):
Token.objects.create(user=user, key=user_details["api_token"])

View File

@ -14,9 +14,11 @@ for permission_name, permission_details in object_permissions.items():
object_permission, created = ObjectPermission.objects.get_or_create(
name=permission_name,
description=permission_details["description"],
enabled=permission_details["enabled"],
actions=permission_details["actions"],
defaults={
"description": permission_details["description"],
"enabled": permission_details["enabled"],
"actions": permission_details["actions"],
},
)
if permission_details.get("object_types", 0):

View File

@ -2,7 +2,7 @@ import sys
from django.contrib.contenttypes.models import ContentType
from extras.models import CustomLink
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
custom_links = load_yaml("/opt/netbox/initializers/custom_links.yml")
@ -28,6 +28,8 @@ for link in custom_links:
)
continue
custom_link, created = CustomLink.objects.get_or_create(**link)
matching_params, defaults = split_params(link)
custom_link, created = CustomLink.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🔗 Created Custom Link '{0}'".format(custom_link.name))
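The split_params pattern introduced here recurs in all of the startup-script diffs that follow: the keys used to look up an existing object are separated from the values handed to get_or_create() as defaults, which is what makes re-running the initializers idempotent. A rough sketch of what such a helper can look like (an assumption for illustration, not the actual startup_script_utils implementation):

from typing import List, Optional, Tuple

def split_params(params: dict, unique_params: Optional[List[str]] = None) -> Tuple[dict, dict]:
    # Split params into lookup keys and get_or_create() defaults.
    if unique_params is None:
        unique_params = ["name", "slug"]
    matching_params = {k: v for k, v in params.items() if k in unique_params}
    defaults = {k: v for k, v in params.items() if k not in unique_params}
    return matching_params, defaults

# usage mirrors the diff above:
#   matching_params, defaults = split_params(link)
#   custom_link, created = CustomLink.objects.get_or_create(**matching_params, defaults=defaults)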

View File

@ -1,7 +1,7 @@
import sys
from extras.models import Tag
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
from utilities.choices import ColorChoices
tags = load_yaml("/opt/netbox/initializers/tags.yml")
@ -17,7 +17,8 @@ for params in tags:
if color in color_tpl:
params["color"] = color_tpl[0]
tag, created = Tag.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
tag, created = Tag.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🎨 Created Tag", tag.name)

View File

@ -2,7 +2,7 @@ import sys
from django.contrib.contenttypes.models import ContentType
from extras.models import Webhook
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
webhooks = load_yaml("/opt/netbox/initializers/webhooks.yml")
@ -26,7 +26,9 @@ for hook in webhooks:
except ContentType.DoesNotExist:
continue
webhook, created = Webhook.objects.get_or_create(**hook)
matching_params, defaults = split_params(hook)
webhook, created = Webhook.objects.get_or_create(**matching_params, defaults=defaults)
if created:
webhook.content_types.set(obj_type_ids)
webhook.save()

View File

@ -1,6 +1,6 @@
import sys
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
from tenancy.models import TenantGroup
tenant_groups = load_yaml("/opt/netbox/initializers/tenant_groups.yml")
@ -9,7 +9,8 @@ if tenant_groups is None:
sys.exit()
for params in tenant_groups:
tenant_group, created = TenantGroup.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
tenant_group, created = TenantGroup.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🔳 Created Tenant Group", tenant_group.name)

View File

@ -1,6 +1,11 @@
import sys
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant, TenantGroup
tenants = load_yaml("/opt/netbox/initializers/tenants.yml")
@ -20,9 +25,10 @@ for params in tenants:
params[assoc] = model.objects.get(**query)
tenant, created = Tenant.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
tenant, created = Tenant.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(tenant, custom_field_data)
print("👩‍💻 Created Tenant", tenant.name)
set_custom_fields_values(tenant, custom_field_data)

View File

@ -1,7 +1,7 @@
import sys
from dcim.models import Region
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
regions = load_yaml("/opt/netbox/initializers/regions.yml")
@ -19,7 +19,8 @@ for params in regions:
params[assoc] = model.objects.get(**query)
region, created = Region.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
region, created = Region.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🌐 Created region", region.name)

View File

@ -1,7 +1,12 @@
import sys
from dcim.models import Region, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
sites = load_yaml("/opt/netbox/initializers/sites.yml")
@ -21,9 +26,10 @@ for params in sites:
params[assoc] = model.objects.get(**query)
site, created = Site.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
site, created = Site.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(site, custom_field_data)
print("📍 Created site", site.name)
set_custom_fields_values(site, custom_field_data)

View File

@ -1,13 +1,14 @@
import sys
from dcim.models import Location, Site
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
rack_groups = load_yaml("/opt/netbox/initializers/locations.yml")
if rack_groups is None:
sys.exit()
match_params = ["name", "slug", "site"]
required_assocs = {"site": (Site, "name")}
for params in rack_groups:
@ -17,7 +18,8 @@ for params in rack_groups:
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
location, created = Location.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
location, created = Location.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🎨 Created location", location.name)

View File

@ -1,7 +1,7 @@
import sys
from dcim.models import RackRole
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
from utilities.choices import ColorChoices
rack_roles = load_yaml("/opt/netbox/initializers/rack_roles.yml")
@ -17,7 +17,8 @@ for params in rack_roles:
if color in color_tpl:
params["color"] = color_tpl[0]
rack_role, created = RackRole.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
rack_role, created = RackRole.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🎨 Created rack role", rack_role.name)

View File

@ -1,7 +1,12 @@
import sys
from dcim.models import Location, Rack, RackRole, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
racks = load_yaml("/opt/netbox/initializers/racks.yml")
@ -9,8 +14,8 @@ racks = load_yaml("/opt/netbox/initializers/racks.yml")
if racks is None:
sys.exit()
match_params = ["name", "site"]
required_assocs = {"site": (Site, "name")}
optional_assocs = {
"role": (RackRole, "name"),
"tenant": (Tenant, "name"),
@ -33,9 +38,10 @@ for params in racks:
params[assoc] = model.objects.get(**query)
rack, created = Rack.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
rack, created = Rack.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(rack, custom_field_data)
print("🔳 Created rack", rack.site, rack.name)
set_custom_fields_values(rack, custom_field_data)

View File

@ -1,15 +1,20 @@
import sys
from dcim.models import Location, PowerPanel, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
power_panels = load_yaml("/opt/netbox/initializers/power_panels.yml")
if power_panels is None:
sys.exit()
match_params = ["name", "site"]
required_assocs = {"site": (Site, "name")}
optional_assocs = {"location": (Location, "name")}
for params in power_panels:
@ -28,9 +33,10 @@ for params in power_panels:
params[assoc] = model.objects.get(**query)
power_panel, created = PowerPanel.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
power_panel, created = PowerPanel.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(power_panel, custom_field_data)
print("⚡ Created Power Panel", power_panel.site, power_panel.name)
set_custom_fields_values(power_panel, custom_field_data)

View File

@ -1,15 +1,20 @@
import sys
from dcim.models import PowerFeed, PowerPanel, Rack
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
power_feeds = load_yaml("/opt/netbox/initializers/power_feeds.yml")
if power_feeds is None:
sys.exit()
match_params = ["name", "power_panel"]
required_assocs = {"power_panel": (PowerPanel, "name")}
optional_assocs = {"rack": (Rack, "name")}
for params in power_feeds:
@ -28,9 +33,10 @@ for params in power_feeds:
params[assoc] = model.objects.get(**query)
power_feed, created = PowerFeed.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
power_feed, created = PowerFeed.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(power_feed, custom_field_data)
print("⚡ Created Power Feed", power_feed.name)
set_custom_fields_values(power_feed, custom_field_data)

View File

@ -1,7 +1,7 @@
import sys
from dcim.models import Manufacturer
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
manufacturers = load_yaml("/opt/netbox/initializers/manufacturers.yml")
@ -9,7 +9,8 @@ if manufacturers is None:
sys.exit()
for params in manufacturers:
manufacturer, created = Manufacturer.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
manufacturer, created = Manufacturer.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🏭 Created Manufacturer", manufacturer.name)

View File

@ -1,7 +1,7 @@
import sys
from dcim.models import DeviceRole
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
from utilities.choices import ColorChoices
device_roles = load_yaml("/opt/netbox/initializers/device_roles.yml")
@ -18,7 +18,8 @@ for params in device_roles:
if color in color_tpl:
params["color"] = color_tpl[0]
device_role, created = DeviceRole.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
device_role, created = DeviceRole.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🎨 Created device role", device_role.name)

View File

@ -1,20 +1,95 @@
import sys
from typing import List
from dcim.models import DeviceType, Manufacturer, Region
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from dcim.models.device_component_templates import (
ConsolePortTemplate,
ConsoleServerPortTemplate,
DeviceBayTemplate,
FrontPortTemplate,
InterfaceTemplate,
PowerOutletTemplate,
PowerPortTemplate,
RearPortTemplate,
)
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
from utilities.forms.utils import expand_alphanumeric_pattern
def expand_templates(params: List[dict], device_type: DeviceType) -> List[dict]:
templateable_fields = ["name", "label", "positions", "rear_port", "rear_port_position"]
expanded = []
for param in params:
param["device_type"] = device_type
expanded_fields = {}
has_plain_fields = False
for field in templateable_fields:
template_value = param.pop(f"{field}_template", None)
if field in param:
has_plain_fields = True
elif template_value:
expanded_fields[field] = list(expand_alphanumeric_pattern(template_value))
if expanded_fields and has_plain_fields:
raise ValueError(f"Mix of plain and template keys provided for {templateable_fields}")
elif not expanded_fields:
expanded.append(param)
continue
elements = list(expanded_fields.values())
master_len = len(elements[0])
if not all([len(elem) == master_len for elem in elements]):
raise ValueError(
f"Number of elements in template fields "
f"{list(expanded_fields.keys())} must be equal"
)
for idx in range(master_len):
tmp = param.copy()
for field, value in expanded_fields.items():
if field in nested_assocs:
model, match_key = nested_assocs[field]
query = {match_key: value[idx], "device_type": device_type}
tmp[field] = model.objects.get(**query)
else:
tmp[field] = value[idx]
expanded.append(tmp)
return expanded
device_types = load_yaml("/opt/netbox/initializers/device_types.yml")
if device_types is None:
sys.exit()
match_params = ["manufacturer", "model", "slug"]
required_assocs = {"manufacturer": (Manufacturer, "name")}
optional_assocs = {"region": (Region, "name"), "tenant": (Tenant, "name")}
nested_assocs = {"rear_port": (RearPortTemplate, "name"), "power_port": (PowerPortTemplate, "name")}
supported_components = {
"interfaces": (InterfaceTemplate, ["name"]),
"console_ports": (ConsolePortTemplate, ["name"]),
"console_server_ports": (ConsoleServerPortTemplate, ["name"]),
"power_ports": (PowerPortTemplate, ["name"]),
"power_outlets": (PowerOutletTemplate, ["name"]),
"rear_ports": (RearPortTemplate, ["name"]),
"front_ports": (FrontPortTemplate, ["name"]),
"device_bays": (DeviceBayTemplate, ["name"]),
}
for params in device_types:
custom_field_data = pop_custom_fields(params)
components = [(v[0], v[1], params.pop(k, [])) for k, v in supported_components.items()]
for assoc, details in required_assocs.items():
model, field = details
@ -29,9 +104,36 @@ for params in device_types:
params[assoc] = model.objects.get(**query)
device_type, created = DeviceType.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
device_type, created = DeviceType.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(device_type, custom_field_data)
print("🔡 Created device type", device_type.manufacturer, device_type.model)
set_custom_fields_values(device_type, custom_field_data)
for component in components:
c_model, c_match_params, c_params = component
c_match_params.append("device_type")
if not c_params:
continue
expanded_c_params = expand_templates(c_params, device_type)
for n_assoc, n_details in nested_assocs.items():
n_model, n_field = n_details
for c_param in expanded_c_params:
if n_assoc in c_param:
n_query = {n_field: c_param[n_assoc], "device_type": device_type}
c_param[n_assoc] = n_model.objects.get(**n_query)
for new_param in expanded_c_params:
new_matching_params, new_defaults = split_params(new_param, c_match_params)
new_obj, new_obj_created = c_model.objects.get_or_create(
**new_matching_params, defaults=new_defaults
)
if new_obj_created:
print(
f"🧷 Created {c_model._meta} {new_obj} component for device type {device_type}"
)
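
As a hedged illustration of what expand_templates above does with a *_template key (hypothetical values; it assumes expand_alphanumeric_pattern expands a range pattern such as "eth[1-3]" into eth1, eth2, eth3):

# Hedged sketch only; the ORM side needs a running NetBox, so it stays in comments.
from utilities.forms.utils import expand_alphanumeric_pattern

# Hypothetical "interfaces" entry from device_types.yml using a template key:
entry = {"name_template": "eth[1-3]", "type": "1000base-t"}

# expand_templates() pops the *_template key and expands the pattern ...
names = list(expand_alphanumeric_pattern(entry.pop("name_template")))
# names == ["eth1", "eth2", "eth3"]  (assumed expansion behaviour)

# ... then emits one concrete template dict per expanded value, each tagged with
# the owning device_type, roughly:
# [{"name": "eth1", "type": "1000base-t", "device_type": device_type}, ...]
# Mixing a plain key with its *_template variant raises ValueError, and all
# template fields in one entry must expand to the same number of elements.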

View File

@ -1,7 +1,12 @@
import sys
from dcim.models import Device, DeviceRole, DeviceType, Location, Platform, Rack, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
from virtualization.models import Cluster
@ -10,12 +15,12 @@ devices = load_yaml("/opt/netbox/initializers/devices.yml")
if devices is None:
sys.exit()
match_params = ["device_type", "name", "site"]
required_assocs = {
"device_role": (DeviceRole, "name"),
"device_type": (DeviceType, "model"),
"site": (Site, "name"),
}
optional_assocs = {
"tenant": (Tenant, "name"),
"platform": (Platform, "name"),
@ -27,7 +32,7 @@ optional_assocs = {
for params in devices:
custom_field_data = pop_custom_fields(params)
# primary ips are handled later in `270_primary_ips.py`
# primary ips are handled later in `380_primary_ips.py`
params.pop("primary_ip4", None)
params.pop("primary_ip6", None)
@ -44,9 +49,10 @@ for params in devices:
params[assoc] = model.objects.get(**query)
device, created = Device.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
device, created = Device.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(device, custom_field_data)
print("🖥️ Created device", device.name)
set_custom_fields_values(device, custom_field_data)

View File

@ -1,27 +1,70 @@
import sys
from dcim.models import Device, Interface
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
interfaces = load_yaml("/opt/netbox/initializers/dcim_interfaces.yml")
if interfaces is None:
sys.exit()
match_params = ["device", "name"]
required_assocs = {"device": (Device, "name")}
related_assocs = {
"bridge": (Interface, "name"),
"lag": (Interface, "name"),
"parent": (Interface, "name"),
}
for params in interfaces:
custom_field_data = pop_custom_fields(params)
related_interfaces = {k: params.pop(k, None) for k in related_assocs}
for assoc, details in required_assocs.items():
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
interface, created = Interface.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
interface, created = Interface.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(interface, custom_field_data)
print(f"🧷 Created interface {interface} on {interface.device}")
print("🧷 Created interface", interface.name, interface.device.name)
set_custom_fields_values(interface, custom_field_data)
for related_field, related_value in related_interfaces.items():
if not related_value:
continue
r_model, r_field = related_assocs[related_field]
if related_field == "parent" and not interface.parent_id:
query = {r_field: related_value, "device": interface.device}
try:
related_obj = r_model.objects.get(**query)
except Interface.DoesNotExist:
print(f"⚠️ Could not find parent interface with: {query} for interface {interface}")
raise
interface.parent_id = related_obj.id
interface.save()
print(
f"🧷 Attached interface {interface} on {interface.device} "
f"to parent {related_obj}"
)
else:
query = {r_field: related_value, "device": interface.device, "type": related_field}
related_obj, rel_obj_created = r_model.objects.get_or_create(**query)
if rel_obj_created:
setattr(interface, f"{related_field}_id", related_obj.id)
interface.save()
print(f"🧷 Created {related_field} interface {interface} on {interface.device}")

View File

@ -1,7 +1,7 @@
import sys
from dcim.models import Manufacturer, Platform
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
platforms = load_yaml("/opt/netbox/initializers/platforms.yml")
@ -21,7 +21,8 @@ for params in platforms:
params[assoc] = model.objects.get(**query)
platform, created = Platform.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
platform, created = Platform.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("💾 Created platform", platform.name)

View File

@ -1,7 +1,12 @@
import sys
from ipam.models import RouteTarget
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
route_targets = load_yaml("/opt/netbox/initializers/route_targets.yml")
@ -21,9 +26,10 @@ for params in route_targets:
params[assoc] = model.objects.get(**query)
route_target, created = RouteTarget.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
route_target, created = RouteTarget.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(route_target, custom_field_data)
print("🎯 Created Route Target", route_target.name)
set_custom_fields_values(route_target, custom_field_data)

View File

@ -1,7 +1,12 @@
import sys
from ipam.models import VRF
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
vrfs = load_yaml("/opt/netbox/initializers/vrfs.yml")
@ -9,6 +14,7 @@ vrfs = load_yaml("/opt/netbox/initializers/vrfs.yml")
if vrfs is None:
sys.exit()
match_params = ["name", "rd"]
optional_assocs = {"tenant": (Tenant, "name")}
for params in vrfs:
@ -21,9 +27,10 @@ for params in vrfs:
params[assoc] = model.objects.get(**query)
vrf, created = VRF.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
vrf, created = VRF.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(vrf, custom_field_data)
print("📦 Created VRF", vrf.name)
set_custom_fields_values(vrf, custom_field_data)

View File

@ -1,7 +1,7 @@
import sys
from ipam.models import RIR
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
rirs = load_yaml("/opt/netbox/initializers/rirs.yml")
@ -9,7 +9,8 @@ if rirs is None:
sys.exit()
for params in rirs:
rir, created = RIR.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
rir, created = RIR.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🗺️ Created RIR", rir.name)

View File

@ -1,7 +1,7 @@
import sys
from ipam.models import ASN, RIR
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
from tenancy.models import Tenant
asns = load_yaml("/opt/netbox/initializers/asns.yml")
@ -9,8 +9,8 @@ asns = load_yaml("/opt/netbox/initializers/asns.yml")
if asns is None:
sys.exit()
match_params = ["asn", "rir"]
required_assocs = {"rir": (RIR, "name")}
optional_assocs = {"tenant": (Tenant, "name")}
for params in asns:
@ -27,7 +27,8 @@ for params in asns:
params[assoc] = model.objects.get(**query)
asn, created = ASN.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
asn, created = ASN.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print(f"🔡 Created ASN {asn.asn}")

View File

@ -2,7 +2,12 @@ import sys
from ipam.models import RIR, Aggregate
from netaddr import IPNetwork
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
aggregates = load_yaml("/opt/netbox/initializers/aggregates.yml")
@ -10,8 +15,8 @@ aggregates = load_yaml("/opt/netbox/initializers/aggregates.yml")
if aggregates is None:
sys.exit()
match_params = ["prefix", "rir"]
required_assocs = {"rir": (RIR, "name")}
optional_assocs = {
"tenant": (Tenant, "name"),
}
@ -34,9 +39,10 @@ for params in aggregates:
params[assoc] = model.objects.get(**query)
aggregate, created = Aggregate.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
aggregate, created = Aggregate.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(aggregate, custom_field_data)
print("🗞️ Created Aggregate", aggregate.prefix)
set_custom_fields_values(aggregate, custom_field_data)

View File

@ -1,7 +1,7 @@
import sys
from ipam.models import Role
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
roles = load_yaml("/opt/netbox/initializers/prefix_vlan_roles.yml")
@ -9,7 +9,8 @@ if roles is None:
sys.exit()
for params in roles:
role, created = Role.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
role, created = Role.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("⛹️‍ Created Prefix/VLAN Role", role.name)

View File

@ -1,6 +1,6 @@
import sys
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
from virtualization.models import ClusterType
cluster_types = load_yaml("/opt/netbox/initializers/cluster_types.yml")
@ -9,7 +9,8 @@ if cluster_types is None:
sys.exit()
for params in cluster_types:
cluster_type, created = ClusterType.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
cluster_type, created = ClusterType.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🧰 Created Cluster Type", cluster_type.name)

View File

@ -1,6 +1,6 @@
import sys
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
from virtualization.models import ClusterGroup
cluster_groups = load_yaml("/opt/netbox/initializers/cluster_groups.yml")
@ -9,7 +9,10 @@ if cluster_groups is None:
sys.exit()
for params in cluster_groups:
cluster_group, created = ClusterGroup.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
cluster_group, created = ClusterGroup.objects.get_or_create(
**matching_params, defaults=defaults
)
if created:
print("🗄️ Created Cluster Group", cluster_group.name)

View File

@ -1,7 +1,12 @@
import sys
from dcim.models import Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
from virtualization.models import Cluster, ClusterGroup, ClusterType
@ -10,8 +15,8 @@ clusters = load_yaml("/opt/netbox/initializers/clusters.yml")
if clusters is None:
sys.exit()
match_params = ["name", "type"]
required_assocs = {"type": (ClusterType, "name")}
optional_assocs = {
"site": (Site, "name"),
"group": (ClusterGroup, "name"),
@ -34,9 +39,10 @@ for params in clusters:
params[assoc] = model.objects.get(**query)
cluster, created = Cluster.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
cluster, created = Cluster.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(cluster, custom_field_data)
print("🗄️ Created cluster", cluster.name)
set_custom_fields_values(cluster, custom_field_data)

View File

@ -2,7 +2,12 @@ import sys
from django.contrib.contenttypes.models import ContentType
from ipam.models import VLANGroup
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
vlan_groups = load_yaml("/opt/netbox/initializers/vlan_groups.yml")
@ -32,9 +37,11 @@ for params in vlan_groups:
)
continue
params["scope_id"] = ct.model_class().objects.get(**query).id
vlan_group, created = VLANGroup.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
vlan_group, created = VLANGroup.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(vlan_group, custom_field_data)
print("🏘️ Created VLAN Group", vlan_group.name)
set_custom_fields_values(vlan_group, custom_field_data)

View File

@ -2,7 +2,12 @@ import sys
from dcim.models import Site
from ipam.models import VLAN, Role, VLANGroup
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant, TenantGroup
vlans = load_yaml("/opt/netbox/initializers/vlans.yml")
@ -10,6 +15,7 @@ vlans = load_yaml("/opt/netbox/initializers/vlans.yml")
if vlans is None:
sys.exit()
match_params = ["name", "vid"]
optional_assocs = {
"site": (Site, "name"),
"tenant": (Tenant, "name"),
@ -28,9 +34,10 @@ for params in vlans:
params[assoc] = model.objects.get(**query)
vlan, created = VLAN.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
vlan, created = VLAN.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(vlan, custom_field_data)
print("🏠 Created VLAN", vlan.name)
set_custom_fields_values(vlan, custom_field_data)

View File

@ -1,7 +1,12 @@
import sys
from dcim.models import DeviceRole, Platform
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
from virtualization.models import Cluster, VirtualMachine
@ -10,8 +15,8 @@ virtual_machines = load_yaml("/opt/netbox/initializers/virtual_machines.yml")
if virtual_machines is None:
sys.exit()
match_params = ["cluster", "name"]
required_assocs = {"cluster": (Cluster, "name")}
optional_assocs = {
"tenant": (Tenant, "name"),
"platform": (Platform, "name"),
@ -38,9 +43,12 @@ for params in virtual_machines:
params[assoc] = model.objects.get(**query)
virtual_machine, created = VirtualMachine.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
virtual_machine, created = VirtualMachine.objects.get_or_create(
**matching_params, defaults=defaults
)
if created:
set_custom_fields_values(virtual_machine, custom_field_data)
print("🖥️ Created virtual machine", virtual_machine.name)
set_custom_fields_values(virtual_machine, custom_field_data)

View File

@ -1,6 +1,11 @@
import sys
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from virtualization.models import VirtualMachine, VMInterface
interfaces = load_yaml("/opt/netbox/initializers/virtualization_interfaces.yml")
@ -8,6 +13,7 @@ interfaces = load_yaml("/opt/netbox/initializers/virtualization_interfaces.yml")
if interfaces is None:
sys.exit()
match_params = ["name", "virtual_machine"]
required_assocs = {"virtual_machine": (VirtualMachine, "name")}
for params in interfaces:
@ -19,9 +25,10 @@ for params in interfaces:
params[assoc] = model.objects.get(**query)
interface, created = VMInterface.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
interface, created = VMInterface.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(interface, custom_field_data)
print("🧷 Created interface", interface.name, interface.virtual_machine.name)
set_custom_fields_values(interface, custom_field_data)

View File

@ -3,7 +3,12 @@ import sys
from dcim.models import Site
from ipam.models import VLAN, VRF, Prefix, Role
from netaddr import IPNetwork
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant, TenantGroup
prefixes = load_yaml("/opt/netbox/initializers/prefixes.yml")
@ -11,6 +16,7 @@ prefixes = load_yaml("/opt/netbox/initializers/prefixes.yml")
if prefixes is None:
sys.exit()
match_params = ["prefix", "site", "vrf", "vlan"]
optional_assocs = {
"site": (Site, "name"),
"tenant": (Tenant, "name"),
@ -31,9 +37,10 @@ for params in prefixes:
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
prefix, created = Prefix.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
prefix, created = Prefix.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(prefix, custom_field_data)
print("📌 Created Prefix", prefix.prefix)
set_custom_fields_values(prefix, custom_field_data)

View File

@ -5,7 +5,12 @@ from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from ipam.models import VRF, IPAddress
from netaddr import IPNetwork
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
from virtualization.models import VirtualMachine, VMInterface
@ -14,10 +19,11 @@ ip_addresses = load_yaml("/opt/netbox/initializers/ip_addresses.yml")
if ip_addresses is None:
sys.exit()
match_params = ["address", "vrf"]
optional_assocs = {
"tenant": (Tenant, "name"),
"vrf": (VRF, "name"),
"interface": (None, None),
"interface": (Interface, "name"),
}
vm_interface_ct = ContentType.objects.filter(
@ -55,9 +61,10 @@ for params in ip_addresses:
params[assoc] = model.objects.get(**query)
ip_address, created = IPAddress.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
ip_address, created = IPAddress.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(ip_address, custom_field_data)
print("🧬 Created IP Address", ip_address.address)
set_custom_fields_values(ip_address, custom_field_data)

View File

@ -2,7 +2,7 @@ import sys
from dcim.models import Device
from ipam.models import Service
from startup_script_utils import load_yaml
from startup_script_utils import load_yaml, split_params
from virtualization.models import VirtualMachine
services = load_yaml("/opt/netbox/initializers/services.yml")
@ -10,6 +10,7 @@ services = load_yaml("/opt/netbox/initializers/services.yml")
if services is None:
sys.exit()
match_params = ["name", "device", "virtual_machine"]
optional_assocs = {
"device": (Device, "name"),
"virtual_machine": (VirtualMachine, "name"),
@ -24,6 +25,7 @@ for params in services:
params[assoc] = model.objects.get(**query)
matching_params, defaults = split_params(params, match_params)
service, created = Service.objects.get_or_create(**params)
if created:

View File

@ -1,7 +1,12 @@
import sys
from circuits.models import Provider
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
providers = load_yaml("/opt/netbox/initializers/providers.yml")
@ -11,9 +16,10 @@ if providers is None:
for params in providers:
custom_field_data = pop_custom_fields(params)
provider, created = Provider.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
provider, created = Provider.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(provider, custom_field_data)
print("📡 Created provider", provider.name)
set_custom_fields_values(provider, custom_field_data)

View File

@ -1,7 +1,12 @@
import sys
from circuits.models import CircuitType
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
circuit_types = load_yaml("/opt/netbox/initializers/circuit_types.yml")
@ -11,9 +16,10 @@ if circuit_types is None:
for params in circuit_types:
custom_field_data = pop_custom_fields(params)
circuit_type, created = CircuitType.objects.get_or_create(**params)
matching_params, defaults = split_params(params)
circuit_type, created = CircuitType.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(circuit_type, custom_field_data)
print("⚡ Created Circuit Type", circuit_type.name)
set_custom_fields_values(circuit_type, custom_field_data)

View File

@ -1,7 +1,12 @@
import sys
from circuits.models import Circuit, CircuitType, Provider
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Tenant
circuits = load_yaml("/opt/netbox/initializers/circuits.yml")
@ -9,8 +14,8 @@ circuits = load_yaml("/opt/netbox/initializers/circuits.yml")
if circuits is None:
sys.exit()
match_params = ["cid", "provider", "type"]
required_assocs = {"provider": (Provider, "name"), "type": (CircuitType, "name")}
optional_assocs = {"tenant": (Tenant, "name")}
for params in circuits:
@ -29,9 +34,10 @@ for params in circuits:
params[assoc] = model.objects.get(**query)
circuit, created = Circuit.objects.get_or_create(**params)
matching_params, defaults = split_params(params, match_params)
circuit, created = Circuit.objects.get_or_create(**matching_params, defaults=defaults)
if created:
set_custom_fields_values(circuit, custom_field_data)
print("⚡ Created Circuit", circuit.cid)
set_custom_fields_values(circuit, custom_field_data)

View File

@ -0,0 +1,227 @@
import sys
from typing import Tuple
from circuits.models import Circuit, CircuitTermination, ProviderNetwork
from dcim.models import (
Cable,
ConsolePort,
ConsoleServerPort,
FrontPort,
Interface,
PowerFeed,
PowerOutlet,
PowerPanel,
PowerPort,
RearPort,
Site,
)
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from startup_script_utils import load_yaml
CONSOLE_PORT_TERMINATION = ContentType.objects.get_for_model(ConsolePort)
CONSOLE_SERVER_PORT_TERMINATION = ContentType.objects.get_for_model(ConsoleServerPort)
FRONT_PORT_TERMINATION = ContentType.objects.get_for_model(FrontPort)
REAR_PORT_TERMINATION = ContentType.objects.get_for_model(RearPort)
FRONT_AND_REAR = [FRONT_PORT_TERMINATION, REAR_PORT_TERMINATION]
POWER_PORT_TERMINATION = ContentType.objects.get_for_model(PowerPort)
POWER_OUTLET_TERMINATION = ContentType.objects.get_for_model(PowerOutlet)
POWER_FEED_TERMINATION = ContentType.objects.get_for_model(PowerFeed)
POWER_TERMINATIONS = [POWER_PORT_TERMINATION, POWER_OUTLET_TERMINATION, POWER_FEED_TERMINATION]
VIRTUAL_INTERFACES = ["bridge", "lag", "virtual"]
def get_termination_object(params: dict, side: str):
klass = params.pop(f"termination_{side}_class")
name = params.pop(f"termination_{side}_name", None)
device = params.pop(f"termination_{side}_device", None)
feed_params = params.pop(f"termination_{side}_feed", None)
circuit_params = params.pop(f"termination_{side}_circuit", {})
if name and device:
termination = klass.objects.get(name=name, device__name=device)
return termination
elif feed_params:
q = {
"name": feed_params["power_panel"]["name"],
"site__name": feed_params["power_panel"]["site"],
}
power_panel = PowerPanel.objects.get(**q)
termination = PowerFeed.objects.get(name=feed_params["name"], power_panel=power_panel)
return termination
elif circuit_params:
circuit = Circuit.objects.get(cid=circuit_params.pop("cid"))
term_side = circuit_params.pop("term_side").upper()
site_name = circuit_params.pop("site", None)
provider_network = circuit_params.pop("provider_network", None)
if site_name:
circuit_params["site"] = Site.objects.get(name=site_name)
elif provider_network:
circuit_params["provider_network"] = ProviderNetwork.objects.get(name=provider_network)
else:
raise ValueError(
f"⚠️ Missing one of required parameters: 'site' or 'provider_network' "
f"for side {term_side} of circuit {circuit}"
)
termination, created = CircuitTermination.objects.get_or_create(
circuit=circuit, term_side=term_side, defaults=circuit_params
)
if created:
print(f"⚡ Created new CircuitTermination {termination}")
return termination
raise ValueError(
f"⚠️ Missing parameters for termination_{side}. "
"Need termination_{side}_name AND termination_{side}_device OR termination_{side}_circuit"
)
def get_termination_class_by_name(port_class: str):
if not port_class:
return Interface
return globals()[port_class]
def cable_in_cables(term_a: tuple, term_b: tuple) -> bool:
"""Check if cable exist for given terminations.
Each tuple should consist termination object and termination type
"""
cable = Cable.objects.filter(
Q(
termination_a_id=term_a[0].id,
termination_a_type=term_a[1],
termination_b_id=term_b[0].id,
termination_b_type=term_b[1],
)
| Q(
termination_a_id=term_b[0].id,
termination_a_type=term_b[1],
termination_b_id=term_a[0].id,
termination_b_type=term_a[1],
)
)
return cable.exists()
def check_termination_types(type_a, type_b) -> Tuple[bool, str]:
if type_a in POWER_TERMINATIONS and type_b in POWER_TERMINATIONS:
if type_a == type_b:
return False, "Can't connect the same power terminations together"
elif (
type_a == POWER_OUTLET_TERMINATION
and type_b == POWER_FEED_TERMINATION
or type_a == POWER_FEED_TERMINATION
and type_b == POWER_OUTLET_TERMINATION
):
return False, "PowerOutlet can't be connected with PowerFeed"
elif type_a in POWER_TERMINATIONS or type_b in POWER_TERMINATIONS:
return False, "Can't mix power terminations with port terminations"
elif type_a in FRONT_AND_REAR or type_b in FRONT_AND_REAR:
return True, ""
elif (
type_a == CONSOLE_PORT_TERMINATION
and type_b != CONSOLE_SERVER_PORT_TERMINATION
or type_b == CONSOLE_PORT_TERMINATION
and type_a != CONSOLE_SERVER_PORT_TERMINATION
):
return False, "ConsolePorts can only be connected to ConsoleServerPorts or Front/Rear ports"
return True, ""
def get_cable_name(termination_a: tuple, termination_b: tuple) -> str:
"""Returns name of a cable in format:
device_a interface_a <---> interface_b device_b
or for circuits:
circuit_a termination_a <---> termination_b circuit_b
"""
cable_name = []
for is_side_b, termination in enumerate([termination_a, termination_b]):
try:
power_panel_id = getattr(termination[0], "power_panel_id", None)
if power_panel_id:
power_feed = PowerPanel.objects.get(id=power_panel_id)
segment = [f"{power_feed}", f"{termination[0]}"]
else:
segment = [f"{termination[0].device}", f"{termination[0]}"]
except AttributeError:
segment = [f"{termination[0].circuit.cid}", f"{termination[0]}"]
if is_side_b:
segment.reverse()
cable_name.append(" ".join(segment))
return " <---> ".join(cable_name)
def check_interface_types(*args):
for termination in args:
try:
if termination.type in VIRTUAL_INTERFACES:
raise Exception(
f"⚠️ Virtual interfaces are not supported for cabling. "
f"Termination {termination.device} {termination} {termination.type}"
)
except AttributeError:
# CircuitTermination doesn't have a type field
pass
def check_terminations_are_free(*args):
any_failed = False
for termination in args:
if termination.cable_id:
any_failed = True
print(
f"⚠️ Termination {termination} is already occupied "
f"with cable #{termination.cable_id}"
)
if any_failed:
raise Exception("⚠️ At least one end of the cable is already occupied.")
cables = load_yaml("/opt/netbox/initializers/cables.yml")
if cables is None:
sys.exit()
for params in cables:
params["termination_a_class"] = get_termination_class_by_name(params.get("termination_a_class"))
params["termination_b_class"] = get_termination_class_by_name(params.get("termination_b_class"))
term_a = get_termination_object(params, side="a")
term_b = get_termination_object(params, side="b")
check_interface_types(term_a, term_b)
term_a_ct = ContentType.objects.get_for_model(term_a)
term_b_ct = ContentType.objects.get_for_model(term_b)
types_ok, msg = check_termination_types(term_a_ct, term_b_ct)
cable_name = get_cable_name((term_a, term_a_ct), (term_b, term_b_ct))
if not types_ok:
print(f"⚠️ Invalid termination types for {cable_name}. {msg}")
continue
if cable_in_cables((term_a, term_a_ct), (term_b, term_b_ct)):
continue
check_terminations_are_free(term_a, term_b)
params["termination_a_id"] = term_a.id
params["termination_b_id"] = term_b.id
params["termination_a_type"] = term_a_ct
params["termination_b_type"] = term_b_ct
cable = Cable.objects.create(**params)
print(f"🧷 Created cable {cable} {cable_name}")

View File

@ -0,0 +1,36 @@
import sys
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import ContactGroup
contact_groups = load_yaml("/opt/netbox/initializers/contact_groups.yml")
if contact_groups is None:
sys.exit()
optional_assocs = {"parent": (ContactGroup, "name")}
for params in contact_groups:
custom_field_data = pop_custom_fields(params)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
matching_params, defaults = split_params(params)
contact_group, created = ContactGroup.objects.get_or_create(
**matching_params, defaults=defaults
)
if created:
print("🔳 Created Contact Group", contact_group.name)
set_custom_fields_values(contact_group, custom_field_data)

View File

@ -0,0 +1,26 @@
import sys
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import ContactRole
contact_roles = load_yaml("/opt/netbox/initializers/contact_roles.yml")
if contact_roles is None:
sys.exit()
for params in contact_roles:
custom_field_data = pop_custom_fields(params)
matching_params, defaults = split_params(params)
contact_role, created = ContactRole.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("🔳 Created Contact Role", contact_role.name)
set_custom_fields_values(contact_role, custom_field_data)

View File

@ -0,0 +1,34 @@
import sys
from startup_script_utils import (
load_yaml,
pop_custom_fields,
set_custom_fields_values,
split_params,
)
from tenancy.models import Contact, ContactGroup
contacts = load_yaml("/opt/netbox/initializers/contacts.yml")
if contacts is None:
sys.exit()
optional_assocs = {"group": (ContactGroup, "name")}
for params in contacts:
custom_field_data = pop_custom_fields(params)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
matching_params, defaults = split_params(params)
contact, created = Contact.objects.get_or_create(**matching_params, defaults=defaults)
if created:
print("👩‍💻 Created Contact", contact.name)
set_custom_fields_values(contact, custom_field_data)

View File

@ -1,2 +1,3 @@
from .custom_fields import pop_custom_fields, set_custom_fields_values
from .load_yaml import load_yaml
from .utils import split_params

View File

@ -1,9 +1,38 @@
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from extras.models import CustomField
def set_custom_fields_values(entity, custom_field_data):
if not custom_field_data:
return
entity.custom_field_data = custom_field_data
return entity.save()
missing_cfs = []
save = False
for key, value in custom_field_data.items():
try:
cf = CustomField.objects.get(name=key)
except ObjectDoesNotExist:
missing_cfs.append(key)
else:
ct = ContentType.objects.get_for_model(entity)
if ct not in cf.content_types.all():
print(
f"⚠️ Custom field {key} is not enabled for {entity}'s model!"
"Please check the 'on_objects' for that custom field in custom_fields.yml"
)
elif key not in entity.custom_field_data:
entity.custom_field_data[key] = value
save = True
if missing_cfs:
raise Exception(
f"⚠️ Custom field(s) '{missing_cfs}' requested for {entity} but not found in Netbox!"
"Please chceck the custom_fields.yml"
)
if save:
entity.save()
def pop_custom_fields(params):

View File

@ -0,0 +1,15 @@
from typing import Tuple
def split_params(params: dict, unique_params: list = None) -> Tuple[dict, dict]:
"""Split params dict into dict with matching params and a dict with default values"""
if unique_params is None:
unique_params = ["name", "slug"]
matching_params = {}
for unique_param in unique_params:
param = params.pop(unique_param, None)
if param:
matching_params[unique_param] = param
return matching_params, params
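
For reference, a minimal sketch (hypothetical values) of the lookup/defaults split that the initializer scripts above rely on:

from startup_script_utils import split_params  # re-exported in __init__.py above

# Hypothetical rack parameters (the site would normally be a resolved Site object;
# a plain string is used here to keep the sketch self-contained).
params = {"name": "rack-01", "site": "DC1", "u_height": 47, "comments": "demo"}

# The unique keys drive the lookup; everything else becomes defaults, so
# re-running the startup scripts does not overwrite existing objects.
matching_params, defaults = split_params(params, ["name", "site"])
assert matching_params == {"name": "rack-01", "site": "DC1"}
assert defaults == {"u_height": 47, "comments": "demo"}

# The scripts then pass both to Django's get_or_create, e.g.:
# rack, created = Rack.objects.get_or_create(**matching_params, defaults=defaults)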