Compare commits


102 Commits
2.3.0 ... 2.6.1

Author SHA1 Message Date
22486fefb5 Merge pull request #1002 from netbox-community/develop
Prepare 2.6.1
2023-04-28 15:29:28 +02:00
96bda7fa4f Merge branch 'release' into develop 2023-04-28 13:15:19 +02:00
c085287e64 Prepare 2.6.1 2023-04-28 13:13:06 +02:00
b4a6be37ec Merge pull request #1001 from netbox-community/develop
Version 2.6.0
2023-04-28 09:00:49 +02:00
02a926431b Merge pull request #990 from tobiasge/prepare-for-nb-35
Preparation for Netbox 3.5
2023-04-28 07:41:22 +02:00
3978b14c7f Preparation for 2.6.0 2023-04-27 23:45:39 +02:00
7532508aab Ensure minimum length for the SECRET_KEY is met 2023-04-27 16:56:47 +02:00
858611ad67 Check if the new image tag exists 2023-04-27 09:32:42 +02:00
155e90c99f Removed BASE_PATH from configuration
Setting BASE_PATH is a more involved process than just setting this variable.
To prevent surprises, the option to set it via an ENV variable was removed.
2023-04-27 09:32:42 +02:00
cb524c32ed Preparation for Netbox 3.5
- Reports and scripts have changed in NetBox 3.5 and now need to be uploaded.
  The Docker Compose setup now creates a volume for them, as it does for the media files.
- NAPALM has been removed from NetBox 3.5.
  All configuration entries for NAPALM were removed, and napalm itself was removed from the requirements file.
- Removed Gunicorn from the image.
  Nginx Unit has been used for a while now, so there is no need to install Gunicorn.
2023-04-27 09:32:42 +02:00
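As a hedged illustration of the new reports/scripts handling (the exact volume definitions appear in the `docker-compose.yml` diff further below): with those directories now backed by named volumes, one way to get a custom report or script file onto the container's filesystem is to copy it in with Compose. The file names here are hypothetical placeholders:

```bash
# Copy local files into the reports/scripts volumes mounted by the netbox service.
# 'my_report.py' and 'my_script.py' are placeholder names, not files from this repo.
docker compose cp ./my_report.py netbox:/opt/netbox/netbox/reports/
docker compose cp ./my_script.py netbox:/opt/netbox/netbox/scripts/
```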
cb4dcc0488 Merge pull request #1000 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.3.0
2023-04-27 09:09:13 +02:00
ddcc8b5131 Update dependency django-auth-ldap to v4.3.0 2023-04-26 23:27:32 +00:00
3286faa94c Merge pull request #989 from netbox-community/develop
Version 2.5.3
2023-04-15 08:42:25 +02:00
788cd03a35 Merge pull request #988 from netbox-community/renovate/github-super-linter-5.x
Update github/super-linter action to v5
2023-04-15 01:21:43 +02:00
0911c2251d Merge branch 'release' into develop 2023-04-15 01:20:55 +02:00
c698496e36 Preparation for 2.5.3 2023-04-15 01:16:40 +02:00
23a262d72f Update github/super-linter action to v5 2023-04-14 19:16:09 +00:00
5273e17d89 Merge pull request #983 from tobiasge/arm-test
Try to get test on ARM64 to pass
2023-04-06 19:01:12 +02:00
e44f0398fb Try to get test on ARM64 to pass 2023-04-06 17:44:10 +02:00
7c2e012523 Merge pull request #982 from tobiasge/fix-social-auth-core
Fixed version conflicts for social-auth-core
2023-04-06 12:37:51 +02:00
5a29364bca Fixed wording
Co-authored-by: Christian Mäder <cimnine@users.noreply.github.com>
2023-04-06 11:08:34 +02:00
5d5b01f6b5 Fixed version conflicts for social-auth-core 2023-04-06 09:52:28 +02:00
5d6e733bce Merge pull request #980 from netbox-community/renovate/psycopg2-2.x
Update dependency psycopg2 to v2.9.6
2023-04-05 20:22:27 +02:00
87a9808bc2 Update dependency psycopg2 to v2.9.6 2023-04-03 11:39:03 +00:00
7bf9e1af5a Merge pull request #978 from netbox-community/develop
Missing version tags for 2.5.2
2023-03-29 20:50:29 +02:00
1e588431e2 Merge pull request #976 from tobiasge/better-tests
Further improved test configuration
2023-03-29 18:33:23 +02:00
41fd4e5d67 Further improved test configuration 2023-03-29 17:08:21 +02:00
17f1bb0af0 Preparation for 2.5.2 2023-03-29 12:41:34 +02:00
9cc58918ab Merge pull request #975 from netbox-community/develop
Version 2.5.2
2023-03-29 12:03:15 +02:00
831867499b Merge branch 'release' into develop 2023-03-29 10:36:28 +02:00
d5dde45bec Merge pull request #973 from tobiasge/social-auth-update
Use same version as Netbox for social-auth-core
2023-03-29 10:25:58 +02:00
6576c18a9c Merge pull request #972 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.2.0
2023-03-29 09:34:54 +02:00
efd6e6a3c2 Use same version as Netbox for social-auth-core 2023-03-29 08:58:49 +02:00
47a7eee16a Update dependency django-auth-ldap to v4.2.0 2023-03-28 10:20:51 +00:00
5eac65b8f6 Merge pull request #968 from ryanmerolle/patch-1
Update docker-compose.yml
2023-03-20 15:11:35 +01:00
2ba441124e Update docker-compose.yml 2023-03-20 08:21:17 -04:00
f2d070fc49 Added more tests (#965) 2023-03-16 21:44:08 +01:00
97ee353b00 Merge pull request #963 from netbox-community/develop
Version 2.5.1
2023-03-16 11:13:13 +01:00
c001b88a81 Merge pull request #964 from tobiasge/better-base-image-check
Simplified base image check
2023-03-16 09:46:35 +01:00
b131b07af8 Simplified base image check 2023-03-16 07:50:24 +01:00
311629ade4 Preparation for 2.5.1 2023-03-16 07:39:38 +01:00
256f23b4ad Merge pull request #961 from MarcHagen/patch-1
Catch DoesNotExist preventing startup
2023-03-16 07:37:05 +01:00
29e37a31d7 Catch DoesNotExist preventing startup
Fixes startup failing because of a Python error:

```
Traceback (most recent call last):
  File "/opt/netbox/netbox/./manage.py", line 10, in <module>
    execute_from_command_line(sys.argv)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/__init__.py", line 446, in execute_from_command_line
    utility.execute()
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/__init__.py", line 440, in execute
    self.fetch_command(subcommand).run_from_argv(self.argv)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/base.py", line 402, in run_from_argv
    self.execute(*args, **cmd_options)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/base.py", line 448, in execute
    output = self.handle(*args, **options)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/commands/shell.py", line 127, in handle
    exec(sys.stdin.read(), globals())
  File "<string>", line 2, in <module>
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
    return getattr(self.get_queryset(), name)(*args, **kwargs)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/query.py", line 650, in get
    raise self.model.DoesNotExist(
users.models.Token.DoesNotExist: Token matching query does not exist.
```
2023-03-15 23:04:04 +01:00
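The fix, visible in the `docker-entrypoint.sh` diff further below, wraps the token lookup in a `try`/`except` so that a missing default token no longer aborts startup. A minimal sketch of the pattern, run through `manage.py shell` just as the entrypoint does:

```bash
/opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py shell --interface python <<END
from users.models import Token
try:
    # Look up the well-known default admin token; its absence is the normal case.
    Token.objects.get(key="0123456789abcdef0123456789abcdef01234567")
    print("⚠️ Warning: the old default admin token is still in the database; remove it.")
except Token.DoesNotExist:
    pass  # nothing to do, startup continues
END
```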
93017f150e Merge pull request #959 from netbox-community/develop
Version 2.5.0
2023-03-15 15:53:14 +01:00
ac8cb022ae Preparation for 2.5.0 2023-03-15 14:32:09 +01:00
480cabaefe Merge pull request #955 from tobiasge/953-default-admin
Don't create superuser with default credentials
2023-03-15 14:20:07 +01:00
ab7e19df55 Merge pull request #958 from tobiasge/954-use-skopeo
Added check for commands to all scripts
2023-03-15 14:16:39 +01:00
4ce89f9209 Added check for commands to all scripts 2023-03-15 13:02:25 +01:00
3e2bf7ec93 Don't create superuser with default credentials 2023-03-15 12:23:36 +01:00
4bad061bc4 Merge pull request #957 from tobiasge/fix-action-badge
Fixed Github action badge
2023-03-15 12:08:35 +01:00
f9abdf2390 Merge pull request #956 from tobiasge/954-use-skopeo
Replaced curl with Skopeo for in image functions
2023-03-15 12:04:10 +01:00
cc95a67df0 Fixed Github action badge 2023-03-15 10:16:50 +01:00
fb5bacc4b4 Replaced curl with Skopeo for in image functions 2023-03-15 09:32:37 +01:00
ed309a15b4 Merge pull request #948 from tobiasge/lazy-reindex-on-start
Fixes #947: Rebuild search index when needed
2023-03-14 20:46:44 +01:00
5f8a09536c Merge pull request #951 from tobiasge/new-unit
Updated to new unit version
2023-03-01 08:53:24 +01:00
d3a30e1172 Updated to new unit version 2023-03-01 07:39:37 +01:00
e60a746eee Fixes #947: Rebuild search index when needed
This rebuilds the search index when models were updated.
2023-02-23 08:37:53 +01:00
879c700bb8 Merge pull request #939 from timrabl/fix-931
Actually fix #471 and #931
2023-02-01 10:08:59 +01:00
6f70b88972 Squash commits that revert AUTH_LDAP_USER_SEARCH
Re-add the AUTH_LDAP_USER_SEARCH variable that was accidentally removed in #931, and change the behaviour to match the features requested in #471.

Also remove the now-duplicate AUTH_LDAP_USER_SEARCH variable.
2023-02-01 08:18:34 +01:00
7a9aef3791 Merge pull request #931 from timrabl/fix-471
implement extra LDAP user and group filters as requested in #471
2023-01-30 10:17:13 +01:00
3071c500da implement extra LDAP user and group filters as requested in #471 2023-01-30 09:06:22 +01:00
350747c1cb Merge pull request #910 from sc68cal/enforcing_shortname
Prepend docker.io to image URLs
2023-01-28 18:22:19 +01:00
250b1fb093 Merge pull request #933 from tobiasge/fix-gh-warnings
Fix Github action warnings
2023-01-28 18:21:59 +01:00
eef45c8197 Using new GITHUB_OUTPUT method 2023-01-28 15:49:13 +01:00
f549b93b9d Merge pull request #932 from tobiasge/gh-token
Using GITHUB_TOKEN for API
2023-01-28 15:31:53 +01:00
f2b0375d5b Using GITHUB_TOKEN for API 2023-01-28 13:38:28 +01:00
3202fb9446 Merge pull request #929 from christianharendt/create-redis-username
Add redis username parameter
2023-01-28 10:14:00 +01:00
ff373bd60d Update configuration.py 2023-01-27 15:05:17 +01:00
bd07a7a5a2 Add redis username parameter 2023-01-27 15:02:11 +01:00
41d80d66b1 Prepend docker.io to image URLs
This is to make podman happy, since newer versions of podman
have set short-name-mode to enforcing

https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md
2023-01-10 14:10:15 +01:00
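With `short-name-mode` set to enforcing, podman refuses to resolve unqualified image names unless an alias is configured, which is why the compose files now carry the registry prefix. A hedged example of the difference (image tag purely illustrative):

```bash
# Short name: fails (or prompts for a registry) under enforcing short-name mode
podman pull postgres:15-alpine

# Fully qualified reference: unambiguous, works for both podman and docker
podman pull docker.io/library/postgres:15-alpine
```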
015e131d99 Merge pull request #914 from kmanwar89/patch-1
Fix syntax of docker compose commands
2023-01-05 09:47:28 +01:00
ff37e17eeb Merge pull request #913 from tobiasge/start_period_explanation
Added start_period to docker-compose example
2023-01-04 09:27:20 +01:00
1403f52d04 Merge pull request #912 from tobiasge/localhost_allowed_host
Ensure that '*' or 'localhost' is always in ALLOWED_HOSTS
2023-01-03 19:06:03 +01:00
7e0a8fee82 Improved comment 2023-01-03 16:48:00 +01:00
0c1b69ded0 Update docker-compose.override.yml.example
Co-authored-by: Christian Mäder <cimnine@users.noreply.github.com>
2023-01-03 16:45:09 +01:00
06e0815c70 Merge pull request #911 from netbox-community/renovate/python3-saml-1.x
Update dependency python3-saml to v1.15.0
2023-01-03 15:12:12 +01:00
8f2820626c Fix syntax of docker compose commands
Docker Compose's syntax changed as of Compose v2 (source: https://docs.docker.com/compose/reference/). Replaced references to "docker-compose" with "docker compose" to align with this change.
2023-01-03 09:06:46 -05:00
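For example, the quick-start commands in the README (see its diff below) change like this, since Compose v2 ships as a plugin of the docker CLI rather than a standalone binary:

```bash
# Compose v1: standalone "docker-compose" binary
docker-compose pull
docker-compose up

# Compose v2: "compose" is a subcommand of the docker CLI
docker compose pull
docker compose up
```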
b6faad36cb Added start_period to docker-compose example 2023-01-03 14:58:41 +01:00
73f479d5db Ensure that '*' or 'localhost' is always in ALLOWED_HOSTS 2023-01-03 14:41:26 +01:00
89ad7588f0 Update dependency python3-saml to v1.15.0 2022-12-27 23:54:03 +00:00
a4d986011d Merge pull request #906 from netbox-community/renovate/django-storages-1.x
Update dependency django-storages to v1.13.2
2022-12-23 16:54:27 +01:00
f2bb1198dd Update dependency django-storages to v1.13.2 2022-12-23 05:15:28 +00:00
39c7de4af4 Merge pull request #899 from netbox-community/develop
Release 2.4.0
2022-12-15 15:36:27 +01:00
238f95c5ce Preparation for 2.4.0 2022-12-15 12:57:51 +01:00
751a131b78 Merge pull request #889 from netbox-community/listenOnIPv6
Make nginx-unit listen on IPv4 and IPv6
2022-12-06 21:09:02 +01:00
5e2158da24 Merge pull request #866 from ryanmerolle/issue_784
Address housekeeping traceback
2022-12-06 18:12:02 +01:00
4a530947f8 Merge pull request #895 from tobiasge/ldap-cert-settings
Added settings for CA certificates for LDAP
2022-12-01 13:59:22 +01:00
bdb4396275 Added settings for CA certificates for LDAP 2022-12-01 08:17:25 +01:00
80d87bdf1b Merge pull request #894 from tobiasge/psycopg2-changes
Optimize psycopg2 dependency
2022-11-30 16:23:20 +01:00
6d465e6f81 Optimize psycopg2 dependency
We have been installing psycopg2 for a while now. This updates it to the latest version. Because psycopg2-binary is a direct dependency of NetBox, both versions were installed. Now we remove the pre-compiled version from the dependency file.
2022-11-30 14:36:53 +01:00
b72084290a Merge pull request #893 from tobiasge/startup-scripts-complete-removal
Startup scripts complete removal
2022-11-30 13:14:32 +01:00
aa3357817a Disable Gitleaks 2022-11-30 10:54:24 +01:00
9441be459c Improved testing
After the initializer scripts were removed, we didn't test the actual compose setup anymore. This adds new tests to run the database migrations.
2022-11-30 10:54:24 +01:00
1779ba790d Removed warning for initializer scripts 2022-11-30 09:51:36 +01:00
22cb2d5812 Merge pull request #865 from netbox-community/renovate/postgres-15.x
Update postgres Docker tag to v15
2022-11-30 08:50:15 +01:00
6020f4503a Make nginx-unit listen on IPv4 and IPv6 2022-11-23 14:40:03 +01:00
a4f494db14 Update postgres Docker tag to v15 2022-11-11 14:24:44 +00:00
0cac6f51a9 Merge pull request #876 from Wellyas/patch-1
Add requirements for SAML SSO
2022-11-11 14:08:15 +01:00
dd01e3c227 Enable SAML & OPENIDCONNECT for social-auth-core 2022-11-11 13:07:14 +01:00
925f41b97f Merge pull request #883 from movelg/housekeeping_var_name_fix
Housekeeping var name fix
2022-11-11 10:54:13 +01:00
7d871778eb Don't use bash internal variable name 2022-11-11 08:40:29 +01:00
35a94cb7e5 Update docker-compose.yml
Address housekeeping tracebacks & remove whitespace
2022-10-22 09:40:03 -04:00
27 changed files with 374 additions and 308 deletions

View File

@ -1,10 +1,10 @@
.git
.github
.travis.yml
.git*
*.md
env
build*
docker-compose.override.yml
docker-compose*
env
test-configuration
.netbox/.git*
.netbox/.travis.yml
.netbox/contrib
.netbox/scripts
.netbox/upgrade.sh

View File

@ -23,7 +23,7 @@ jobs:
with:
python-version: '3.9'
- name: Lint Code Base
uses: github/super-linter@v4
uses: github/super-linter@v5
env:
DEFAULT_BRANCH: develop
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -31,6 +31,7 @@ jobs:
LINTER_RULES_PATH: /
VALIDATE_ALL_CODEBASE: false
VALIDATE_DOCKERFILE: false
VALIDATE_GITLEAKS: false
FILTER_REGEX_EXCLUDE: (.*/)?(LICENSE|configuration/.*)
EDITORCONFIG_FILE_NAME: .ecrc
DOCKERFILE_HADOLINT_FILE_NAME: .hadolint.yaml
@ -54,6 +55,7 @@ jobs:
fail-fast: false
env:
GH_ACTION: enable
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
IMAGE_NAMES: docker.io/netboxcommunity/netbox
runs-on: ubuntu-latest
name: Builds new NetBox Docker Images
@ -73,6 +75,11 @@ jobs:
env:
BUILDX_PLATFORM: ${{ matrix.platform }}
BUILDX_BUILDER_NAME: ${{ steps.buildx-setup.outputs.name }}
- id: arm-time-limit
name: Set Netbox container start_period higher on ARM64
if: matrix.platform == 'linux/arm64'
run: |
echo "NETBOX_START_PERIOD=240s" >>"${GITHUB_ENV}"
- id: docker-test
name: Test the image
run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh

View File

@ -25,6 +25,7 @@ jobs:
name: Builds new NetBox Docker Images
env:
GH_ACTION: enable
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
IMAGE_NAMES: docker.io/netboxcommunity/netbox quay.io/netboxcommunity/netbox ghcr.io/netbox-community/netbox
steps:
- id: source-checkout
@ -32,7 +33,7 @@ jobs:
uses: actions/checkout@v3
- id: set-netbox-docker-version
name: Get Version of NetBox Docker
run: echo "::set-output name=version::$(cat VERSION)"
run: echo "version=$(cat VERSION)" >>"$GITHUB_OUTPUT"
shell: bash
- id: qemu-setup
name: Set up QEMU

View File

@ -13,6 +13,12 @@ RUN export DEBIAN_FRONTEND=noninteractive \
libpq-dev \
libsasl2-dev \
libssl-dev \
libxml2-dev \
libxmlsec1 \
libxmlsec1-dev \
libxmlsec1-openssl \
libxslt-dev \
pkg-config \
python3-dev \
python3-pip \
python3-venv \
@ -24,7 +30,16 @@ RUN export DEBIAN_FRONTEND=noninteractive \
ARG NETBOX_PATH
COPY ${NETBOX_PATH}/requirements.txt requirements-container.txt /
RUN /opt/netbox/venv/bin/pip install \
RUN \
# We compile 'psycopg2' in the build process
sed -i -e '/psycopg2-binary/d' /requirements.txt && \
# Gunicorn is not needed because we use Nginx Unit
sed -i -e '/gunicorn/d' /requirements.txt && \
# We need 'social-auth-core[all]' in the Docker image. But if we put it in our own requirements-container.txt
# we have potential version conflicts and the build will fail.
# That's why we just replace it in the original requirements.txt.
sed -i -e 's/social-auth-core\[openidconnect\]/social-auth-core\[all\]/g' /requirements.txt && \
/opt/netbox/venv/bin/pip install \
-r /requirements.txt \
-r /requirements-container.txt
@ -46,6 +61,7 @@ RUN export DEBIAN_FRONTEND=noninteractive \
curl \
libldap-common \
libpq5 \
libxmlsec1-openssl \
openssl \
python3 \
python3-distutils \
@ -57,14 +73,16 @@ RUN export DEBIAN_FRONTEND=noninteractive \
&& apt-get update -qq \
&& apt-get install \
--yes -qq --no-install-recommends \
unit=1.27.0-1~jammy \
unit-python3.10=1.27.0-1~jammy \
unit=1.29.1-1~jammy \
unit-python3.10=1.29.1-1~jammy \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /opt/netbox/venv /opt/netbox/venv
ARG NETBOX_PATH
COPY ${NETBOX_PATH} /opt/netbox
# Copy the modified 'requirements*.txt' files, to have the files actually used during installation
COPY --from=builder /requirements.txt /requirements-container.txt /opt/netbox/
COPY docker/configuration.docker.py /opt/netbox/netbox/netbox/configuration.py
COPY docker/ldap_config.docker.py /opt/netbox/netbox/netbox/ldap_config.py
@ -79,13 +97,13 @@ WORKDIR /opt/netbox/netbox
# Must set permissions for '/opt/netbox/netbox/media' directory
# to g+w so that pictures can be uploaded to netbox.
RUN mkdir -p static /opt/unit/state/ /opt/unit/tmp/ \
&& chown -R unit:root media /opt/unit/ \
&& chmod -R g+w media /opt/unit/ \
&& cd /opt/netbox/ && SECRET_KEY="dummy" /opt/netbox/venv/bin/python -m mkdocs build \
&& chown -R unit:root /opt/unit/ media reports scripts \
&& chmod -R g+w /opt/unit/ media reports scripts \
&& cd /opt/netbox/ && SECRET_KEY="dummyKeyWithMinimumLength-------------------------" /opt/netbox/venv/bin/python -m mkdocs build \
--config-file /opt/netbox/mkdocs.yml --site-dir /opt/netbox/netbox/project-static/docs/ \
&& SECRET_KEY="dummy" /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py collectstatic --no-input
&& SECRET_KEY="dummyKeyWithMinimumLength-------------------------" /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py collectstatic --no-input
ENV LANG=C.UTF-8 PATH=/opt/netbox/venv/bin:$PATH
ENV LANG=C.utf8 PATH=/opt/netbox/venv/bin:$PATH
ENTRYPOINT [ "/usr/bin/tini", "--" ]
CMD [ "/opt/netbox/docker-entrypoint.sh", "/opt/netbox/launch-netbox.sh" ]

View File

@ -3,7 +3,7 @@
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/netbox-community/netbox-docker)][github-release]
[![GitHub stars](https://img.shields.io/github/stars/netbox-community/netbox-docker)][github-stargazers]
![GitHub closed pull requests](https://img.shields.io/github/issues-pr-closed-raw/netbox-community/netbox-docker)
![Github release workflow](https://img.shields.io/github/workflow/status/netbox-community/netbox-docker/release)
![Github release workflow](https://img.shields.io/github/actions/workflow/status/netbox-community/netbox-docker/release.yml?branch=release)
![Docker Pulls](https://img.shields.io/docker/pulls/netboxcommunity/netbox)
[![GitHub license](https://img.shields.io/github/license/netbox-community/netbox-docker)][netbox-docker-license]
@ -40,19 +40,21 @@ services:
ports:
- 8000:8080
EOF
docker-compose pull
docker-compose up
docker compose pull
docker compose up
```
The whole application will be available after a few minutes.
Open the URL `http://0.0.0.0:8000/` in a web-browser.
You should see the NetBox homepage.
In the top-right corner you can login.
The default credentials are:
* Username: **admin**
* Password: **admin**
* API Token: **0123456789abcdef0123456789abcdef01234567**
To create the first admin user run this command:
```bash
docker compose exec netbox /opt/netbox/netbox/manage.py createsuperuser
```
If you need to restart Netbox from an empty database often, you can also set the `SUPERUSER_*` variables in your `docker-compose.override.yml` as shown in the example.
[wiki-getting-started]: https://github.com/netbox-community/netbox-docker/wiki/Getting-Started
@ -97,7 +99,7 @@ For each of the above tag, there is an extra tag:
## Documentation
Please refer [to our wiki on GitHub][netbox-docker-wiki] for further information on how to use the NetBox Docker image properly.
The wiki covers advanced topics such as using files for secrets, configuring TLS, deployment to Kubernetes, monitoring and configuring NAPALM and LDAP.
The wiki covers advanced topics such as using files for secrets, configuring TLS, deployment to Kubernetes, monitoring and configuring LDAP.
Our wiki is a community effort.
Feel free to correct errors, update outdated information or provide additional guides and insights.
@ -123,7 +125,7 @@ This project relies only on _Docker_ and _docker-compose_ meeting these requirem
* The _containerd version_ must be at least `1.5.6`.
* The _docker-compose version_ must be at least `1.28.0`.
To check the version installed on your system run `docker --version` and `docker-compose --version`.
To check the version installed on your system run `docker --version` and `docker compose version`.
## Updating

View File

@ -1 +1 @@
2.3.0
2.6.1

View File

@ -0,0 +1,9 @@
#!/bin/bash
NEEDED_COMMANDS="curl jq docker skopeo"
for c in $NEEDED_COMMANDS; do
if ! command -v "$c" &>/dev/null; then
echo "⚠️ '$c' is not installed. Can't proceed with build."
exit 1
fi
done

View File

@ -1,82 +1,18 @@
#!/bin/bash
# Retrieves image configuration from public images in DockerHub
# Functions from https://gist.github.com/cirocosta/17ea17be7ac11594cb0f290b0a3ac0d1
# Optimised for our use case
check_if_tags_exists() {
local image=$1
local tag=$2
skopeo list-tags "docker://$image" | jq -r ".Tags | contains([\"$tag\"])"
}
get_image_label() {
local label=$1
local image=$2
local tag=$3
local token
token=$(_get_token "$image")
local digest
digest=$(_get_digest "$image" "$tag" "$token")
local retval="null"
if [ "$digest" != "null" ]; then
retval=$(_get_image_configuration "$image" "$token" "$digest" "$label")
fi
echo "$retval"
}
get_image_layers() {
local image=$1
local tag=$2
local token
token=$(_get_token "$image")
_get_layers "$image" "$tag" "$token"
skopeo inspect "docker://$image" | jq -r ".Labels[\"$label\"]"
}
get_image_last_layer() {
local image=$1
local tag=$2
local token
token=$(_get_token "$image")
local layers
mapfile -t layers < <(_get_layers "$image" "$tag" "$token")
echo "${layers[-1]}"
}
_get_image_configuration() {
local image=$1
local token=$2
local digest=$3
local label=$4
curl \
--silent \
--location \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/blobs/$digest" |
jq -r ".config.Labels.\"$label\""
}
_get_token() {
local image=$1
curl \
--silent \
"https://auth.docker.io/token?scope=repository:$image:pull&service=registry.docker.io" |
jq -r '.token'
}
_get_digest() {
local image=$1
local tag=$2
local token=$3
curl \
--silent \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/manifests/$tag" |
jq -r '.config.digest'
}
_get_layers() {
local image=$1
local tag=$2
local token=$3
curl \
--silent \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/manifests/$tag" |
jq -r '.layers[].digest'
skopeo inspect "docker://$image" | jq -r ".Layers | last"
}

View File

@ -19,3 +19,14 @@ gh_env() {
echo "${@}" >>"${GITHUB_ENV}"
fi
}
###
# Prints the output to the file defined in ${GITHUB_OUTPUT}.
# Only executes if ${GH_ACTION} is defined.
# Example Usage: gh_env "FOO_VAR=bar_value"
###
gh_out() {
if [ -n "${GH_ACTION}" ]; then
echo "${@}" >>"$GITHUB_OUTPUT"
fi
}

View File

@ -1,26 +1,27 @@
#!/bin/bash
# Builds the latest released version
# Check if we have everything needed for the build
source ./build-functions/check-commands.sh
source ./build-functions/gh-functions.sh
echo "▶️ $0 $*"
###
# Check for the jq library needed for parsing JSON
###
if ! command -v jq; then
echo "⚠️ jq command missing from \$PATH!"
exit 1
fi
CURL_ARGS=(
--silent
)
###
# Checking for the presence of GITHUB_OAUTH_CLIENT_ID
# and GITHUB_OAUTH_CLIENT_SECRET
# Checking for the presence of GITHUB_TOKEN
###
if [ -n "${GITHUB_OAUTH_CLIENT_ID}" ] && [ -n "${GITHUB_OAUTH_CLIENT_SECRET}" ]; then
if [ -n "${GITHUB_TOKEN}" ]; then
echo "🗝 Performing authenticated Github API calls."
GITHUB_OAUTH_PARAMS="client_id=${GITHUB_OAUTH_CLIENT_ID}&client_secret=${GITHUB_OAUTH_CLIENT_SECRET}"
CURL_ARGS+=(
--header "Authorization: Bearer ${GITHUB_TOKEN}"
)
else
echo "🕶 Performing unauthenticated Github API calls. This might result in lower Github rate limits!"
GITHUB_OAUTH_PARAMS=""
fi
###
@ -42,31 +43,27 @@ fi
###
ORIGINAL_GITHUB_REPO="netbox-community/netbox"
GITHUB_REPO="${GITHUB_REPO-$ORIGINAL_GITHUB_REPO}"
URL_RELEASES="https://api.github.com/repos/${GITHUB_REPO}/releases?${GITHUB_OAUTH_PARAMS}"
URL_RELEASES="https://api.github.com/repos/${GITHUB_REPO}/releases"
# Composing the JQ commans to extract the most recent version number
JQ_LATEST="group_by(.prerelease) | .[] | sort_by(.published_at) | reverse | .[0] | select(.prerelease==${PRERELEASE-false}) | .tag_name"
CURL="curl -sS"
CURL="curl"
# Querying the Github API to fetch the most recent version number
VERSION=$($CURL "${URL_RELEASES}" | jq -r "${JQ_LATEST}")
VERSION=$($CURL "${CURL_ARGS[@]}" "${URL_RELEASES}" | jq -r "${JQ_LATEST}" 2>/dev/null)
###
# Check if the prerelease version is actually higher than stable version
###
if [ "${PRERELEASE}" == "true" ]; then
JQ_STABLE="group_by(.prerelease) | .[] | sort_by(.published_at) | reverse | .[0] | select(.prerelease==false) | .tag_name"
STABLE_VERSION=$($CURL "${URL_RELEASES}" | jq -r "${JQ_STABLE}")
STABLE_VERSION=$($CURL "${CURL_ARGS[@]}" "${URL_RELEASES}" | jq -r "${JQ_STABLE}" 2>/dev/null)
# shellcheck disable=SC2003
MAJOR_STABLE=$(expr match "${STABLE_VERSION}" 'v\([0-9]\+\)')
# shellcheck disable=SC2003
MINOR_STABLE=$(expr match "${STABLE_VERSION}" 'v[0-9]\+\.\([0-9]\+\)')
# shellcheck disable=SC2003
MAJOR_UNSTABLE=$(expr match "${VERSION}" 'v\([0-9]\+\)')
# shellcheck disable=SC2003
MINOR_UNSTABLE=$(expr match "${VERSION}" 'v[0-9]\+\.\([0-9]\+\)')
MAJOR_STABLE=$(expr "${STABLE_VERSION}" : 'v\([0-9]\+\)')
MINOR_STABLE=$(expr "${STABLE_VERSION}" : 'v[0-9]\+\.\([0-9]\+\)')
MAJOR_UNSTABLE=$(expr "${VERSION}" : 'v\([0-9]\+\)')
MINOR_UNSTABLE=$(expr "${VERSION}" : 'v[0-9]\+\.\([0-9]\+\)')
if {
[ "${MAJOR_STABLE}" -eq "${MAJOR_UNSTABLE}" ] &&
@ -75,10 +72,7 @@ if [ "${PRERELEASE}" == "true" ]; then
echo "❎ Latest unstable version '${VERSION}' is not higher than the latest stable version '$STABLE_VERSION'."
if [ -z "$DEBUG" ]; then
if [ -n "${GH_ACTION}" ]; then
echo "::set-output name=skipped::true"
fi
gh_out "skipped=true"
exit 0
else
echo "⚠️ Would exit here with code '0', but DEBUG is enabled."

View File

@ -137,6 +137,10 @@ END_OF_HELP
fi
fi
# Check if we have everything needed for the build
source ./build-functions/check-commands.sh
# Load all build functions
source ./build-functions/get-public-image-config.sh
source ./build-functions/gh-functions.sh
IMAGE_NAMES="${IMAGE_NAMES-docker.io/netboxcommunity/netbox}"
@ -170,7 +174,7 @@ if [ "${2}" != "--push-only" ] && [ -z "${SKIP_GIT}" ]; then
REMOTE_EXISTS=$(git ls-remote --heads --tags "${URL}" "${NETBOX_BRANCH}" | wc -l)
if [ "${REMOTE_EXISTS}" == "0" ]; then
echo "❌ Remote branch '${NETBOX_BRANCH}' not found in '${URL}'; Nothing to do"
gh_echo "::set-output name=skipped::true"
gh_out "skipped=true"
exit 0
fi
echo "🌐 Checking out '${NETBOX_BRANCH}' of NetBox from the url '${URL}' into '${NETBOX_PATH}'"
@ -215,7 +219,7 @@ fi
# Determining the value for DOCKER_FROM
###
if [ -z "$DOCKER_FROM" ]; then
DOCKER_FROM="ubuntu:22.04"
DOCKER_FROM="docker.io/ubuntu:22.04"
fi
###
@ -300,39 +304,37 @@ if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
done
fi
FINAL_DOCKER_TAG="${IMAGE_NAME_TAGS[0]}"
gh_env "FINAL_DOCKER_TAG=${IMAGE_NAME_TAGS[0]}"
###
# Checking if the build is necessary,
# meaning build only if one of those values changed:
# - a new tag is beeing created
# - base image digest
# - netbox git ref (Label: netbox.git-ref)
# - netbox-docker git ref (Label: org.opencontainers.image.revision)
###
# Load information from registry (only for docker.io)
# Load information from registry (only for first registry in "IMAGE_NAMES")
SHOULD_BUILD="false"
BUILD_REASON=""
if [ -z "${GH_ACTION}" ]; then
# Asuming non Github builds should always proceed
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} interactive"
elif [[ "${IMAGE_NAME_TAGS[0]}" = docker.io* ]]; then
source ./build-functions/get-public-image-config.sh
IFS=':' read -ra DOCKER_FROM_SPLIT <<<"${DOCKER_FROM}"
if ! [[ ${DOCKER_FROM_SPLIT[0]} =~ .*/.* ]]; then
# Need to use "library/..." for images the have no two part name
DOCKER_FROM_SPLIT[0]="library/${DOCKER_FROM_SPLIT[0]}"
fi
IFS='/' read -ra ORG_REPO <<<"${IMAGE_NAMES[0]}"
echo "Checking labels for '${ORG_REPO[1]}' and '${ORG_REPO[2]}'"
BASE_LAST_LAYER=$(get_image_last_layer "${DOCKER_FROM_SPLIT[0]}" "${DOCKER_FROM_SPLIT[1]}")
mapfile -t IMAGES_LAYERS_OLD < <(get_image_layers "${ORG_REPO[1]}"/"${ORG_REPO[2]}" "${TAG}")
NETBOX_GIT_REF_OLD=$(get_image_label netbox.git-ref "${ORG_REPO[1]}"/"${ORG_REPO[2]}" "${TAG}")
GIT_REF_OLD=$(get_image_label org.opencontainers.image.revision "${ORG_REPO[1]}"/"${ORG_REPO[2]}" "${TAG}")
elif [ "false" == "$(check_if_tags_exists "${IMAGE_NAMES[0]}" "$TARGET_DOCKER_TAG")" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} newtag"
else
echo "Checking labels for '${FINAL_DOCKER_TAG}'"
BASE_LAST_LAYER=$(get_image_last_layer "${DOCKER_FROM}")
OLD_BASE_LAST_LAYER=$(get_image_label netbox.last-base-image-layer "${FINAL_DOCKER_TAG}")
NETBOX_GIT_REF_OLD=$(get_image_label netbox.git-ref "${FINAL_DOCKER_TAG}")
GIT_REF_OLD=$(get_image_label org.opencontainers.image.revision "${FINAL_DOCKER_TAG}")
if ! printf '%s\n' "${IMAGES_LAYERS_OLD[@]}" | grep -q -P "^${BASE_LAST_LAYER}\$"; then
if [ "${BASE_LAST_LAYER}" != "${OLD_BASE_LAST_LAYER}" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} debian"
BUILD_REASON="${BUILD_REASON} ubuntu"
fi
if [ "${NETBOX_GIT_REF}" != "${NETBOX_GIT_REF_OLD}" ]; then
SHOULD_BUILD="true"
@ -342,17 +344,14 @@ elif [[ "${IMAGE_NAME_TAGS[0]}" = docker.io* ]]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} netbox-docker"
fi
else
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} no-check"
fi
if [ "${SHOULD_BUILD}" != "true" ]; then
echo "Build skipped because sources didn't change"
echo "::set-output name=skipped::true"
gh_out "skipped=true"
exit 0 # Nothing to do -> exit
else
gh_echo "::set-output name=skipped::false"
gh_out "skipped=false"
fi
gh_echo "::endgroup::"
@ -393,6 +392,7 @@ fi
if [ -n "${BUILD_REASON}" ]; then
BUILD_REASON=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<"$BUILD_REASON")
DOCKER_BUILD_ARGS+=(--label "netbox.build-reason=${BUILD_REASON}")
DOCKER_BUILD_ARGS+=(--label "netbox.last-base-image-layer=${BASE_LAST_LAYER}")
fi
# --build-arg

View File

@ -58,6 +58,9 @@ _BASE_DIR = dirname(dirname(abspath(__file__)))
#
# Example: ALLOWED_HOSTS = ['netbox.example.com', 'netbox.internal.local']
ALLOWED_HOSTS = environ.get('ALLOWED_HOSTS', '*').split(' ')
# ensure that '*' or 'localhost' is always in ALLOWED_HOSTS (needed for health checks)
if '*' not in ALLOWED_HOSTS and 'localhost' not in ALLOWED_HOSTS:
ALLOWED_HOSTS.append('localhost')
# PostgreSQL database configuration. See the Django documentation for a complete list of available parameters:
# https://docs.djangoproject.com/en/stable/ref/settings/#databases
@ -83,6 +86,7 @@ REDIS = {
'tasks': {
'HOST': environ.get('REDIS_HOST', 'localhost'),
'PORT': _environ_get_and_map('REDIS_PORT', 6379, _AS_INT),
'USERNAME': environ.get('REDIS_USERNAME', ''),
'PASSWORD': _read_secret('redis_password', environ.get('REDIS_PASSWORD', '')),
'DATABASE': _environ_get_and_map('REDIS_DATABASE', 0, _AS_INT),
'SSL': _environ_get_and_map('REDIS_SSL', 'False', _AS_BOOL),
@ -91,6 +95,7 @@ REDIS = {
'caching': {
'HOST': environ.get('REDIS_CACHE_HOST', environ.get('REDIS_HOST', 'localhost')),
'PORT': _environ_get_and_map('REDIS_CACHE_PORT', environ.get('REDIS_PORT', '6379'), _AS_INT),
'USERNAME': environ.get('REDIS_CACHE_USERNAME', environ.get('REDIS_USERNAME', '')),
'PASSWORD': _read_secret('redis_cache_password', environ.get('REDIS_CACHE_PASSWORD', environ.get('REDIS_PASSWORD', ''))),
'DATABASE': _environ_get_and_map('REDIS_CACHE_DATABASE', '1', _AS_INT),
'SSL': _environ_get_and_map('REDIS_CACHE_SSL', environ.get('REDIS_SSL', 'False'), _AS_BOOL),
@ -131,10 +136,6 @@ if 'BANNER_BOTTOM' in environ:
if 'BANNER_LOGIN' in environ:
BANNER_LOGIN = environ.get('BANNER_LOGIN', None)
# Base URL path if accessing NetBox within a directory. For example, if installed at http://example.com/netbox/, set:
# BASE_PATH = 'netbox/'
BASE_PATH = environ.get('BASE_PATH', '')
# Maximum number of days to retain logged changes. Set to 0 to retain changes indefinitely. (Default: 90)
if 'CHANGELOG_RETENTION' in environ:
CHANGELOG_RETENTION = _environ_get_and_map('CHANGELOG_RETENTION', None, _AS_INT)
@ -234,20 +235,6 @@ MEDIA_ROOT = environ.get('MEDIA_ROOT', join(_BASE_DIR, 'media'))
# Expose Prometheus monitoring metrics at the HTTP endpoint '/metrics'
METRICS_ENABLED = _environ_get_and_map('METRICS_ENABLED', 'False', _AS_BOOL)
# Credentials that NetBox will uses to authenticate to devices when connecting via NAPALM.
if 'NAPALM_USERNAME' in environ:
NAPALM_USERNAME = environ.get('NAPALM_USERNAME', None)
if 'NAPALM_PASSWORD' in environ:
NAPALM_PASSWORD = _read_secret('napalm_password', environ.get('NAPALM_PASSWORD', None))
# NAPALM timeout (in seconds). (Default: 30)
if 'NAPALM_TIMEOUT' in environ:
NAPALM_TIMEOUT = _environ_get_and_map('NAPALM_TIMEOUT', None, _AS_INT)
# # NAPALM optional arguments (see http://napalm.readthedocs.io/en/latest/support/#optional-arguments). Arguments must
# # be provided as a dictionary.
# NAPALM_ARGS = None
# Determine how many objects to display per page within a list. (Default: 50)
if 'PAGINATE_COUNT' in environ:
PAGINATE_COUNT = _environ_get_and_map('PAGINATE_COUNT', None, _AS_INT)
@ -296,17 +283,9 @@ REMOTE_AUTH_DEFAULT_GROUPS = _environ_get_and_map('REMOTE_AUTH_DEFAULT_GROUPS',
RELEASE_CHECK_URL = environ.get('RELEASE_CHECK_URL', None)
# RELEASE_CHECK_URL = 'https://api.github.com/repos/netbox-community/netbox/releases'
# The file path where custom reports will be stored. A trailing slash is not needed. Note that the default value of
# this setting is derived from the installed location.
REPORTS_ROOT = environ.get('REPORTS_ROOT', '/etc/netbox/reports')
# Maximum execution time for background tasks, in seconds.
RQ_DEFAULT_TIMEOUT = _environ_get_and_map('RQ_DEFAULT_TIMEOUT', 300, _AS_INT)
# The file path where custom scripts will be stored. A trailing slash is not needed. Note that the default value of
# this setting is derived from the installed location.
SCRIPTS_ROOT = environ.get('SCRIPTS_ROOT', '/etc/netbox/scripts')
# The name to use for the csrf token cookie.
CSRF_COOKIE_NAME = environ.get('CSRF_COOKIE_NAME', 'csrftoken')

View File

@ -15,12 +15,6 @@
# 'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc', 'xmpp',
# )
## NAPALM optional arguments (see http://napalm.readthedocs.io/en/latest/support/#optional-arguments). Arguments must
## be provided as a dictionary.
# NAPALM_ARGS = {}
## Enable installed plugins. Add the name of each plugin to the list.
# from netbox.configuration.configuration import PLUGINS
# PLUGINS.append('my_plugin')

View File

@ -49,20 +49,38 @@ AUTH_LDAP_START_TLS = environ.get('AUTH_LDAP_START_TLS', 'False').lower() == 'tr
# ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
LDAP_IGNORE_CERT_ERRORS = environ.get('LDAP_IGNORE_CERT_ERRORS', 'False').lower() == 'true'
# Include this setting if you want to validate the LDAP server certificates against a CA certificate directory on your server
# Note that this is a NetBox-specific setting which sets:
# ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, LDAP_CA_CERT_DIR)
LDAP_CA_CERT_DIR = environ.get('LDAP_CA_CERT_DIR', None)
# Include this setting if you want to validate the LDAP server certificates against your own CA.
# Note that this is a NetBox-specific setting which sets:
# ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, LDAP_CA_CERT_FILE)
LDAP_CA_CERT_FILE = environ.get('LDAP_CA_CERT_FILE', None)
AUTH_LDAP_USER_SEARCH_BASEDN = environ.get('AUTH_LDAP_USER_SEARCH_BASEDN', '')
AUTH_LDAP_USER_SEARCH_ATTR = environ.get('AUTH_LDAP_USER_SEARCH_ATTR', 'sAMAccountName')
AUTH_LDAP_USER_SEARCH_FILTER: str = environ.get(
'AUTH_LDAP_USER_SEARCH_FILTER', f'({AUTH_LDAP_USER_SEARCH_ATTR}=%(user)s)'
)
AUTH_LDAP_USER_SEARCH = LDAPSearch(
AUTH_LDAP_USER_SEARCH_BASEDN,
ldap.SCOPE_SUBTREE,
"(" + AUTH_LDAP_USER_SEARCH_ATTR + "=%(user)s)"
AUTH_LDAP_USER_SEARCH_BASEDN, ldap.SCOPE_SUBTREE, AUTH_LDAP_USER_SEARCH_FILTER
)
# This search ought to return all groups to which the user belongs. django_auth_ldap uses this to determine group
# heirarchy.
AUTH_LDAP_GROUP_SEARCH_BASEDN = environ.get('AUTH_LDAP_GROUP_SEARCH_BASEDN', '')
AUTH_LDAP_GROUP_SEARCH_CLASS = environ.get('AUTH_LDAP_GROUP_SEARCH_CLASS', 'group')
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(AUTH_LDAP_GROUP_SEARCH_BASEDN, ldap.SCOPE_SUBTREE,
"(objectClass=" + AUTH_LDAP_GROUP_SEARCH_CLASS + ")")
AUTH_LDAP_GROUP_SEARCH_FILTER: str = environ.get(
'AUTH_LDAP_GROUP_SEARCH_FILTER', f'(objectclass={AUTH_LDAP_GROUP_SEARCH_CLASS})'
)
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
AUTH_LDAP_GROUP_SEARCH_BASEDN, ldap.SCOPE_SUBTREE, AUTH_LDAP_GROUP_SEARCH_FILTER
)
AUTH_LDAP_GROUP_TYPE = _import_group_type(environ.get('AUTH_LDAP_GROUP_TYPE', 'GroupOfNamesType'))
# Define a group required to login.

View File

@ -2,4 +2,22 @@ version: '3.4'
services:
netbox:
ports:
- 8000:8080
- "8000:8080"
# If you want the Nginx unit status page visible from the
# outside of the container add the following port mapping:
# - "8001:8081"
# healthcheck:
# Time for which the health check can fail after the container is started.
# This depends mostly on the performance of your database. On the first start,
# when all tables need to be created the start_period should be higher than on
# subsequent starts. For the first start after major version upgrades of NetBox
# the start_period might also need to be set higher.
# Default value in our docker-compose.yml is 60s
# start_period: 90s
# environment:
# SKIP_SUPERUSER: "false"
# SUPERUSER_API_TOKEN: ""
# SUPERUSER_EMAIL: ""
# SUPERUSER_NAME: ""
# SUPERUSER_PASSWORD: ""

View File

@ -0,0 +1,6 @@
version: '3.4'
services:
netbox:
ports:
- "127.0.0.1:8000:8080"

View File

@ -1,37 +1,65 @@
version: '3.4'
services:
netbox:
netbox: &netbox
image: ${IMAGE-netboxcommunity/netbox:latest}
depends_on:
- postgres
- redis
- redis-cache
postgres:
condition: service_healthy
redis:
condition: service_healthy
redis-cache:
condition: service_healthy
env_file: env/netbox.env
environment:
SKIP_STARTUP_SCRIPTS: ${SKIP_STARTUP_SCRIPTS-false}
user: 'unit:root'
volumes:
- ./configuration:/etc/netbox/config:z,ro
- ./test-configuration/logging.py:/etc/netbox/config/logging.py:z,ro
- ./reports:/etc/netbox/reports:z,ro
- ./scripts:/etc/netbox/scripts:z,ro
- netbox-media-files:/opt/netbox/netbox/media:z
healthcheck:
start_period: ${NETBOX_START_PERIOD-120s}
timeout: 3s
interval: 15s
test: "curl -f http://localhost:8080/api/ || exit 1"
netbox-worker:
<<: *netbox
command:
- /opt/netbox/venv/bin/python
- /opt/netbox/netbox/manage.py
- rqworker
healthcheck:
start_period: 40s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q rqworker || exit 1"
netbox-housekeeping:
<<: *netbox
command:
- /opt/netbox/housekeeping.sh
healthcheck:
start_period: 40s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q housekeeping || exit 1"
postgres:
image: postgres:14-alpine
image: postgres:15-alpine
env_file: env/postgres.env
redis:
healthcheck:
test: "pg_isready -t 2 -d $$POSTGRES_DB -U $$POSTGRES_USER" ## $$ because of docker-compose
interval: 10s
timeout: 5s
retries: 5
redis: &redis
image: redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
- redis-server --appendonly yes --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
env_file: env/redis.env
healthcheck:
start_period: 20s
timeout: 3s
interval: 15s
test: "timeout 2 redis-cli ping"
redis-cache:
image: redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
- redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
<<: *redis
env_file: env/redis-cache.env
volumes:
netbox-media-files:

View File

@ -1,46 +1,60 @@
version: '3.4'
services:
netbox: &netbox
image: netboxcommunity/netbox:${VERSION-v3.3-2.3.0}
image: docker.io/netboxcommunity/netbox:${VERSION-v3.5-2.6.1}
depends_on:
- postgres
- redis
- redis-cache
- netbox-worker
env_file: env/netbox.env
user: 'unit:root'
healthcheck:
start_period: 60s
timeout: 3s
interval: 15s
test: "curl -f http://localhost:8080/api/ || exit 1"
volumes:
- ./configuration:/etc/netbox/config:z,ro
- ./reports:/etc/netbox/reports:z,ro
- ./scripts:/etc/netbox/scripts:z,ro
- netbox-media-files:/opt/netbox/netbox/media:z
- netbox-media-files:/opt/netbox/netbox/media:z,rw
- netbox-reports-files:/opt/netbox/netbox/reports:z,rw
- netbox-scripts-files:/opt/netbox/netbox/scripts:z,rw
netbox-worker:
<<: *netbox
depends_on:
- redis
- postgres
netbox:
condition: service_healthy
command:
- /opt/netbox/venv/bin/python
- /opt/netbox/netbox/manage.py
- rqworker
healthcheck:
start_period: 20s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q rqworker || exit 1"
netbox-housekeeping:
<<: *netbox
depends_on:
- redis
- postgres
netbox:
condition: service_healthy
command:
- /opt/netbox/housekeeping.sh
healthcheck:
start_period: 20s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q housekeeping || exit 1"
# postgres
postgres:
image: postgres:14-alpine
image: docker.io/postgres:15-alpine
env_file: env/postgres.env
volumes:
- netbox-postgres-data:/var/lib/postgresql/data
# redis
redis:
image: redis:7-alpine
image: docker.io/redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
@ -49,21 +63,25 @@ services:
volumes:
- netbox-redis-data:/data
redis-cache:
image: redis:7-alpine
image: docker.io/redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
- redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
env_file: env/redis-cache.env
volumes:
- netbox-redis-cache-data:/data
- netbox-redis-cache-data:/data
volumes:
netbox-media-files:
driver: local
netbox-postgres-data:
driver: local
netbox-redis-cache-data:
driver: local
netbox-redis-data:
driver: local
netbox-redis-cache-data:
driver: local
netbox-reports-files:
driver: local
netbox-scripts-files:
driver: local

View File

@ -46,6 +46,8 @@ if ! ./manage.py migrate --check >/dev/null 2>&1; then
./manage.py remove_stale_contenttypes --no-input
echo "⚙️ Removing expired user sessions"
./manage.py clearsessions
echo "⚙️ Building search index (lazy)"
./manage.py reindex --lazy
fi
# Create Superuser if required
@ -80,14 +82,15 @@ END
echo "💡 Superuser Username: ${SUPERUSER_NAME}, E-Mail: ${SUPERUSER_EMAIL}"
fi
# Print warning if startup scripts (and initializers) would've been run # Remove for next release
if [ "$SKIP_STARTUP_SCRIPTS" == "true" ]; then
# Nothing to do
echo "" # Empty block not allowed
else
echo "⚠️⚠️⚠️ WARNING: The initializers have been moved to a plugin. See release notes."
echo "⚠️⚠️⚠️ Set environment variable 'SKIP_STARTUP_SCRIPTS' to 'true' to remove this warning."
fi
./manage.py shell --interface python <<END
from users.models import Token
try:
old_default_token = Token.objects.get(key="0123456789abcdef0123456789abcdef01234567")
if old_default_token:
print("⚠️ Warning: You have the old default admin token in your database. This token is widely known; please remove it.")
except Token.DoesNotExist:
pass
END
echo "✅ Initialisation is done."

View File

@ -1,8 +1,8 @@
#!/bin/bash
SECONDS=${HOUSEKEEPING_INTERVAL:=86400}
echo "Interval set to ${SECONDS} seconds"
SLEEP_SECONDS=${HOUSEKEEPING_INTERVAL:=86400}
echo "Interval set to ${SLEEP_SECONDS} seconds"
while true; do
date
/opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py housekeeping
sleep "${SECONDS}s"
sleep "${SLEEP_SECONDS}s"
done
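The rename in the `housekeeping.sh` diff above matters because `SECONDS` is a built-in bash variable that counts elapsed seconds (restarting from whatever value is assigned to it), so reading it back after the housekeeping run no longer yields the configured interval. A minimal sketch of the pitfall:

```bash
#!/bin/bash
SECONDS=${HOUSEKEEPING_INTERVAL:=86400}  # bash keeps incrementing this every second
sleep 5                                  # stand-in for the time the housekeeping run takes
echo "${SECONDS}"                        # prints 86405 (or more), not 86400
```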

View File

@ -1,6 +1,7 @@
#!/bin/bash
UNIT_CONFIG="${UNIT_CONFIG-/etc/unit/nginx-unit.json}"
# Also used in "nginx-unit.json"
UNIT_SOCKET="/opt/unit/unit.sock"
load_configuration() {

View File

@ -1,27 +1,45 @@
{
"listeners": {
"*:8080": {
"pass": "routes"
"0.0.0.0:8080": {
"pass": "routes/main"
},
"[::]:8080": {
"pass": "routes/main"
},
"0.0.0.0:8081": {
"pass": "routes/status"
},
"[::]:8081": {
"pass": "routes/status"
}
},
"routes": [
{
"match": {
"uri": "/static/*"
"routes": {
"main": [
{
"match": {
"uri": "/static/*"
},
"action": {
"share": "/opt/netbox/netbox${uri}"
}
},
"action": {
"share": "/opt/netbox/netbox${uri}"
{
"action": {
"pass": "applications/netbox"
}
}
},
{
"action": {
"pass": "applications/netbox"
],
"status": [
{
"match": {
"uri": "/status/*"
},
"action": {
"proxy": "http://unix:/opt/unit/unit.sock"
}
}
}
],
]
},
"applications": {
"netbox": {
"type": "python 3",
@ -35,6 +53,5 @@
}
}
},
"access_log": "/dev/stdout"
}

env/netbox.env (vendored): 8 changed lines
View File

@ -29,10 +29,6 @@ REDIS_INSECURE_SKIP_TLS_VERIFY=false
REDIS_PASSWORD=H733Kdjndks81
REDIS_SSL=false
RELEASE_CHECK_URL=https://api.github.com/repos/netbox-community/netbox/releases
SECRET_KEY=r8OwDznj!!dci#P9ghmRfdu1Ysxm0AiPeDCQhKE+N_rClfWNj
SKIP_SUPERUSER=false
SUPERUSER_API_TOKEN=0123456789abcdef0123456789abcdef01234567
SUPERUSER_EMAIL=admin@example.com
SUPERUSER_NAME=admin
SUPERUSER_PASSWORD=admin
SECRET_KEY=r(m)9nLGnz$(_q3N4z1k(EFsMCjjjzx08x9VhNVcfd%6RF#r!6DE@+V5Zk2X
SKIP_SUPERUSER=true
WEBHOOKS_ENABLED=true

View File

@ -1,46 +0,0 @@
from dcim.choices import DeviceStatusChoices
from dcim.models import ConsolePort, Device, PowerPort
from extras.reports import Report
class DeviceConnectionsReport(Report):
description = "Validate the minimum physical connections for each device"
def test_console_connection(self):
# Check that every console port for every active device has a connection defined.
active = DeviceStatusChoices.STATUS_ACTIVE
for console_port in ConsolePort.objects.prefetch_related('device').filter(device__status=active):
if console_port.connected_endpoint is None:
self.log_failure(
console_port.device,
"No console connection defined for {}".format(console_port.name)
)
elif not console_port.connection_status:
self.log_warning(
console_port.device,
"Console connection for {} marked as planned".format(console_port.name)
)
else:
self.log_success(console_port.device)
def test_power_connections(self):
# Check that every active device has at least two connected power supplies.
for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE):
connected_ports = 0
for power_port in PowerPort.objects.filter(device=device):
if power_port.connected_endpoint is not None:
connected_ports += 1
if not power_port.connection_status:
self.log_warning(
device,
"Power connection for {} marked as planned".format(power_port.name)
)
if connected_ports < 2:
self.log_failure(
device,
"{} connected power supplies found (2 needed)".format(connected_ports)
)
else:
self.log_success(device)

View File

@ -1,5 +1,4 @@
django-auth-ldap==4.1.0
django-storages[azure,boto3,dropbox,google,libcloud,sftp]==1.13.1
napalm==4.0.0
psycopg2==2.9.4
social-auth-core[openidconnect]==4.3.0
django-auth-ldap==4.3.0
django-storages[azure,boto3,dropbox,google,libcloud,sftp]==1.13.2
psycopg2==2.9.6
python3-saml==1.15.0

View File

test.sh: 61 changed lines
View File

@ -14,6 +14,8 @@
# exit when a command exits with an exit code != 0
set -e
source ./build-functions/gh-functions.sh
# IMAGE is used by `docker-compose.yml` do determine the tag
# of the Docker Image that is to be used
if [ "${1}x" != "x" ]; then
@ -35,20 +37,72 @@ if [ -z "${IMAGE}" ]; then
fi
# The docker compose command to use
doco="docker-compose --file docker-compose.test.yml --project-name netbox_docker_test_${1}"
doco="docker compose --file docker-compose.test.yml --file docker-compose.test.override.yml --project-name netbox_docker_test"
test_setup() {
gh_echo "::group:: Test setup"
echo "🏗 Setup up test environment"
$doco up --detach --quiet-pull --wait --force-recreate --renew-anon-volumes --no-start
$doco start postgres
$doco start redis
$doco start redis-cache
gh_echo "::endgroup::"
}
test_netbox_unit_tests() {
gh_echo "::group:: Netbox unit tests"
echo "⏱ Running NetBox Unit Tests"
$doco run --rm netbox /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py test
gh_echo "::endgroup::"
}
test_compose_db_setup() {
gh_echo "::group:: Netbox DB migrations"
echo "⏱ Running NetBox DB migrations"
$doco run --rm netbox /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py migrate
gh_echo "::endgroup::"
}
test_netbox_start() {
gh_echo "::group:: Start Netbox service"
echo "⏱ Starting NetBox services"
$doco up --detach --wait
gh_echo "::endgroup::"
}
test_netbox_web() {
gh_echo "::group:: Web service test"
echo "⏱ Starting web service test"
RESP_CODE=$(
curl \
--silent \
--output /dev/null \
--write-out '%{http_code}' \
--request GET \
--connect-timeout 5 \
--max-time 10 \
--retry 5 \
--retry-delay 0 \
--retry-max-time 40 \
http://127.0.0.1:8000/
)
if [ "$RESP_CODE" == "200" ]; then
echo "Webservice running"
else
echo "⚠️ Got response code '$RESP_CODE' but expected '200'"
exit 1
fi
gh_echo "::endgroup::"
}
test_cleanup() {
echo "💣 Cleaning Up"
$doco down -v
gh_echo "::group:: Docker compose logs"
$doco logs --no-color
gh_echo "::endgroup::"
gh_echo "::group:: Docker compose down"
$doco down --volumes
gh_echo "::endgroup::"
}
echo "🐳🐳🐳 Start testing '${IMAGE}'"
@ -58,5 +112,8 @@ trap test_cleanup EXIT ERR
test_setup
test_netbox_unit_tests
test_compose_db_setup
test_netbox_start
test_netbox_web
echo "🐳🐳🐳 Done testing '${IMAGE}'"