Compare commits

...

78 Commits
2.3.0 ... 2.5.2

Author SHA1 Message Date
7bf9e1af5a Merge pull request #978 from netbox-community/develop
Missing version tags for 2.5.2
2023-03-29 20:50:29 +02:00
1e588431e2 Merge pull request #976 from tobiasge/better-tests
Further improved test configuration
2023-03-29 18:33:23 +02:00
41fd4e5d67 Further improved test configuration 2023-03-29 17:08:21 +02:00
17f1bb0af0 Preparation for 2.5.2 2023-03-29 12:41:34 +02:00
9cc58918ab Merge pull request #975 from netbox-community/develop
Version 2.5.2
2023-03-29 12:03:15 +02:00
831867499b Merge branch 'release' into develop 2023-03-29 10:36:28 +02:00
d5dde45bec Merge pull request #973 from tobiasge/social-auth-update
Use same version as Netbox for social-auth-core
2023-03-29 10:25:58 +02:00
6576c18a9c Merge pull request #972 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.2.0
2023-03-29 09:34:54 +02:00
efd6e6a3c2 Use same version as Netbox for social-auth-core 2023-03-29 08:58:49 +02:00
47a7eee16a Update dependency django-auth-ldap to v4.2.0 2023-03-28 10:20:51 +00:00
5eac65b8f6 Merge pull request #968 from ryanmerolle/patch-1
Update docker-compose.yml
2023-03-20 15:11:35 +01:00
2ba441124e Update docker-compose.yml 2023-03-20 08:21:17 -04:00
f2d070fc49 Added more tests (#965) 2023-03-16 21:44:08 +01:00
97ee353b00 Merge pull request #963 from netbox-community/develop
Version 2.5.1
2023-03-16 11:13:13 +01:00
c001b88a81 Merge pull request #964 from tobiasge/better-base-image-check
Simplified base image check
2023-03-16 09:46:35 +01:00
b131b07af8 Simplified base image check 2023-03-16 07:50:24 +01:00
311629ade4 Preparation for 2.5.1 2023-03-16 07:39:38 +01:00
256f23b4ad Merge pull request #961 from MarcHagen/patch-1
Catch DoesNotExist preventing startup
2023-03-16 07:37:05 +01:00
29e37a31d7 Catch DoesNotExist preventing startup
Fixes failing startup because of python error:

```
Traceback (most recent call last):
  File "/opt/netbox/netbox/./manage.py", line 10, in <module>
    execute_from_command_line(sys.argv)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/__init__.py", line 446, in execute_from_command_line
    utility.execute()
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/__init__.py", line 440, in execute
    self.fetch_command(subcommand).run_from_argv(self.argv)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/base.py", line 402, in run_from_argv
    self.execute(*args, **cmd_options)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/base.py", line 448, in execute
    output = self.handle(*args, **options)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/commands/shell.py", line 127, in handle
    exec(sys.stdin.read(), globals())
  File "<string>", line 2, in <module>
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
    return getattr(self.get_queryset(), name)(*args, **kwargs)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/query.py", line 650, in get
    raise self.model.DoesNotExist(
users.models.Token.DoesNotExist: Token matching query does not exist.
```
2023-03-15 23:04:04 +01:00
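The fix in #961 wraps the token lookup in a try/except so a missing token no longer aborts the startup. The current form of that guard, as it appears in the docker-entrypoint.sh diff further down this page, is roughly:

```bash
./manage.py shell --interface python <<END
from users.models import Token
try:
    old_default_token = Token.objects.get(key="0123456789abcdef0123456789abcdef01234567")
    if old_default_token:
        print("⚠️ Warning: You have the old default admin token in your database. This token is widely known; please remove it.")
except Token.DoesNotExist:
    pass
END
```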
93017f150e Merge pull request #959 from netbox-community/develop
Version 2.5.0
2023-03-15 15:53:14 +01:00
ac8cb022ae Preparation for 2.5.0 2023-03-15 14:32:09 +01:00
480cabaefe Merge pull request #955 from tobiasge/953-default-admin
Don't create superuser with default credentials
2023-03-15 14:20:07 +01:00
ab7e19df55 Merge pull request #958 from tobiasge/954-use-skopeo
Added check for commands to all scripts
2023-03-15 14:16:39 +01:00
4ce89f9209 Added check for commands to all scripts 2023-03-15 13:02:25 +01:00
3e2bf7ec93 Don't create superuser with default credentials 2023-03-15 12:23:36 +01:00
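With the default superuser gone, the first admin account has to be created manually; the README diff below documents the command:

```bash
docker compose exec netbox /opt/netbox/netbox/manage.py createsuperuser
```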
4bad061bc4 Merge pull request #957 from tobiasge/fix-action-badge
Fixed Github action badge
2023-03-15 12:08:35 +01:00
f9abdf2390 Merge pull request #956 from tobiasge/954-use-skopeo
Replaced curl with Skopeo for in-image functions
2023-03-15 12:04:10 +01:00
cc95a67df0 Fixed Github action badge 2023-03-15 10:16:50 +01:00
fb5bacc4b4 Replaced curl with Skopeo for in-image functions 2023-03-15 09:32:37 +01:00
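The rewritten helpers in build-functions/get-public-image-config.sh (diff further down) let skopeo and jq do the registry work instead of hand-rolled curl calls against the DockerHub API, e.g.:

```bash
get_image_label() {
  local label=$1
  local image=$2
  skopeo inspect "docker://$image" | jq -r ".Labels[\"$label\"]"
}
```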
ed309a15b4 Merge pull request #948 from tobiasge/lazy-reindex-on-start
Fixes #947: Rebuild search index when needed
2023-03-14 20:46:44 +01:00
5f8a09536c Merge pull request #951 from tobiasge/new-unit
Updated to new unit version
2023-03-01 08:53:24 +01:00
d3a30e1172 Updated to new unit version 2023-03-01 07:39:37 +01:00
e60a746eee Fixes #947: Rebuild search index when needed
This rebuilds the search index when models were updated.
2023-02-23 08:37:53 +01:00
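The entrypoint now triggers the rebuild only when migrations were actually applied; the relevant addition from the docker-entrypoint.sh diff below:

```bash
echo "⚙️ Building search index (lazy)"
./manage.py reindex --lazy
```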
879c700bb8 Merge pull request #939 from timrabl/fix-931
Actually fix #471 and #931
2023-02-01 10:08:59 +01:00
6f70b88972 squash commits that revert AUTH_LDAP_USER_SEARCH
add missing AUTH_LDAP_USER_SEARCH, removed while modifying....

revert the AUTH_LDAP_USER_SEARCH variable that was accidentally removed in #931 and adjust the behaviour to match the features requested in #471

remove duplicate AUTH_LDAP_USER_SEARCH variable now and fix this finally, hopefully
2023-02-01 08:18:34 +01:00
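The resulting ldap_config.py (see its diff further down) keeps the old attribute/class variables as defaults and adds two overridable filters. Hypothetical env/netbox.env values to illustrate the override:

```bash
# Hypothetical example values; by default the filters are derived from
# AUTH_LDAP_USER_SEARCH_ATTR and AUTH_LDAP_GROUP_SEARCH_CLASS
AUTH_LDAP_USER_SEARCH_FILTER=(|(sAMAccountName=%(user)s)(mail=%(user)s))
AUTH_LDAP_GROUP_SEARCH_FILTER=(objectclass=group)
```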
7a9aef3791 Merge pull request #931 from timrabl/fix-471
implement extra LDAP user and group filters as requested in #471
2023-01-30 10:17:13 +01:00
3071c500da implement extra LDAP user and group filters as requested in #471 2023-01-30 09:06:22 +01:00
350747c1cb Merge pull request #910 from sc68cal/enforcing_shortname
Prepend docker.io to image URLs
2023-01-28 18:22:19 +01:00
250b1fb093 Merge pull request #933 from tobiasge/fix-gh-warnings
Fix Github action warnings
2023-01-28 18:21:59 +01:00
eef45c8197 Using new GITHUB_OUTPUT method 2023-01-28 15:49:13 +01:00
f549b93b9d Merge pull request #932 from tobiasge/gh-token
Using GITHUB_TOKEN for API
2023-01-28 15:31:53 +01:00
f2b0375d5b Using GITHUB_TOKEN for API 2023-01-28 13:38:28 +01:00
3202fb9446 Merge pull request #929 from christianharendt/create-redis-username
Add redis username parameter
2023-01-28 10:14:00 +01:00
ff373bd60d Update configuration.py 2023-01-27 15:05:17 +01:00
bd07a7a5a2 Add redis username parameter 2023-01-27 15:02:11 +01:00
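The new keys are read from the environment in configuration.py (diff below); hypothetical env/netbox.env entries:

```bash
# Hypothetical values; both default to an empty username
REDIS_USERNAME=netbox
REDIS_CACHE_USERNAME=netbox
```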
41d80d66b1 Prepend docker.io to image URLs
This is to make podman happy, since newer versions of podman
have set short-name-mode to enforcing

https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md
2023-01-10 14:10:15 +01:00
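With this change every image reference in the compose files carries its registry, as in this condensed excerpt from the docker-compose.yml diff below:

```yaml
services:
  netbox:
    image: docker.io/netboxcommunity/netbox:${VERSION-v3.4-2.5.2}
  postgres:
    image: docker.io/postgres:15-alpine
  redis:
    image: docker.io/redis:7-alpine
```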
015e131d99 Merge pull request #914 from kmanwar89/patch-1
Fix syntax of docker compose commands
2023-01-05 09:47:28 +01:00
ff37e17eeb Merge pull request #913 from tobiasge/start_period_explanation
Added start_period to docker-compose example
2023-01-04 09:27:20 +01:00
1403f52d04 Merge pull request #912 from tobiasge/localhost_allowed_host
Ensure that '*' or 'localhost' is always in ALLOWED_HOSTS
2023-01-03 19:06:03 +01:00
7e0a8fee82 Improved comment 2023-01-03 16:48:00 +01:00
0c1b69ded0 Update docker-compose.override.yml.example
Co-authored-by: Christian Mäder <cimnine@users.noreply.github.com>
2023-01-03 16:45:09 +01:00
06e0815c70 Merge pull request #911 from netbox-community/renovate/python3-saml-1.x
Update dependency python3-saml to v1.15.0
2023-01-03 15:12:12 +01:00
8f2820626c Fix syntax of docker compose commands
Docker Compose's syntax changed as of Compose v2 (source: https://docs.docker.com/compose/reference/). Replaced references to "docker-compose" with "docker compose" to align with this change.
2023-01-03 09:06:46 -05:00
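In the README this turns the quick-start commands into (see the README diff below):

```bash
docker compose pull
docker compose up
```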
b6faad36cb Added start_period to docker-compose example 2023-01-03 14:58:41 +01:00
73f479d5db Ensure that '*' or 'localhost' is always in ALLOWED_HOSTS 2023-01-03 14:41:26 +01:00
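The corresponding change in configuration.py (shown in its diff further down) appends localhost whenever neither it nor the wildcard is configured:

```python
from os import environ  # already imported at the top of configuration.py

ALLOWED_HOSTS = environ.get('ALLOWED_HOSTS', '*').split(' ')
# ensure that '*' or 'localhost' is always in ALLOWED_HOSTS (needed for health checks)
if '*' not in ALLOWED_HOSTS and 'localhost' not in ALLOWED_HOSTS:
    ALLOWED_HOSTS.append('localhost')
```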
89ad7588f0 Update dependency python3-saml to v1.15.0 2022-12-27 23:54:03 +00:00
a4d986011d Merge pull request #906 from netbox-community/renovate/django-storages-1.x
Update dependency django-storages to v1.13.2
2022-12-23 16:54:27 +01:00
f2bb1198dd Update dependency django-storages to v1.13.2 2022-12-23 05:15:28 +00:00
39c7de4af4 Merge pull request #899 from netbox-community/develop
Release 2.4.0
2022-12-15 15:36:27 +01:00
238f95c5ce Preparation for 2.4.0 2022-12-15 12:57:51 +01:00
751a131b78 Merge pull request #889 from netbox-community/listenOnIPv6
Make nginx-unit listen on IPv4 and IPv6
2022-12-06 21:09:02 +01:00
5e2158da24 Merge pull request #866 from ryanmerolle/issue_784
Address housekeeping traceback
2022-12-06 18:12:02 +01:00
4a530947f8 Merge pull request #895 from tobiasge/ldap-cert-settings
Added settings for CA certificates for LDAP
2022-12-01 13:59:22 +01:00
bdb4396275 Added settings for CA certificates for LDAP 2022-12-01 08:17:25 +01:00
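The two new settings are plain environment variables (see the ldap_config.py diff below); hypothetical values for env/netbox.env:

```bash
# Hypothetical paths; both settings are unset by default
LDAP_CA_CERT_DIR=/etc/ssl/certs
LDAP_CA_CERT_FILE=/etc/ssl/certs/ldap-ca.pem
```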
80d87bdf1b Merge pull request #894 from tobiasge/psycopg2-changes
Optimize psycopg2 dependency
2022-11-30 16:23:20 +01:00
6d465e6f81 Optimize psycopg2 dependency
We have been installing psycopg2 for a while now. This updates to the latest version. Because psycopg2-binary is a direct dependency of NetBox, both versions were installed. Now we remove the pre-compiled version from the dependency file.
2022-11-30 14:36:53 +01:00
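Concretely, the builder stage of the Dockerfile (diff below) now drops the pre-compiled package from NetBox's requirements before installing:

```bash
sed -i -e '/psycopg2-binary/d' requirements.txt && \
    /opt/netbox/venv/bin/pip install \
    -r /requirements.txt \
    -r /requirements-container.txt
```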
b72084290a Merge pull request #893 from tobiasge/startup-scripts-complete-removal
Startup scripts complete removal
2022-11-30 13:14:32 +01:00
aa3357817a Disable Gitleaks 2022-11-30 10:54:24 +01:00
9441be459c Improved testing
After the initializer scripts were removed, we didn't test the actual compose setup anymore. This adds new tests to run the database migrations.
2022-11-30 10:54:24 +01:00
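The added checks in test.sh (diff at the bottom of this page) run the migrations against the real compose services, along these lines:

```bash
doco="docker compose --file docker-compose.test.yml --file docker-compose.test.override.yml --project-name netbox_docker_test"
$doco run --rm netbox /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py migrate
```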
1779ba790d Removed warning for initializer scripts 2022-11-30 09:51:36 +01:00
22cb2d5812 Merge pull request #865 from netbox-community/renovate/postgres-15.x
Update postgres Docker tag to v15
2022-11-30 08:50:15 +01:00
6020f4503a Make nginx-unit listen on IPv4 and IPv6 2022-11-23 14:40:03 +01:00
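The unit configuration (nginx-unit.json diff below) now declares explicit IPv4 and IPv6 listeners instead of the former wildcard; an excerpt:

```json
"listeners": {
  "0.0.0.0:8080": { "pass": "routes/main" },
  "[::]:8080": { "pass": "routes/main" }
}
```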
a4f494db14 Update postgres Docker tag to v15 2022-11-11 14:24:44 +00:00
0cac6f51a9 Merge pull request #876 from Wellyas/patch-1
Add requirements for SAML SSO
2022-11-11 14:08:15 +01:00
dd01e3c227 Enable SAML & OPENIDCONNECT for social-auth-core 2022-11-11 13:07:14 +01:00
925f41b97f Merge pull request #883 from movelg/housekeeping_var_name_fix
Housekeeping var name fix
2022-11-11 10:54:13 +01:00
7d871778eb Don't use bash internal variable name 2022-11-11 08:40:29 +01:00
35a94cb7e5 Update docker-compose.yml
Address housekeeping tracebacks & remove whitespace
2022-10-22 09:40:03 -04:00
23 changed files with 328 additions and 210 deletions

View File

@ -31,6 +31,7 @@ jobs:
LINTER_RULES_PATH: / LINTER_RULES_PATH: /
VALIDATE_ALL_CODEBASE: false VALIDATE_ALL_CODEBASE: false
VALIDATE_DOCKERFILE: false VALIDATE_DOCKERFILE: false
VALIDATE_GITLEAKS: false
FILTER_REGEX_EXCLUDE: (.*/)?(LICENSE|configuration/.*) FILTER_REGEX_EXCLUDE: (.*/)?(LICENSE|configuration/.*)
EDITORCONFIG_FILE_NAME: .ecrc EDITORCONFIG_FILE_NAME: .ecrc
DOCKERFILE_HADOLINT_FILE_NAME: .hadolint.yaml DOCKERFILE_HADOLINT_FILE_NAME: .hadolint.yaml
@ -54,6 +55,7 @@ jobs:
fail-fast: false fail-fast: false
env: env:
GH_ACTION: enable GH_ACTION: enable
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
IMAGE_NAMES: docker.io/netboxcommunity/netbox IMAGE_NAMES: docker.io/netboxcommunity/netbox
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: Builds new NetBox Docker Images name: Builds new NetBox Docker Images

View File

@ -25,6 +25,7 @@ jobs:
name: Builds new NetBox Docker Images name: Builds new NetBox Docker Images
env: env:
GH_ACTION: enable GH_ACTION: enable
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
IMAGE_NAMES: docker.io/netboxcommunity/netbox quay.io/netboxcommunity/netbox ghcr.io/netbox-community/netbox IMAGE_NAMES: docker.io/netboxcommunity/netbox quay.io/netboxcommunity/netbox ghcr.io/netbox-community/netbox
steps: steps:
- id: source-checkout - id: source-checkout
@ -32,7 +33,7 @@ jobs:
uses: actions/checkout@v3 uses: actions/checkout@v3
- id: set-netbox-docker-version - id: set-netbox-docker-version
name: Get Version of NetBox Docker name: Get Version of NetBox Docker
run: echo "::set-output name=version::$(cat VERSION)" run: echo "version=$(cat VERSION)" >>"$GITHUB_OUTPUT"
shell: bash shell: bash
- id: qemu-setup - id: qemu-setup
name: Set up QEMU name: Set up QEMU

View File

@ -13,6 +13,13 @@ RUN export DEBIAN_FRONTEND=noninteractive \
libpq-dev \ libpq-dev \
libsasl2-dev \ libsasl2-dev \
libssl-dev \ libssl-dev \
libxml2-dev \
libxml2-dev \
libxmlsec1 \
libxmlsec1-dev \
libxmlsec1-openssl \
libxslt-dev \
pkg-config \
python3-dev \ python3-dev \
python3-pip \ python3-pip \
python3-venv \ python3-venv \
@ -24,7 +31,8 @@ RUN export DEBIAN_FRONTEND=noninteractive \
ARG NETBOX_PATH ARG NETBOX_PATH
COPY ${NETBOX_PATH}/requirements.txt requirements-container.txt / COPY ${NETBOX_PATH}/requirements.txt requirements-container.txt /
RUN /opt/netbox/venv/bin/pip install \ RUN sed -i -e '/psycopg2-binary/d' requirements.txt && \
/opt/netbox/venv/bin/pip install \
-r /requirements.txt \ -r /requirements.txt \
-r /requirements-container.txt -r /requirements-container.txt
@ -46,6 +54,7 @@ RUN export DEBIAN_FRONTEND=noninteractive \
curl \ curl \
libldap-common \ libldap-common \
libpq5 \ libpq5 \
libxmlsec1-openssl \
openssl \ openssl \
python3 \ python3 \
python3-distutils \ python3-distutils \
@ -57,8 +66,8 @@ RUN export DEBIAN_FRONTEND=noninteractive \
&& apt-get update -qq \ && apt-get update -qq \
&& apt-get install \ && apt-get install \
--yes -qq --no-install-recommends \ --yes -qq --no-install-recommends \
unit=1.27.0-1~jammy \ unit=1.29.1-1~jammy \
unit-python3.10=1.27.0-1~jammy \ unit-python3.10=1.29.1-1~jammy \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
COPY --from=builder /opt/netbox/venv /opt/netbox/venv COPY --from=builder /opt/netbox/venv /opt/netbox/venv
@ -85,7 +94,7 @@ RUN mkdir -p static /opt/unit/state/ /opt/unit/tmp/ \
--config-file /opt/netbox/mkdocs.yml --site-dir /opt/netbox/netbox/project-static/docs/ \ --config-file /opt/netbox/mkdocs.yml --site-dir /opt/netbox/netbox/project-static/docs/ \
&& SECRET_KEY="dummy" /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py collectstatic --no-input && SECRET_KEY="dummy" /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py collectstatic --no-input
ENV LANG=C.UTF-8 PATH=/opt/netbox/venv/bin:$PATH ENV LANG=C.utf8 PATH=/opt/netbox/venv/bin:$PATH
ENTRYPOINT [ "/usr/bin/tini", "--" ] ENTRYPOINT [ "/usr/bin/tini", "--" ]
CMD [ "/opt/netbox/docker-entrypoint.sh", "/opt/netbox/launch-netbox.sh" ] CMD [ "/opt/netbox/docker-entrypoint.sh", "/opt/netbox/launch-netbox.sh" ]

View File

@ -3,7 +3,7 @@
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/netbox-community/netbox-docker)][github-release] [![GitHub release (latest by date)](https://img.shields.io/github/v/release/netbox-community/netbox-docker)][github-release]
[![GitHub stars](https://img.shields.io/github/stars/netbox-community/netbox-docker)][github-stargazers] [![GitHub stars](https://img.shields.io/github/stars/netbox-community/netbox-docker)][github-stargazers]
![GitHub closed pull requests](https://img.shields.io/github/issues-pr-closed-raw/netbox-community/netbox-docker) ![GitHub closed pull requests](https://img.shields.io/github/issues-pr-closed-raw/netbox-community/netbox-docker)
![Github release workflow](https://img.shields.io/github/workflow/status/netbox-community/netbox-docker/release) ![Github release workflow](https://img.shields.io/github/actions/workflow/status/netbox-community/netbox-docker/release.yml?branch=release)
![Docker Pulls](https://img.shields.io/docker/pulls/netboxcommunity/netbox) ![Docker Pulls](https://img.shields.io/docker/pulls/netboxcommunity/netbox)
[![GitHub license](https://img.shields.io/github/license/netbox-community/netbox-docker)][netbox-docker-license] [![GitHub license](https://img.shields.io/github/license/netbox-community/netbox-docker)][netbox-docker-license]
@ -40,19 +40,21 @@ services:
ports: ports:
- 8000:8080 - 8000:8080
EOF EOF
docker-compose pull docker compose pull
docker-compose up docker compose up
``` ```
The whole application will be available after a few minutes. The whole application will be available after a few minutes.
Open the URL `http://0.0.0.0:8000/` in a web-browser. Open the URL `http://0.0.0.0:8000/` in a web-browser.
You should see the NetBox homepage. You should see the NetBox homepage.
In the top-right corner you can login.
The default credentials are:
* Username: **admin** To create the first admin user run this command:
* Password: **admin**
* API Token: **0123456789abcdef0123456789abcdef01234567** ```bash
docker compose exec netbox /opt/netbox/netbox/manage.py createsuperuser
```
If you need to restart Netbox from an empty database often, you can also set the `SUPERUSER_*` variables in your `docker-compose.override.yml` as shown in the example.
[wiki-getting-started]: https://github.com/netbox-community/netbox-docker/wiki/Getting-Started [wiki-getting-started]: https://github.com/netbox-community/netbox-docker/wiki/Getting-Started
@ -123,7 +125,7 @@ This project relies only on _Docker_ and _docker-compose_ meeting these requirem
* The _containerd version_ must be at least `1.5.6`. * The _containerd version_ must be at least `1.5.6`.
* The _docker-compose version_ must be at least `1.28.0`. * The _docker-compose version_ must be at least `1.28.0`.
To check the version installed on your system run `docker --version` and `docker-compose --version`. To check the version installed on your system run `docker --version` and `docker compose version`.
## Updating ## Updating

View File

@ -1 +1 @@
2.3.0 2.5.2

View File

@ -0,0 +1,9 @@
#!/bin/bash
NEEDED_COMMANDS="curl jq docker skopeo"
for c in $NEEDED_COMMANDS; do
if ! command -v "$c" &>/dev/null; then
echo "⚠️ '$c' is not installed. Can't proceed with build."
exit 1
fi
done

View File

@ -1,82 +1,12 @@
#!/bin/bash #!/bin/bash
# Retrieves image configuration from public images in DockerHub
# Functions from https://gist.github.com/cirocosta/17ea17be7ac11594cb0f290b0a3ac0d1
# Optimised for our use case
get_image_label() { get_image_label() {
local label=$1 local label=$1
local image=$2 local image=$2
local tag=$3 skopeo inspect "docker://$image" | jq -r ".Labels[\"$label\"]"
local token
token=$(_get_token "$image")
local digest
digest=$(_get_digest "$image" "$tag" "$token")
local retval="null"
if [ "$digest" != "null" ]; then
retval=$(_get_image_configuration "$image" "$token" "$digest" "$label")
fi
echo "$retval"
}
get_image_layers() {
local image=$1
local tag=$2
local token
token=$(_get_token "$image")
_get_layers "$image" "$tag" "$token"
} }
get_image_last_layer() { get_image_last_layer() {
local image=$1 local image=$1
local tag=$2 skopeo inspect "docker://$image" | jq -r ".Layers | last"
local token
token=$(_get_token "$image")
local layers
mapfile -t layers < <(_get_layers "$image" "$tag" "$token")
echo "${layers[-1]}"
}
_get_image_configuration() {
local image=$1
local token=$2
local digest=$3
local label=$4
curl \
--silent \
--location \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/blobs/$digest" |
jq -r ".config.Labels.\"$label\""
}
_get_token() {
local image=$1
curl \
--silent \
"https://auth.docker.io/token?scope=repository:$image:pull&service=registry.docker.io" |
jq -r '.token'
}
_get_digest() {
local image=$1
local tag=$2
local token=$3
curl \
--silent \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/manifests/$tag" |
jq -r '.config.digest'
}
_get_layers() {
local image=$1
local tag=$2
local token=$3
curl \
--silent \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/manifests/$tag" |
jq -r '.layers[].digest'
} }

View File

@ -19,3 +19,14 @@ gh_env() {
echo "${@}" >>"${GITHUB_ENV}" echo "${@}" >>"${GITHUB_ENV}"
fi fi
} }
###
# Prints the output to the file defined in ${GITHUB_OUTPUT}.
# Only executes if ${GH_ACTION} is defined.
# Example Usage: gh_env "FOO_VAR=bar_value"
###
gh_out() {
if [ -n "${GH_ACTION}" ]; then
echo "${@}" >>"$GITHUB_OUTPUT"
fi
}

View File

@ -1,26 +1,27 @@
#!/bin/bash #!/bin/bash
# Builds the latest released version # Builds the latest released version
# Check if we have everything needed for the build
source ./build-functions/check-commands.sh
source ./build-functions/gh-functions.sh
echo "▶️ $0 $*" echo "▶️ $0 $*"
### CURL_ARGS=(
# Check for the jq library needed for parsing JSON --silent
### )
if ! command -v jq; then
echo "⚠️ jq command missing from \$PATH!"
exit 1
fi
### ###
# Checking for the presence of GITHUB_OAUTH_CLIENT_ID # Checking for the presence of GITHUB_TOKEN
# and GITHUB_OAUTH_CLIENT_SECRET
### ###
if [ -n "${GITHUB_OAUTH_CLIENT_ID}" ] && [ -n "${GITHUB_OAUTH_CLIENT_SECRET}" ]; then if [ -n "${GITHUB_TOKEN}" ]; then
echo "🗝 Performing authenticated Github API calls." echo "🗝 Performing authenticated Github API calls."
GITHUB_OAUTH_PARAMS="client_id=${GITHUB_OAUTH_CLIENT_ID}&client_secret=${GITHUB_OAUTH_CLIENT_SECRET}" CURL_ARGS+=(
--header "Authorization: Bearer ${GITHUB_TOKEN}"
)
else else
echo "🕶 Performing unauthenticated Github API calls. This might result in lower Github rate limits!" echo "🕶 Performing unauthenticated Github API calls. This might result in lower Github rate limits!"
GITHUB_OAUTH_PARAMS=""
fi fi
### ###
@ -42,31 +43,27 @@ fi
### ###
ORIGINAL_GITHUB_REPO="netbox-community/netbox" ORIGINAL_GITHUB_REPO="netbox-community/netbox"
GITHUB_REPO="${GITHUB_REPO-$ORIGINAL_GITHUB_REPO}" GITHUB_REPO="${GITHUB_REPO-$ORIGINAL_GITHUB_REPO}"
URL_RELEASES="https://api.github.com/repos/${GITHUB_REPO}/releases?${GITHUB_OAUTH_PARAMS}" URL_RELEASES="https://api.github.com/repos/${GITHUB_REPO}/releases"
# Composing the JQ commans to extract the most recent version number # Composing the JQ commans to extract the most recent version number
JQ_LATEST="group_by(.prerelease) | .[] | sort_by(.published_at) | reverse | .[0] | select(.prerelease==${PRERELEASE-false}) | .tag_name" JQ_LATEST="group_by(.prerelease) | .[] | sort_by(.published_at) | reverse | .[0] | select(.prerelease==${PRERELEASE-false}) | .tag_name"
CURL="curl -sS" CURL="curl"
# Querying the Github API to fetch the most recent version number # Querying the Github API to fetch the most recent version number
VERSION=$($CURL "${URL_RELEASES}" | jq -r "${JQ_LATEST}") VERSION=$($CURL "${CURL_ARGS[@]}" "${URL_RELEASES}" | jq -r "${JQ_LATEST}" 2>/dev/null)
### ###
# Check if the prerelease version is actually higher than stable version # Check if the prerelease version is actually higher than stable version
### ###
if [ "${PRERELEASE}" == "true" ]; then if [ "${PRERELEASE}" == "true" ]; then
JQ_STABLE="group_by(.prerelease) | .[] | sort_by(.published_at) | reverse | .[0] | select(.prerelease==false) | .tag_name" JQ_STABLE="group_by(.prerelease) | .[] | sort_by(.published_at) | reverse | .[0] | select(.prerelease==false) | .tag_name"
STABLE_VERSION=$($CURL "${URL_RELEASES}" | jq -r "${JQ_STABLE}") STABLE_VERSION=$($CURL "${CURL_ARGS[@]}" "${URL_RELEASES}" | jq -r "${JQ_STABLE}" 2>/dev/null)
# shellcheck disable=SC2003 MAJOR_STABLE=$(expr "${STABLE_VERSION}" : 'v\([0-9]\+\)')
MAJOR_STABLE=$(expr match "${STABLE_VERSION}" 'v\([0-9]\+\)') MINOR_STABLE=$(expr "${STABLE_VERSION}" : 'v[0-9]\+\.\([0-9]\+\)')
# shellcheck disable=SC2003 MAJOR_UNSTABLE=$(expr "${VERSION}" : 'v\([0-9]\+\)')
MINOR_STABLE=$(expr match "${STABLE_VERSION}" 'v[0-9]\+\.\([0-9]\+\)') MINOR_UNSTABLE=$(expr "${VERSION}" : 'v[0-9]\+\.\([0-9]\+\)')
# shellcheck disable=SC2003
MAJOR_UNSTABLE=$(expr match "${VERSION}" 'v\([0-9]\+\)')
# shellcheck disable=SC2003
MINOR_UNSTABLE=$(expr match "${VERSION}" 'v[0-9]\+\.\([0-9]\+\)')
if { if {
[ "${MAJOR_STABLE}" -eq "${MAJOR_UNSTABLE}" ] && [ "${MAJOR_STABLE}" -eq "${MAJOR_UNSTABLE}" ] &&
@ -75,10 +72,7 @@ if [ "${PRERELEASE}" == "true" ]; then
echo "❎ Latest unstable version '${VERSION}' is not higher than the latest stable version '$STABLE_VERSION'." echo "❎ Latest unstable version '${VERSION}' is not higher than the latest stable version '$STABLE_VERSION'."
if [ -z "$DEBUG" ]; then if [ -z "$DEBUG" ]; then
if [ -n "${GH_ACTION}" ]; then gh_out "skipped=true"
echo "::set-output name=skipped::true"
fi
exit 0 exit 0
else else
echo "⚠️ Would exit here with code '0', but DEBUG is enabled." echo "⚠️ Would exit here with code '0', but DEBUG is enabled."

View File

@ -137,6 +137,9 @@ END_OF_HELP
fi fi
fi fi
# Check if we have everything needed for the build
source ./build-functions/check-commands.sh
source ./build-functions/gh-functions.sh source ./build-functions/gh-functions.sh
IMAGE_NAMES="${IMAGE_NAMES-docker.io/netboxcommunity/netbox}" IMAGE_NAMES="${IMAGE_NAMES-docker.io/netboxcommunity/netbox}"
@ -170,7 +173,7 @@ if [ "${2}" != "--push-only" ] && [ -z "${SKIP_GIT}" ]; then
REMOTE_EXISTS=$(git ls-remote --heads --tags "${URL}" "${NETBOX_BRANCH}" | wc -l) REMOTE_EXISTS=$(git ls-remote --heads --tags "${URL}" "${NETBOX_BRANCH}" | wc -l)
if [ "${REMOTE_EXISTS}" == "0" ]; then if [ "${REMOTE_EXISTS}" == "0" ]; then
echo "❌ Remote branch '${NETBOX_BRANCH}' not found in '${URL}'; Nothing to do" echo "❌ Remote branch '${NETBOX_BRANCH}' not found in '${URL}'; Nothing to do"
gh_echo "::set-output name=skipped::true" gh_out "skipped=true"
exit 0 exit 0
fi fi
echo "🌐 Checking out '${NETBOX_BRANCH}' of NetBox from the url '${URL}' into '${NETBOX_PATH}'" echo "🌐 Checking out '${NETBOX_BRANCH}' of NetBox from the url '${URL}' into '${NETBOX_PATH}'"
@ -215,7 +218,7 @@ fi
# Determining the value for DOCKER_FROM # Determining the value for DOCKER_FROM
### ###
if [ -z "$DOCKER_FROM" ]; then if [ -z "$DOCKER_FROM" ]; then
DOCKER_FROM="ubuntu:22.04" DOCKER_FROM="docker.io/ubuntu:22.04"
fi fi
### ###
@ -300,6 +303,7 @@ if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
done done
fi fi
FINAL_DOCKER_TAG="${IMAGE_NAME_TAGS[0]}"
gh_env "FINAL_DOCKER_TAG=${IMAGE_NAME_TAGS[0]}" gh_env "FINAL_DOCKER_TAG=${IMAGE_NAME_TAGS[0]}"
### ###
@ -316,23 +320,17 @@ if [ -z "${GH_ACTION}" ]; then
# Asuming non Github builds should always proceed # Asuming non Github builds should always proceed
SHOULD_BUILD="true" SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} interactive" BUILD_REASON="${BUILD_REASON} interactive"
elif [[ "${IMAGE_NAME_TAGS[0]}" = docker.io* ]]; then else
source ./build-functions/get-public-image-config.sh source ./build-functions/get-public-image-config.sh
IFS=':' read -ra DOCKER_FROM_SPLIT <<<"${DOCKER_FROM}" echo "Checking labels for '${FINAL_DOCKER_TAG}'"
if ! [[ ${DOCKER_FROM_SPLIT[0]} =~ .*/.* ]]; then BASE_LAST_LAYER=$(get_image_last_layer "${DOCKER_FROM}")
# Need to use "library/..." for images the have no two part name OLD_BASE_LAST_LAYER=$(get_image_label netbox.last-base-image-layer "${FINAL_DOCKER_TAG}")
DOCKER_FROM_SPLIT[0]="library/${DOCKER_FROM_SPLIT[0]}" NETBOX_GIT_REF_OLD=$(get_image_label netbox.git-ref "${FINAL_DOCKER_TAG}")
fi GIT_REF_OLD=$(get_image_label org.opencontainers.image.revision "${FINAL_DOCKER_TAG}")
IFS='/' read -ra ORG_REPO <<<"${IMAGE_NAMES[0]}"
echo "Checking labels for '${ORG_REPO[1]}' and '${ORG_REPO[2]}'"
BASE_LAST_LAYER=$(get_image_last_layer "${DOCKER_FROM_SPLIT[0]}" "${DOCKER_FROM_SPLIT[1]}")
mapfile -t IMAGES_LAYERS_OLD < <(get_image_layers "${ORG_REPO[1]}"/"${ORG_REPO[2]}" "${TAG}")
NETBOX_GIT_REF_OLD=$(get_image_label netbox.git-ref "${ORG_REPO[1]}"/"${ORG_REPO[2]}" "${TAG}")
GIT_REF_OLD=$(get_image_label org.opencontainers.image.revision "${ORG_REPO[1]}"/"${ORG_REPO[2]}" "${TAG}")
if ! printf '%s\n' "${IMAGES_LAYERS_OLD[@]}" | grep -q -P "^${BASE_LAST_LAYER}\$"; then if [ "${BASE_LAST_LAYER}" != "${OLD_BASE_LAST_LAYER}" ]; then
SHOULD_BUILD="true" SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} debian" BUILD_REASON="${BUILD_REASON} ubuntu"
fi fi
if [ "${NETBOX_GIT_REF}" != "${NETBOX_GIT_REF_OLD}" ]; then if [ "${NETBOX_GIT_REF}" != "${NETBOX_GIT_REF_OLD}" ]; then
SHOULD_BUILD="true" SHOULD_BUILD="true"
@ -342,17 +340,14 @@ elif [[ "${IMAGE_NAME_TAGS[0]}" = docker.io* ]]; then
SHOULD_BUILD="true" SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} netbox-docker" BUILD_REASON="${BUILD_REASON} netbox-docker"
fi fi
else
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} no-check"
fi fi
if [ "${SHOULD_BUILD}" != "true" ]; then if [ "${SHOULD_BUILD}" != "true" ]; then
echo "Build skipped because sources didn't change" echo "Build skipped because sources didn't change"
echo "::set-output name=skipped::true" gh_out "skipped=true"
exit 0 # Nothing to do -> exit exit 0 # Nothing to do -> exit
else else
gh_echo "::set-output name=skipped::false" gh_out "skipped=false"
fi fi
gh_echo "::endgroup::" gh_echo "::endgroup::"
@ -393,6 +388,7 @@ fi
if [ -n "${BUILD_REASON}" ]; then if [ -n "${BUILD_REASON}" ]; then
BUILD_REASON=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<"$BUILD_REASON") BUILD_REASON=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<"$BUILD_REASON")
DOCKER_BUILD_ARGS+=(--label "netbox.build-reason=${BUILD_REASON}") DOCKER_BUILD_ARGS+=(--label "netbox.build-reason=${BUILD_REASON}")
DOCKER_BUILD_ARGS+=(--label "netbox.last-base-image-layer=${BASE_LAST_LAYER}")
fi fi
# --build-arg # --build-arg

View File

@ -58,6 +58,9 @@ _BASE_DIR = dirname(dirname(abspath(__file__)))
# #
# Example: ALLOWED_HOSTS = ['netbox.example.com', 'netbox.internal.local'] # Example: ALLOWED_HOSTS = ['netbox.example.com', 'netbox.internal.local']
ALLOWED_HOSTS = environ.get('ALLOWED_HOSTS', '*').split(' ') ALLOWED_HOSTS = environ.get('ALLOWED_HOSTS', '*').split(' ')
# ensure that '*' or 'localhost' is always in ALLOWED_HOSTS (needed for health checks)
if '*' not in ALLOWED_HOSTS and 'localhost' not in ALLOWED_HOSTS:
ALLOWED_HOSTS.append('localhost')
# PostgreSQL database configuration. See the Django documentation for a complete list of available parameters: # PostgreSQL database configuration. See the Django documentation for a complete list of available parameters:
# https://docs.djangoproject.com/en/stable/ref/settings/#databases # https://docs.djangoproject.com/en/stable/ref/settings/#databases
@ -83,6 +86,7 @@ REDIS = {
'tasks': { 'tasks': {
'HOST': environ.get('REDIS_HOST', 'localhost'), 'HOST': environ.get('REDIS_HOST', 'localhost'),
'PORT': _environ_get_and_map('REDIS_PORT', 6379, _AS_INT), 'PORT': _environ_get_and_map('REDIS_PORT', 6379, _AS_INT),
'USERNAME': environ.get('REDIS_USERNAME', ''),
'PASSWORD': _read_secret('redis_password', environ.get('REDIS_PASSWORD', '')), 'PASSWORD': _read_secret('redis_password', environ.get('REDIS_PASSWORD', '')),
'DATABASE': _environ_get_and_map('REDIS_DATABASE', 0, _AS_INT), 'DATABASE': _environ_get_and_map('REDIS_DATABASE', 0, _AS_INT),
'SSL': _environ_get_and_map('REDIS_SSL', 'False', _AS_BOOL), 'SSL': _environ_get_and_map('REDIS_SSL', 'False', _AS_BOOL),
@ -91,6 +95,7 @@ REDIS = {
'caching': { 'caching': {
'HOST': environ.get('REDIS_CACHE_HOST', environ.get('REDIS_HOST', 'localhost')), 'HOST': environ.get('REDIS_CACHE_HOST', environ.get('REDIS_HOST', 'localhost')),
'PORT': _environ_get_and_map('REDIS_CACHE_PORT', environ.get('REDIS_PORT', '6379'), _AS_INT), 'PORT': _environ_get_and_map('REDIS_CACHE_PORT', environ.get('REDIS_PORT', '6379'), _AS_INT),
'USERNAME': environ.get('REDIS_CACHE_USERNAME', environ.get('REDIS_USERNAME', '')),
'PASSWORD': _read_secret('redis_cache_password', environ.get('REDIS_CACHE_PASSWORD', environ.get('REDIS_PASSWORD', ''))), 'PASSWORD': _read_secret('redis_cache_password', environ.get('REDIS_CACHE_PASSWORD', environ.get('REDIS_PASSWORD', ''))),
'DATABASE': _environ_get_and_map('REDIS_CACHE_DATABASE', '1', _AS_INT), 'DATABASE': _environ_get_and_map('REDIS_CACHE_DATABASE', '1', _AS_INT),
'SSL': _environ_get_and_map('REDIS_CACHE_SSL', environ.get('REDIS_SSL', 'False'), _AS_BOOL), 'SSL': _environ_get_and_map('REDIS_CACHE_SSL', environ.get('REDIS_SSL', 'False'), _AS_BOOL),

View File

@ -49,20 +49,38 @@ AUTH_LDAP_START_TLS = environ.get('AUTH_LDAP_START_TLS', 'False').lower() == 'tr
# ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) # ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
LDAP_IGNORE_CERT_ERRORS = environ.get('LDAP_IGNORE_CERT_ERRORS', 'False').lower() == 'true' LDAP_IGNORE_CERT_ERRORS = environ.get('LDAP_IGNORE_CERT_ERRORS', 'False').lower() == 'true'
# Include this setting if you want to validate the LDAP server certificates against a CA certificate directory on your server
# Note that this is a NetBox-specific setting which sets:
# ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, LDAP_CA_CERT_DIR)
LDAP_CA_CERT_DIR = environ.get('LDAP_CA_CERT_DIR', None)
# Include this setting if you want to validate the LDAP server certificates against your own CA.
# Note that this is a NetBox-specific setting which sets:
# ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, LDAP_CA_CERT_FILE)
LDAP_CA_CERT_FILE = environ.get('LDAP_CA_CERT_FILE', None)
AUTH_LDAP_USER_SEARCH_BASEDN = environ.get('AUTH_LDAP_USER_SEARCH_BASEDN', '') AUTH_LDAP_USER_SEARCH_BASEDN = environ.get('AUTH_LDAP_USER_SEARCH_BASEDN', '')
AUTH_LDAP_USER_SEARCH_ATTR = environ.get('AUTH_LDAP_USER_SEARCH_ATTR', 'sAMAccountName') AUTH_LDAP_USER_SEARCH_ATTR = environ.get('AUTH_LDAP_USER_SEARCH_ATTR', 'sAMAccountName')
AUTH_LDAP_USER_SEARCH_FILTER: str = environ.get(
'AUTH_LDAP_USER_SEARCH_FILTER', f'({AUTH_LDAP_USER_SEARCH_ATTR}=%(user)s)'
)
AUTH_LDAP_USER_SEARCH = LDAPSearch( AUTH_LDAP_USER_SEARCH = LDAPSearch(
AUTH_LDAP_USER_SEARCH_BASEDN, AUTH_LDAP_USER_SEARCH_BASEDN, ldap.SCOPE_SUBTREE, AUTH_LDAP_USER_SEARCH_FILTER
ldap.SCOPE_SUBTREE,
"(" + AUTH_LDAP_USER_SEARCH_ATTR + "=%(user)s)"
) )
# This search ought to return all groups to which the user belongs. django_auth_ldap uses this to determine group # This search ought to return all groups to which the user belongs. django_auth_ldap uses this to determine group
# heirarchy. # heirarchy.
AUTH_LDAP_GROUP_SEARCH_BASEDN = environ.get('AUTH_LDAP_GROUP_SEARCH_BASEDN', '') AUTH_LDAP_GROUP_SEARCH_BASEDN = environ.get('AUTH_LDAP_GROUP_SEARCH_BASEDN', '')
AUTH_LDAP_GROUP_SEARCH_CLASS = environ.get('AUTH_LDAP_GROUP_SEARCH_CLASS', 'group') AUTH_LDAP_GROUP_SEARCH_CLASS = environ.get('AUTH_LDAP_GROUP_SEARCH_CLASS', 'group')
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(AUTH_LDAP_GROUP_SEARCH_BASEDN, ldap.SCOPE_SUBTREE,
"(objectClass=" + AUTH_LDAP_GROUP_SEARCH_CLASS + ")") AUTH_LDAP_GROUP_SEARCH_FILTER: str = environ.get(
'AUTH_LDAP_GROUP_SEARCH_FILTER', f'(objectclass={AUTH_LDAP_GROUP_SEARCH_CLASS})'
)
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
AUTH_LDAP_GROUP_SEARCH_BASEDN, ldap.SCOPE_SUBTREE, AUTH_LDAP_GROUP_SEARCH_FILTER
)
AUTH_LDAP_GROUP_TYPE = _import_group_type(environ.get('AUTH_LDAP_GROUP_TYPE', 'GroupOfNamesType')) AUTH_LDAP_GROUP_TYPE = _import_group_type(environ.get('AUTH_LDAP_GROUP_TYPE', 'GroupOfNamesType'))
# Define a group required to login. # Define a group required to login.

View File

@ -2,4 +2,22 @@ version: '3.4'
services: services:
netbox: netbox:
ports: ports:
- 8000:8080 - "8000:8080"
# If you want the Nginx unit status page visible from the
# outside of the container add the following port mapping:
# - "8001:8081"
# healthcheck:
# Time for which the health check can fail after the container is started.
# This depends mostly on the performance of your database. On the first start,
# when all tables need to be created the start_period should be higher than on
# subsequent starts. For the first start after major version upgrades of NetBox
# the start_period might also need to be set higher.
# Default value in our docker-compose.yml is 60s
# start_period: 90s
# environment:
# SKIP_SUPERUSER: "false"
# SUPERUSER_API_TOKEN: ""
# SUPERUSER_EMAIL: ""
# SUPERUSER_NAME: ""
# SUPERUSER_PASSWORD: ""

View File

@ -0,0 +1,6 @@
version: '3.4'
services:
netbox:
ports:
- "127.0.0.1:8000:8080"

View File

@ -1,37 +1,65 @@
version: '3.4' version: '3.4'
services: services:
netbox: netbox: &netbox
image: ${IMAGE-netboxcommunity/netbox:latest} image: ${IMAGE-netboxcommunity/netbox:latest}
depends_on: depends_on:
- postgres postgres:
- redis condition: service_healthy
- redis-cache redis:
condition: service_healthy
redis-cache:
condition: service_healthy
env_file: env/netbox.env env_file: env/netbox.env
environment:
SKIP_STARTUP_SCRIPTS: ${SKIP_STARTUP_SCRIPTS-false}
user: 'unit:root' user: 'unit:root'
volumes: volumes:
- ./configuration:/etc/netbox/config:z,ro
- ./test-configuration/logging.py:/etc/netbox/config/logging.py:z,ro - ./test-configuration/logging.py:/etc/netbox/config/logging.py:z,ro
- ./reports:/etc/netbox/reports:z,ro healthcheck:
- ./scripts:/etc/netbox/scripts:z,ro start_period: 120s
- netbox-media-files:/opt/netbox/netbox/media:z timeout: 3s
interval: 15s
test: "curl -f http://localhost:8080/api/ || exit 1"
netbox-worker:
<<: *netbox
command:
- /opt/netbox/venv/bin/python
- /opt/netbox/netbox/manage.py
- rqworker
healthcheck:
start_period: 40s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q rqworker || exit 1"
netbox-housekeeping:
<<: *netbox
command:
- /opt/netbox/housekeeping.sh
healthcheck:
start_period: 40s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q housekeeping || exit 1"
postgres: postgres:
image: postgres:14-alpine image: postgres:15-alpine
env_file: env/postgres.env env_file: env/postgres.env
redis: healthcheck:
test: "pg_isready -t 2 -d $$POSTGRES_DB -U $$POSTGRES_USER" ## $$ because of docker-compose
interval: 10s
timeout: 5s
retries: 5
redis: &redis
image: redis:7-alpine image: redis:7-alpine
command: command:
- sh - sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env - -c # this is to evaluate the $REDIS_PASSWORD from the env
- redis-server --appendonly yes --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose - redis-server --appendonly yes --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
env_file: env/redis.env env_file: env/redis.env
healthcheck:
start_period: 20s
timeout: 3s
interval: 15s
test: "timeout 2 redis-cli ping"
redis-cache: redis-cache:
image: redis:7-alpine <<: *redis
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
- redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
env_file: env/redis-cache.env env_file: env/redis-cache.env
volumes: volumes:
netbox-media-files: netbox-media-files:

View File

@ -1,14 +1,18 @@
version: '3.4' version: '3.4'
services: services:
netbox: &netbox netbox: &netbox
image: netboxcommunity/netbox:${VERSION-v3.3-2.3.0} image: docker.io/netboxcommunity/netbox:${VERSION-v3.4-2.5.2}
depends_on: depends_on:
- postgres - postgres
- redis - redis
- redis-cache - redis-cache
- netbox-worker
env_file: env/netbox.env env_file: env/netbox.env
user: 'unit:root' user: 'unit:root'
healthcheck:
start_period: 60s
timeout: 3s
interval: 15s
test: "curl -f http://localhost:8080/api/ || exit 1"
volumes: volumes:
- ./configuration:/etc/netbox/config:z,ro - ./configuration:/etc/netbox/config:z,ro
- ./reports:/etc/netbox/reports:z,ro - ./reports:/etc/netbox/reports:z,ro
@ -17,30 +21,40 @@ services:
netbox-worker: netbox-worker:
<<: *netbox <<: *netbox
depends_on: depends_on:
- redis netbox:
- postgres condition: service_healthy
command: command:
- /opt/netbox/venv/bin/python - /opt/netbox/venv/bin/python
- /opt/netbox/netbox/manage.py - /opt/netbox/netbox/manage.py
- rqworker - rqworker
healthcheck:
start_period: 20s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q rqworker || exit 1"
netbox-housekeeping: netbox-housekeeping:
<<: *netbox <<: *netbox
depends_on: depends_on:
- redis netbox:
- postgres condition: service_healthy
command: command:
- /opt/netbox/housekeeping.sh - /opt/netbox/housekeeping.sh
healthcheck:
start_period: 20s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q housekeeping || exit 1"
# postgres # postgres
postgres: postgres:
image: postgres:14-alpine image: docker.io/postgres:15-alpine
env_file: env/postgres.env env_file: env/postgres.env
volumes: volumes:
- netbox-postgres-data:/var/lib/postgresql/data - netbox-postgres-data:/var/lib/postgresql/data
# redis # redis
redis: redis:
image: redis:7-alpine image: docker.io/redis:7-alpine
command: command:
- sh - sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env - -c # this is to evaluate the $REDIS_PASSWORD from the env
@ -49,14 +63,14 @@ services:
volumes: volumes:
- netbox-redis-data:/data - netbox-redis-data:/data
redis-cache: redis-cache:
image: redis:7-alpine image: docker.io/redis:7-alpine
command: command:
- sh - sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env - -c # this is to evaluate the $REDIS_PASSWORD from the env
- redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose - redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
env_file: env/redis-cache.env env_file: env/redis-cache.env
volumes: volumes:
- netbox-redis-cache-data:/data - netbox-redis-cache-data:/data
volumes: volumes:
netbox-media-files: netbox-media-files:
@ -66,4 +80,4 @@ volumes:
netbox-redis-data: netbox-redis-data:
driver: local driver: local
netbox-redis-cache-data: netbox-redis-cache-data:
driver: local driver: local

View File

@ -46,6 +46,8 @@ if ! ./manage.py migrate --check >/dev/null 2>&1; then
./manage.py remove_stale_contenttypes --no-input ./manage.py remove_stale_contenttypes --no-input
echo "⚙️ Removing expired user sessions" echo "⚙️ Removing expired user sessions"
./manage.py clearsessions ./manage.py clearsessions
echo "⚙️ Building search index (lazy)"
./manage.py reindex --lazy
fi fi
# Create Superuser if required # Create Superuser if required
@ -80,14 +82,15 @@ END
echo "💡 Superuser Username: ${SUPERUSER_NAME}, E-Mail: ${SUPERUSER_EMAIL}" echo "💡 Superuser Username: ${SUPERUSER_NAME}, E-Mail: ${SUPERUSER_EMAIL}"
fi fi
# Print warning if startup scripts (and initializers) would've been run # Remove for next release ./manage.py shell --interface python <<END
if [ "$SKIP_STARTUP_SCRIPTS" == "true" ]; then from users.models import Token
# Nothing to do try:
echo "" # Empty block not allowed old_default_token = Token.objects.get(key="0123456789abcdef0123456789abcdef01234567")
else if old_default_token:
echo "⚠️⚠️⚠️ WARNING: The initializers have been moved to a plugin. See release notes." print("⚠️ Warning: You have the old default admin token in your database. This token is widely known; please remove it.")
echo "⚠️⚠️⚠️ Set environment variable 'SKIP_STARTUP_SCRIPTS' to 'true' to remove this warning." except Token.DoesNotExist:
fi pass
END
echo "✅ Initialisation is done." echo "✅ Initialisation is done."

View File

@ -1,8 +1,8 @@
#!/bin/bash #!/bin/bash
SECONDS=${HOUSEKEEPING_INTERVAL:=86400} SLEEP_SECONDS=${HOUSEKEEPING_INTERVAL:=86400}
echo "Interval set to ${SECONDS} seconds" echo "Interval set to ${SLEEP_SECONDS} seconds"
while true; do while true; do
date date
/opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py housekeeping /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py housekeeping
sleep "${SECONDS}s" sleep "${SLEEP_SECONDS}s"
done done

View File

@ -1,6 +1,7 @@
#!/bin/bash #!/bin/bash
UNIT_CONFIG="${UNIT_CONFIG-/etc/unit/nginx-unit.json}" UNIT_CONFIG="${UNIT_CONFIG-/etc/unit/nginx-unit.json}"
# Also used in "nginx-unit.json"
UNIT_SOCKET="/opt/unit/unit.sock" UNIT_SOCKET="/opt/unit/unit.sock"
load_configuration() { load_configuration() {

View File

@ -1,27 +1,45 @@
{ {
"listeners": { "listeners": {
"*:8080": { "0.0.0.0:8080": {
"pass": "routes" "pass": "routes/main"
},
"[::]:8080": {
"pass": "routes/main"
},
"0.0.0.0:8081": {
"pass": "routes/status"
},
"[::]:8081": {
"pass": "routes/status"
} }
}, },
"routes": {
"routes": [ "main": [
{ {
"match": { "match": {
"uri": "/static/*" "uri": "/static/*"
},
"action": {
"share": "/opt/netbox/netbox${uri}"
}
}, },
"action": { {
"share": "/opt/netbox/netbox${uri}" "action": {
"pass": "applications/netbox"
}
} }
}, ],
"status": [
{ {
"action": { "match": {
"pass": "applications/netbox" "uri": "/status/*"
},
"action": {
"proxy": "http://unix:/opt/unit/unit.sock"
}
} }
} ]
], },
"applications": { "applications": {
"netbox": { "netbox": {
"type": "python 3", "type": "python 3",
@ -35,6 +53,5 @@
} }
} }
}, },
"access_log": "/dev/stdout" "access_log": "/dev/stdout"
} }

env/netbox.env
View File

@ -30,9 +30,5 @@ REDIS_PASSWORD=H733Kdjndks81
REDIS_SSL=false REDIS_SSL=false
RELEASE_CHECK_URL=https://api.github.com/repos/netbox-community/netbox/releases RELEASE_CHECK_URL=https://api.github.com/repos/netbox-community/netbox/releases
SECRET_KEY=r8OwDznj!!dci#P9ghmRfdu1Ysxm0AiPeDCQhKE+N_rClfWNj SECRET_KEY=r8OwDznj!!dci#P9ghmRfdu1Ysxm0AiPeDCQhKE+N_rClfWNj
SKIP_SUPERUSER=false SKIP_SUPERUSER=true
SUPERUSER_API_TOKEN=0123456789abcdef0123456789abcdef01234567
SUPERUSER_EMAIL=admin@example.com
SUPERUSER_NAME=admin
SUPERUSER_PASSWORD=admin
WEBHOOKS_ENABLED=true WEBHOOKS_ENABLED=true

View File

@ -1,5 +1,6 @@
django-auth-ldap==4.1.0 django-auth-ldap==4.2.0
django-storages[azure,boto3,dropbox,google,libcloud,sftp]==1.13.1 django-storages[azure,boto3,dropbox,google,libcloud,sftp]==1.13.2
napalm==4.0.0 napalm==4.0.0
psycopg2==2.9.4 psycopg2==2.9.5
social-auth-core[openidconnect]==4.3.0 python3-saml==1.15.0
social-auth-core[all]==4.4.0

test.sh
View File

@ -14,6 +14,8 @@
# exit when a command exits with an exit code != 0 # exit when a command exits with an exit code != 0
set -e set -e
source ./build-functions/gh-functions.sh
# IMAGE is used by `docker-compose.yml` do determine the tag # IMAGE is used by `docker-compose.yml` do determine the tag
# of the Docker Image that is to be used # of the Docker Image that is to be used
if [ "${1}x" != "x" ]; then if [ "${1}x" != "x" ]; then
@ -35,20 +37,72 @@ if [ -z "${IMAGE}" ]; then
fi fi
# The docker compose command to use # The docker compose command to use
doco="docker-compose --file docker-compose.test.yml --project-name netbox_docker_test_${1}" doco="docker compose --file docker-compose.test.yml --file docker-compose.test.override.yml --project-name netbox_docker_test"
test_setup() { test_setup() {
gh_echo "::group:: Test setup"
echo "🏗 Setup up test environment" echo "🏗 Setup up test environment"
$doco up --detach --quiet-pull --wait --force-recreate --renew-anon-volumes --no-start
$doco start postgres
$doco start redis
$doco start redis-cache
gh_echo "::endgroup::"
} }
test_netbox_unit_tests() { test_netbox_unit_tests() {
gh_echo "::group:: Netbox unit tests"
echo "⏱ Running NetBox Unit Tests" echo "⏱ Running NetBox Unit Tests"
$doco run --rm netbox /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py test $doco run --rm netbox /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py test
gh_echo "::endgroup::"
}
test_compose_db_setup() {
gh_echo "::group:: Netbox DB migrations"
echo "⏱ Running NetBox DB migrations"
$doco run --rm netbox /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py migrate
gh_echo "::endgroup::"
}
test_netbox_start() {
gh_echo "::group:: Start Netbox service"
echo "⏱ Starting NetBox services"
$doco up --detach --wait
gh_echo "::endgroup::"
}
test_netbox_web() {
gh_echo "::group:: Web service test"
echo "⏱ Starting web service test"
RESP_CODE=$(
curl \
--silent \
--output /dev/null \
--write-out '%{http_code}' \
--request GET \
--connect-timeout 5 \
--max-time 10 \
--retry 5 \
--retry-delay 0 \
--retry-max-time 40 \
http://127.0.0.1:8000/
)
if [ "$RESP_CODE" == "200" ]; then
echo "Webservice running"
else
echo "⚠️ Got response code '$RESP_CODE' but expected '200'"
exit 1
fi
gh_echo "::endgroup::"
} }
test_cleanup() { test_cleanup() {
echo "💣 Cleaning Up" echo "💣 Cleaning Up"
$doco down -v gh_echo "::group:: Docker compose logs"
$doco logs --no-color
gh_echo "::endgroup::"
gh_echo "::group:: Docker compose down"
$doco down --volumes
gh_echo "::endgroup::"
} }
echo "🐳🐳🐳 Start testing '${IMAGE}'" echo "🐳🐳🐳 Start testing '${IMAGE}'"
@ -58,5 +112,8 @@ trap test_cleanup EXIT ERR
test_setup test_setup
test_netbox_unit_tests test_netbox_unit_tests
test_compose_db_setup
test_netbox_start
test_netbox_web
echo "🐳🐳🐳 Done testing '${IMAGE}'" echo "🐳🐳🐳 Done testing '${IMAGE}'"