Compare commits


358 Commits
1.5.1 ... 2.9.1

SHA1 Message Date
0c99ff8b56 Merge pull request #1225 from netbox-community/develop
Version 2.9.1
2024-05-07 18:29:09 +02:00
1265d2277a Preparation for 2.9.1 2024-05-07 13:45:53 +02:00
620ca96d64 Merge pull request #1224 from tobiasge/fix-sentry-sdk
Install Sentry Django integration correctly
2024-05-07 12:34:09 +02:00
2561055265 Install Sentry Django integration correctly 2024-05-07 09:57:59 +02:00
53ac2ff81b Merge pull request #1223 from tobiasge/1222-fix-health-check
Fix #1222: Use /login/ as health check URL
2024-05-07 08:37:20 +02:00
b6492b2e6b Fix #1222: Use /login/ as health check URL 2024-05-07 07:15:37 +02:00
6d25a54d49 Merge pull request #1219 from netbox-community/develop
Version 2.9.0
2024-05-06 21:26:10 +02:00
36f409a16b Preparation for 2.9.0 2024-05-06 19:04:09 +02:00
9d247f5530 Merge pull request #1216 from netbox-community/renovate/django-storages-1.x
Update dependency django-storages to v1.14.3
2024-05-06 18:58:40 +02:00
7a42faaabc Merge pull request #1218 from netbox-community/renovate/sentry-sdk-2.x
Update dependency sentry-sdk to v2.1.1
2024-05-06 18:58:19 +02:00
f5a3e3f955 Merge pull request #1215 from tobiasge/prepare-nb40
Prepare for NetBox 4.0
2024-05-06 18:57:57 +02:00
6307a58c4a Update dependency sentry-sdk to v2.1.1 2024-05-06 12:35:54 +00:00
1e78c386a5 Fixed regex for social-auth-core replacement 2024-05-06 09:56:49 +02:00
40f24105e6 Update dependency django-storages to v1.14.3 2024-05-04 23:02:30 +00:00
1a4ef09a16 Merge branch 'develop' into prepare-nb40 2024-05-04 10:02:19 +02:00
3d6b6c2a67 Merge pull request #1210 from netbox-community/renovate/github-super-linter-6.x
Update github/super-linter action to v6
2024-04-28 10:36:19 +02:00
f1787b4775 Disabled new tests 2024-04-28 09:29:43 +02:00
a7cb1a4651 Merge pull request #1211 from netbox-community/renovate/sentry-sdk-2.x
Update dependency sentry-sdk to v2
2024-04-28 09:24:15 +02:00
8fb42b3213 Update dependency sentry-sdk to v2 2024-04-26 10:16:25 +00:00
b7d99c9c69 Update github/super-linter action to v6 2024-04-23 19:44:05 +00:00
d52ef4445e Merge pull request #1209 from cimnine/HSTS
Adds HSTS related parameters
2024-04-23 21:43:40 +02:00
e1335a1194 Adds SECURE_* parameters introduced in 3.7.6
See https://github.com/netbox-community/netbox/issues/15644
2024-04-23 16:40:40 +02:00
c9d8f55ea1 Merge pull request #1208 from netbox-community/renovate/dulwich-0.x
Update dependency dulwich to v0.22.1
2024-04-23 16:22:10 +02:00
347b943b75 Update dependency dulwich to v0.22.1 2024-04-23 13:58:41 +00:00
f15dec52ae Merge pull request #1207 from netbox-community/renovate/dulwich-0.x
Update dependency dulwich to v0.22.0
2024-04-23 06:59:08 +02:00
e024427428 Update dependency dulwich to v0.22.0 2024-04-22 19:13:28 +00:00
5f57cb2774 Merge pull request #1206 from tobiasge/cancel-old-runs
Cancel workflow runs for in progress PRs
2024-04-20 21:07:34 +02:00
5873885edc Cancel workflow runs for in progress PRs 2024-04-19 21:53:06 +02:00
e141d9f16d Prepare for Netbox 4.0 2024-04-17 14:09:24 +02:00
eb59ace74c Merge pull request #1199 from netbox-community/renovate/sentry-sdk-1.x
Update dependency sentry-sdk to v1.45.0
2024-04-10 23:53:57 +02:00
2ee4acba21 Update dependency sentry-sdk to v1.45.0 2024-04-10 14:59:14 +00:00
2a582304b3 Merge pull request #1196 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.8.0
2024-04-04 19:56:41 +02:00
5309673f95 Merge pull request #1193 from kchandan/docker-version
Docker compose file version line obsolete issue fix
2024-04-04 19:02:29 +02:00
3c07538719 Merge pull request #1195 from netbox-community/renovate/sentry-sdk-1.x
Update dependency sentry-sdk to v1.44.1
2024-04-04 15:40:34 +02:00
3df807faa0 Update dependency django-auth-ldap to v4.8.0 2024-04-04 13:21:43 +00:00
4c0ee77bd0 Update dependency sentry-sdk to v1.44.1 2024-04-03 10:47:45 +00:00
6c01ada882 Removed version line from the yaml file as it is obsolete 2024-04-01 16:57:53 -04:00
b6e5486b48 Merge pull request #1189 from netbox-community/renovate/sentry-sdk-1.x
Update dependency sentry-sdk to v1.44.0
2024-03-28 19:30:14 +01:00
46f628e91b Update dependency sentry-sdk to v1.44.0 2024-03-28 17:06:50 +00:00
9f009c5907 Merge pull request #1166 from tobiasge/sentry-sdk
Fix #1127: Added sentry-sdk to requirements
2024-03-27 14:44:32 +01:00
73ce1f9b7a Fix #1127: Added sentry-sdk to requirements 2024-03-27 07:30:21 +01:00
f443d4ac53 Merge pull request #1183 from tobiasge/avoid-duplicate-checks
Avoid duplicate checks for Renovate PRs
2024-03-27 06:52:23 +01:00
1b9bb2a25e Merge pull request #1181 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.7.0
2024-03-27 06:50:43 +01:00
d2d7948021 Avoid duplicate checks for Renovate PRs 2024-03-26 17:32:05 +01:00
31a4da297a Update dependency django-auth-ldap to v4.7.0 2024-03-26 16:12:04 +00:00
8e278c3c8e Merge pull request #1167 from tobiasge/unit-ubuntu-update
Unit and Ubuntu update
2024-03-26 17:09:04 +01:00
24eaba7f9b Use ARM64 self-hosted runner 2024-03-02 13:25:38 +01:00
99b906c4f1 Update Ubuntu and Nginx Unit 2024-02-28 08:07:43 +01:00
37733049da Merge pull request #1135 from tbotnz/release
fix segfault
2024-02-22 09:26:49 +01:00
4da3f75c14 Merge pull request #1138 from adlerweb/patch-1
docker-entrypoint.sh: clarify default token message
2024-02-22 08:18:27 +01:00
607ccd10bf docker-entrypoint.sh: clarify default token message
When a default admin API token is found, a warning is displayed. As it is only called "token", some users might not know which token is referred to. The message should also give a hint or a link to documentation on how to remove it.
2024-02-22 07:10:06 +01:00
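For context, the check described above can be sketched as a small shell fragment. The variable name, the default value, and the message wording below are illustrative assumptions, not the actual docker-entrypoint.sh code; the default token value is the one documented in the README.

```bash
# Hypothetical sketch of a default-token warning in an entrypoint script.
# SUPERUSER_API_TOKEN and the message text are assumptions for illustration.
DEFAULT_TOKEN="0123456789abcdef0123456789abcdef01234567"
if [ "${SUPERUSER_API_TOKEN:-}" = "${DEFAULT_TOKEN}" ]; then
  echo "WARNING: The superuser API token still has the well-known default value."
  echo "         Remove or rotate it; see the netbox-docker wiki for instructions."
fi
```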
f27148634a fix segfault 2024-02-22 07:07:53 +01:00
46edaa1d22 Merge pull request #1151 from netbox-community/renovate/psycopg-3.x
Update dependency psycopg to v3.1.18
2024-02-05 08:00:18 +01:00
064b713489 Update dependency psycopg to v3.1.18 2024-02-04 21:43:13 +00:00
b8a7ffdb7b Merge pull request #1134 from NeodymiumFerBore/feat/redis-sentinel-config
Add Redis Sentinel config as environment vars
2024-01-20 10:46:06 +01:00
8450ba2f74 Merge pull request #1133 from kindlich/patch-1
Add env var for CENSUS_REPORTING_ENABLED
2024-01-20 10:38:04 +01:00
291ba760f1 Add Redis Sentinel config as environment vars 2024-01-16 14:28:59 +01:00
2fbb5dd0b0 Add env var for CENSUS_REPORTING_ENABLED
See #999
2024-01-16 10:20:03 +01:00
ea81db4789 Merge pull request #1129 from netbox-community/renovate/psycopg-3.x
Update dependency psycopg to v3.1.17
2024-01-08 08:15:13 +01:00
249e589096 Update dependency psycopg to v3.1.17 2024-01-07 16:16:19 +00:00
f1ca9ab7eb Merge pull request #1125 from netbox-community/develop
Release 2.8.0
2024-01-02 08:57:00 +01:00
dfa1904a82 Preparation for 2.8.0 2024-01-01 10:33:59 +01:00
b5c12a82d2 Merge pull request #1120 from netbox-community/renovate/dulwich-0.x
Update dependency dulwich to v0.21.7
2023-12-19 15:09:48 +01:00
7e1750d3a3 Update dependency dulwich to v0.21.7 2023-12-19 12:50:54 +00:00
651bbc49ba Merge pull request #1115 from netbox-community/renovate/psycopg-3.x
Update dependency psycopg to v3.1.16
2023-12-19 13:50:33 +01:00
d72facf182 Merge pull request #1122 from netbox-community/renovate/actions-setup-python-5.x
Update actions/setup-python action to v5
2023-12-19 13:50:14 +01:00
d7866d5f6e Update dependency psycopg to v3.1.16 2023-12-19 11:24:47 +00:00
33430fda08 Update actions/setup-python action to v5 2023-12-06 12:35:21 +00:00
e020b46f6a Merge pull request #1101 from netbox-community/renovate/python3-saml-1.x
Update dependency python3-saml to v1.16.0
2023-10-09 15:22:17 +02:00
51049781b4 Update dependency python3-saml to v1.16.0 2023-10-09 11:06:17 +00:00
1e681f30b3 Merge pull request #1097 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.6.0
2023-10-09 10:17:32 +02:00
4680e59a07 Update dependency django-auth-ldap to v4.6.0 2023-10-09 06:13:57 +00:00
aaaa628585 Merge pull request #1100 from netbox-community/renovate/django-storages-1.x
Update dependency django-storages to v1.14.2
2023-10-09 08:13:44 +02:00
e8fa63d18b Update dependency django-storages to v1.14.2 2023-10-09 03:36:20 +00:00
17b9569094 Merge pull request #1095 from netbox-community/renovate/django-storages-1.x
Update dependency django-storages to v1.14.1
2023-09-30 09:22:57 +02:00
1a3ace957a Update dependency django-storages to v1.14.1 2023-09-29 15:58:04 +00:00
96799369fe Merge pull request #1091 from netbox-community/renovate/psycopg-3.x
Update dependency psycopg to v3.1.12
2023-09-27 06:57:48 +02:00
f1de85d975 Update dependency psycopg to v3.1.12 2023-09-27 00:39:53 +00:00
830d498094 Merge pull request #1083 from netbox-community/renovate/postgres-16.x
Update postgres Docker tag to v16
2023-09-23 18:58:51 +02:00
714a132566 Update postgres Docker tag to v16 2023-09-23 14:42:28 +00:00
d0fbb37d66 Merge pull request #1090 from netbox-community/renovate/psycopg-3.x
Update dependency psycopg to v3.1.11
2023-09-23 16:42:15 +02:00
87159b56be Update dependency psycopg to v3.1.11 2023-09-23 12:57:54 +00:00
252ab33560 Merge pull request #1082 from netbox-community/renovate/docker.io-postgres-16.x
Update docker.io/postgres Docker tag to v16
2023-09-23 14:57:42 +02:00
0603f1ebe9 Update docker.io/postgres Docker tag to v16 2023-09-15 23:13:09 +00:00
164b01319c Merge pull request #1074 from netbox-community/renovate/docker-login-action-3.x
Update docker/login-action action to v3
2023-09-12 21:30:03 +02:00
e6fedf16fe Update docker/login-action action to v3 2023-09-12 18:03:05 +00:00
0594d2c3ae Merge pull request #1076 from netbox-community/renovate/docker-setup-qemu-action-3.x
Update docker/setup-qemu-action action to v3
2023-09-12 20:02:51 +02:00
8b8447f5c6 Update docker/setup-qemu-action action to v3 2023-09-12 15:12:56 +00:00
f42e78ece2 Merge pull request #1075 from netbox-community/renovate/docker-setup-buildx-action-3.x
Update docker/setup-buildx-action action to v3
2023-09-12 16:20:00 +02:00
c473fcc44a Update docker/setup-buildx-action action to v3 2023-09-12 09:21:44 +00:00
bcafc4328c Merge pull request #1067 from tobiasge/unit-update
Update Nginx unit to 1.31
2023-09-05 10:01:19 +02:00
fe27ebc907 Merge pull request #1069 from netbox-community/renovate/actions-checkout-4.x
Update actions/checkout action to v4
2023-09-05 09:48:27 +02:00
364555eef6 Update actions/checkout action to v4 2023-09-05 05:38:37 +00:00
451337750a Merge pull request #1070 from netbox-community/renovate/django-storages-1.x
Update dependency django-storages to v1.14
2023-09-05 07:38:24 +02:00
bc6e4f81b0 Update dependency django-storages to v1.14 2023-09-05 01:09:06 +00:00
e452004526 Update Nginx unit to 1.31 2023-09-03 09:07:26 +02:00
caba1b335d Merge pull request #1066 from netbox-community/renovate/dulwich-0.x
Update dependency dulwich to v0.21.6
2023-09-02 21:00:17 +02:00
c798e881b0 Update dependency dulwich to v0.21.6 2023-09-02 13:43:45 +00:00
b47e85ab3f Merge pull request #1062 from netbox-community/develop
Release 2.7.0
2023-08-30 23:22:57 +02:00
28553202a4 Merge pull request #1039 from tobiasge/netbox-3.6
Prepare for Netbox 3.6
2023-08-30 21:34:45 +02:00
69c5580a3e Fix #1061: Install openssh-client 2023-08-29 13:13:57 +02:00
4d54bb172b Merge pull request #1059 from NeodymiumFerBore/feat/remote-auth-backend-as-list
Map REMOTE_AUTH_BACKEND env var to list
2023-08-28 14:41:51 +02:00
44d0f47fb5 Prepare for Netbox 3.6 2023-08-28 09:41:55 +02:00
12af4233bd Map REMOTE_AUTH_BACKEND env var to list 2023-08-25 11:26:36 +02:00
f703bba5e1 Merge pull request #1043 from NeodymiumFerBore/fix/honor-default-config-value
Set REMOTE_AUTH_AUTO_CREATE_USER default to False
2023-08-16 08:49:48 +02:00
dd0c0b795d Merge pull request #1042 from toriningen/fix/1041-invalid-template
netbox.env: fixed unwanted string interpolation
2023-08-16 08:49:31 +02:00
be7af2ea4c Merge pull request #1045 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.5.0
2023-08-09 08:19:05 +02:00
75690ac7dd Update dependency django-auth-ldap to v4.5.0 2023-08-08 18:58:31 +00:00
ec603633ea Set REMOTE_AUTH_AUTO_CREATE_USER default to False 2023-08-04 23:49:44 +02:00
c99172661c netbox.env: fixed unwanted string interpolation 2023-08-03 05:19:59 +00:00
9bdd074ad7 Merge pull request #1034 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.4.0
2023-07-23 11:52:19 +02:00
d69dacef09 Update dependency django-auth-ldap to v4.4.0 2023-07-22 19:52:18 +00:00
ebc5900206 Merge pull request #1024 from florianschroen/2023-06-15_fix_volume_options
docker-compose.yml: fix volume mount options
2023-06-15 15:38:12 +02:00
8208dedb19 docker-compose.yml: fix volume mount options
`z` is valid only for bind mounts

When used with volumes, a warning appears for each volume:

netbox$ docker compose up
[+] Building 0.0s (0/0)
WARN[0000] mount of type `volume` should not define `bind` option
WARN[0000] mount of type `volume` should not define `bind` option
WARN[0000] mount of type `volume` should not define `bind` option

This may appear only when using a docker-compose.override.yml
2023-06-15 12:14:05 +02:00
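A minimal sketch of what the fix amounts to in a compose file: named volumes are listed without bind-mount options, while `z` (an SELinux relabel flag) only belongs on bind mounts. Paths and volume names below are illustrative.

```yaml
services:
  netbox:
    volumes:
      # named volume: no `bind` or `z` options needed
      - netbox-media-files:/opt/netbox/netbox/media
      # bind mount: `z` is meaningful here (SELinux relabeling)
      # - ./media:/opt/netbox/netbox/media:z
volumes:
  netbox-media-files:
```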
0d748ed392 Merge pull request #1010 from tobiasge/image-update
Update Ubuntu und Nginx Unit
2023-05-11 13:01:21 +02:00
23d5865e3d Update Ubuntu and Nginx Unit 2023-05-11 08:09:56 +02:00
2037e42e45 Merge pull request #1009 from marcquark/job_retention2
rename JOBRESULT_RETENTION to JOB_RETENTION
2023-05-08 19:38:40 +02:00
055538cc21 Fixed typo in else 2023-05-08 17:44:09 +02:00
5408cf5af0 rename JOBRESULT_RETENTION to JOB_RETENTION 2023-05-07 21:23:10 +02:00
22486fefb5 Merge pull request #1002 from netbox-community/develop
Prepare 2.6.1
2023-04-28 15:29:28 +02:00
96bda7fa4f Merge branch 'release' into develop 2023-04-28 13:15:19 +02:00
c085287e64 Prepare 2.6.1 2023-04-28 13:13:06 +02:00
b4a6be37ec Merge pull request #1001 from netbox-community/develop
Version 2.6.0
2023-04-28 09:00:49 +02:00
02a926431b Merge pull request #990 from tobiasge/prepare-for-nb-35
Preparation for Netbox 3.5
2023-04-28 07:41:22 +02:00
3978b14c7f Preparation for 2.6.0 2023-04-27 23:45:39 +02:00
7532508aab Ensure minimum length for the SECRET_KEY is met 2023-04-27 16:56:47 +02:00
858611ad67 Check if the new image tag exists 2023-04-27 09:32:42 +02:00
155e90c99f Removed BASE_PATH from configuration
Setting the BASE_PATH is a more involved process than just setting this variable.
To prevent surprises the option to set this via ENV variable was removed.
2023-04-27 09:32:42 +02:00
cb524c32ed Preparation for Netbox 3.5
- Reports and Scripts have changed in Netbox 3.5. They need to be uploaded now.
  The Docker compose now creates a volume as it does for the media files
- Napalm has been removed from Netbox 3.5
  All configuration entries for Napalm were removed and napalm itself is removed from the requirements file
- Removed Gunicorn from the image
  Nginx Unit has been used for a while now. No need to install Gunicorn
2023-04-27 09:32:42 +02:00
cb4dcc0488 Merge pull request #1000 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.3.0
2023-04-27 09:09:13 +02:00
ddcc8b5131 Update dependency django-auth-ldap to v4.3.0 2023-04-26 23:27:32 +00:00
3286faa94c Merge pull request #989 from netbox-community/develop
Version 2.5.3
2023-04-15 08:42:25 +02:00
788cd03a35 Merge pull request #988 from netbox-community/renovate/github-super-linter-5.x
Update github/super-linter action to v5
2023-04-15 01:21:43 +02:00
0911c2251d Merge branch 'release' into develop 2023-04-15 01:20:55 +02:00
c698496e36 Preparation for 2.5.3 2023-04-15 01:16:40 +02:00
23a262d72f Update github/super-linter action to v5 2023-04-14 19:16:09 +00:00
5273e17d89 Merge pull request #983 from tobiasge/arm-test
Try to get test on ARM64 to pass
2023-04-06 19:01:12 +02:00
e44f0398fb Try to get test on ARM64 to pass 2023-04-06 17:44:10 +02:00
7c2e012523 Merge pull request #982 from tobiasge/fix-social-auth-core
Fixed version conflicts for social-auth-core
2023-04-06 12:37:51 +02:00
5a29364bca Fixed wording
Co-authored-by: Christian Mäder <cimnine@users.noreply.github.com>
2023-04-06 11:08:34 +02:00
5d5b01f6b5 Fixed version conflicts for social-auth-core 2023-04-06 09:52:28 +02:00
5d6e733bce Merge pull request #980 from netbox-community/renovate/psycopg2-2.x
Update dependency psycopg2 to v2.9.6
2023-04-05 20:22:27 +02:00
87a9808bc2 Update dependency psycopg2 to v2.9.6 2023-04-03 11:39:03 +00:00
7bf9e1af5a Merge pull request #978 from netbox-community/develop
Missing version tags for 2.5.2
2023-03-29 20:50:29 +02:00
1e588431e2 Merge pull request #976 from tobiasge/better-tests
Further improved test configuration
2023-03-29 18:33:23 +02:00
41fd4e5d67 Further improved test configuration 2023-03-29 17:08:21 +02:00
17f1bb0af0 Preparation for 2.5.2 2023-03-29 12:41:34 +02:00
9cc58918ab Merge pull request #975 from netbox-community/develop
Version 2.5.2
2023-03-29 12:03:15 +02:00
831867499b Merge branch 'release' into develop 2023-03-29 10:36:28 +02:00
d5dde45bec Merge pull request #973 from tobiasge/social-auth-update
Use same version as Netbox for social-auth-core
2023-03-29 10:25:58 +02:00
6576c18a9c Merge pull request #972 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.2.0
2023-03-29 09:34:54 +02:00
efd6e6a3c2 Use same version as Netbox for social-auth-core 2023-03-29 08:58:49 +02:00
47a7eee16a Update dependency django-auth-ldap to v4.2.0 2023-03-28 10:20:51 +00:00
5eac65b8f6 Merge pull request #968 from ryanmerolle/patch-1
Update docker-compose.yml
2023-03-20 15:11:35 +01:00
2ba441124e Update docker-compose.yml 2023-03-20 08:21:17 -04:00
f2d070fc49 Added more tests (#965) 2023-03-16 21:44:08 +01:00
97ee353b00 Merge pull request #963 from netbox-community/develop
Version 2.5.1
2023-03-16 11:13:13 +01:00
c001b88a81 Merge pull request #964 from tobiasge/better-base-image-check
Simplified base image check
2023-03-16 09:46:35 +01:00
b131b07af8 Simplified base image check 2023-03-16 07:50:24 +01:00
311629ade4 Preparation for 2.5.1 2023-03-16 07:39:38 +01:00
256f23b4ad Merge pull request #961 from MarcHagen/patch-1
Catch DoesNotExist preventing startup
2023-03-16 07:37:05 +01:00
29e37a31d7 Catch DoesNotExist preventing startup
Fixes failing startup because of python error:

```
Traceback (most recent call last):
  File "/opt/netbox/netbox/./manage.py", line 10, in <module>
    execute_from_command_line(sys.argv)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/__init__.py", line 446, in execute_from_command_line
    utility.execute()
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/__init__.py", line 440, in execute
    self.fetch_command(subcommand).run_from_argv(self.argv)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/base.py", line 402, in run_from_argv
    self.execute(*args, **cmd_options)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/base.py", line 448, in execute
    output = self.handle(*args, **options)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/management/commands/shell.py", line 127, in handle
    exec(sys.stdin.read(), globals())
  File "<string>", line 2, in <module>
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
    return getattr(self.get_queryset(), name)(*args, **kwargs)
  File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/query.py", line 650, in get
    raise self.model.DoesNotExist(
users.models.Token.DoesNotExist: Token matching query does not exist.
```
2023-03-15 23:04:04 +01:00
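A minimal sketch of the pattern this fix applies: catching `Token.DoesNotExist` (the exception raised in the traceback above) instead of letting it abort startup. The token value is the documented default; the surrounding logic is an assumption, not the repository's exact code.

```python
# Hypothetical startup snippet run via `manage.py shell`; catching DoesNotExist
# keeps the container starting when no default API token is present.
from users.models import Token

DEFAULT_TOKEN = "0123456789abcdef0123456789abcdef01234567"

try:
    Token.objects.get(key=DEFAULT_TOKEN)
    print("WARNING: A default API token exists; please remove it.")
except Token.DoesNotExist:
    pass  # nothing to warn about, startup continues normally
```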
93017f150e Merge pull request #959 from netbox-community/develop
Version 2.5.0
2023-03-15 15:53:14 +01:00
ac8cb022ae Preparation for 2.5.0 2023-03-15 14:32:09 +01:00
480cabaefe Merge pull request #955 from tobiasge/953-default-admin
Don't create superuser with default credentials
2023-03-15 14:20:07 +01:00
ab7e19df55 Merge pull request #958 from tobiasge/954-use-skopeo
Added check for commands to all scripts
2023-03-15 14:16:39 +01:00
4ce89f9209 Added check for commands to all scripts 2023-03-15 13:02:25 +01:00
3e2bf7ec93 Don't create superuser with default credentials 2023-03-15 12:23:36 +01:00
4bad061bc4 Merge pull request #957 from tobiasge/fix-action-badge
Fixed Github action badge
2023-03-15 12:08:35 +01:00
f9abdf2390 Merge pull request #956 from tobiasge/954-use-skopeo
Replaced curl with Skopeo for in image functions
2023-03-15 12:04:10 +01:00
cc95a67df0 Fixed Github action badge 2023-03-15 10:16:50 +01:00
fb5bacc4b4 Replaced curl with Skopeo for in image functions 2023-03-15 09:32:37 +01:00
ed309a15b4 Merge pull request #948 from tobiasge/lazy-reindex-on-start
Fixes #947: Rebuild search index when needed
2023-03-14 20:46:44 +01:00
5f8a09536c Merge pull request #951 from tobiasge/new-unit
Updated to new unit version
2023-03-01 08:53:24 +01:00
d3a30e1172 Updated to new unit version 2023-03-01 07:39:37 +01:00
e60a746eee Fixes #947: Rebuild search index when needed
This rebuilds the search index when models were updated.
2023-02-23 08:37:53 +01:00
879c700bb8 Merge pull request #939 from timrabl/fix-931
Actually fix #471 and #931
2023-02-01 10:08:59 +01:00
6f70b88972 Squash of commits restoring AUTH_LDAP_USER_SEARCH
Restores the AUTH_LDAP_USER_SEARCH variable that was accidentally removed in #931, adapts the behaviour to the features requested in #471, and removes the duplicated AUTH_LDAP_USER_SEARCH definition.
2023-02-01 08:18:34 +01:00
7a9aef3791 Merge pull request #931 from timrabl/fix-471
implement extra LDAP user and group filters as requested in #471
2023-01-30 10:17:13 +01:00
3071c500da implement extra LDAP user and group filters as requested in #471 2023-01-30 09:06:22 +01:00
350747c1cb Merge pull request #910 from sc68cal/enforcing_shortname
Prepend docker.io to image URLs
2023-01-28 18:22:19 +01:00
250b1fb093 Merge pull request #933 from tobiasge/fix-gh-warnings
Fix Github action warnings
2023-01-28 18:21:59 +01:00
eef45c8197 Using new GITHUB_OUTPUT method 2023-01-28 15:49:13 +01:00
f549b93b9d Merge pull request #932 from tobiasge/gh-token
Using GITHUB_TOKEN for API
2023-01-28 15:31:53 +01:00
f2b0375d5b Using GITHUB_TOKEN for API 2023-01-28 13:38:28 +01:00
3202fb9446 Merge pull request #929 from christianharendt/create-redis-username
Add redis username parameter
2023-01-28 10:14:00 +01:00
ff373bd60d Update configuration.py 2023-01-27 15:05:17 +01:00
bd07a7a5a2 Add redis username parameter 2023-01-27 15:02:11 +01:00
41d80d66b1 Prepend docker.io to image URLs
This is to make podman happy, since newer versions of podman
have set short-name-mode to enforcing

https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md
2023-01-10 14:10:15 +01:00
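As an illustration, a compose fragment with fully qualified image references, which avoids podman's short-name resolution entirely (image tags below are examples):

```yaml
services:
  postgres:
    image: docker.io/postgres:16-alpine
  redis:
    image: docker.io/redis:7-alpine
```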
015e131d99 Merge pull request #914 from kmanwar89/patch-1
Fix syntax of docker compose commands
2023-01-05 09:47:28 +01:00
ff37e17eeb Merge pull request #913 from tobiasge/start_period_explanation
Added start_period to docker-compose example
2023-01-04 09:27:20 +01:00
1403f52d04 Merge pull request #912 from tobiasge/localhost_allowed_host
Ensure that '*' or 'localhost' is always in ALLOWED_HOSTS
2023-01-03 19:06:03 +01:00
7e0a8fee82 Improved comment 2023-01-03 16:48:00 +01:00
0c1b69ded0 Update docker-compose.override.yml.example
Co-authored-by: Christian Mäder <cimnine@users.noreply.github.com>
2023-01-03 16:45:09 +01:00
06e0815c70 Merge pull request #911 from netbox-community/renovate/python3-saml-1.x
Update dependency python3-saml to v1.15.0
2023-01-03 15:12:12 +01:00
8f2820626c Fix syntax of docker compose commands
Docker Compose's syntax changed as of Compose v2 (source: https://docs.docker.com/compose/reference/). Replaced references to "docker-compose" with "docker compose" to align with this change.
2023-01-03 09:06:46 -05:00
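The change boils down to the separator between the two command words; both forms are shown below for comparison.

```bash
# Compose v1 (legacy standalone binary)
docker-compose up -d
# Compose v2 (CLI plugin shipped with current Docker releases)
docker compose up -d
```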
b6faad36cb Added start_period to docker-compose example 2023-01-03 14:58:41 +01:00
73f479d5db Ensure that '*' or 'localhost' is always in ALLOWED_HOSTS 2023-01-03 14:41:26 +01:00
89ad7588f0 Update dependency python3-saml to v1.15.0 2022-12-27 23:54:03 +00:00
a4d986011d Merge pull request #906 from netbox-community/renovate/django-storages-1.x
Update dependency django-storages to v1.13.2
2022-12-23 16:54:27 +01:00
f2bb1198dd Update dependency django-storages to v1.13.2 2022-12-23 05:15:28 +00:00
39c7de4af4 Merge pull request #899 from netbox-community/develop
Release 2.4.0
2022-12-15 15:36:27 +01:00
238f95c5ce Preparation for 2.4.0 2022-12-15 12:57:51 +01:00
751a131b78 Merge pull request #889 from netbox-community/listenOnIPv6
Make nginx-unit listen on IPv4 and IPv6
2022-12-06 21:09:02 +01:00
5e2158da24 Merge pull request #866 from ryanmerolle/issue_784
Address housekeeping traceback
2022-12-06 18:12:02 +01:00
4a530947f8 Merge pull request #895 from tobiasge/ldap-cert-settings
Added settings for CA certificates for LDAP
2022-12-01 13:59:22 +01:00
bdb4396275 Added settings for CA certificates for LDAP 2022-12-01 08:17:25 +01:00
80d87bdf1b Merge pull request #894 from tobiasge/psycopg2-changes
Optimize psycopg2 dependency
2022-11-30 16:23:20 +01:00
6d465e6f81 Optimize psycopg2 dependency
We have been installing psycopg2 for a while now. This updates it to the latest version. Because psycopg2-binary is a direct dependency of Netbox, both versions were installed. Now we remove the pre-compiled version from the dependency file.
2022-11-30 14:36:53 +01:00
b72084290a Merge pull request #893 from tobiasge/startup-scripts-complete-removal
Startup scripts complete removal
2022-11-30 13:14:32 +01:00
aa3357817a Disable Gitleaks 2022-11-30 10:54:24 +01:00
9441be459c Improved testing
After the initializer scripts were removed, we didn't test the actual compose setup anymore. This adds new tests to run the database migrations.
2022-11-30 10:54:24 +01:00
1779ba790d Removed warning for initializer scripts 2022-11-30 09:51:36 +01:00
22cb2d5812 Merge pull request #865 from netbox-community/renovate/postgres-15.x
Update postgres Docker tag to v15
2022-11-30 08:50:15 +01:00
6020f4503a Make nginx-unit listen on IPv4 and IPv6 2022-11-23 14:40:03 +01:00
a4f494db14 Update postgres Docker tag to v15 2022-11-11 14:24:44 +00:00
0cac6f51a9 Merge pull request #876 from Wellyas/patch-1
Add requirements for SAML SSO
2022-11-11 14:08:15 +01:00
dd01e3c227 Enable SAML & OPENIDCONNECT for social-auth-core 2022-11-11 13:07:14 +01:00
925f41b97f Merge pull request #883 from movelg/housekeeping_var_name_fix
Housekeeping var name fix
2022-11-11 10:54:13 +01:00
7d871778eb Don't use bash internal variable name 2022-11-11 08:40:29 +01:00
8b16b16e45 Merge pull request #869 from netbox-community/develop
Version 2.3.0
2022-11-02 11:19:30 +01:00
c5f91ad359 Preparation for 2.3.0 2022-10-25 17:38:03 +02:00
35a94cb7e5 Update docker-compose.yml
Address housekeeping tracebacks & remove whitespace
2022-10-22 09:40:03 -04:00
1835d3cafd Merge pull request #857 from cimnine/ConfigurationUpdate
Updates the configuration, changes default for dynamic parameters to None
2022-10-15 12:48:52 +02:00
df8f406432 Remove unnecessary import 2022-10-15 10:23:14 +02:00
b8b1c8fc63 Rename conversion lambdas 2022-10-15 10:23:14 +02:00
41ff541225 Simplified test for existence of dynamic parameters 2022-10-15 10:17:49 +02:00
26399e224d Make dynamic configuration parameters actually work 2022-10-15 10:17:17 +02:00
951c12132a Updates the configuration, changes default for dynamic parameters to None 2022-10-15 10:15:55 +02:00
a3680b22dd Merge pull request #860 from cimnine/HereDocForBuildHelp
Improve Help for build.sh
2022-10-10 18:30:25 +02:00
d96e8f1dfd Improve Help for build.sh 2022-10-08 17:18:56 +02:00
c21a29b383 Merge pull request #852 from Delta1977/release
Define a volume for the caching Redis
2022-10-06 22:58:43 +02:00
f9f1533332 Merge pull request #856 from netbox-community/renovate/psycopg2-2.x
Update dependency psycopg2 to v2.9.4
2022-10-06 21:30:50 +02:00
4f45df571f Update dependency psycopg2 to v2.9.4 2022-10-06 16:58:57 +00:00
cd5015642e Fix for random volume Redis Cache
fixes:
https://github.com/netbox-community/netbox-docker/issues/851
2022-09-23 14:33:49 +02:00
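A sketch of the idea behind the fix: give the caching Redis service an explicitly named volume so Compose stops creating a fresh anonymous one on every run. Service and volume names below are illustrative.

```yaml
services:
  redis-cache:
    image: docker.io/redis:7-alpine
    volumes:
      - netbox-redis-cache-data:/data
volumes:
  netbox-redis-cache-data:
```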
d385cd2aa9 Merge pull request #779 from BegBlev/ldap-group-search-issue
AUTH_LDAP_BIND_AS_AUTHENTICATING_USER is now loaded from environment
2022-09-21 13:03:13 +02:00
7f285af7b4 AUTH_LDAP_BIND_AS_AUTHENTICATING_USER defaults to false 2022-09-09 16:11:21 +02:00
c8c360da99 Merge pull request #840 from tymekxxl/jobresult_retention
add JOBRESULT_RETENTION to configuration.py
2022-09-05 08:24:10 +02:00
53dcdc7bfc add JOBRESULT_RETENTION to configuration.py
Netbox v3.2.1 introduced a new enhancement that retains old script
and report results for a configured lifetime.
2022-09-01 10:03:19 -07:00
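A sketch of how such a setting is typically surfaced via an environment variable in configuration.py; the helper logic and the 90-day fallback are assumptions, not the repository's exact code.

```python
import os

# Hypothetical mapping of the environment variable onto the NetBox setting.
# The value is a number of days; the 90-day fallback is an assumed default.
JOBRESULT_RETENTION = int(os.environ.get("JOBRESULT_RETENTION", 90))
```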
3f1e45f636 Merge pull request #839 from netbox-community/develop
Version 2.2.0
2022-09-01 12:12:06 +02:00
e38c1d3c85 Preparation for 2.2.0 2022-09-01 10:43:10 +02:00
e48ab084ce Merge pull request #836 from tobiasge/fix-for-811
Installed bzip2
2022-08-31 15:14:41 +02:00
5ea3008f82 Merge pull request #835 from tobiasge/test-config
Removed unnecessary logging from tests
2022-08-31 15:08:54 +02:00
1418808930 Installed bzip2
In the Wiki backup section we use bzip2 in some of the examples. So it should be installed in the image.
2022-08-31 14:11:05 +02:00
da412e3bdb Removed unnecessary logging from tests 2022-08-31 14:05:34 +02:00
98add8f83a Merge pull request #831 from tobiasge/remove-initializers
Initializers are now a plugin
2022-08-31 13:43:58 +02:00
6f1d46d765 Initializers are now a plugin 2022-08-30 16:01:43 +02:00
ada2bd6501 Merge pull request #825 from tobiasge/readme-update
Improved README.md
2022-08-09 10:30:07 +02:00
8707cef55a Improved README.md
- Removed the hint to the `-ldap` tags that were discontinued with version 2.0.
- Increased minimum Docker version to reflect the needed version for running newer Ubuntu versions. See: https://medium.com/nttlabs/ubuntu-21-10-and-fedora-35-do-not-work-on-docker-20-10-9-1cd439d9921
- Fixed some Markdown linter errors
2022-08-09 09:08:40 +02:00
dc45dfc383 Merge pull request #822 from netbox-community/renovate/django-storages-1.x
Update dependency django-storages to v1.13.1
2022-08-06 17:05:58 +02:00
691ad94498 Update dependency django-storages to v1.13.1 2022-08-06 13:38:07 +00:00
535fefe12f Merge pull request #820 from netbox-community/renovate/django-storages-1.x
Update dependency django-storages to v1.13
2022-08-05 22:31:10 +02:00
db982814a1 Update dependency django-storages to v1.13 2022-08-05 13:52:56 +00:00
ebda4660de Merge pull request #809 from netbox-community/develop
Version 2.1.0
2022-07-25 08:19:22 +02:00
f44e377e29 Merge branch 'release' into develop 2022-07-25 08:14:46 +02:00
0a6d4c998d Preparation for 2.1.0 2022-07-22 10:15:51 +02:00
bd9733d929 Merge pull request #808 from tobiasge/ldap-tls-fix
LDAP TLS connections use the system trusted root store
2022-07-21 11:29:27 +02:00
9ae3282dcf Install libldap-common
This installs the LDAP configuration file which is needed to load the trusted root certificates from the system.
2022-07-21 09:13:11 +02:00
e62fedbd5e Merge pull request #786 from rjmidau/oidc
Add requirements for OIDC SSO
2022-07-19 13:58:48 +02:00
c53ae2afa0 Merge pull request #805 from tobiasge/with-ubuntu
Using Ubuntu 22.04 because Debian has old packages
2022-07-19 11:37:16 +02:00
45e7f6a30c Using Ubuntu 22.04 because Debian has old packages
With Debian the Quay.io security checker found several issues in the
image. With Ubuntu we have newer versions of all packages and therefore
fewer (or no) issues.
2022-07-19 09:42:56 +02:00
8fbedf2886 Merge pull request #802 from netbox-community/renovate/docker-setup-buildx-action-2.x
Update docker/setup-buildx-action action to v2
2022-07-16 10:24:23 +02:00
c0063a6573 Update docker/setup-buildx-action action to v2 2022-07-16 08:23:01 +00:00
c9795a8213 Merge pull request #803 from netbox-community/renovate/docker-setup-qemu-action-2.x
Update docker/setup-qemu-action action to v2
2022-07-16 10:22:43 +02:00
8aec402ea2 Update docker/setup-qemu-action action to v2 2022-07-15 18:36:01 +00:00
adc2079b17 Merge pull request #797 from tobiasge/arm64-auto-build
Arm64 auto build
2022-07-15 16:56:57 +02:00
cee1b5b079 Build ARM64 images 2022-07-15 15:06:16 +02:00
13d66f2ae7 Merge pull request #778 from tobiasge/path-update
Added our Python venv to the PATH variable
2022-07-14 22:37:14 +02:00
901ac05e99 Added our Python venv to the PATH variable
Now users can run "manage.py" without specifying the full path.
2022-07-13 15:53:56 +02:00
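In practice that means the Django management command can be run through the interpreter found on PATH instead of via the virtualenv's full path; the command shown is illustrative.

```bash
# Before: interpreter addressed through the virtualenv's full path
docker compose exec netbox /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py showmigrations
# After: the venv's bin directory is on PATH inside the container
docker compose exec netbox python3 /opt/netbox/netbox/manage.py showmigrations
```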
2bdaed1e6f Add requirements for OIDC SSO 2022-07-13 16:55:38 +10:00
b45934cd9e Merge pull request #796 from netbox-community/develop
Version 2.0.0
2022-07-12 18:10:25 +02:00
fceb6e0e13 Removed CSRF_TRUSTED_ORIGINS from extra.py
CSRF_TRUSTED_ORIGINS is already in configuration.py
2022-07-12 17:16:20 +02:00
f05a9c67ae Preparation for 2.0.0 2022-07-12 16:50:32 +02:00
f2d1e62204 Merge pull request #791 from netbox-community/renovate/napalm-4.x
Update dependency napalm to v4
2022-07-12 07:53:50 +02:00
8f704f220a Update dependency napalm to v4 2022-07-11 13:11:13 +00:00
d5093201ee Merge pull request #780 from tobiasge/fixed-comment
Fixed comment and variable name
2022-06-22 12:51:27 +02:00
401777adff Fixed comment and variable name 2022-06-22 12:28:33 +02:00
5ff292ba5f AUTH_LDAP_BIND_AS_AUTHENTICATING_USER is now loaded from environment 2022-06-20 14:10:37 +02:00
f80cc70d76 Merge pull request #776 from tobiasge/image-label-update
Updated image labels and build script
2022-06-16 19:21:39 +02:00
5b8bf780df Updated image labels and build script 2022-06-15 16:18:21 +02:00
bce52596a5 Merge pull request #775 from tobiasge/debian-based
Debian based
2022-06-15 11:02:30 +02:00
c3c94b0a63 Used version number and remove explicit dependency 2022-06-15 10:15:15 +02:00
14c30fb81c Changed the ignored warnings 2022-06-15 09:04:00 +02:00
1130ff6c6d Merge branch 'csrf-trusted-origins' into develop 2022-06-12 10:52:10 +02:00
993c93b34a Add CSRF option to extra.py 2022-06-12 10:51:56 +02:00
dcf0bdb950 Added psycopg2 as additional dependency
With psycopg2-binary the image doesn't work on ARM64.
2022-06-10 10:38:21 +02:00
9e2f4313fb First version of Debian based image 2022-06-10 09:31:41 +02:00
df41020cb8 Merge pull request #753 from netbox-community/renovate/redis-7.x
Update dependency redis to v7
2022-06-09 15:35:34 +02:00
1332df4857 Merge pull request #759 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4.1.0
2022-06-09 15:35:12 +02:00
3f23419bb7 Merge pull request #768 from FliesLikeABrick/feature/skip-git-no-git-operations-fix-#765
Proposing SKIP_GIT now skips all git operations
2022-06-09 15:34:53 +02:00
184ff72912 Update dependency redis to v7 2022-06-09 06:12:46 +00:00
55ee95df78 Update dependency django-auth-ldap to v4.1.0 2022-06-09 06:12:42 +00:00
78fe47aaba Merge pull request #774 from netbox-community/renovate/actions-setup-python-4.x
Update actions/setup-python action to v4
2022-06-09 08:12:17 +02:00
1370596f27 Fixed missing Python version
The Python version has to be set with v4 of the action. The version is now fixed to what is in Alpine 3.14.
2022-06-09 07:47:48 +02:00
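For reference, a minimal step of the kind this commit adds; per the commit message, v4 of the action needs an explicit version, and 3.9 matches what Alpine 3.14 ships.

```yaml
- uses: actions/setup-python@v4
  with:
    python-version: '3.9'  # explicit version required per the commit above
```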
606e56d78f Update actions/setup-python action to v4 2022-06-08 19:42:43 +00:00
51226c8e50 Proposed fix for netbox-docker #765 -- SKIP_GIT will skip other git operations 2022-05-25 10:43:23 -04:00
6c9d4aebac Merge pull request #757 from netbox-community/renovate/docker-login-action-2.x
Update docker/login-action action to v2
2022-05-06 08:53:08 +02:00
ed8b42fbde Update docker/login-action action to v2 2022-05-05 19:01:27 +00:00
aa56f645e9 Merge pull request #754 from netbox-community/renovate/napalm-3.x
Update dependency napalm to v3.4.1
2022-05-05 16:41:23 +02:00
9a1bb788d2 Update dependency napalm to v3.4.1 2022-04-29 17:02:51 +00:00
7d32f79379 Merge pull request #750 from thomas-mc-work/patch-2
Add MAPS_URL to config
2022-04-25 17:04:04 +02:00
0410cf2fd2 Merge pull request #728 from Lon1/contact-startups
Contact startups
2022-04-25 17:03:31 +02:00
c9f5e34c0d Improved contact initializer examples 2022-04-25 16:38:09 +02:00
047f2abdb5 adding contact startups 2022-04-25 15:51:32 +02:00
f13a6573a8 Merge pull request #736 from kr3ator/feature/cable_initializers
Startup script for cables
2022-04-25 15:44:36 +02:00
596bb6953c preserve sort order 2022-04-19 12:59:27 +02:00
d482e623df fix: Template and non-template fields example 2022-04-19 12:54:37 +02:00
bf910dea02 Handle MAPS_URL config value
Regarding https://github.com/netbox-community/netbox/blob/develop/docs/configuration/dynamic-settings.md#maps_url
2022-04-19 12:21:07 +02:00
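A sketch of wiring the dynamic setting to an environment variable in configuration.py; only setting it when the variable is present (so the NetBox default applies otherwise) is an assumption about the approach, not the repository's exact code.

```python
import os

# Hypothetical handling of the MAPS_URL dynamic setting.
if "MAPS_URL" in os.environ:
    MAPS_URL = os.environ["MAPS_URL"]
```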
57da852af6 Cabling script minor updates 2022-04-12 14:47:24 +02:00
4c21344e8b Merge pull request #744 from kr3ator/feature/interfaces_improvements
Add support for bridge, lag, parent in DCIM Interfaces startup script
2022-04-12 14:07:56 +02:00
302c0fed59 Cable startup script 2022-04-12 13:51:57 +02:00
0e7afe466d feat: Add support for bridge, lag, parent 2022-04-12 13:49:44 +02:00
2c757af250 Merge pull request #742 from kr3ator/feature/device_type_components
Add support for DeviceType components
2022-04-12 13:29:28 +02:00
27f28935d7 Merge pull request #738 from RobinBeismann/develop
Added environment variable for CSRF_TRUSTED_ORIGINS
2022-04-08 17:03:28 +02:00
12753dd7d4 Document field name precedence 2022-04-08 15:57:52 +02:00
dd8dce1b49 feat: Add support for DeviceType components 2022-04-08 15:57:46 +02:00
19280c2bb0 Fixed default value to reflect upstream 2022-04-08 15:36:49 +02:00
5c4a1cc082 Merge pull request #729 from kr3ator/feature/separate_default_params
feat: Make startup scripts idempotent
2022-04-08 15:17:07 +02:00
a63af05bec Update initializers/users.yml
Co-authored-by: Christian Mäder <cimnine@users.noreply.github.com>
2022-04-08 14:57:37 +02:00
9be7b0e109 feat: Make startup scripts idempotent 2022-04-07 19:47:19 +02:00
d5b1d9ce39 Added environment variable for CSRF_TRUSTED_ORIGINS 2022-04-07 16:09:27 +02:00
a6eb4fef00 Merge pull request #730 from kr3ator/bugfix/cf_creation
Fix setting custom field data if custom field object is missing
2022-04-07 09:01:04 +02:00
d1c69e8fe5 fix: invalid Interface optional assoc 2022-04-06 16:45:25 +02:00
81d9e4f560 Fix setting CF data if CF object is missing 2022-04-06 14:09:07 +02:00
61a3afbb3b Merge pull request #734 from netbox-community/develop
Version 1.6.1
2022-04-06 09:58:45 +02:00
91ab616cc5 Preparation for 1.6.1 2022-04-06 09:39:22 +02:00
43d62f1284 Merge pull request #733 from tobiasge/remove-tzdata
tzdata is already required in Netbox
2022-04-06 09:03:28 +02:00
d61470d6ef Merge pull request #725 from netbox-community/renovate/napalm-3.x
Update dependency napalm to v3.4.0
2022-04-06 08:50:29 +02:00
091d23d537 tzdata is already required in Netbox 2022-04-06 08:43:10 +02:00
2f24902436 Update dependency napalm to v3.4.0 2022-03-21 20:43:51 +00:00
36d47b9b88 Merge pull request #711 from netbox-community/renovate/actions-checkout-3.x
Update actions/checkout action to v3
2022-03-02 07:46:38 +01:00
2c20771682 Update actions/checkout action to v3 2022-03-01 19:06:16 +00:00
a9cdec6d87 Merge pull request #708 from netbox-community/renovate/actions-setup-python-3.x
Update actions/setup-python action to v3
2022-03-01 09:17:08 +01:00
f1efccea6b Update actions/setup-python action to v3 2022-02-28 13:52:44 +00:00
226d8438de Merge pull request #705 from netbox-community/develop
Release 1.6.0
2022-02-21 12:17:25 +01:00
b6d6f85dc0 Preparation for 1.6.0 2022-02-21 11:25:37 +01:00
49ed10bbee Merge pull request #701 from tobiasge/requirements
Added missing tzdata
2022-02-16 12:22:27 +01:00
3afdd3bf13 Added missing tzdata 2022-02-16 09:32:03 +01:00
0170ed7d6f Merge pull request #700 from netbox-community/renovate/ruamel.yaml-0.x
Update dependency ruamel.yaml to v0.17.21
2022-02-15 18:39:26 +01:00
dad2e93572 Update dependency ruamel.yaml to v0.17.21 2022-02-12 11:16:45 +00:00
d726426611 Merge pull request #665 from netbox-community/renovate/django-auth-ldap-4.x
Update dependency django-auth-ldap to v4
2022-02-05 11:37:07 +01:00
b31d99b936 Update dependency django-auth-ldap to v4 2022-02-03 16:50:11 +00:00
8860d32f97 Cleanup & Reorg startup scripts (#691)
* Cleanup & Reorg startup scripts
2022-02-03 17:10:39 +01:00
b9dff0d22e Merge pull request #697 from tobiasge/feature-build-fix
Fixed build for Netbox feature branch
2022-02-02 17:07:32 +01:00
297aab1fd3 Fixed build for Netbox feature branch 2022-02-02 16:49:29 +01:00
54bf7a3819 Merge pull request #695 from ryanmerolle/graphql
Explicitly set GRAPHQL_ENABLED
2022-02-02 16:43:32 +01:00
52876be723 add graphql 2022-01-31 21:45:29 -05:00
ff20e4f49c Merge pull request #685 from tobiasge/asn-initializers
Added ASN initializer script
2022-01-10 11:14:55 +01:00
ee47ba04bc Added ASN initializer script 2022-01-10 10:49:51 +01:00
688374d13f Merge pull request #681 from netbox-community/renovate/ruamel.yaml-0.x
Update dependency ruamel.yaml to v0.17.20
2022-01-03 12:28:53 +01:00
22c4212438 Update dependency ruamel.yaml to v0.17.20 2022-01-03 09:48:22 +00:00
26dcb2f2e0 Merge pull request #668 from netbox-community/renovate/ruamel.yaml-0.x
Update dependency ruamel.yaml to v0.17.19
2021-12-29 22:47:22 +01:00
46afa266fa Update dependency ruamel.yaml to v0.17.19 2021-12-26 15:22:25 +00:00
b2d26d9dce Merge pull request #660 from tobiasge/fix-link
Fixed GHCR link
2021-12-11 12:12:13 +01:00
0b622361f3 Merge pull request #661 from tobiasge/disable-edge
Removed Alpine edge from tests
2021-12-11 12:12:02 +01:00
ed48909f96 Removed Alpine edge from tests 2021-12-10 14:29:53 +01:00
60d191bb2a Fixed textlint errors 2021-12-09 22:23:03 +01:00
4f482e484f Fixed GHCR link 2021-12-09 22:06:59 +01:00
119 changed files with 1046 additions and 3017 deletions


@@ -1,10 +1,10 @@
.git
.github
.travis.yml
.git*
*.md
env
build*
docker-compose.override.yml
docker-compose*
env
test-configuration
.netbox/.git*
.netbox/.travis.yml
.netbox/contrib
.netbox/scripts
.netbox/upgrade.sh


@@ -1,66 +1,94 @@
---
name: push
on:
push:
branches-ignore:
- release
- renovate/**
pull_request:
branches-ignore:
- release
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
lint:
runs-on: ubuntu-latest
name: Checks syntax of our code
steps:
- uses: actions/checkout@v2
with:
# Full git history is needed to get a proper list of changed files within `super-linter`
fetch-depth: 0
- uses: actions/setup-python@v2
- name: Lint Code Base
uses: github/super-linter@v4
env:
DEFAULT_BRANCH: develop
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SUPPRESS_POSSUM: true
LINTER_RULES_PATH: /
VALIDATE_ALL_CODEBASE: false
VALIDATE_DOCKERFILE: false
FILTER_REGEX_EXCLUDE: (.*/)?(LICENSE|configuration/.*)
EDITORCONFIG_FILE_NAME: .ecrc
DOCKERFILE_HADOLINT_FILE_NAME: .hadolint.yaml
MARKDOWN_CONFIG_FILE: .markdown-lint.yml
PYTHON_BLACK_CONFIG_FILE: pyproject.toml
PYTHON_FLAKE8_CONFIG_FILE: .flake8
PYTHON_ISORT_CONFIG_FILE: pyproject.toml
- uses: actions/checkout@v4
with:
# Full git history is needed to get a proper
# list of changed files within `super-linter`
fetch-depth: 0
- uses: actions/setup-python@v5
with:
python-version: '3.9'
- name: Lint Code Base
uses: github/super-linter@v6
env:
DEFAULT_BRANCH: develop
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SUPPRESS_POSSUM: true
LINTER_RULES_PATH: /
VALIDATE_ALL_CODEBASE: false
VALIDATE_CHECKOV: false
VALIDATE_DOCKERFILE: false
VALIDATE_GITLEAKS: false
VALIDATE_JSCPD: false
FILTER_REGEX_EXCLUDE: (.*/)?(LICENSE|configuration/.*)
EDITORCONFIG_FILE_NAME: .ecrc
DOCKERFILE_HADOLINT_FILE_NAME: .hadolint.yaml
MARKDOWN_CONFIG_FILE: .markdown-lint.yml
PYTHON_BLACK_CONFIG_FILE: pyproject.toml
PYTHON_FLAKE8_CONFIG_FILE: .flake8
PYTHON_ISORT_CONFIG_FILE: pyproject.toml
YAML_CONFIG_FILE: .yamllint.yaml
build:
continue-on-error: ${{ matrix.docker_from == 'alpine:edge' }}
continue-on-error: ${{ matrix.build_cmd != './build-latest.sh' }}
strategy:
matrix:
build_cmd:
- ./build-latest.sh
- PRERELEASE=true ./build-latest.sh
- ./build.sh feature
- ./build.sh develop
docker_from:
- '' # use the default of the build script
- alpine:edge
- ./build-latest.sh
- PRERELEASE=true ./build-latest.sh
- ./build.sh feature
- ./build.sh develop
os:
- ubuntu-latest
- self-hosted
fail-fast: false
runs-on: ubuntu-latest
env:
GH_ACTION: enable
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
IMAGE_NAMES: docker.io/netboxcommunity/netbox
runs-on: ${{ matrix.os }}
name: Builds new NetBox Docker Images
steps:
- id: git-checkout
name: Checkout
uses: actions/checkout@v2
- id: docker-build
name: Build the image from '${{ matrix.docker_from }}' with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }}
env:
DOCKER_FROM: ${{ matrix.docker_from }}
GH_ACTION: enable
- id: docker-test
name: Test the image
run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
if: steps.docker-build.outputs.skipped != 'true'
- id: git-checkout
name: Checkout
uses: actions/checkout@v4
- id: buildx-setup
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- id: arm-buildx-platform
name: Set BUILDX_PLATFORM to ARM64
if: matrix.os == 'self-hosted'
run: |
echo "BUILDX_PLATFORM=linux/arm64" >>"${GITHUB_ENV}"
- id: docker-build
name: Build the image for '${{ matrix.os }}' with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }}
env:
BUILDX_BUILDER_NAME: ${{ steps.buildx-setup.outputs.name }}
- id: arm-time-limit
name: Set Netbox container start_period higher on ARM64
if: matrix.os == 'self-hosted'
run: |
echo "NETBOX_START_PERIOD=240s" >>"${GITHUB_ENV}"
- id: docker-test
name: Test the image
run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
if: steps.docker-build.outputs.skipped != 'true'


@@ -1,3 +1,4 @@
---
name: release
on:
@@ -6,82 +7,78 @@ on:
- published
schedule:
- cron: '45 5 * * *'
workflow_dispatch:
jobs:
build:
strategy:
matrix:
build_cmd:
- ./build-latest.sh
- PRERELEASE=true ./build-latest.sh
- ./build.sh feature
- ./build.sh develop
- ./build-latest.sh
- PRERELEASE=true ./build-latest.sh
- ./build.sh feature
- ./build.sh develop
platform:
- linux/amd64,linux/arm64
fail-fast: false
runs-on: ubuntu-latest
name: Builds new NetBox Docker Images
env:
GH_ACTION: enable
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
IMAGE_NAMES: docker.io/netboxcommunity/netbox quay.io/netboxcommunity/netbox ghcr.io/netbox-community/netbox
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Get Version of NetBox Docker
run: |
echo "::set-output name=version::$(cat VERSION)"
shell: bash
-
id: docker-build
name: Build the image with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }}
-
name: Test the image
run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
if: steps.docker-build.outputs.skipped != 'true'
# docker.io
-
name: Login to docker.io
uses: docker/login-action@v1
with:
registry: docker.io
username: ${{ secrets.dockerhub_username }}
password: ${{ secrets.dockerhub_password }}
if: steps.docker-build.outputs.skipped != 'true'
-
name: Push the image to docker.io
run: ${{ matrix.build_cmd }} --push-only
if: steps.docker-build.outputs.skipped != 'true'
# quay.io
-
name: Login to Quay.io
uses: docker/login-action@v1
with:
registry: quay.io
username: ${{ secrets.quayio_username }}
password: ${{ secrets.quayio_password }}
if: steps.docker-build.outputs.skipped != 'true'
-
name: Build and push the image with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }} --push
env:
DOCKER_REGISTRY: quay.io
if: steps.docker-build.outputs.skipped != 'true'
# ghcr.io
-
name: Login to GitHub Container Registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
if: steps.docker-build.outputs.skipped != 'true'
-
name: Build and push the image with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }} --push
env:
DOCKER_REGISTRY: ghcr.io
DOCKER_ORG: netbox-community
if: steps.docker-build.outputs.skipped != 'true'
- id: source-checkout
name: Checkout
uses: actions/checkout@v4
- id: set-netbox-docker-version
name: Get Version of NetBox Docker
run: echo "version=$(cat VERSION)" >>"$GITHUB_OUTPUT"
shell: bash
- id: qemu-setup
name: Set up QEMU
uses: docker/setup-qemu-action@v3
- id: buildx-setup
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- id: docker-build
name: Build the image with '${{ matrix.build_cmd }}'
run: ${{ matrix.build_cmd }}
- id: test-image
name: Test the image
run: IMAGE="${FINAL_DOCKER_TAG}" ./test.sh
if: steps.docker-build.outputs.skipped != 'true'
# docker.io
- id: docker-io-login
name: Login to docker.io
uses: docker/login-action@v3
with:
registry: docker.io
username: ${{ secrets.dockerhub_username }}
password: ${{ secrets.dockerhub_password }}
if: steps.docker-build.outputs.skipped != 'true'
# quay.io
- id: quay-io-login
name: Login to Quay.io
uses: docker/login-action@v3
with:
registry: quay.io
username: ${{ secrets.quayio_username }}
password: ${{ secrets.quayio_password }}
if: steps.docker-build.outputs.skipped != 'true'
# ghcr.io
- id: ghcr-io-login
name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
if: steps.docker-build.outputs.skipped != 'true'
- id: build-and-push
name: Push the image
run: ${{ matrix.build_cmd }} --push
if: steps.docker-build.outputs.skipped != 'true'
env:
BUILDX_PLATFORM: ${{ matrix.platform }}
BUILDX_BUILDER_NAME: ${{ steps.buildx-setup.outputs.name }}

.gitignore (vendored), 3 lines changed

@@ -1,6 +1,6 @@
*.sql.gz
.netbox
.initializers
.python-version
docker-compose.override.yml
*.pem
configuration/*
@@ -11,5 +11,4 @@ configuration/ldap/*
!configuration/ldap/ldap_config.py
!configuration/logging.py
!configuration/plugins.py
prometheus.yml
super-linter.log


@@ -1,3 +1,4 @@
ignored:
- DL3006
- DL3018
- DL3008
- DL3003

.yamllint.yaml (new file), 5 lines added

@@ -0,0 +1,5 @@
---
rules:
line-length:
max: 120


@@ -1,48 +1,43 @@
ARG FROM
FROM ${FROM} as builder
RUN apk add --no-cache \
bash \
build-base \
cargo \
RUN export DEBIAN_FRONTEND=noninteractive \
&& apt-get update -qq \
&& apt-get upgrade \
--yes -qq --no-install-recommends \
&& apt-get install \
--yes -qq --no-install-recommends \
build-essential \
ca-certificates \
cmake \
cyrus-sasl-dev \
git \
graphviz \
jpeg-dev \
libevent-dev \
libffi-dev \
libldap-dev \
libpq-dev \
libsasl2-dev \
libssl-dev \
libxml2-dev \
libxmlsec1 \
libxmlsec1-dev \
libxmlsec1-openssl \
libxslt-dev \
make \
musl-dev \
openldap-dev \
postgresql-dev \
py3-pip \
pkg-config \
python3-dev \
&& python3 -m venv /opt/netbox/venv \
&& /opt/netbox/venv/bin/python3 -m pip install --upgrade \
python3-pip \
python3-venv \
&& python3 -m venv /opt/netbox/venv \
&& /opt/netbox/venv/bin/python3 -m pip install --upgrade \
pip \
setuptools \
wheel
# Build libcrc32c for google-crc32c python module
RUN git clone https://github.com/google/crc32c \
&& cd crc32c \
&& git submodule update --init --recursive \
&& mkdir build \
&& cd build \
&& cmake \
-DCMAKE_BUILD_TYPE=Release \
-DCRC32C_BUILD_TESTS=no \
-DCRC32C_BUILD_BENCHMARKS=no \
-DBUILD_SHARED_LIBS=yes \
.. \
&& make all install
ARG NETBOX_PATH
COPY ${NETBOX_PATH}/requirements.txt requirements-container.txt /
RUN /opt/netbox/venv/bin/pip install \
RUN \
# Gunicorn is not needed because we use Nginx Unit
sed -i -e '/gunicorn/d' /requirements.txt && \
# We need 'social-auth-core[all]' in the Docker image. But if we put it in our own requirements-container.txt
# we have potential version conflicts and the build will fail.
# That's why we just replace it in the original requirements.txt.
sed -i -e 's/social-auth-core/social-auth-core\[all\]/g' /requirements.txt && \
/opt/netbox/venv/bin/pip install \
-r /requirements.txt \
-r /requirements-container.txt
@@ -53,40 +48,46 @@ RUN /opt/netbox/venv/bin/pip install \
ARG FROM
FROM ${FROM} as main
RUN apk add --no-cache \
bash \
RUN export DEBIAN_FRONTEND=noninteractive \
&& apt-get update -qq \
&& apt-get upgrade \
--yes -qq --no-install-recommends \
&& apt-get install \
--yes -qq --no-install-recommends \
bzip2 \
ca-certificates \
curl \
graphviz \
libevent \
libffi \
libjpeg-turbo \
libxslt \
libldap-common \
libpq5 \
libxmlsec1-openssl \
openssh-client \
openssl \
postgresql-client \
postgresql-libs \
py3-pip \
python3 \
python3-distutils \
tini \
unit \
unit-python3
&& curl --silent --output /usr/share/keyrings/nginx-keyring.gpg \
https://unit.nginx.org/keys/nginx-keyring.gpg \
&& echo "deb [signed-by=/usr/share/keyrings/nginx-keyring.gpg] https://packages.nginx.org/unit/ubuntu/ mantic unit" \
> /etc/apt/sources.list.d/unit.list \
&& apt-get update -qq \
&& apt-get install \
--yes -qq --no-install-recommends \
unit=1.32.0-1~mantic \
unit-python3.11=1.32.0-1~mantic \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /opt
COPY --from=builder /usr/local/lib/libcrc32c.* /usr/local/lib/
COPY --from=builder /usr/local/include/crc32c /usr/local/include
COPY --from=builder /usr/local/lib/cmake/Crc32c /usr/local/lib/cmake/
COPY --from=builder /opt/netbox/venv /opt/netbox/venv
ARG NETBOX_PATH
COPY ${NETBOX_PATH} /opt/netbox
# Copy the modified 'requirements*.txt' files, to have the files actually used during installation
COPY --from=builder /requirements.txt /requirements-container.txt /opt/netbox/
COPY docker/configuration.docker.py /opt/netbox/netbox/netbox/configuration.py
COPY docker/ldap_config.docker.py /opt/netbox/netbox/netbox/ldap_config.py
COPY docker/docker-entrypoint.sh /opt/netbox/docker-entrypoint.sh
COPY docker/housekeeping.sh /opt/netbox/housekeeping.sh
COPY docker/launch-netbox.sh /opt/netbox/launch-netbox.sh
COPY startup_scripts/ /opt/netbox/startup_scripts/
COPY initializers/ /opt/netbox/initializers/
COPY configuration/ /etc/netbox/config/
COPY docker/nginx-unit.json /etc/unit/
@@ -95,32 +96,21 @@ WORKDIR /opt/netbox/netbox
# Must set permissions for '/opt/netbox/netbox/media' directory
# to g+w so that pictures can be uploaded to netbox.
RUN mkdir -p static /opt/unit/state/ /opt/unit/tmp/ \
&& chown -R unit:root media /opt/unit/ \
&& chmod -R g+w media /opt/unit/ \
&& cd /opt/netbox/ && /opt/netbox/venv/bin/python -m mkdocs build \
&& chown -R unit:root /opt/unit/ media reports scripts \
&& chmod -R g+w /opt/unit/ media reports scripts \
&& cd /opt/netbox/ && SECRET_KEY="dummyKeyWithMinimumLength-------------------------" /opt/netbox/venv/bin/python -m mkdocs build \
--config-file /opt/netbox/mkdocs.yml --site-dir /opt/netbox/netbox/project-static/docs/ \
&& SECRET_KEY="dummy" /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py collectstatic --no-input
&& SECRET_KEY="dummyKeyWithMinimumLength-------------------------" /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py collectstatic --no-input
ENTRYPOINT [ "/sbin/tini", "--" ]
ENV LANG=C.utf8 PATH=/opt/netbox/venv/bin:$PATH
ENTRYPOINT [ "/usr/bin/tini", "--" ]
CMD [ "/opt/netbox/docker-entrypoint.sh", "/opt/netbox/launch-netbox.sh" ]
LABEL ORIGINAL_TAG="" \
NETBOX_GIT_BRANCH="" \
NETBOX_GIT_REF="" \
NETBOX_GIT_URL="" \
# See http://label-schema.org/rc1/#build-time-labels
# Also https://microbadger.com/labels
org.label-schema.schema-version="1.0" \
org.label-schema.build-date="" \
org.label-schema.name="NetBox Docker" \
org.label-schema.description="A container based distribution of NetBox, the free and open IPAM and DCIM solution." \
org.label-schema.vendor="The netbox-docker contributors." \
org.label-schema.url="https://github.com/netbox-community/netbox-docker" \
org.label-schema.usage="https://github.com/netbox-community/netbox-docker/wiki" \
org.label-schema.vcs-url="https://github.com/netbox-community/netbox-docker.git" \
org.label-schema.vcs-ref="" \
org.label-schema.version="snapshot" \
LABEL netbox.original-tag="" \
netbox.git-branch="" \
netbox.git-ref="" \
netbox.git-url="" \
# See https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
org.opencontainers.image.created="" \
org.opencontainers.image.title="NetBox Docker" \
@@ -132,17 +122,4 @@ LABEL ORIGINAL_TAG="" \
org.opencontainers.image.documentation="https://github.com/netbox-community/netbox-docker/wiki" \
org.opencontainers.image.source="https://github.com/netbox-community/netbox-docker.git" \
org.opencontainers.image.revision="" \
org.opencontainers.image.version="snapshot"
#####
## LDAP specific configuration
#####
FROM main as ldap
RUN apk add --no-cache \
libsasl \
libldap \
util-linux
COPY docker/ldap_config.docker.py /opt/netbox/netbox/netbox/ldap_config.py
org.opencontainers.image.version=""


@@ -3,11 +3,11 @@
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/netbox-community/netbox-docker)][github-release]
[![GitHub stars](https://img.shields.io/github/stars/netbox-community/netbox-docker)][github-stargazers]
![GitHub closed pull requests](https://img.shields.io/github/issues-pr-closed-raw/netbox-community/netbox-docker)
![Github release workflow](https://img.shields.io/github/workflow/status/netbox-community/netbox-docker/release)
![Github release workflow](https://img.shields.io/github/actions/workflow/status/netbox-community/netbox-docker/release.yml?branch=release)
![Docker Pulls](https://img.shields.io/docker/pulls/netboxcommunity/netbox)
[![GitHub license](https://img.shields.io/github/license/netbox-community/netbox-docker)][netbox-docker-license]
[The Github repository](netbox-docker-github) houses the components needed to build NetBox as a container.
[The GitHub repository][netbox-docker-github] houses the components needed to build NetBox as a container.
Images are built regularly using the code in that repository and are pushed to [Docker Hub][netbox-dockerhub], [Quay.io][netbox-quayio] and [GitHub Container Registry][netbox-ghcr].
Do you have any questions?
@ -16,10 +16,9 @@ please join [our Slack][netbox-docker-slack] and ask for help in the [`#netbox-d
[github-stargazers]: https://github.com/netbox-community/netbox-docker/stargazers
[github-release]: https://github.com/netbox-community/netbox-docker/releases
[netbox-docker-microbadger]: https://microbadger.com/images/netboxcommunity/netbox
[netbox-dockerhub]: https://hub.docker.com/r/netboxcommunity/netbox/
[netbox-quayio]: https://quay.io/repository/netboxcommunity/netbox
[netbox-ghcr]: https://ghcr.io/netbox-community/netbox/
[netbox-ghcr]: https://github.com/netbox-community/netbox-docker/pkgs/container/netbox
[netbox-docker-github]: https://github.com/netbox-community/netbox-docker/
[netbox-docker-slack]: https://join.slack.com/t/netdev-community/shared_invite/zt-mtts8g0n-Sm6Wutn62q_M4OdsaIycrQ
[netbox-docker-slack-channel]: https://netdev-community.slack.com/archives/C01P0GEVBU7
@ -35,28 +34,28 @@ There is a more complete [_Getting Started_ guide on our wiki][wiki-getting-star
git clone -b release https://github.com/netbox-community/netbox-docker.git
cd netbox-docker
tee docker-compose.override.yml <<EOF
version: '3.4'
services:
netbox:
ports:
- 8000:8080
EOF
docker-compose pull
docker-compose up
docker compose pull
docker compose up
```
The whole application will be available after a few minutes.
Open the URL `http://0.0.0.0:8000/` in a web-browser.
You should see the NetBox homepage.
In the top-right corner you can login.
The default credentials are:
* Username: **admin**
* Password: **admin**
* API Token: **0123456789abcdef0123456789abcdef01234567**
To create the first admin user run this command:
```bash
docker compose exec netbox /opt/netbox/netbox/manage.py createsuperuser
```
If you need to restart Netbox from an empty database often, you can also set the `SUPERUSER_*` variables in your `docker-compose.override.yml` as shown in the example.
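A minimal sketch of such an override, assuming the entrypoint honours `SUPERUSER_NAME`, `SUPERUSER_EMAIL` and `SUPERUSER_PASSWORD` (only the `SUPERUSER_*` prefix is given above, so the exact variable names are an assumption here; the compose files are authoritative):
```bash
tee docker-compose.override.yml <<EOF
services:
  netbox:
    ports:
      - 8000:8080
    environment:
      # Assumed variable names; check the compose/env files for the authoritative ones.
      SUPERUSER_NAME: admin
      SUPERUSER_EMAIL: admin@example.com
      SUPERUSER_PASSWORD: admin
EOF
```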
[wiki-getting-started]: https://github.com/netbox-community/netbox-docker/wiki/Getting-Started
[docker-reception]: https://github.com/nxt-engineering/reception
## Container Image Tags
@ -75,7 +74,7 @@ New container images are built and published automatically every ~24h.
You must use _NetBox Docker version_ `a.b.c` to guarantee the compatibility.
These images are automatically built from [the `master` branch of NetBox][netbox-master].
* `snapshot-a.b.c`:
These are pre-release builds.
These are prerelease builds.
They contain the support files of _NetBox Docker version_ `a.b.c`.
You must use _NetBox Docker version_ `a.b.c` to guarantee the compatibility.
These images are automatically built from the [`develop` branch of NetBox][netbox-develop].
@ -92,22 +91,14 @@ For each of the above tag, there is an extra tag:
This is the same version as `snapshot-a.b.c`.
It always points to the latest version of _NetBox Docker_.
Then there is currently one extra tag for each of the above tags:
* `-ldap`:
These container images contain additional dependencies and configuration files for connecting NetBox to an LDAP directory.
[Learn more about that in our wiki][netbox-docker-ldap].
[netbox-releases]: https://github.com/netbox-community/netbox/releases
[netbox-master]: https://github.com/netbox-community/netbox/tree/master
[netbox-develop]: https://github.com/netbox-community/netbox/tree/develop
[netbox-branches]: https://github.com/netbox-community/netbox/branches
[netbox-docker-ldap]: https://github.com/netbox-community/netbox-docker/wiki/LDAP
## Documentation
Please refer [to our wiki on Github][netbox-docker-wiki] for further information on how to use the NetBox Docker image properly.
The wiki covers advanced topics such as using files for secrets, configuring TLS, deployment to Kubernetes, monitoring and configuring NAPALM and LDAP.
Please refer [to our wiki on GitHub][netbox-docker-wiki] for further information on how to use the NetBox Docker image properly.
The wiki covers advanced topics such as using files for secrets, configuring TLS, deployment to Kubernetes, monitoring and configuring LDAP.
Our wiki is a community effort.
Feel free to correct errors, update outdated information or provide additional guides and insights.
@ -116,7 +107,7 @@ Feel free to correct errors, update outdated information or provide additional g
## Getting Help
Feel free to ask questions in our [Github Community][netbox-community]
Feel free to ask questions in our [GitHub Community][netbox-community]
or [join our Slack][netbox-docker-slack] and ask [in our channel `#netbox-docker`][netbox-docker-slack-channel],
which is free to use and where there are almost always people online that can help you in the Slack channel.
@ -127,12 +118,13 @@ you may find [the `#netbox` channel][netbox-slack-channel] on the same Slack ins
## Dependencies
This project relies only on *Docker* and *docker-compose* meeting these requirements:
This project relies only on _Docker_ and _docker-compose_ meeting these requirements:
* The *Docker version* must be at least `19.03`.
* The *docker-compose version* must be at least `1.28.0`.
* The _Docker version_ must be at least `20.10.10`.
* The _containerd version_ must be at least `1.5.6`.
* The _docker-compose version_ must be at least `1.28.0`.
To check the version installed on your system run `docker --version` and `docker-compose --version`.
To check the version installed on your system run `docker --version` and `docker compose version`.
## Updating


@ -1 +1 @@
1.5.1
2.9.1


@ -0,0 +1,9 @@
#!/bin/bash
NEEDED_COMMANDS="curl jq docker skopeo"
for c in $NEEDED_COMMANDS; do
if ! command -v "$c" &>/dev/null; then
echo "⚠️ '$c' is not installed. Can't proceed with build."
exit 1
fi
done


@ -1,8 +0,0 @@
#!/bin/bash
push_image_to_registry() {
local target_tag=$1
echo "⏫ Pushing '${target_tag}'"
$DRY docker push "${target_tag}"
echo "✅ Finished pushing the Docker image '${target_tag}'."
}


@ -1,82 +1,18 @@
#!/bin/bash
# Retrieves image configuration from public images in DockerHub
# Functions from https://gist.github.com/cirocosta/17ea17be7ac11594cb0f290b0a3ac0d1
# Optimised for our use case
check_if_tags_exists() {
local image=$1
local tag=$2
skopeo list-tags "docker://$image" | jq -r ".Tags | contains([\"$tag\"])"
}
get_image_label() {
local label=$1
local image=$2
local tag=$3
local token
token=$(_get_token "$image")
local digest
digest=$(_get_digest "$image" "$tag" "$token")
local retval="null"
if [ "$digest" != "null" ]; then
retval=$(_get_image_configuration "$image" "$token" "$digest" "$label")
fi
echo "$retval"
}
get_image_layers() {
local image=$1
local tag=$2
local token
token=$(_get_token "$image")
_get_layers "$image" "$tag" "$token"
skopeo inspect "docker://$image" | jq -r ".Labels[\"$label\"]"
}
get_image_last_layer() {
local image=$1
local tag=$2
local token
token=$(_get_token "$image")
local layers
mapfile -t layers < <(_get_layers "$image" "$tag" "$token")
echo "${layers[-1]}"
}
_get_image_configuration() {
local image=$1
local token=$2
local digest=$3
local label=$4
curl \
--silent \
--location \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/blobs/$digest" |
jq -r ".config.Labels.\"$label\""
}
_get_token() {
local image=$1
curl \
--silent \
"https://auth.docker.io/token?scope=repository:$image:pull&service=registry.docker.io" |
jq -r '.token'
}
_get_digest() {
local image=$1
local tag=$2
local token=$3
curl \
--silent \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/manifests/$tag" |
jq -r '.config.digest'
}
_get_layers() {
local image=$1
local tag=$2
local token=$3
curl \
--silent \
--header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
--header "Authorization: Bearer $token" \
"https://registry-1.docker.io/v2/$image/manifests/$tag" |
jq -r '.layers[].digest'
skopeo inspect "docker://$image" | jq -r ".Layers | last"
}
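A usage sketch of these skopeo-based helpers, based on how `build.sh` calls them further below; the image references are only examples:
```bash
source ./build-functions/get-public-image-config.sh

check_if_tags_exists "docker.io/netboxcommunity/netbox" "snapshot"            # prints "true" or "false"
get_image_label "netbox.git-ref" "docker.io/netboxcommunity/netbox:snapshot"  # value of that label, or "null"
get_image_last_layer "docker.io/library/ubuntu:23.10"                         # digest of the image's last layer
```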


@ -19,3 +19,14 @@ gh_env() {
echo "${@}" >>"${GITHUB_ENV}"
fi
}
###
# Prints the output to the file defined in ${GITHUB_OUTPUT}.
# Only executes if ${GH_ACTION} is defined.
# Example Usage: gh_out "FOO_VAR=bar_value"
###
gh_out() {
if [ -n "${GH_ACTION}" ]; then
echo "${@}" >>"$GITHUB_OUTPUT"
fi
}
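A short sketch of `gh_out` in action after sourcing this file (the path and value are illustrative; in CI both variables are provided by the workflow and the Actions runner):
```bash
export GH_ACTION=true                  # any non-empty value enables the GitHub-specific output
export GITHUB_OUTPUT=/tmp/step-output  # normally provided by the GitHub Actions runner
gh_out "skipped=true"                  # appends 'skipped=true' to ${GITHUB_OUTPUT}
```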


@ -1,26 +1,27 @@
#!/bin/bash
# Builds the latest released version
# Check if we have everything needed for the build
source ./build-functions/check-commands.sh
source ./build-functions/gh-functions.sh
echo "▶️ $0 $*"
###
# Check for the jq library needed for parsing JSON
###
if ! command -v jq; then
echo "⚠️ jq command missing from \$PATH!"
exit 1
fi
CURL_ARGS=(
--silent
)
###
# Checking for the presence of GITHUB_OAUTH_CLIENT_ID
# and GITHUB_OAUTH_CLIENT_SECRET
# Checking for the presence of GITHUB_TOKEN
###
if [ -n "${GITHUB_OAUTH_CLIENT_ID}" ] && [ -n "${GITHUB_OAUTH_CLIENT_SECRET}" ]; then
if [ -n "${GITHUB_TOKEN}" ]; then
echo "🗝 Performing authenticated Github API calls."
GITHUB_OAUTH_PARAMS="client_id=${GITHUB_OAUTH_CLIENT_ID}&client_secret=${GITHUB_OAUTH_CLIENT_SECRET}"
CURL_ARGS+=(
--header "Authorization: Bearer ${GITHUB_TOKEN}"
)
else
echo "🕶 Performing unauthenticated Github API calls. This might result in lower Github rate limits!"
GITHUB_OAUTH_PARAMS=""
fi
###
@ -42,31 +43,27 @@ fi
###
ORIGINAL_GITHUB_REPO="netbox-community/netbox"
GITHUB_REPO="${GITHUB_REPO-$ORIGINAL_GITHUB_REPO}"
URL_RELEASES="https://api.github.com/repos/${GITHUB_REPO}/releases?${GITHUB_OAUTH_PARAMS}"
URL_RELEASES="https://api.github.com/repos/${GITHUB_REPO}/releases"
# Composing the JQ commands to extract the most recent version number
JQ_LATEST="group_by(.prerelease) | .[] | sort_by(.published_at) | reverse | .[0] | select(.prerelease==${PRERELEASE-false}) | .tag_name"
CURL="curl -sS"
CURL="curl"
# Querying the Github API to fetch the most recent version number
VERSION=$($CURL "${URL_RELEASES}" | jq -r "${JQ_LATEST}")
VERSION=$($CURL "${CURL_ARGS[@]}" "${URL_RELEASES}" | jq -r "${JQ_LATEST}" 2>/dev/null)
###
# Check if the prerelease version is actually higher than stable version
###
if [ "${PRERELEASE}" == "true" ]; then
JQ_STABLE="group_by(.prerelease) | .[] | sort_by(.published_at) | reverse | .[0] | select(.prerelease==false) | .tag_name"
STABLE_VERSION=$($CURL "${URL_RELEASES}" | jq -r "${JQ_STABLE}")
STABLE_VERSION=$($CURL "${CURL_ARGS[@]}" "${URL_RELEASES}" | jq -r "${JQ_STABLE}" 2>/dev/null)
# shellcheck disable=SC2003
MAJOR_STABLE=$(expr match "${STABLE_VERSION}" 'v\([0-9]\+\)')
# shellcheck disable=SC2003
MINOR_STABLE=$(expr match "${STABLE_VERSION}" 'v[0-9]\+\.\([0-9]\+\)')
# shellcheck disable=SC2003
MAJOR_UNSTABLE=$(expr match "${VERSION}" 'v\([0-9]\+\)')
# shellcheck disable=SC2003
MINOR_UNSTABLE=$(expr match "${VERSION}" 'v[0-9]\+\.\([0-9]\+\)')
MAJOR_STABLE=$(expr "${STABLE_VERSION}" : 'v\([0-9]\+\)')
MINOR_STABLE=$(expr "${STABLE_VERSION}" : 'v[0-9]\+\.\([0-9]\+\)')
MAJOR_UNSTABLE=$(expr "${VERSION}" : 'v\([0-9]\+\)')
MINOR_UNSTABLE=$(expr "${VERSION}" : 'v[0-9]\+\.\([0-9]\+\)')
if {
[ "${MAJOR_STABLE}" -eq "${MAJOR_UNSTABLE}" ] &&
@ -75,10 +72,7 @@ if [ "${PRERELEASE}" == "true" ]; then
echo "❎ Latest unstable version '${VERSION}' is not higher than the latest stable version '$STABLE_VERSION'."
if [ -z "$DEBUG" ]; then
if [ -n "${GH_ACTION}" ]; then
echo "::set-output name=skipped::true"
fi
gh_out "skipped=true"
exit 0
else
echo "⚠️ Would exit here with code '0', but DEBUG is enabled."

build.sh

@ -6,87 +6,129 @@ echo "▶️ $0 $*"
set -e
if [ "${1}x" == "x" ] || [ "${1}" == "--help" ] || [ "${1}" == "-h" ]; then
echo "Usage: ${0} <branch> [--push|--push-only]"
echo " branch The branch or tag to build. Required."
echo " --push Pushes the built Docker image to the registry."
echo " --push-only Only pushes the Docker image to the registry, but does not build it."
echo ""
echo "You can use the following ENV variables to customize the build:"
echo " SRC_ORG Which fork of netbox to use (i.e. github.com/\${SRC_ORG}/\${SRC_REPO})."
echo " Default: netbox-community"
echo " SRC_REPO The name of the repository to use (i.e. github.com/\${SRC_ORG}/\${SRC_REPO})."
echo " Default: netbox"
echo " URL Where to fetch the code from."
echo " Must be a git repository. Can be private."
echo " Default: https://github.com/\${SRC_ORG}/\${SRC_REPO}.git"
echo " NETBOX_PATH The path where netbox will be checkout out."
echo " Must not be outside of the netbox-docker repository (because of Docker)!"
echo " Default: .netbox"
echo " SKIP_GIT If defined, git is not invoked and \${NETBOX_PATH} will not be altered."
echo " This may be useful, if you are manually managing the NETBOX_PATH."
echo " Default: undefined"
echo " TAG The version part of the docker tag."
echo " Default:"
echo " When <branch>=master: latest"
echo " When <branch>=develop: snapshot"
echo " Else: same as <branch>"
echo " DOCKER_REGISTRY The Docker repository's registry (i.e. '\${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}'')"
echo " Used for tagging the image."
echo " Default: docker.io"
echo " DOCKER_ORG The Docker repository's organisation (i.e. '\${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}'')"
echo " Used for tagging the image."
echo " Default: netboxcommunity"
echo " DOCKER_REPO The Docker repository's name (i.e. '\${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}'')"
echo " Used for tagging the image."
echo " Default: netbox"
echo " DOCKER_TAG The name of the tag which is applied to the image."
echo " Useful for pushing into another registry than hub.docker.com."
echo " Default: \${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}:\${TAG}"
echo " DOCKER_SHORT_TAG The name of the short tag which is applied to the"
echo " image. This is used to tag all patch releases to their"
echo " containing version e.g. v2.5.1 -> v2.5"
echo " Default: \${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}:<MAJOR>.<MINOR>"
echo " DOCKERFILE The name of Dockerfile to use."
echo " Default: Dockerfile"
echo " DOCKER_FROM The base image to use."
echo " Default: 'alpine:3.14'"
echo " DOCKER_TARGET A specific target to build."
echo " It's currently not possible to pass multiple targets."
echo " Default: main ldap"
echo " HTTP_PROXY The proxy to use for http requests."
echo " Example: http://proxy.domain.tld:3128"
echo " Default: undefined"
echo " NO_PROXY Comma-separated list of domain extensions proxy should not be used for."
echo " Example: .domain1.tld,.domain2.tld"
echo " Default: undefined"
echo " DEBUG If defined, the script does not stop when certain checks are unsatisfied."
echo " Default: undefined"
echo " DRY_RUN Prints all build statements instead of running them."
echo " Default: undefined"
echo " GH_ACTION If defined, special 'echo' statements are enabled that set the"
echo " following environment variables in Github Actions:"
echo " - FINAL_DOCKER_TAG: The final value of the DOCKER_TAG env variable"
echo " Default: undefined"
echo ""
echo "Examples:"
echo " ${0} master"
echo " This will fetch the latest 'master' branch, build a Docker Image and tag it"
echo " 'netboxcommunity/netbox:latest'."
echo " ${0} develop"
echo " This will fetch the latest 'develop' branch, build a Docker Image and tag it"
echo " 'netboxcommunity/netbox:snapshot'."
echo " ${0} v2.6.6"
echo " This will fetch the 'v2.6.6' tag, build a Docker Image and tag it"
echo " 'netboxcommunity/netbox:v2.6.6' and 'netboxcommunity/netbox:v2.6'."
echo " ${0} develop-2.7"
echo " This will fetch the 'develop-2.7' branch, build a Docker Image and tag it"
echo " 'netboxcommunity/netbox:develop-2.7'."
echo " SRC_ORG=cimnine ${0} feature-x"
echo " This will fetch the 'feature-x' branch from https://github.com/cimnine/netbox.git,"
echo " build a Docker Image and tag it 'netboxcommunity/netbox:feature-x'."
echo " SRC_ORG=cimnine DOCKER_ORG=cimnine ${0} feature-x"
echo " This will fetch the 'feature-x' branch from https://github.com/cimnine/netbox.git,"
echo " build a Docker Image and tag it 'cimnine/netbox:feature-x'."
_BOLD=$(tput bold)
_GREEN=$(tput setaf 2)
_CYAN=$(tput setaf 6)
_CLEAR=$(tput sgr0)
cat <<END_OF_HELP
${_BOLD}Usage:${_CLEAR} ${0} <branch> [--push]
branch The branch or tag to build. Required.
--push Pushes the built container image to the registry.
${_BOLD}You can use the following ENV variables to customize the build:${_CLEAR}
SRC_ORG Which fork of netbox to use (i.e. github.com/\${SRC_ORG}/\${SRC_REPO}).
${_GREEN}Default:${_CLEAR} netbox-community
SRC_REPO The name of the repository to use (i.e. github.com/\${SRC_ORG}/\${SRC_REPO}).
${_GREEN}Default:${_CLEAR} netbox
URL Where to fetch the code from.
Must be a git repository. Can be private.
${_GREEN}Default:${_CLEAR} https://github.com/\${SRC_ORG}/\${SRC_REPO}.git
NETBOX_PATH The path where netbox will be checked out.
Must not be outside of the netbox-docker repository (because of Docker)!
${_GREEN}Default:${_CLEAR} .netbox
SKIP_GIT If defined, git is not invoked and \${NETBOX_PATH} will not be altered.
This may be useful, if you are manually managing the NETBOX_PATH.
${_GREEN}Default:${_CLEAR} undefined
TAG The version part of the image tag.
${_GREEN}Default:${_CLEAR}
When <branch>=master: latest
When <branch>=develop: snapshot
Else: same as <branch>
IMAGE_NAMES The names used for the image including the registry
Used for tagging the image.
${_GREEN}Default:${_CLEAR} docker.io/netboxcommunity/netbox
${_CYAN}Example:${_CLEAR} 'docker.io/netboxcommunity/netbox quay.io/netboxcommunity/netbox'
DOCKER_TAG The name of the tag which is applied to the image.
Useful for pushing into another registry than hub.docker.com.
${_GREEN}Default:${_CLEAR} \${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}:\${TAG}
DOCKER_SHORT_TAG The name of the short tag which is applied to the
image. This is used to tag all patch releases to their
containing version e.g. v2.5.1 -> v2.5
${_GREEN}Default:${_CLEAR} \${DOCKER_REGISTRY}/\${DOCKER_ORG}/\${DOCKER_REPO}:<MAJOR>.<MINOR>
DOCKERFILE The name of Dockerfile to use.
${_GREEN}Default:${_CLEAR} Dockerfile
DOCKER_FROM The base image to use.
${_GREEN}Default:${_CLEAR} 'ubuntu:23.10'
BUILDX_PLATFORMS
Specifies the platform(s) to build the image for.
${_CYAN}Example:${_CLEAR} 'linux/amd64,linux/arm64'
${_GREEN}Default:${_CLEAR} 'linux/amd64'
BUILDX_BUILDER_NAME
If defined, the image build will be assigned to the given builder.
If you specify this variable, make sure that the builder exists.
If this value is not defined, a new buildx builder with the directory name of the
current directory (i.e. '$(basename "${PWD}")') is created.
${_CYAN}Example:${_CLEAR} 'clever_lovelace'
${_GREEN}Default:${_CLEAR} undefined
BUILDX_REMOVE_BUILDER
If defined (and only if BUILDX_BUILDER_NAME is undefined),
then the buildx builder created by this script will be removed after use.
This is useful if you build NetBox Docker on an automated system that does
not manage the builders for you.
${_CYAN}Example:${_CLEAR} 'on'
${_GREEN}Default:${_CLEAR} undefined
HTTP_PROXY The proxy to use for http requests.
${_CYAN}Example:${_CLEAR} http://proxy.domain.tld:3128
${_GREEN}Default:${_CLEAR} undefined
NO_PROXY Comma-separated list of domain extensions proxy should not be used for.
${_CYAN}Example:${_CLEAR} .domain1.tld,.domain2.tld
${_GREEN}Default:${_CLEAR} undefined
DEBUG If defined, the script does not stop when certain checks are unsatisfied.
${_GREEN}Default:${_CLEAR} undefined
DRY_RUN Prints all build statements instead of running them.
${_GREEN}Default:${_CLEAR} undefined
GH_ACTION If defined, special 'echo' statements are enabled that set the
following environment variables in Github Actions:
- FINAL_DOCKER_TAG: The final value of the DOCKER_TAG env variable
${_GREEN}Default:${_CLEAR} undefined
${_BOLD}Examples:${_CLEAR}
${0} master
This will fetch the latest 'master' branch, build a Docker Image and tag it
'netboxcommunity/netbox:latest'.
${0} develop
This will fetch the latest 'develop' branch, build a Docker Image and tag it
'netboxcommunity/netbox:snapshot'.
${0} v2.6.6
This will fetch the 'v2.6.6' tag, build a Docker Image and tag it
'netboxcommunity/netbox:v2.6.6' and 'netboxcommunity/netbox:v2.6'.
${0} develop-2.7
This will fetch the 'develop-2.7' branch, build a Docker Image and tag it
'netboxcommunity/netbox:develop-2.7'.
SRC_ORG=cimnine ${0} feature-x
This will fetch the 'feature-x' branch from https://github.com/cimnine/netbox.git,
build a Docker Image and tag it 'netboxcommunity/netbox:feature-x'.
SRC_ORG=cimnine DOCKER_ORG=cimnine ${0} feature-x
This will fetch the 'feature-x' branch from https://github.com/cimnine/netbox.git,
build a Docker Image and tag it 'cimnine/netbox:feature-x'.
END_OF_HELP
if [ "${1}x" == "x" ]; then
exit 1
@ -95,8 +137,15 @@ if [ "${1}x" == "x" ] || [ "${1}" == "--help" ] || [ "${1}" == "-h" ]; then
fi
fi
# Check if we have everything needed for the build
source ./build-functions/check-commands.sh
# Load all build functions
source ./build-functions/get-public-image-config.sh
source ./build-functions/gh-functions.sh
IMAGE_NAMES="${IMAGE_NAMES-docker.io/netboxcommunity/netbox}"
IFS=' ' read -ra IMAGE_NAMES <<<"${IMAGE_NAMES}"
###
# Enabling dry-run mode
###
@ -125,7 +174,7 @@ if [ "${2}" != "--push-only" ] && [ -z "${SKIP_GIT}" ]; then
REMOTE_EXISTS=$(git ls-remote --heads --tags "${URL}" "${NETBOX_BRANCH}" | wc -l)
if [ "${REMOTE_EXISTS}" == "0" ]; then
echo "❌ Remote branch '${NETBOX_BRANCH}' not found in '${URL}'; Nothing to do"
gh_echo "::set-output name=skipped::true"
gh_out "skipped=true"
exit 0
fi
echo "🌐 Checking out '${NETBOX_BRANCH}' of NetBox from the url '${URL}' into '${NETBOX_PATH}'"
@ -170,7 +219,7 @@ fi
# Determining the value for DOCKER_FROM
###
if [ -z "$DOCKER_FROM" ]; then
DOCKER_FROM="alpine:3.14"
DOCKER_FROM="docker.io/ubuntu:23.10"
fi
###
@ -178,7 +227,7 @@ fi
###
BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M+00:00')"
if [ -d ".git" ]; then
if [ -d ".git" ] && [ -z "${SKIP_GIT}" ]; then
GIT_REF="$(git rev-parse HEAD)"
fi
@ -186,7 +235,7 @@ fi
PROJECT_VERSION="${PROJECT_VERSION-$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' VERSION)}"
# Get the Git information from the netbox directory
if [ -d "${NETBOX_PATH}/.git" ]; then
if [ -d "${NETBOX_PATH}/.git" ] && [ -z "${SKIP_GIT}" ]; then
NETBOX_GIT_REF=$(
cd "${NETBOX_PATH}"
git rev-parse HEAD
@ -220,193 +269,190 @@ develop)
esac
###
# Determine targets to build
# composing the final TARGET_DOCKER_TAG
###
DEFAULT_DOCKER_TARGETS=("main" "ldap")
DOCKER_TARGETS=("${DOCKER_TARGET:-"${DEFAULT_DOCKER_TARGETS[@]}"}")
echo "🏭 Building the following targets:" "${DOCKER_TARGETS[@]}"
TARGET_DOCKER_TAG="${DOCKER_TAG-${TAG}}"
TARGET_DOCKER_TAG_PROJECT="${TARGET_DOCKER_TAG}-${PROJECT_VERSION}"
###
# composing the additional DOCKER_SHORT_TAG,
# i.e. "v2.6.1" becomes "v2.6",
# which is only relevant for version tags
# Also let "latest" follow the highest version
###
if [[ "${TAG}" =~ ^v([0-9]+)\.([0-9]+)\.[0-9]+$ ]]; then
MAJOR=${BASH_REMATCH[1]}
MINOR=${BASH_REMATCH[2]}
TARGET_DOCKER_SHORT_TAG="${DOCKER_SHORT_TAG-v${MAJOR}.${MINOR}}"
TARGET_DOCKER_LATEST_TAG="latest"
TARGET_DOCKER_SHORT_TAG_PROJECT="${TARGET_DOCKER_SHORT_TAG}-${PROJECT_VERSION}"
TARGET_DOCKER_LATEST_TAG_PROJECT="${TARGET_DOCKER_LATEST_TAG}-${PROJECT_VERSION}"
fi
IMAGE_NAME_TAGS=()
for IMAGE_NAME in "${IMAGE_NAMES[@]}"; do
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_TAG}")
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_TAG_PROJECT}")
done
if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
for IMAGE_NAME in "${IMAGE_NAMES[@]}"; do
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_SHORT_TAG}")
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_SHORT_TAG_PROJECT}")
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_LATEST_TAG}")
IMAGE_NAME_TAGS+=("${IMAGE_NAME}:${TARGET_DOCKER_LATEST_TAG_PROJECT}")
done
fi
FINAL_DOCKER_TAG="${IMAGE_NAME_TAGS[0]}"
gh_env "FINAL_DOCKER_TAG=${IMAGE_NAME_TAGS[0]}"
###
# Checking if the build is necessary,
# meaning build only if one of those values changed:
# - a new tag is being created
# - base image digest
# - netbox git ref (Label: netbox.git-ref)
# - netbox-docker git ref (Label: org.opencontainers.image.revision)
###
# Load information from registry (only for first registry in "IMAGE_NAMES")
SHOULD_BUILD="false"
BUILD_REASON=""
if [ -z "${GH_ACTION}" ]; then
# Assuming non-GitHub builds should always proceed
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} interactive"
elif [ "false" == "$(check_if_tags_exists "${IMAGE_NAMES[0]}" "$TARGET_DOCKER_TAG")" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} newtag"
else
echo "Checking labels for '${FINAL_DOCKER_TAG}'"
BASE_LAST_LAYER=$(get_image_last_layer "${DOCKER_FROM}")
OLD_BASE_LAST_LAYER=$(get_image_label netbox.last-base-image-layer "${FINAL_DOCKER_TAG}")
NETBOX_GIT_REF_OLD=$(get_image_label netbox.git-ref "${FINAL_DOCKER_TAG}")
GIT_REF_OLD=$(get_image_label org.opencontainers.image.revision "${FINAL_DOCKER_TAG}")
if [ "${BASE_LAST_LAYER}" != "${OLD_BASE_LAST_LAYER}" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} ubuntu"
fi
if [ "${NETBOX_GIT_REF}" != "${NETBOX_GIT_REF_OLD}" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} netbox"
fi
if [ "${GIT_REF}" != "${GIT_REF_OLD}" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} netbox-docker"
fi
fi
if [ "${SHOULD_BUILD}" != "true" ]; then
echo "Build skipped because sources didn't change"
gh_out "skipped=true"
exit 0 # Nothing to do -> exit
else
gh_out "skipped=false"
fi
gh_echo "::endgroup::"
###
# Build each target
# Build the image
###
export DOCKER_BUILDKIT=${DOCKER_BUILDKIT-1}
for DOCKER_TARGET in "${DOCKER_TARGETS[@]}"; do
gh_echo "::group::🏗 Building the target '${DOCKER_TARGET}'"
echo "🏗 Building the target '${DOCKER_TARGET}'"
###
# composing the final TARGET_DOCKER_TAG
###
TARGET_DOCKER_TAG="${DOCKER_TAG-${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_REPO}:${TAG}}"
if [ "${DOCKER_TARGET}" != "main" ]; then
TARGET_DOCKER_TAG="${TARGET_DOCKER_TAG}-${DOCKER_TARGET}"
fi
TARGET_DOCKER_TAG_PROJECT="${TARGET_DOCKER_TAG}-${PROJECT_VERSION}"
gh_env "FINAL_DOCKER_TAG=${TARGET_DOCKER_TAG_PROJECT}"
gh_echo "::set-output name=skipped::false"
###
# composing the additional DOCKER_SHORT_TAG,
# i.e. "v2.6.1" becomes "v2.6",
# which is only relevant for version tags
# Also let "latest" follow the highest version
###
if [[ "${TAG}" =~ ^v([0-9]+)\.([0-9]+)\.[0-9]+$ ]]; then
MAJOR=${BASH_REMATCH[1]}
MINOR=${BASH_REMATCH[2]}
TARGET_DOCKER_SHORT_TAG="${DOCKER_SHORT_TAG-${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_REPO}:v${MAJOR}.${MINOR}}"
TARGET_DOCKER_LATEST_TAG="${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_REPO}:latest"
if [ "${DOCKER_TARGET}" != "main" ]; then
TARGET_DOCKER_SHORT_TAG="${TARGET_DOCKER_SHORT_TAG}-${DOCKER_TARGET}"
TARGET_DOCKER_LATEST_TAG="${TARGET_DOCKER_LATEST_TAG}-${DOCKER_TARGET}"
fi
TARGET_DOCKER_SHORT_TAG_PROJECT="${TARGET_DOCKER_SHORT_TAG}-${PROJECT_VERSION}"
TARGET_DOCKER_LATEST_TAG_PROJECT="${TARGET_DOCKER_LATEST_TAG}-${PROJECT_VERSION}"
fi
###
# Proceeding to build stage, except if `--push-only` is passed
###
if [ "${2}" != "--push-only" ]; then
###
# Checking if the build is necessary,
# meaning build only if one of those values changed:
# - Python base image digest (Label: PYTHON_BASE_DIGEST)
# - netbox git ref (Label: NETBOX_GIT_REF)
# - netbox-docker git ref (Label: org.label-schema.vcs-ref)
###
# Load information from registry (only for docker.io)
SHOULD_BUILD="false"
BUILD_REASON=""
if [ -z "${GH_ACTION}" ]; then
# Assuming non-GitHub builds should always proceed
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} interactive"
elif [ "$DOCKER_REGISTRY" = "docker.io" ]; then
source ./build-functions/get-public-image-config.sh
IFS=':' read -ra DOCKER_FROM_SPLIT <<<"${DOCKER_FROM}"
if ! [[ ${DOCKER_FROM_SPLIT[0]} =~ .*/.* ]]; then
# Need to use "library/..." for images the have no two part name
DOCKER_FROM_SPLIT[0]="library/${DOCKER_FROM_SPLIT[0]}"
fi
PYTHON_LAST_LAYER=$(get_image_last_layer "${DOCKER_FROM_SPLIT[0]}" "${DOCKER_FROM_SPLIT[1]}")
mapfile -t IMAGES_LAYERS_OLD < <(get_image_layers "${DOCKER_ORG}"/"${DOCKER_REPO}" "${TAG}")
NETBOX_GIT_REF_OLD=$(get_image_label NETBOX_GIT_REF "${DOCKER_ORG}"/"${DOCKER_REPO}" "${TAG}")
GIT_REF_OLD=$(get_image_label org.label-schema.vcs-ref "${DOCKER_ORG}"/"${DOCKER_REPO}" "${TAG}")
if ! printf '%s\n' "${IMAGES_LAYERS_OLD[@]}" | grep -q -P "^${PYTHON_LAST_LAYER}\$"; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} alpine"
fi
if [ "${NETBOX_GIT_REF}" != "${NETBOX_GIT_REF_OLD}" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} netbox"
fi
if [ "${GIT_REF}" != "${GIT_REF_OLD}" ]; then
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} netbox-docker"
fi
else
SHOULD_BUILD="true"
BUILD_REASON="${BUILD_REASON} no-check"
fi
###
# Composing all arguments for `docker build`
###
DOCKER_BUILD_ARGS=(
--pull
--target "${DOCKER_TARGET}"
-f "${DOCKERFILE}"
-t "${TARGET_DOCKER_TAG}"
-t "${TARGET_DOCKER_TAG_PROJECT}"
)
if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
DOCKER_BUILD_ARGS+=(-t "${TARGET_DOCKER_SHORT_TAG}")
DOCKER_BUILD_ARGS+=(-t "${TARGET_DOCKER_SHORT_TAG_PROJECT}")
DOCKER_BUILD_ARGS+=(-t "${TARGET_DOCKER_LATEST_TAG}")
DOCKER_BUILD_ARGS+=(-t "${TARGET_DOCKER_LATEST_TAG_PROJECT}")
fi
# --label
DOCKER_BUILD_ARGS+=(
--label "ORIGINAL_TAG=${TARGET_DOCKER_TAG_PROJECT}"
--label "org.label-schema.build-date=${BUILD_DATE}"
--label "org.opencontainers.image.created=${BUILD_DATE}"
--label "org.label-schema.version=${PROJECT_VERSION}"
--label "org.opencontainers.image.version=${PROJECT_VERSION}"
)
if [ -d ".git" ]; then
DOCKER_BUILD_ARGS+=(
--label "org.label-schema.vcs-ref=${GIT_REF}"
--label "org.opencontainers.image.revision=${GIT_REF}"
)
fi
if [ -d "${NETBOX_PATH}/.git" ]; then
DOCKER_BUILD_ARGS+=(
--label "NETBOX_GIT_BRANCH=${NETBOX_GIT_BRANCH}"
--label "NETBOX_GIT_REF=${NETBOX_GIT_REF}"
--label "NETBOX_GIT_URL=${NETBOX_GIT_URL}"
)
fi
if [ -n "${BUILD_REASON}" ]; then
BUILD_REASON=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<"$BUILD_REASON")
DOCKER_BUILD_ARGS+=(--label "BUILD_REASON=${BUILD_REASON}")
fi
# --build-arg
DOCKER_BUILD_ARGS+=(--build-arg "NETBOX_PATH=${NETBOX_PATH}")
if [ -n "${DOCKER_FROM}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "FROM=${DOCKER_FROM}")
fi
# shellcheck disable=SC2031
if [ -n "${HTTP_PROXY}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "http_proxy=${HTTP_PROXY}")
DOCKER_BUILD_ARGS+=(--build-arg "https_proxy=${HTTPS_PROXY}")
fi
if [ -n "${NO_PROXY}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "no_proxy=${NO_PROXY}")
fi
###
# Building the docker image
###
if [ "${SHOULD_BUILD}" == "true" ]; then
echo "🐳 Building the Docker image '${TARGET_DOCKER_TAG_PROJECT}'."
echo " Build reason set to: ${BUILD_REASON}"
$DRY docker build "${DOCKER_BUILD_ARGS[@]}" .
echo "✅ Finished building the Docker images '${TARGET_DOCKER_TAG_PROJECT}'"
echo "🔎 Inspecting labels on '${TARGET_DOCKER_TAG_PROJECT}'"
$DRY docker inspect "${TARGET_DOCKER_TAG_PROJECT}" --format "{{json .Config.Labels}}"
else
echo "Build skipped because sources didn't change"
echo "::set-output name=skipped::true"
fi
fi
###
# Pushing the docker images if either `--push` or `--push-only` are passed
###
if [ "${2}" == "--push" ] || [ "${2}" == "--push-only" ]; then
source ./build-functions/docker-functions.sh
push_image_to_registry "${TARGET_DOCKER_TAG}"
push_image_to_registry "${TARGET_DOCKER_TAG_PROJECT}"
if [ -n "${TARGET_DOCKER_SHORT_TAG}" ]; then
push_image_to_registry "${TARGET_DOCKER_SHORT_TAG}"
push_image_to_registry "${TARGET_DOCKER_SHORT_TAG_PROJECT}"
push_image_to_registry "${TARGET_DOCKER_LATEST_TAG}"
push_image_to_registry "${TARGET_DOCKER_LATEST_TAG_PROJECT}"
fi
fi
gh_echo "::endgroup::"
gh_echo "::group::🏗 Building the image"
###
# Composing all arguments for `docker build`
###
DOCKER_BUILD_ARGS=(
--pull
--target main
-f "${DOCKERFILE}"
)
for IMAGE_NAME in "${IMAGE_NAME_TAGS[@]}"; do
DOCKER_BUILD_ARGS+=(-t "${IMAGE_NAME}")
done
# --label
DOCKER_BUILD_ARGS+=(
--label "netbox.original-tag=${TARGET_DOCKER_TAG_PROJECT}"
--label "org.opencontainers.image.created=${BUILD_DATE}"
--label "org.opencontainers.image.version=${PROJECT_VERSION}"
)
if [ -d ".git" ] && [ -z "${SKIP_GIT}" ]; then
DOCKER_BUILD_ARGS+=(
--label "org.opencontainers.image.revision=${GIT_REF}"
)
fi
if [ -d "${NETBOX_PATH}/.git" ] && [ -z "${SKIP_GIT}" ]; then
DOCKER_BUILD_ARGS+=(
--label "netbox.git-branch=${NETBOX_GIT_BRANCH}"
--label "netbox.git-ref=${NETBOX_GIT_REF}"
--label "netbox.git-url=${NETBOX_GIT_URL}"
)
fi
if [ -n "${BUILD_REASON}" ]; then
BUILD_REASON=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<"$BUILD_REASON")
DOCKER_BUILD_ARGS+=(--label "netbox.build-reason=${BUILD_REASON}")
DOCKER_BUILD_ARGS+=(--label "netbox.last-base-image-layer=${BASE_LAST_LAYER}")
fi
# --build-arg
DOCKER_BUILD_ARGS+=(--build-arg "NETBOX_PATH=${NETBOX_PATH}")
if [ -n "${DOCKER_FROM}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "FROM=${DOCKER_FROM}")
fi
# shellcheck disable=SC2031
if [ -n "${HTTP_PROXY}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "http_proxy=${HTTP_PROXY}")
DOCKER_BUILD_ARGS+=(--build-arg "https_proxy=${HTTPS_PROXY}")
fi
if [ -n "${NO_PROXY}" ]; then
DOCKER_BUILD_ARGS+=(--build-arg "no_proxy=${NO_PROXY}")
fi
DOCKER_BUILD_ARGS+=(--platform "${BUILDX_PLATFORM-linux/amd64}")
if [ "${2}" == "--push" ]; then
# output type=docker does not work with pushing
DOCKER_BUILD_ARGS+=(
--output=type=image
--push
)
else
DOCKER_BUILD_ARGS+=(
--output=type=docker
)
fi
###
# Building the docker image
###
if [ -z "${BUILDX_BUILDER_NAME}" ]; then
BUILDX_BUILDER_NAME="$(basename "${PWD}")"
fi
if ! docker buildx ls | grep --quiet --word-regexp "${BUILDX_BUILDER_NAME}"; then
echo "👷 Creating new Buildx Builder '${BUILDX_BUILDER_NAME}'"
$DRY docker buildx create --name "${BUILDX_BUILDER_NAME}"
BUILDX_BUILDER_CREATED="yes"
fi
echo "🐳 Building the Docker image '${TARGET_DOCKER_TAG_PROJECT}'."
echo " Build reason set to: ${BUILD_REASON}"
$DRY docker buildx \
--builder "${BUILDX_BUILDER_NAME}" \
build \
"${DOCKER_BUILD_ARGS[@]}" \
.
echo "✅ Finished building the Docker images"
gh_echo "::endgroup::" # End group for Build
gh_echo "::group::🏗 Image Labels"
echo "🔎 Inspecting labels on '${IMAGE_NAME_TAGS[0]}'"
$DRY docker inspect "${IMAGE_NAME_TAGS[0]}" --format "{{json .Config.Labels}}" | jq
gh_echo "::endgroup::"
gh_echo "::group::🏗 Clean up"
if [ -n "${BUILDX_REMOVE_BUILDER}" ] && [ "${BUILDX_BUILDER_CREATED}" == "yes" ]; then
echo "👷 Removing Buildx Builder '${BUILDX_BUILDER_NAME}'"
$DRY docker buildx rm "${BUILDX_BUILDER_NAME}"
fi
gh_echo "::endgroup::"


@ -7,12 +7,17 @@
import re
from os import environ
from os.path import abspath, dirname, join
from typing import Any, Callable, Tuple
# For reference see https://netbox.readthedocs.io/en/stable/configuration/
# Based on https://github.com/netbox-community/netbox/blob/master/netbox/netbox/configuration.example.py
# For reference see https://docs.netbox.dev/en/stable/configuration/
# Based on https://github.com/netbox-community/netbox/blob/develop/netbox/netbox/configuration_example.py
###
# NetBox-Docker Helper functions
###
# Read secret from file
def _read_secret(secret_name, default = None):
def _read_secret(secret_name: str, default: str | None = None) -> str | None:
try:
f = open('/run/secrets/' + secret_name, 'r', encoding='utf-8')
except EnvironmentError:
@ -21,6 +26,25 @@ def _read_secret(secret_name, default = None):
with f:
return f.readline().strip()
# If `map_fn` isn't defined, the value read from the environment (or the default, if the variable is not set) is returned as-is.
# If `map_fn` is defined, that value is passed to `map_fn` and the result of `map_fn` is returned instead.
# `map_fn` is not invoked if the value is None.
def _environ_get_and_map(variable_name: str, default: str | None = None, map_fn: Callable[[str], Any | None] = None) -> Any | None:
env_value = environ.get(variable_name, default)
if env_value == None:
return env_value
if not map_fn:
return env_value
return map_fn(env_value)
_AS_BOOL = lambda value : value.lower() == 'true'
_AS_INT = lambda value : int(value)
_AS_LIST = lambda value : list(filter(None, value.split(' ')))
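For illustration, a few values as they might be handed to the container (the variable names appear in the settings below; how they are injected, e.g. via an env file, is left open) and how the helpers above interpret them:
```bash
export DB_CONN_MAX_AGE=300                    # _AS_INT  -> 300
export REDIS_SSL=true                         # _AS_BOOL -> True
export ALLOWED_URL_SCHEMES="http https ssh"   # _AS_LIST -> ['http', 'https', 'ssh']
```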
_BASE_DIR = dirname(dirname(abspath(__file__)))
#########################
@ -34,6 +58,9 @@ _BASE_DIR = dirname(dirname(abspath(__file__)))
#
# Example: ALLOWED_HOSTS = ['netbox.example.com', 'netbox.internal.local']
ALLOWED_HOSTS = environ.get('ALLOWED_HOSTS', '*').split(' ')
# ensure that '*' or 'localhost' is always in ALLOWED_HOSTS (needed for health checks)
if '*' not in ALLOWED_HOSTS and 'localhost' not in ALLOWED_HOSTS:
ALLOWED_HOSTS.append('localhost')
# PostgreSQL database configuration. See the Django documentation for a complete list of available parameters:
# https://docs.djangoproject.com/en/stable/ref/settings/#databases
@ -46,9 +73,9 @@ DATABASE = {
'PORT': environ.get('DB_PORT', ''), # Database port (leave blank for default)
'OPTIONS': {'sslmode': environ.get('DB_SSLMODE', 'prefer')},
# Database connection SSLMODE
'CONN_MAX_AGE': int(environ.get('DB_CONN_MAX_AGE', '300')),
'CONN_MAX_AGE': _environ_get_and_map('DB_CONN_MAX_AGE', '300', _AS_INT),
# Max database connection age
'DISABLE_SERVER_SIDE_CURSORS': environ.get('DB_DISABLE_SERVER_SIDE_CURSORS', 'False').lower() == 'true',
'DISABLE_SERVER_SIDE_CURSORS': _environ_get_and_map('DB_DISABLE_SERVER_SIDE_CURSORS', 'False', _AS_BOOL),
# Disable the use of server-side cursors transaction pooling
}
@ -58,19 +85,26 @@ DATABASE = {
REDIS = {
'tasks': {
'HOST': environ.get('REDIS_HOST', 'localhost'),
'PORT': int(environ.get('REDIS_PORT', 6379)),
'PORT': _environ_get_and_map('REDIS_PORT', 6379, _AS_INT),
'SENTINELS': [tuple(uri.split(':')) for uri in _environ_get_and_map('REDIS_SENTINELS', '', _AS_LIST) if uri != ''],
'SENTINEL_SERVICE': environ.get('REDIS_SENTINEL_SERVICE', 'default'),
'SENTINEL_TIMEOUT': _environ_get_and_map('REDIS_SENTINEL_TIMEOUT', 10, _AS_INT),
'USERNAME': environ.get('REDIS_USERNAME', ''),
'PASSWORD': _read_secret('redis_password', environ.get('REDIS_PASSWORD', '')),
'DATABASE': int(environ.get('REDIS_DATABASE', 0)),
'SSL': environ.get('REDIS_SSL', 'False').lower() == 'true',
'INSECURE_SKIP_TLS_VERIFY': environ.get('REDIS_INSECURE_SKIP_TLS_VERIFY', 'False').lower() == 'true',
'DATABASE': _environ_get_and_map('REDIS_DATABASE', 0, _AS_INT),
'SSL': _environ_get_and_map('REDIS_SSL', 'False', _AS_BOOL),
'INSECURE_SKIP_TLS_VERIFY': _environ_get_and_map('REDIS_INSECURE_SKIP_TLS_VERIFY', 'False', _AS_BOOL),
},
'caching': {
'HOST': environ.get('REDIS_CACHE_HOST', environ.get('REDIS_HOST', 'localhost')),
'PORT': int(environ.get('REDIS_CACHE_PORT', environ.get('REDIS_PORT', 6379))),
'PORT': _environ_get_and_map('REDIS_CACHE_PORT', environ.get('REDIS_PORT', '6379'), _AS_INT),
'SENTINELS': [tuple(uri.split(':')) for uri in _environ_get_and_map('REDIS_CACHE_SENTINELS', '', _AS_LIST) if uri != ''],
'SENTINEL_SERVICE': environ.get('REDIS_CACHE_SENTINEL_SERVICE', environ.get('REDIS_SENTINEL_SERVICE', 'default')),
'USERNAME': environ.get('REDIS_CACHE_USERNAME', environ.get('REDIS_USERNAME', '')),
'PASSWORD': _read_secret('redis_cache_password', environ.get('REDIS_CACHE_PASSWORD', environ.get('REDIS_PASSWORD', ''))),
'DATABASE': int(environ.get('REDIS_CACHE_DATABASE', 1)),
'SSL': environ.get('REDIS_CACHE_SSL', environ.get('REDIS_SSL', 'False')).lower() == 'true',
'INSECURE_SKIP_TLS_VERIFY': environ.get('REDIS_CACHE_INSECURE_SKIP_TLS_VERIFY', environ.get('REDIS_INSECURE_SKIP_TLS_VERIFY', 'False')).lower() == 'true',
'DATABASE': _environ_get_and_map('REDIS_CACHE_DATABASE', '1', _AS_INT),
'SSL': _environ_get_and_map('REDIS_CACHE_SSL', environ.get('REDIS_SSL', 'False'), _AS_BOOL),
'INSECURE_SKIP_TLS_VERIFY': _environ_get_and_map('REDIS_CACHE_INSECURE_SKIP_TLS_VERIFY', environ.get('REDIS_INSECURE_SKIP_TLS_VERIFY', 'False'), _AS_BOOL),
},
}
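The `SENTINELS` entries are built from a space-separated list of `host:port` pairs; a hedged example of the corresponding environment variables (host names and service name are illustrative):
```bash
export REDIS_SENTINELS="redis-sentinel-1:26379 redis-sentinel-2:26379"
export REDIS_SENTINEL_SERVICE=mymaster        # sentinel service name; defaults to 'default'
export REDIS_CACHE_SENTINELS="redis-sentinel-1:26379 redis-sentinel-2:26379"
```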
@ -87,159 +121,219 @@ SECRET_KEY = _read_secret('secret_key', environ.get('SECRET_KEY', ''))
# #
#########################
# Specify one or more name and email address tuples representing NetBox administrators. These people will be notified of
# application errors (assuming correct email settings are provided).
ADMINS = [
# ['John Doe', 'jdoe@example.com'],
]
# # Specify one or more name and email address tuples representing NetBox administrators. These people will be notified of
# # application errors (assuming correct email settings are provided).
# ADMINS = [
# # ['John Doe', 'jdoe@example.com'],
# ]
# URL schemes that are allowed within links in NetBox
ALLOWED_URL_SCHEMES = (
'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc', 'xmpp',
)
if 'ALLOWED_URL_SCHEMES' in environ:
ALLOWED_URL_SCHEMES = _environ_get_and_map('ALLOWED_URL_SCHEMES', None, _AS_LIST)
# Optionally display a persistent banner at the top and/or bottom of every page. HTML is allowed. To display the same
# content in both banners, define BANNER_TOP and set BANNER_BOTTOM = BANNER_TOP.
BANNER_TOP = environ.get('BANNER_TOP', '')
BANNER_BOTTOM = environ.get('BANNER_BOTTOM', '')
if 'BANNER_TOP' in environ:
BANNER_TOP = environ.get('BANNER_TOP', None)
if 'BANNER_BOTTOM' in environ:
BANNER_BOTTOM = environ.get('BANNER_BOTTOM', None)
# Text to include on the login page above the login form. HTML is allowed.
BANNER_LOGIN = environ.get('BANNER_LOGIN', '')
# Base URL path if accessing NetBox within a directory. For example, if installed at http://example.com/netbox/, set:
# BASE_PATH = 'netbox/'
BASE_PATH = environ.get('BASE_PATH', '')
if 'BANNER_LOGIN' in environ:
BANNER_LOGIN = environ.get('BANNER_LOGIN', None)
# Maximum number of days to retain logged changes. Set to 0 to retain changes indefinitely. (Default: 90)
CHANGELOG_RETENTION = int(environ.get('CHANGELOG_RETENTION', 90))
if 'CHANGELOG_RETENTION' in environ:
CHANGELOG_RETENTION = _environ_get_and_map('CHANGELOG_RETENTION', None, _AS_INT)
# Maximum number of days to retain job results (scripts and reports). Set to 0 to retain job results in the database indefinitely. (Default: 90)
if 'JOB_RETENTION' in environ:
JOB_RETENTION = _environ_get_and_map('JOB_RETENTION', None, _AS_INT)
# JOBRESULT_RETENTION was renamed to JOB_RETENTION in the v3.5.0 release of NetBox. For backwards compatibility, map JOBRESULT_RETENTION to JOB_RETENTION
elif 'JOBRESULT_RETENTION' in environ:
JOB_RETENTION = _environ_get_and_map('JOBRESULT_RETENTION', None, _AS_INT)
# API Cross-Origin Resource Sharing (CORS) settings. If CORS_ORIGIN_ALLOW_ALL is set to True, all origins will be
# allowed. Otherwise, define a list of allowed origins using either CORS_ORIGIN_WHITELIST or
# CORS_ORIGIN_REGEX_WHITELIST. For more information, see https://github.com/ottoyiu/django-cors-headers
CORS_ORIGIN_ALLOW_ALL = environ.get('CORS_ORIGIN_ALLOW_ALL', 'False').lower() == 'true'
CORS_ORIGIN_WHITELIST = list(filter(None, environ.get('CORS_ORIGIN_WHITELIST', 'https://localhost').split(' ')))
CORS_ORIGIN_REGEX_WHITELIST = [re.compile(r) for r in list(filter(None, environ.get('CORS_ORIGIN_REGEX_WHITELIST', '').split(' ')))]
CORS_ORIGIN_ALLOW_ALL = _environ_get_and_map('CORS_ORIGIN_ALLOW_ALL', 'False', _AS_BOOL)
CORS_ORIGIN_WHITELIST = _environ_get_and_map('CORS_ORIGIN_WHITELIST', 'https://localhost', _AS_LIST)
CORS_ORIGIN_REGEX_WHITELIST = [re.compile(r) for r in _environ_get_and_map('CORS_ORIGIN_REGEX_WHITELIST', '', _AS_LIST)]
# Set to True to enable server debugging. WARNING: Debugging introduces a substantial performance penalty and may reveal
# sensitive information about your installation. Only enable debugging while performing testing. Never enable debugging
# on a production system.
DEBUG = environ.get('DEBUG', 'False').lower() == 'true'
# sensitive information about your installation. Only enable debugging while performing testing.
# Never enable debugging on a production system.
DEBUG = _environ_get_and_map('DEBUG', 'False', _AS_BOOL)
# This parameter serves as a safeguard to prevent some potentially dangerous behavior,
# such as generating new database schema migrations.
# Set this to True only if you are actively developing the NetBox code base.
DEVELOPER = _environ_get_and_map('DEVELOPER', 'False', _AS_BOOL)
# Email settings
EMAIL = {
'SERVER': environ.get('EMAIL_SERVER', 'localhost'),
'PORT': int(environ.get('EMAIL_PORT', 25)),
'PORT': _environ_get_and_map('EMAIL_PORT', 25, _AS_INT),
'USERNAME': environ.get('EMAIL_USERNAME', ''),
'PASSWORD': _read_secret('email_password', environ.get('EMAIL_PASSWORD', '')),
'USE_SSL': environ.get('EMAIL_USE_SSL', 'False').lower() == 'true',
'USE_TLS': environ.get('EMAIL_USE_TLS', 'False').lower() == 'true',
'USE_SSL': _environ_get_and_map('EMAIL_USE_SSL', 'False', _AS_BOOL),
'USE_TLS': _environ_get_and_map('EMAIL_USE_TLS', 'False', _AS_BOOL),
'SSL_CERTFILE': environ.get('EMAIL_SSL_CERTFILE', ''),
'SSL_KEYFILE': environ.get('EMAIL_SSL_KEYFILE', ''),
'TIMEOUT': int(environ.get('EMAIL_TIMEOUT', 10)), # seconds
'TIMEOUT': _environ_get_and_map('EMAIL_TIMEOUT', 10, _AS_INT), # seconds
'FROM_EMAIL': environ.get('EMAIL_FROM', ''),
}
# Enforcement of unique IP space can be toggled on a per-VRF basis. To enforce unique IP space within the global table
# (all prefixes and IP addresses not assigned to a VRF), set ENFORCE_GLOBAL_UNIQUE to True.
ENFORCE_GLOBAL_UNIQUE = environ.get('ENFORCE_GLOBAL_UNIQUE', 'False').lower() == 'true'
if 'ENFORCE_GLOBAL_UNIQUE' in environ:
ENFORCE_GLOBAL_UNIQUE = _environ_get_and_map('ENFORCE_GLOBAL_UNIQUE', None, _AS_BOOL)
# By default, netbox sends census reporting data using a single HTTP request each time a worker starts.
# This data enables the project maintainers to estimate how many NetBox deployments exist and track the adoption of new versions over time.
# The only data reported by this function are the NetBox version, Python version, and a pseudorandom unique identifier.
# To opt out of census reporting, set CENSUS_REPORTING_ENABLED to False.
if 'CENSUS_REPORTING_ENABLED' in environ:
CENSUS_REPORTING_ENABLED = _environ_get_and_map('CENSUS_REPORTING_ENABLED', None, _AS_BOOL)
# Exempt certain models from the enforcement of view permissions. Models listed here will be viewable by all users and
# by anonymous users. List models in the form `<app>.<model>`. Add '*' to this list to exempt all models.
EXEMPT_VIEW_PERMISSIONS = list(filter(None, environ.get('EXEMPT_VIEW_PERMISSIONS', '').split(' ')))
EXEMPT_VIEW_PERMISSIONS = _environ_get_and_map('EXEMPT_VIEW_PERMISSIONS', '', _AS_LIST)
# Enable custom logging. Please see the Django documentation for detailed guidance on configuring custom logs:
# https://docs.djangoproject.com/en/stable/topics/logging/
LOGGING = {}
# HTTP proxies NetBox should use when sending outbound HTTP requests (e.g. for webhooks).
# HTTP_PROXIES = {
# 'http': 'http://10.10.1.10:3128',
# 'https': 'http://10.10.1.10:1080',
# }
# IP addresses recognized as internal to the system. The debugging toolbar will be available only to clients accessing
# NetBox from an internal IP.
INTERNAL_IPS = _environ_get_and_map('INTERNAL_IPS', '127.0.0.1 ::1', _AS_LIST)
# Enable GraphQL API.
if 'GRAPHQL_ENABLED' in environ:
GRAPHQL_ENABLED = _environ_get_and_map('GRAPHQL_ENABLED', None, _AS_BOOL)
# # Enable custom logging. Please see the Django documentation for detailed guidance on configuring custom logs:
# # https://docs.djangoproject.com/en/stable/topics/logging/
# LOGGING = {}
# Automatically reset the lifetime of a valid session upon each authenticated request. Enables users to remain
# authenticated to NetBox indefinitely.
LOGIN_PERSISTENCE = _environ_get_and_map('LOGIN_PERSISTENCE', 'False', _AS_BOOL)
# Setting this to True will permit only authenticated users to access any part of NetBox. By default, anonymous users
# are permitted to access most data in NetBox (excluding secrets) but not make any changes.
LOGIN_REQUIRED = environ.get('LOGIN_REQUIRED', 'False').lower() == 'true'
LOGIN_REQUIRED = _environ_get_and_map('LOGIN_REQUIRED', 'False', _AS_BOOL)
# The length of time (in seconds) for which a user will remain logged into the web UI before being prompted to
# re-authenticate. (Default: 1209600 [14 days])
LOGIN_TIMEOUT = int(environ.get('LOGIN_TIMEOUT', 1209600))
LOGIN_TIMEOUT = _environ_get_and_map('LOGIN_TIMEOUT', 1209600, _AS_INT)
# Setting this to True will display a "maintenance mode" banner at the top of every page.
MAINTENANCE_MODE = environ.get('MAINTENANCE_MODE', 'False').lower() == 'true'
if 'MAINTENANCE_MODE' in environ:
MAINTENANCE_MODE = _environ_get_and_map('MAINTENANCE_MODE', None, _AS_BOOL)
# Maps provider
if 'MAPS_URL' in environ:
MAPS_URL = environ.get('MAPS_URL', None)
# An API consumer can request an arbitrary number of objects by appending the "limit" parameter to the URL (e.g.
# "?limit=1000"). This setting defines the maximum limit. Setting it to 0 or None will allow an API consumer to request
# all objects by specifying "?limit=0".
MAX_PAGE_SIZE = int(environ.get('MAX_PAGE_SIZE', 1000))
if 'MAX_PAGE_SIZE' in environ:
MAX_PAGE_SIZE = _environ_get_and_map('MAX_PAGE_SIZE', None, _AS_INT)
# The file path where uploaded media such as image attachments are stored. A trailing slash is not needed. Note that
# the default value of this setting is derived from the installed location.
MEDIA_ROOT = environ.get('MEDIA_ROOT', join(_BASE_DIR, 'media'))
# Expose Prometheus monitoring metrics at the HTTP endpoint '/metrics'
METRICS_ENABLED = environ.get('METRICS_ENABLED', 'False').lower() == 'true'
# Credentials that NetBox will uses to authenticate to devices when connecting via NAPALM.
NAPALM_USERNAME = environ.get('NAPALM_USERNAME', '')
NAPALM_PASSWORD = _read_secret('napalm_password', environ.get('NAPALM_PASSWORD', ''))
# NAPALM timeout (in seconds). (Default: 30)
NAPALM_TIMEOUT = int(environ.get('NAPALM_TIMEOUT', 30))
# NAPALM optional arguments (see http://napalm.readthedocs.io/en/latest/support/#optional-arguments). Arguments must
# be provided as a dictionary.
NAPALM_ARGS = {}
METRICS_ENABLED = _environ_get_and_map('METRICS_ENABLED', 'False', _AS_BOOL)
# Determine how many objects to display per page within a list. (Default: 50)
PAGINATE_COUNT = int(environ.get('PAGINATE_COUNT', 50))
if 'PAGINATE_COUNT' in environ:
PAGINATE_COUNT = _environ_get_and_map('PAGINATE_COUNT', None, _AS_INT)
# Enable installed plugins. Add the name of each plugin to the list.
PLUGINS = []
# # Enable installed plugins. Add the name of each plugin to the list.
# PLUGINS = []
# Plugins configuration settings. These settings are used by various plugins that the user may have installed.
# Each key in the dictionary is the name of an installed plugin and its value is a dictionary of settings.
PLUGINS_CONFIG = {
}
# # Plugins configuration settings. These settings are used by various plugins that the user may have installed.
# # Each key in the dictionary is the name of an installed plugin and its value is a dictionary of settings.
# PLUGINS_CONFIG = {
# }
# When determining the primary IP address for a device, IPv6 is preferred over IPv4 by default. Set this to True to
# prefer IPv4 instead.
PREFER_IPV4 = environ.get('PREFER_IPV4', 'False').lower() == 'true'
if 'PREFER_IPV4' in environ:
PREFER_IPV4 = _environ_get_and_map('PREFER_IPV4', None, _AS_BOOL)
# The default value for the amperage field when creating new power feeds.
if 'POWERFEED_DEFAULT_AMPERAGE' in environ:
POWERFEED_DEFAULT_AMPERAGE = _environ_get_and_map('POWERFEED_DEFAULT_AMPERAGE', None, _AS_INT)
# The default value (percentage) for the max_utilization field when creating new power feeds.
if 'POWERFEED_DEFAULT_MAX_UTILIZATION' in environ:
POWERFEED_DEFAULT_MAX_UTILIZATION = _environ_get_and_map('POWERFEED_DEFAULT_MAX_UTILIZATION', None, _AS_INT)
# The default value for the voltage field when creating new power feeds.
if 'POWERFEED_DEFAULT_VOLTAGE' in environ:
POWERFEED_DEFAULT_VOLTAGE = _environ_get_and_map('POWERFEED_DEFAULT_VOLTAGE', None, _AS_INT)
# Rack elevation size defaults, in pixels. For best results, the ratio of width to height should be roughly 10:1.
RACK_ELEVATION_DEFAULT_UNIT_HEIGHT = int(environ.get('RACK_ELEVATION_DEFAULT_UNIT_HEIGHT', 22))
RACK_ELEVATION_DEFAULT_UNIT_WIDTH = int(environ.get('RACK_ELEVATION_DEFAULT_UNIT_WIDTH', 220))
if 'RACK_ELEVATION_DEFAULT_UNIT_HEIGHT' in environ:
RACK_ELEVATION_DEFAULT_UNIT_HEIGHT = _environ_get_and_map('RACK_ELEVATION_DEFAULT_UNIT_HEIGHT', None, _AS_INT)
if 'RACK_ELEVATION_DEFAULT_UNIT_WIDTH' in environ:
RACK_ELEVATION_DEFAULT_UNIT_WIDTH = _environ_get_and_map('RACK_ELEVATION_DEFAULT_UNIT_WIDTH', None, _AS_INT)
# Remote authentication support
REMOTE_AUTH_ENABLED = environ.get('REMOTE_AUTH_ENABLED', 'False').lower() == 'true'
REMOTE_AUTH_BACKEND = environ.get('REMOTE_AUTH_BACKEND', 'netbox.authentication.RemoteUserBackend')
REMOTE_AUTH_ENABLED = _environ_get_and_map('REMOTE_AUTH_ENABLED', 'False', _AS_BOOL)
REMOTE_AUTH_BACKEND = _environ_get_and_map('REMOTE_AUTH_BACKEND', 'netbox.authentication.RemoteUserBackend', _AS_LIST)
REMOTE_AUTH_HEADER = environ.get('REMOTE_AUTH_HEADER', 'HTTP_REMOTE_USER')
REMOTE_AUTH_AUTO_CREATE_USER = environ.get('REMOTE_AUTH_AUTO_CREATE_USER', 'True').lower() == 'true'
REMOTE_AUTH_DEFAULT_GROUPS = list(filter(None, environ.get('REMOTE_AUTH_DEFAULT_GROUPS', '').split(' ')))
REMOTE_AUTH_AUTO_CREATE_USER = _environ_get_and_map('REMOTE_AUTH_AUTO_CREATE_USER', 'False', _AS_BOOL)
REMOTE_AUTH_DEFAULT_GROUPS = _environ_get_and_map('REMOTE_AUTH_DEFAULT_GROUPS', '', _AS_LIST)
# REMOTE_AUTH_DEFAULT_PERMISSIONS = {}
# This repository is used to check whether there is a new release of NetBox available. Set to None to disable the
# version check or use the URL below to check for release in the official NetBox repository.
# https://api.github.com/repos/netbox-community/netbox/releases
RELEASE_CHECK_URL = environ.get('RELEASE_CHECK_URL', None)
# The file path where custom reports will be stored. A trailing slash is not needed. Note that the default value of
# this setting is derived from the installed location.
REPORTS_ROOT = environ.get('REPORTS_ROOT', '/etc/netbox/reports')
# RELEASE_CHECK_URL = 'https://api.github.com/repos/netbox-community/netbox/releases'
# Maximum execution time for background tasks, in seconds.
RQ_DEFAULT_TIMEOUT = int(environ.get('RQ_DEFAULT_TIMEOUT', 300))
RQ_DEFAULT_TIMEOUT = _environ_get_and_map('RQ_DEFAULT_TIMEOUT', 300, _AS_INT)
# The file path where custom scripts will be stored. A trailing slash is not needed. Note that the default value of
# this setting is derived from the installed location.
SCRIPTS_ROOT = environ.get('SCRIPTS_ROOT', '/etc/netbox/scripts')
# The name to use for the csrf token cookie.
CSRF_COOKIE_NAME = environ.get('CSRF_COOKIE_NAME', 'csrftoken')
# Cross-Site-Request-Forgery-Attack settings. If Netbox is sitting behind a reverse proxy, you might need to set the CSRF_TRUSTED_ORIGINS flag.
# Django 4.0 requires the URL scheme to be specified in this setting. An example environment variable could look like:
# CSRF_TRUSTED_ORIGINS=https://demo.netbox.dev http://demo.netbox.dev
CSRF_TRUSTED_ORIGINS = _environ_get_and_map('CSRF_TRUSTED_ORIGINS', '', _AS_LIST)
# The name to use for the session cookie.
SESSION_COOKIE_NAME = environ.get('SESSION_COOKIE_NAME', 'sessionid')
# If true, the `includeSubDomains` directive will be included in the HTTP Strict Transport Security (HSTS) header.
# This directive instructs the browser to apply the HSTS policy to all subdomains of the current domain.
SECURE_HSTS_INCLUDE_SUBDOMAINS = _environ_get_and_map('SECURE_HSTS_INCLUDE_SUBDOMAINS', 'False', _AS_BOOL)
# If true, the `preload` directive will be included in the HTTP Strict Transport Security (HSTS) header.
# This directive instructs the browser to preload the site in HTTPS. Browsers that use the HSTS preload list will force the
# site to be accessed via HTTPS even if the user types HTTP in the address bar.
SECURE_HSTS_PRELOAD = _environ_get_and_map('SECURE_HSTS_PRELOAD', 'False', _AS_BOOL)
# If set to a non-zero integer value, the SecurityMiddleware sets the HTTP Strict Transport Security (HSTS) header on all
# responses that do not already have it. This will instruct the browser that the website must be accessed via HTTPS,
# blocking any HTTP request.
SECURE_HSTS_SECONDS = _environ_get_and_map('SECURE_HSTS_SECONDS', 0, _AS_INT)
# If true, all non-HTTPS requests will be automatically redirected to use HTTPS.
SECURE_SSL_REDIRECT = _environ_get_and_map('SECURE_SSL_REDIRECT', 'False', _AS_BOOL)
# By default, NetBox will store session data in the database. Alternatively, a file path can be specified here to use
# local file storage instead. (This can be useful for enabling authentication on a standby instance with read-only
# database access.) Note that the user as which NetBox runs must have read and write permissions to this path.
SESSION_FILE_PATH = environ.get('SESSIONS_ROOT', None)
SESSION_FILE_PATH = environ.get('SESSION_FILE_PATH', environ.get('SESSIONS_ROOT', None))
# Time zone (default: UTC)
TIME_ZONE = environ.get('TIME_ZONE', 'UTC')
# Date/time formatting. See the following link for supported formats:
# https://docs.djangoproject.com/en/stable/ref/templates/builtins/#date
DATE_FORMAT = environ.get('DATE_FORMAT', 'N j, Y')
SHORT_DATE_FORMAT = environ.get('SHORT_DATE_FORMAT', 'Y-m-d')
TIME_FORMAT = environ.get('TIME_FORMAT', 'g:i a')
SHORT_TIME_FORMAT = environ.get('SHORT_TIME_FORMAT', 'H:i:s')
DATETIME_FORMAT = environ.get('DATETIME_FORMAT', 'N j, Y g:i a')
SHORT_DATETIME_FORMAT = environ.get('SHORT_DATETIME_FORMAT', 'Y-m-d H:i')
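The new-style assignments in this hunk (REMOTE_AUTH_ENABLED, REMOTE_AUTH_DEFAULT_GROUPS, RQ_DEFAULT_TIMEOUT, CSRF_TRUSTED_ORIGINS, the SECURE_HSTS_* settings) all go through an _environ_get_and_map helper and _AS_BOOL/_AS_INT/_AS_LIST mappers that are defined earlier in configuration.py and are not part of this hunk. A minimal sketch of how such a helper could look; this is an assumption for context, not the exact code from the file:

from os import environ

_AS_BOOL = lambda value: value.lower() == 'true'
_AS_INT = lambda value: int(value)
_AS_LIST = lambda value: list(filter(None, value.split(' ')))

def _environ_get_and_map(variable_name, default=None, map_fn=None):
    env_value = environ.get(variable_name, default)
    if env_value is None:
        return None
    # Apply the mapper to turn the raw string (or the default) into the final type.
    return map_fn(env_value) if map_fn else env_value

# Example: RQ_DEFAULT_TIMEOUT=600 in the environment maps to the integer 600;
# an unset REMOTE_AUTH_ENABLED falls back to 'False' and maps to the boolean False.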

View File

@ -15,12 +15,6 @@
# 'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc', 'xmpp',
# )
## NAPALM optional arguments (see http://napalm.readthedocs.io/en/latest/support/#optional-arguments). Arguments must
## be provided as a dictionary.
# NAPALM_ARGS = {}
## Enable installed plugins. Add the name of each plugin to the list.
# from netbox.configuration.configuration import PLUGINS
# PLUGINS.append('my_plugin')

View File

@ -31,9 +31,12 @@ AUTH_LDAP_CONNECTION_OPTIONS = {
ldap.OPT_REFERRALS: 0
}
# Set the DN and password for the NetBox service account.
AUTH_LDAP_BIND_DN = environ.get('AUTH_LDAP_BIND_DN', '')
AUTH_LDAP_BIND_PASSWORD = _read_secret('auth_ldap_bind_password', environ.get('AUTH_LDAP_BIND_PASSWORD', ''))
AUTH_LDAP_BIND_AS_AUTHENTICATING_USER = environ.get('AUTH_LDAP_BIND_AS_AUTHENTICATING_USER', 'False').lower() == 'true'
# Set the DN and password for the NetBox service account if needed.
if not AUTH_LDAP_BIND_AS_AUTHENTICATING_USER:
AUTH_LDAP_BIND_DN = environ.get('AUTH_LDAP_BIND_DN', '')
AUTH_LDAP_BIND_PASSWORD = _read_secret('auth_ldap_bind_password', environ.get('AUTH_LDAP_BIND_PASSWORD', ''))
# Set a string template that describes any users distinguished name based on the username.
AUTH_LDAP_USER_DN_TEMPLATE = environ.get('AUTH_LDAP_USER_DN_TEMPLATE', None)
@ -46,20 +49,38 @@ AUTH_LDAP_START_TLS = environ.get('AUTH_LDAP_START_TLS', 'False').lower() == 'tr
# ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
LDAP_IGNORE_CERT_ERRORS = environ.get('LDAP_IGNORE_CERT_ERRORS', 'False').lower() == 'true'
# Include this setting if you want to validate the LDAP server certificates against a CA certificate directory on your server
# Note that this is a NetBox-specific setting which sets:
# ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, LDAP_CA_CERT_DIR)
LDAP_CA_CERT_DIR = environ.get('LDAP_CA_CERT_DIR', None)
# Include this setting if you want to validate the LDAP server certificates against your own CA.
# Note that this is a NetBox-specific setting which sets:
# ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, LDAP_CA_CERT_FILE)
LDAP_CA_CERT_FILE = environ.get('LDAP_CA_CERT_FILE', None)
AUTH_LDAP_USER_SEARCH_BASEDN = environ.get('AUTH_LDAP_USER_SEARCH_BASEDN', '')
AUTH_LDAP_USER_SEARCH_ATTR = environ.get('AUTH_LDAP_USER_SEARCH_ATTR', 'sAMAccountName')
AUTH_LDAP_USER_SEARCH_FILTER: str = environ.get(
'AUTH_LDAP_USER_SEARCH_FILTER', f'({AUTH_LDAP_USER_SEARCH_ATTR}=%(user)s)'
)
AUTH_LDAP_USER_SEARCH = LDAPSearch(
AUTH_LDAP_USER_SEARCH_BASEDN,
ldap.SCOPE_SUBTREE,
"(" + AUTH_LDAP_USER_SEARCH_ATTR + "=%(user)s)"
AUTH_LDAP_USER_SEARCH_BASEDN, ldap.SCOPE_SUBTREE, AUTH_LDAP_USER_SEARCH_FILTER
)
# This search ought to return all groups to which the user belongs. django_auth_ldap uses this to determine group
# hierarchy.
AUTH_LDAP_GROUP_SEARCH_BASEDN = environ.get('AUTH_LDAP_GROUP_SEARCH_BASEDN', '')
AUTH_LDAP_GROUP_SEARCH_CLASS = environ.get('AUTH_LDAP_GROUP_SEARCH_CLASS', 'group')
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(AUTH_LDAP_GROUP_SEARCH_BASEDN, ldap.SCOPE_SUBTREE,
"(objectClass=" + AUTH_LDAP_GROUP_SEARCH_CLASS + ")")
AUTH_LDAP_GROUP_SEARCH_FILTER: str = environ.get(
'AUTH_LDAP_GROUP_SEARCH_FILTER', f'(objectclass={AUTH_LDAP_GROUP_SEARCH_CLASS})'
)
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
AUTH_LDAP_GROUP_SEARCH_BASEDN, ldap.SCOPE_SUBTREE, AUTH_LDAP_GROUP_SEARCH_FILTER
)
AUTH_LDAP_GROUP_TYPE = _import_group_type(environ.get('AUTH_LDAP_GROUP_TYPE', 'GroupOfNamesType'))
# Define a group required to login.
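The hunk above replaces the hard-coded LDAP search filters with the configurable AUTH_LDAP_USER_SEARCH_FILTER and AUTH_LDAP_GROUP_SEARCH_FILTER settings. A small, self-contained sketch of how the default user filter is assembled and how it could be overridden through the environment; the OR filter shown is only an illustration, not a value from the repository:

from os import environ

search_attr = environ.get('AUTH_LDAP_USER_SEARCH_ATTR', 'sAMAccountName')
# The default keeps the old behaviour: match the configured attribute against the login name.
search_filter = environ.get('AUTH_LDAP_USER_SEARCH_FILTER', f'({search_attr}=%(user)s)')

# Example override, e.g. to accept either the account name or the mail address:
# AUTH_LDAP_USER_SEARCH_FILTER='(|(sAMAccountName=%(user)s)(mail=%(user)s))'
print(search_filter % {'user': 'jdoe'})  # -> (sAMAccountName=jdoe) with the defaults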

View File

@ -1,5 +1,22 @@
version: '3.4'
services:
netbox:
ports:
- 8000:8080
- "8000:8080"
# If you want the Nginx unit status page visible from the
# outside of the container add the following port mapping:
# - "8001:8081"
# healthcheck:
# Time for which the health check can fail after the container is started.
# This depends mostly on the performance of your database. On the first start,
# when all tables need to be created the start_period should be higher than on
# subsequent starts. For the first start after major version upgrades of NetBox
# the start_period might also need to be set higher.
# Default value in our docker-compose.yml is 60s
# start_period: 90s
# environment:
# SKIP_SUPERUSER: "false"
# SUPERUSER_API_TOKEN: ""
# SUPERUSER_EMAIL: ""
# SUPERUSER_NAME: ""
# SUPERUSER_PASSWORD: ""

View File

@ -0,0 +1,5 @@
services:
netbox:
ports:
- "127.0.0.1:8000:8080"

View File

@ -1,38 +1,64 @@
version: '3.4'
services:
netbox:
netbox: &netbox
image: ${IMAGE-netboxcommunity/netbox:latest}
depends_on:
- postgres
- redis
- redis-cache
postgres:
condition: service_healthy
redis:
condition: service_healthy
redis-cache:
condition: service_healthy
env_file: env/netbox.env
environment:
SKIP_STARTUP_SCRIPTS: ${SKIP_STARTUP_SCRIPTS-false}
user: 'unit:root'
volumes:
- ./startup_scripts:/opt/netbox/startup_scripts:z,ro
- ./${INITIALIZERS_DIR-initializers}:/opt/netbox/initializers:z,ro
- ./configuration:/etc/netbox/config:z,ro
- ./reports:/etc/netbox/reports:z,ro
- ./scripts:/etc/netbox/scripts:z,ro
- netbox-media-files:/opt/netbox/netbox/media:z
- ./test-configuration/test_config.py:/etc/netbox/config/test_config.py:z,ro
healthcheck:
start_period: ${NETBOX_START_PERIOD-120s}
timeout: 3s
interval: 15s
test: "curl -f http://localhost:8080/api/ || exit 1"
netbox-worker:
<<: *netbox
command:
- /opt/netbox/venv/bin/python
- /opt/netbox/netbox/manage.py
- rqworker
healthcheck:
start_period: 40s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q rqworker || exit 1"
netbox-housekeeping:
<<: *netbox
command:
- /opt/netbox/housekeeping.sh
healthcheck:
start_period: 40s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q housekeeping || exit 1"
postgres:
image: postgres:14-alpine
image: postgres:16-alpine
env_file: env/postgres.env
redis:
image: redis:6-alpine
healthcheck:
test: "pg_isready -t 2 -d $$POSTGRES_DB -U $$POSTGRES_USER" ## $$ because of docker-compose
interval: 10s
timeout: 5s
retries: 5
redis: &redis
image: redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
- redis-server --appendonly yes --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
env_file: env/redis.env
healthcheck:
start_period: 20s
timeout: 3s
interval: 15s
test: "timeout 2 redis-cli ping"
redis-cache:
image: redis:6-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
- redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
<<: *redis
env_file: env/redis-cache.env
volumes:
netbox-media-files:
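The compose file above introduces a YAML anchor (&netbox) that the worker and housekeeping services pull in via the "<<: *netbox" merge key, so they inherit the image, env_file and volumes and only override command and healthcheck. A small sketch (assuming PyYAML is available) showing how such a merge resolves:

import yaml  # PyYAML resolves the '<<' merge key in safe_load

doc = """
services:
  netbox: &netbox
    image: docker.io/netboxcommunity/netbox:latest
    env_file: env/netbox.env
  netbox-worker:
    <<: *netbox
    command: [/opt/netbox/venv/bin/python, /opt/netbox/netbox/manage.py, rqworker]
"""

services = yaml.safe_load(doc)["services"]
print(services["netbox-worker"]["image"])    # inherited from the &netbox anchor
print(services["netbox-worker"]["command"])  # overridden by the worker service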

View File

@ -1,48 +1,59 @@
version: '3.4'
services:
netbox: &netbox
image: netboxcommunity/netbox:${VERSION-v3.1-1.5.1}
image: docker.io/netboxcommunity/netbox:${VERSION-v4.0-2.9.1}
depends_on:
- postgres
- redis
- redis-cache
- netbox-worker
env_file: env/netbox.env
user: 'unit:root'
healthcheck:
start_period: 60s
timeout: 3s
interval: 15s
test: "curl -f http://localhost:8080/login/ || exit 1"
volumes:
- ./startup_scripts:/opt/netbox/startup_scripts:z,ro
- ./initializers:/opt/netbox/initializers:z,ro
- ./configuration:/etc/netbox/config:z,ro
- ./reports:/etc/netbox/reports:z,ro
- ./scripts:/etc/netbox/scripts:z,ro
- netbox-media-files:/opt/netbox/netbox/media:z
- netbox-media-files:/opt/netbox/netbox/media:rw
- netbox-reports-files:/opt/netbox/netbox/reports:rw
- netbox-scripts-files:/opt/netbox/netbox/scripts:rw
netbox-worker:
<<: *netbox
depends_on:
- redis
- postgres
netbox:
condition: service_healthy
command:
- /opt/netbox/venv/bin/python
- /opt/netbox/netbox/manage.py
- rqworker
healthcheck:
start_period: 20s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q rqworker || exit 1"
netbox-housekeeping:
<<: *netbox
depends_on:
- redis
- postgres
netbox:
condition: service_healthy
command:
- /opt/netbox/housekeeping.sh
healthcheck:
start_period: 20s
timeout: 3s
interval: 15s
test: "ps -aux | grep -v grep | grep -q housekeeping || exit 1"
# postgres
postgres:
image: postgres:14-alpine
image: docker.io/postgres:16-alpine
env_file: env/postgres.env
volumes:
- netbox-postgres-data:/var/lib/postgresql/data
# redis
redis:
image: redis:6-alpine
image: docker.io/redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
@ -51,17 +62,25 @@ services:
volumes:
- netbox-redis-data:/data
redis-cache:
image: redis:6-alpine
image: docker.io/redis:7-alpine
command:
- sh
- -c # this is to evaluate the $REDIS_PASSWORD from the env
- redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose
env_file: env/redis-cache.env
volumes:
- netbox-redis-cache-data:/data
volumes:
netbox-media-files:
driver: local
netbox-postgres-data:
driver: local
netbox-redis-cache-data:
driver: local
netbox-redis-data:
driver: local
netbox-reports-files:
driver: local
netbox-scripts-files:
driver: local

View File

@ -46,6 +46,8 @@ if ! ./manage.py migrate --check >/dev/null 2>&1; then
./manage.py remove_stale_contenttypes --no-input
echo "⚙️ Removing expired user sessions"
./manage.py clearsessions
echo "⚙️ Building search index (lazy)"
./manage.py reindex --lazy
fi
# Create Superuser if required
@ -70,22 +72,24 @@ else
fi
./manage.py shell --interface python <<END
from django.contrib.auth.models import User
from users.models import Token
from users.models import Token, User
if not User.objects.filter(username='${SUPERUSER_NAME}'):
u=User.objects.create_superuser('${SUPERUSER_NAME}', '${SUPERUSER_EMAIL}', '${SUPERUSER_PASSWORD}')
u = User.objects.create_superuser('${SUPERUSER_NAME}', '${SUPERUSER_EMAIL}', '${SUPERUSER_PASSWORD}')
Token.objects.create(user=u, key='${SUPERUSER_API_TOKEN}')
END
echo "💡 Superuser Username: ${SUPERUSER_NAME}, E-Mail: ${SUPERUSER_EMAIL}"
fi
# Run the startup scripts (and initializers)
if [ "$SKIP_STARTUP_SCRIPTS" == "true" ]; then
echo "↩️ Skipping startup scripts"
else
echo "import runpy; runpy.run_path('../startup_scripts')" | ./manage.py shell --interface python
fi
./manage.py shell --interface python <<END
from users.models import Token
try:
old_default_token = Token.objects.get(key="0123456789abcdef0123456789abcdef01234567")
if old_default_token:
print("⚠️ Warning: You have the old default admin API token in your database. This token is widely known; please remove it. Log in as your superuser and check API Tokens in your user menu.")
except Token.DoesNotExist:
pass
END
echo "✅ Initialisation is done."

View File

@ -1,8 +1,8 @@
#!/bin/bash
SECONDS=${HOUSEKEEPING_INTERVAL:=86400}
echo "Interval set to ${SECONDS} seconds"
SLEEP_SECONDS=${HOUSEKEEPING_INTERVAL:=86400}
echo "Interval set to ${SLEEP_SECONDS} seconds"
while true; do
date
/opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py housekeeping
sleep "${SECONDS}s"
sleep "${SLEEP_SECONDS}s"
done

View File

@ -1,6 +1,7 @@
#!/bin/bash
UNIT_CONFIG="${UNIT_CONFIG-/etc/unit/nginx-unit.json}"
# Also used in "nginx-unit.json"
UNIT_SOCKET="/opt/unit/unit.sock"
load_configuration() {
@ -50,7 +51,7 @@ exec unitd \
--control unix:$UNIT_SOCKET \
--pid /opt/unit/unit.pid \
--log /dev/stdout \
--state /opt/unit/state/ \
--tmp /opt/unit/tmp/ \
--statedir /opt/unit/state/ \
--tmpdir /opt/unit/tmp/ \
--user unit \
--group root

View File

@ -1,27 +1,45 @@
{
"listeners": {
"*:8080": {
"pass": "routes"
"0.0.0.0:8080": {
"pass": "routes/main"
},
"[::]:8080": {
"pass": "routes/main"
},
"0.0.0.0:8081": {
"pass": "routes/status"
},
"[::]:8081": {
"pass": "routes/status"
}
},
"routes": [
{
"match": {
"uri": "/static/*"
"routes": {
"main": [
{
"match": {
"uri": "/static/*"
},
"action": {
"share": "/opt/netbox/netbox${uri}"
}
},
"action": {
"share": "/opt/netbox/netbox"
{
"action": {
"pass": "applications/netbox"
}
}
},
{
"action": {
"pass": "applications/netbox"
],
"status": [
{
"match": {
"uri": "/status/*"
},
"action": {
"proxy": "http://unix:/opt/unit/unit.sock"
}
}
}
],
]
},
"applications": {
"netbox": {
"type": "python 3",
@ -35,6 +53,5 @@
}
}
},
"access_log": "/dev/stdout"
}
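The rewritten nginx-unit.json above adds a second listener on port 8081 whose routes/status route proxies to the Unit control socket, exposing the Unit status object. Assuming the optional "8001:8081" port mapping mentioned in the compose override comments is enabled, the status page could be queried from the host roughly like this (illustration only; URL and port are assumptions):

import json
import urllib.request

# Query the Unit status route published on the host via the optional 8001:8081 mapping.
with urllib.request.urlopen("http://localhost:8001/status/", timeout=5) as response:
    status = json.load(response)

print(status.get("connections"))  # accepted/active/idle/closed connection counters
print(status.get("requests"))     # total request counter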

env/netbox.env vendored
View File

@ -14,13 +14,10 @@ EMAIL_USERNAME=netbox
# EMAIL_USE_SSL and EMAIL_USE_TLS are mutually exclusive, i.e. they can't both be `true`!
EMAIL_USE_SSL=false
EMAIL_USE_TLS=false
GRAPHQL_ENABLED=true
HOUSEKEEPING_INTERVAL=86400
MAX_PAGE_SIZE=1000
MEDIA_ROOT=/opt/netbox/netbox/media
METRICS_ENABLED=false
NAPALM_PASSWORD=
NAPALM_TIMEOUT=10
NAPALM_USERNAME=
REDIS_CACHE_DATABASE=1
REDIS_CACHE_HOST=redis-cache
REDIS_CACHE_INSECURE_SKIP_TLS_VERIFY=false
@ -32,11 +29,6 @@ REDIS_INSECURE_SKIP_TLS_VERIFY=false
REDIS_PASSWORD=H733Kdjndks81
REDIS_SSL=false
RELEASE_CHECK_URL=https://api.github.com/repos/netbox-community/netbox/releases
SECRET_KEY=r8OwDznj!!dci#P9ghmRfdu1Ysxm0AiPeDCQhKE+N_rClfWNj
SKIP_STARTUP_SCRIPTS=false
SKIP_SUPERUSER=false
SUPERUSER_API_TOKEN=0123456789abcdef0123456789abcdef01234567
SUPERUSER_EMAIL=admin@example.com
SUPERUSER_NAME=admin
SUPERUSER_PASSWORD=admin
SECRET_KEY='r(m)9nLGnz$(_q3N4z1k(EFsMCjjjzx08x9VhNVcfd%6RF#r!6DE@+V5Zk2X'
SKIP_SUPERUSER=true
WEBHOOKS_ENABLED=true

View File

@ -1,7 +0,0 @@
# - prefix: 10.0.0.0/16
# rir: RFC1918
# tenant: tenant1
# - prefix: fd00:ccdd::/32
# rir: RFC4193 ULA
# - prefix: 2001:db8::/32
# rir: RFC3849

View File

@ -1,6 +0,0 @@
# - name: VPLS
# slug: vpls
# - name: MPLS
# slug: mpls
# - name: Internet
# slug: internet

View File

@ -1,7 +0,0 @@
# - cid: Circuit_ID-1
# provider: Provider1
# type: Internet
# tenant: tenant1
# - cid: Circuit_ID-2
# provider: Provider2
# type: MPLS

View File

@ -1,4 +0,0 @@
# - name: Group 1
# slug: group-1
# - name: Group 2
# slug: group-2

View File

@ -1,2 +0,0 @@
# - name: Hyper-V
# slug: hyper-v

View File

@ -1,7 +0,0 @@
# - name: cluster1
# type: Hyper-V
# group: Group 1
# tenant: tenant1
# - name: cluster2
# type: Hyper-V
# site: SING 1

View File

@ -1,93 +0,0 @@
## Possible Choices:
## type:
## - text
## - integer
## - boolean
## - date
## - url
## - select
## filter_logic:
## - disabled
## - loose
## - exact
##
## Examples:
# text_field:
# type: text
# label: Custom Text
# description: Enter text in a text field.
# required: false
# weight: 0
# on_objects:
# - dcim.models.Device
# - dcim.models.Rack
# - dcim.models.Site
# - dcim.models.DeviceType
# - ipam.models.IPAddress
# - ipam.models.Prefix
# - tenancy.models.Tenant
# - virtualization.models.VirtualMachine
# integer_field:
# type: integer
# label: Custom Number
# description: Enter numbers into an integer field.
# required: true
# filter_logic: loose
# weight: 10
# on_objects:
# - tenancy.models.Tenant
# select_field:
# type: select
# label: Choose between items
# required: false
# filter_logic: exact
# weight: 30
# default: First Item
# on_objects:
# - dcim.models.Device
# choices:
# - First Item
# - Second Item
# - Third Item
# - Fifth Item
# - Fourth Item
# select_field_legacy_format:
# type: select
# label: Choose between items
# required: false
# filter_logic: loose
# weight: 30
# on_objects:
# - dcim.models.Device
# choices:
# - value: A # this is the deprecated format.
# - value: B # we only use it for the tests.
# - value: C # please see above for the new format.
# - value: "D like deprecated"
# weight: 999
# - value: E
# boolean_field:
# type: boolean
# label: Yes Or No?
# required: true
# filter_logic: loose
# default: "false" # important: put "false" in quotes!
# weight: 90
# on_objects:
# - dcim.models.Device
# url_field:
# type: url
# label: Hyperlink
# description: Link to something nice.
# required: true
# filter_logic: disabled
# on_objects:
# - tenancy.models.Tenant
# date_field:
# type: date
# label: Important Date
# required: false
# filter_logic: disabled
# on_objects:
# - dcim.models.Device

View File

@ -1,21 +0,0 @@
## Possible Choices:
## new_window:
## - True
## - False
## content_type:
## - device
## - site
## - any-other-content-type
##
## Examples:
# - name: link_to_repo
# link_text: 'Link to Netbox Docker'
# link_url: 'https://github.com/netbox-community/netbox-docker'
# new_window: False
# content_type: device
# - name: link_to_localhost
# link_text: 'Link to localhost'
# link_url: 'http://localhost'
# new_window: True
# content_type: device

View File

@ -1,18 +0,0 @@
## Possible Choices:
## type:
## - virtual
## - lag
## - 1000base-t
## - ... and many more. See for yourself:
## https://github.com/netbox-community/netbox/blob/295d4f0394b431351c0cb2c3ecc791df68c6c2fb/netbox/dcim/choices.py#L510
##
## Examples:
# - device: server01
# enabled: true
# type: virtual
# name: to-server02
# - device: server02
# enabled: true
# type: virtual
# name: to-server01

View File

@ -1,15 +0,0 @@
# - name: switch
# slug: switch
# color: Grey
# - name: router
# slug: router
# color: Cyan
# - name: load-balancer
# slug: load-balancer
# color: Red
# - name: server
# slug: server
# color: Blue
# - name: patchpanel
# slug: patchpanel
# color: Black

View File

@ -1,23 +0,0 @@
# - model: Model 1
# manufacturer: Manufacturer 1
# slug: model-1
# u_height: 2
# custom_field_data:
# text_field: Description
# - model: Model 2
# manufacturer: Manufacturer 1
# slug: model-2
# custom_field_data:
# text_field: Description
# - model: Model 3
# manufacturer: Manufacturer 1
# slug: model-3
# is_full_depth: false
# u_height: 0
# custom_field_data:
# text_field: Description
# - model: Other
# manufacturer: No Name
# slug: other
# custom_field_data:
# text_field: Description

View File

@ -1,53 +0,0 @@
## Possible Choices:
## face:
## - front
## - rear
## status:
## - offline
## - active
## - planned
## - staged
## - failed
## - inventory
## - decommissioning
##
## Examples:
# - name: server01
# device_role: server
# device_type: Other
# site: AMS 1
# rack: rack-01
# face: front
# position: 1
# custom_field_data:
# text_field: Description
# - name: server02
# device_role: server
# device_type: Other
# site: AMS 2
# rack: rack-02
# face: front
# position: 2
# primary_ip4: 10.1.1.2/24
# primary_ip6: 2001:db8:a000:1::2/64
# custom_field_data:
# text_field: Description
# - name: server03
# device_role: server
# device_type: Other
# site: SING 1
# rack: rack-03
# face: front
# position: 3
# custom_field_data:
# text_field: Description
# - name: server04
# device_role: server
# device_type: Other
# site: SING 1
# location: cage 101
# face: front
# position: 3
# custom_field_data:
# text_field: Description

View File

@ -1,9 +0,0 @@
# applications:
# users:
# - technical_user
# readers:
# users:
# - reader
# writers:
# users:
# - writer

View File

@ -1,44 +0,0 @@
## Possible Choices:
## status:
## - active
## - reserved
## - deprecated
## - dhcp
## role:
## - loopback
## - secondary
## - anycast
## - vip
## - vrrp
## - hsrp
## - glbp
## - carp
##
## Examples:
# - address: 10.1.1.1/24
# device: server01
# interface: to-server02
# status: active
# vrf: vrf1
# - address: 2001:db8:a000:1::1/64
# device: server01
# interface: to-server02
# status: active
# vrf: vrf1
# - address: 10.1.1.2/24
# device: server02
# interface: to-server01
# status: active
# - address: 2001:db8:a000:1::2/64
# device: server02
# interface: to-server01
# status: active
# - address: 10.1.1.10/24
# description: reserved IP
# status: reserved
# tenant: tenant1
# - address: 2001:db8:a000:1::10/64
# description: reserved IP
# status: reserved
# tenant: tenant1

View File

@ -1,3 +0,0 @@
# - name: cage 101
# slug: cage-101
# site: SING 1

View File

@ -1,6 +0,0 @@
# - name: Manufacturer 1
# slug: manufacturer-1
# - name: Manufacturer 2
# slug: manufacturer-2
# - name: No Name
# slug: no-name

View File

@ -1,48 +0,0 @@
# all.ro:
# actions:
# - view
# description: 'Read Only for All Objects'
# enabled: true
# groups:
# - applications
# - readers
# object_types: all
# users:
# - jdoe
# all.rw:
# actions:
# - add
# - change
# - delete
# - view
# description: 'Read/Write for All Objects'
# enabled: true
# groups:
# - writers
# object_types: all
# network_team.rw:
# actions:
# - add
# - change
# - delete
# - view
# description: "Network Team Permissions"
# enabled: true
# object_types:
# circuits:
# - circuit
# - circuittermination
# - circuittype
# - provider
# dcim: all
# ipam:
# - aggregate
# - ipaddress
# - prefix
# - rir
# - role
# - routetarget
# - service
# - vlan
# - vlangroup
# - vrf

View File

@ -1,15 +0,0 @@
# - name: Platform 1
# slug: platform-1
# manufacturer: Manufacturer 1
# napalm_driver: driver1
# napalm_args: "{'arg1': 'value1', 'arg2': 'value2'}"
# - name: Platform 2
# slug: platform-2
# manufacturer: Manufacturer 2
# napalm_driver: driver2
# napalm_args: "{'arg1': 'value1', 'arg2': 'value2'}"
# - name: Platform 3
# slug: platform-3
# manufacturer: No Name
# napalm_driver: driver3
# napalm_args: "{'arg1': 'value1', 'arg2': 'value2'}"

View File

@ -1,14 +0,0 @@
# - name: power feed 1
# power_panel: power panel AMS 1
# voltage: 208
# amperage: 50
# max_utilization: 80
# phase: Single phase
# rack: rack-01
# - name: power feed 2
# power_panel: power panel SING 1
# voltage: 208
# amperage: 50
# max_utilization: 80
# phase: Three-phase
# rack: rack-03

View File

@ -1,5 +0,0 @@
# - name: power panel AMS 1
# site: AMS 1
# - name: power panel SING 1
# site: SING 1
# location: cage 101

View File

@ -1,2 +0,0 @@
# - name: Main Management
# slug: main-management

View File

@ -1,29 +0,0 @@
## Possible Choices:
## status:
## - container
## - active
## - reserved
## - deprecated
##
## Examples:
# - description: prefix1
# prefix: 10.1.1.0/24
# site: AMS 1
# status: active
# tenant: tenant1
# vlan: vlan1
# - description: prefix2
# prefix: 10.1.2.0/24
# site: AMS 2
# status: active
# tenant: tenant2
# vlan: vlan2
# is_pool: true
# vrf: vrf2
# - description: ipv6 prefix1
# prefix: 2001:db8:a000:1::/64
# site: AMS 2
# status: active
# tenant: tenant2
# vlan: vlan2

View File

@ -1,6 +0,0 @@
# - name: Provider1
# slug: provider1
# asn: 121
# - name: Provider2
# slug: provider2
# asn: 122

View File

@ -1,12 +0,0 @@
# - name: Role 1
# slug: role-1
# color: Pink
# - name: Role 2
# slug: role-2
# color: Cyan
# - name: Role 3
# slug: role-3
# color: Grey
# - name: Role 4
# slug: role-4
# color: Teal

View File

@ -1,41 +0,0 @@
## Possible Choices:
## width:
## - 19
## - 23
## types:
## - 2-post-frame
## - 4-post-frame
## - 4-post-cabinet
## - wall-frame
## - wall-cabinet
## outer_unit:
## - mm
## - in
##
## Examples:
# - site: AMS 1
# name: rack-01
# role: Role 1
# type: 4-post-cabinet
# width: 19
# u_height: 47
# custom_field_data:
# text_field: Description
# - site: AMS 2
# name: rack-02
# role: Role 2
# type: 4-post-cabinet
# width: 19
# u_height: 47
# custom_field_data:
# text_field: Description
# - site: SING 1
# name: rack-03
# location: cage 101
# role: Role 3
# type: 4-post-cabinet
# width: 19
# u_height: 47
# custom_field_data:
# text_field: Description

View File

@ -1,10 +0,0 @@
# - name: Singapore
# slug: singapore
# - name: Amsterdam
# slug: amsterdam
# - name: Downtown
# slug: downtown
# parent: Amsterdam
# - name: Suburbs
# slug: suburbs
# parent: Amsterdam

View File

@ -1,9 +0,0 @@
# - is_private: true
# name: RFC1918
# slug: rfc1918
# - is_private: true
# name: RFC4193 ULA
# slug: rfc4193-ula
# - is_private: true
# name: RFC3849
# slug: rfc3849

View File

@ -1,3 +0,0 @@
# - name: 65000:1001
# tenant: tenant1
# - name: 65000:1002

View File

@ -1,15 +0,0 @@
# - name: DNS
# protocol: TCP
# ports:
# - 53
# virtual_machine: virtual machine 1
# - name: DNS
# protocol: UDP
# ports:
# - 53
# virtual_machine: virtual machine 1
# - name: MISC
# protocol: UDP
# ports:
# - 4000
# device: server01

View File

@ -1,34 +0,0 @@
# - name: AMS 1
# slug: ams1
# region: Downtown
# status: active
# facility: Amsterdam 1
# asn: 12345
# custom_field_data:
# text_field: Description for AMS1
# - name: AMS 2
# slug: ams2
# region: Downtown
# status: active
# facility: Amsterdam 2
# asn: 54321
# custom_field_data:
# text_field: Description for AMS2
# - name: AMS 3
# slug: ams3
# region: Suburbs
# status: active
# facility: Amsterdam 3
# asn: 67890
# tenant: tenant1
# custom_field_data:
# text_field: Description for AMS3
# - name: SING 1
# slug: sing1
# region: Singapore
# status: active
# facility: Singapore 1
# asn: 09876
# tenant: tenant2
# custom_field_data:
# text_field: Description for SING1

View File

@ -1,12 +0,0 @@
# - name: Tag 1
# slug: tag-1
# color: Pink
# - name: Tag 2
# slug: tag-2
# color: Cyan
# - name: Tag 3
# slug: tag-3
# color: Grey
# - name: Tag 4
# slug: tag-4
# color: Teal

View File

@ -1,4 +0,0 @@
# - name: Tenant Group 1
# slug: tenant-group-1
# - name: Tenant Group 2
# slug: tenant-group-2

View File

@ -1,5 +0,0 @@
# - name: tenant1
# slug: tenant1
# - name: tenant2
# slug: tenant2
# group: Tenant Group 2

View File

@ -1,14 +0,0 @@
# technical_user:
# api_token: 0123456789technicaluser789abcdef01234567 # must be looooong!
# reader:
# password: reader
# writer:
# password: writer
# jdoe:
# first_name: John
# last_name: Doe
# api_token: 0123456789jdoe789abcdef01234567jdoe
# is_active: True
# is_superuser: False
# is_staff: False
# email: john.doe@example.com

View File

@ -1,28 +0,0 @@
## Possible Choices:
## status:
## - active
## - offline
## - staged
##
## Examples:
# - cluster: cluster1
# comments: VM1
# disk: 200
# memory: 4096
# name: virtual machine 1
# platform: Platform 2
# status: active
# tenant: tenant1
# vcpus: 8
# - cluster: cluster1
# comments: VM2
# disk: 100
# memory: 2048
# name: virtual machine 2
# platform: Platform 2
# primary_ip4: 10.1.1.10/24
# primary_ip6: 2001:db8:a000:1::10/64
# status: active
# tenant: tenant1
# vcpus: 8

View File

@ -1,12 +0,0 @@
# - description: Network Interface 1
# enabled: true
# mac_address: 00:77:77:77:77:77
# mtu: 1500
# name: Network Interface 1
# virtual_machine: virtual machine 1
# - description: Network Interface 2
# enabled: true
# mac_address: 00:55:55:55:55:55
# mtu: 1500
# name: Network Interface 2
# virtual_machine: virtual machine 1

View File

@ -1,24 +0,0 @@
# - name: VLAN group 1
# scope_type: dcim.region
# scope: Amsterdam
# slug: vlan-group-1
# - name: VLAN group 2
# scope_type: dcim.site
# scope: AMS 1
# slug: vlan-group-2
# - name: VLAN group 3
# scope_type: dcim.location
# scope: cage 101
# slug: vlan-group-3
# - name: VLAN group 4
# scope_type: dcim.rack
# scope: rack-01
# slug: vlan-group-4
# - name: VLAN group 5
# scope_type: virtualization.cluster
# scope: cluster1
# slug: vlan-group-5
# - name: VLAN group 6
# scope_type: virtualization.clustergroup
# scope: Group 1
# slug: vlan-group-6

View File

@ -1,19 +0,0 @@
## Possible Choices:
## status:
## - active
## - reserved
## - deprecated
##
## Examples:
# - name: vlan1
# site: AMS 1
# status: active
# vid: 5
# role: Main Management
# description: VLAN 5 for MGMT
# - group: VLAN group 2
# name: vlan2
# site: AMS 1
# status: active
# vid: 1300

View File

@ -1,8 +0,0 @@
# - enforce_unique: true
# name: vrf1
# tenant: tenant1
# description: main VRF
# - enforce_unique: true
# name: vrf2
# rd: "6500:6500"
# tenant: tenant2

View File

@ -1,27 +0,0 @@
## Possible Choices:
## object_types:
## - device
## - site
## - any-other-content-type
## types:
## - type_create
## - type_update
## - type_delete
## Examples:
# - name: device_creation
# payload_url: 'http://localhost:8080'
# object_types:
# - device
# - cable
# type_create: True
# - name: device_update
# payload_url: 'http://localhost:8080'
# object_types:
# - device
# type_update: True
# - name: device_delete
# payload_url: 'http://localhost:8080'
# object_types:
# - device
# type_delete: True

View File

@ -1,46 +0,0 @@
from dcim.choices import DeviceStatusChoices
from dcim.models import ConsolePort, Device, PowerPort
from extras.reports import Report
class DeviceConnectionsReport(Report):
description = "Validate the minimum physical connections for each device"
def test_console_connection(self):
# Check that every console port for every active device has a connection defined.
active = DeviceStatusChoices.STATUS_ACTIVE
for console_port in ConsolePort.objects.prefetch_related('device').filter(device__status=active):
if console_port.connected_endpoint is None:
self.log_failure(
console_port.device,
"No console connection defined for {}".format(console_port.name)
)
elif not console_port.connection_status:
self.log_warning(
console_port.device,
"Console connection for {} marked as planned".format(console_port.name)
)
else:
self.log_success(console_port.device)
def test_power_connections(self):
# Check that every active device has at least two connected power supplies.
for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE):
connected_ports = 0
for power_port in PowerPort.objects.filter(device=device):
if power_port.connected_endpoint is not None:
connected_ports += 1
if not power_port.connection_status:
self.log_warning(
device,
"Power connection for {} marked as planned".format(power_port.name)
)
if connected_ports < 2:
self.log_failure(
device,
"{} connected power supplies found (2 needed)".format(connected_ports)
)
else:
self.log_success(device)

View File

@ -1,5 +1,5 @@
napalm==3.3.1
ruamel.yaml==0.17.17
django-auth-ldap==3.0.0
google-crc32c==1.3.0
django-storages[azure,boto3,dropbox,google,libcloud,sftp]==1.12.3
django-auth-ldap==4.8.0
django-storages[azure,boto3,dropbox,google,libcloud,sftp]==1.14.3
dulwich==0.22.1
python3-saml==1.16.0 --no-binary lxml
sentry-sdk[django]==2.1.1
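The requirements change above pins sentry-sdk with the django extra, which pulls in the packages needed for Sentry's Django integration. A minimal, assumed example of what enabling that integration looks like in application code (the DSN is a placeholder, not a real project):

import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[DjangoIntegration()],
    traces_sample_rate=0.0,  # tracing disabled in this sketch
)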

View File

View File

@ -1,21 +0,0 @@
import sys
from django.contrib.auth.models import User
from startup_script_utils import load_yaml
from users.models import Token
users = load_yaml("/opt/netbox/initializers/users.yml")
if users is None:
sys.exit()
for username, user_details in users.items():
if not User.objects.filter(username=username):
user = User.objects.create_user(
username=username,
password=user_details.get("password", 0) or User.objects.make_random_password(),
)
print("👤 Created user", username)
if user_details.get("api_token", 0):
Token.objects.create(user=user, key=user_details["api_token"])
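This and the following removed startup scripts all import helpers such as load_yaml, pop_custom_fields and set_custom_fields_values from startup_script_utils, which is not shown in this diff. A rough sketch of what those helpers plausibly did, stated as an assumption for context only:

from pathlib import Path

from ruamel.yaml import YAML  # ruamel.yaml shipped with the old requirements alongside these scripts

def load_yaml(yaml_file: str):
    yf = Path(yaml_file)
    if not yf.is_file():
        return None
    with yf.open("r") as stream:
        return YAML(typ="safe").load(stream)

def pop_custom_fields(params: dict) -> dict:
    # Strip custom field data out of the params so it never reaches get_or_create().
    return params.pop("custom_field_data", {}) or {}

def set_custom_fields_values(entity, custom_field_data: dict):
    if custom_field_data:
        entity.custom_field_data.update(custom_field_data)
        entity.save()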

View File

@ -1,23 +0,0 @@
import sys
from startup_script_utils import load_yaml
from users.models import AdminGroup, AdminUser
groups = load_yaml("/opt/netbox/initializers/groups.yml")
if groups is None:
sys.exit()
for groupname, group_details in groups.items():
group, created = AdminGroup.objects.get_or_create(name=groupname)
if created:
print("👥 Created group", groupname)
for username in group_details.get("users", []):
user = AdminUser.objects.get(username=username)
if user:
group.user_set.add(user)
print(" 👤 Assigned user %s to group %s" % (username, group.name))
group.save()

View File

@ -1,66 +0,0 @@
import sys
from django.contrib.contenttypes.models import ContentType
from startup_script_utils import load_yaml
from users.models import AdminGroup, AdminUser, ObjectPermission
object_permissions = load_yaml("/opt/netbox/initializers/object_permissions.yml")
if object_permissions is None:
sys.exit()
for permission_name, permission_details in object_permissions.items():
object_permission, created = ObjectPermission.objects.get_or_create(
name=permission_name,
description=permission_details["description"],
enabled=permission_details["enabled"],
actions=permission_details["actions"],
)
if permission_details.get("object_types", 0):
object_types = permission_details["object_types"]
if object_types == "all":
object_permission.object_types.set(ContentType.objects.all())
else:
for app_label, models in object_types.items():
if models == "all":
app_models = ContentType.objects.filter(app_label=app_label)
for app_model in app_models:
object_permission.object_types.add(app_model.id)
else:
# There is an explicit list of models for this app; add each one individually.
for model in models:
object_permission.object_types.add(
ContentType.objects.get(app_label=app_label, model=model)
)
print("🔓 Created object permission", object_permission.name)
if permission_details.get("groups", 0):
for groupname in permission_details["groups"]:
group = AdminGroup.objects.filter(name=groupname).first()
if group:
object_permission.groups.add(group)
print(
" 👥 Assigned group %s object permission of %s"
% (groupname, object_permission.name)
)
if permission_details.get("users", 0):
for username in permission_details["users"]:
user = AdminUser.objects.filter(username=username).first()
if user:
object_permission.users.add(user)
print(
" 👤 Assigned user %s object permission of %s"
% (username, object_permission.name)
)
object_permission.save()

View File

@ -1,67 +0,0 @@
import sys
from extras.models import CustomField
from startup_script_utils import load_yaml
def get_class_for_class_path(class_path):
import importlib
from django.contrib.contenttypes.models import ContentType
module_name, class_name = class_path.rsplit(".", 1)
module = importlib.import_module(module_name)
clazz = getattr(module, class_name)
return ContentType.objects.get_for_model(clazz)
customfields = load_yaml("/opt/netbox/initializers/custom_fields.yml")
if customfields is None:
sys.exit()
for cf_name, cf_details in customfields.items():
custom_field, created = CustomField.objects.get_or_create(name=cf_name)
if created:
if cf_details.get("default", False):
custom_field.default = cf_details["default"]
if cf_details.get("description", False):
custom_field.description = cf_details["description"]
if cf_details.get("label", False):
custom_field.label = cf_details["label"]
for object_type in cf_details.get("on_objects", []):
custom_field.content_types.add(get_class_for_class_path(object_type))
if cf_details.get("required", False):
custom_field.required = cf_details["required"]
if cf_details.get("type", False):
custom_field.type = cf_details["type"]
if cf_details.get("filter_logic", False):
custom_field.filter_logic = cf_details["filter_logic"]
if cf_details.get("weight", -1) >= 0:
custom_field.weight = cf_details["weight"]
if cf_details.get("choices", False):
custom_field.choices = []
for choice_detail in cf_details.get("choices", []):
if isinstance(choice_detail, dict) and "value" in choice_detail:
# legacy mode
print(
f"⚠️ Please migrate the choice '{choice_detail['value']}' of '{cf_name}'"
+ " to the new format, as 'weight' is no longer supported!"
)
custom_field.choices.append(choice_detail["value"])
else:
custom_field.choices.append(choice_detail)
custom_field.save()
print("🔧 Created custom field", cf_name)

View File

@ -1,23 +0,0 @@
import sys
from extras.models import Tag
from startup_script_utils import load_yaml
from utilities.choices import ColorChoices
tags = load_yaml("/opt/netbox/initializers/tags.yml")
if tags is None:
sys.exit()
for params in tags:
if "color" in params:
color = params.pop("color")
for color_tpl in ColorChoices:
if color in color_tpl:
params["color"] = color_tpl[0]
tag, created = Tag.objects.get_or_create(**params)
if created:
print("🎨 Created Tag", tag.name)

View File

@ -1,15 +0,0 @@
import sys
from startup_script_utils import load_yaml
from tenancy.models import TenantGroup
tenant_groups = load_yaml("/opt/netbox/initializers/tenant_groups.yml")
if tenant_groups is None:
sys.exit()
for params in tenant_groups:
tenant_group, created = TenantGroup.objects.get_or_create(**params)
if created:
print("🔳 Created Tenant Group", tenant_group.name)

View File

@ -1,25 +0,0 @@
import sys
from dcim.models import Region
from startup_script_utils import load_yaml
regions = load_yaml("/opt/netbox/initializers/regions.yml")
if regions is None:
sys.exit()
optional_assocs = {"parent": (Region, "name")}
for params in regions:
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
region, created = Region.objects.get_or_create(**params)
if created:
print("🌐 Created region", region.name)

View File

@ -1,28 +0,0 @@
import sys
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant, TenantGroup
tenants = load_yaml("/opt/netbox/initializers/tenants.yml")
if tenants is None:
sys.exit()
optional_assocs = {"group": (TenantGroup, "name")}
for params in tenants:
custom_field_data = pop_custom_fields(params)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
tenant, created = Tenant.objects.get_or_create(**params)
if created:
set_custom_fields_values(tenant, custom_field_data)
print("👩‍💻 Created Tenant", tenant.name)

View File

@ -1,29 +0,0 @@
import sys
from dcim.models import Region, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
sites = load_yaml("/opt/netbox/initializers/sites.yml")
if sites is None:
sys.exit()
optional_assocs = {"region": (Region, "name"), "tenant": (Tenant, "name")}
for params in sites:
custom_field_data = pop_custom_fields(params)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
site, created = Site.objects.get_or_create(**params)
if created:
set_custom_fields_values(site, custom_field_data)
print("📍 Created site", site.name)

View File

@ -1,15 +0,0 @@
import sys
from dcim.models import Manufacturer
from startup_script_utils import load_yaml
manufacturers = load_yaml("/opt/netbox/initializers/manufacturers.yml")
if manufacturers is None:
sys.exit()
for params in manufacturers:
manufacturer, created = Manufacturer.objects.get_or_create(**params)
if created:
print("🏭 Created Manufacturer", manufacturer.name)

View File

@ -1,37 +0,0 @@
import sys
from dcim.models import DeviceType, Manufacturer, Region
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
device_types = load_yaml("/opt/netbox/initializers/device_types.yml")
if device_types is None:
sys.exit()
required_assocs = {"manufacturer": (Manufacturer, "name")}
optional_assocs = {"region": (Region, "name"), "tenant": (Tenant, "name")}
for params in device_types:
custom_field_data = pop_custom_fields(params)
for assoc, details in required_assocs.items():
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
device_type, created = DeviceType.objects.get_or_create(**params)
if created:
set_custom_fields_values(device_type, custom_field_data)
print("🔡 Created device type", device_type.manufacturer, device_type.model)

View File

@ -1,23 +0,0 @@
import sys
from dcim.models import RackRole
from startup_script_utils import load_yaml
from utilities.choices import ColorChoices
rack_roles = load_yaml("/opt/netbox/initializers/rack_roles.yml")
if rack_roles is None:
sys.exit()
for params in rack_roles:
if "color" in params:
color = params.pop("color")
for color_tpl in ColorChoices:
if color in color_tpl:
params["color"] = color_tpl[0]
rack_role, created = RackRole.objects.get_or_create(**params)
if created:
print("🎨 Created rack role", rack_role.name)

View File

@ -1,23 +0,0 @@
import sys
from dcim.models import Location, Site
from startup_script_utils import load_yaml
rack_groups = load_yaml("/opt/netbox/initializers/locations.yml")
if rack_groups is None:
sys.exit()
required_assocs = {"site": (Site, "name")}
for params in rack_groups:
for assoc, details in required_assocs.items():
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
location, created = Location.objects.get_or_create(**params)
if created:
print("🎨 Created location", location.name)

View File

@ -1,41 +0,0 @@
import sys
from dcim.models import Location, Rack, RackRole, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
racks = load_yaml("/opt/netbox/initializers/racks.yml")
if racks is None:
sys.exit()
required_assocs = {"site": (Site, "name")}
optional_assocs = {
"role": (RackRole, "name"),
"tenant": (Tenant, "name"),
"location": (Location, "name"),
}
for params in racks:
custom_field_data = pop_custom_fields(params)
for assoc, details in required_assocs.items():
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
rack, created = Rack.objects.get_or_create(**params)
if created:
set_custom_fields_values(rack, custom_field_data)
print("🔳 Created rack", rack.site, rack.name)

View File

@ -1,24 +0,0 @@
import sys
from dcim.models import DeviceRole
from startup_script_utils import load_yaml
from utilities.choices import ColorChoices
device_roles = load_yaml("/opt/netbox/initializers/device_roles.yml")
if device_roles is None:
sys.exit()
for params in device_roles:
if "color" in params:
color = params.pop("color")
for color_tpl in ColorChoices:
if color in color_tpl:
params["color"] = color_tpl[0]
device_role, created = DeviceRole.objects.get_or_create(**params)
if created:
print("🎨 Created device role", device_role.name)

View File

@ -1,27 +0,0 @@
import sys
from dcim.models import Manufacturer, Platform
from startup_script_utils import load_yaml
platforms = load_yaml("/opt/netbox/initializers/platforms.yml")
if platforms is None:
sys.exit()
optional_assocs = {
"manufacturer": (Manufacturer, "name"),
}
for params in platforms:
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
platform, created = Platform.objects.get_or_create(**params)
if created:
print("💾 Created platform", platform.name)

View File

@ -1,15 +0,0 @@
import sys
from startup_script_utils import load_yaml
from virtualization.models import ClusterType
cluster_types = load_yaml("/opt/netbox/initializers/cluster_types.yml")
if cluster_types is None:
sys.exit()
for params in cluster_types:
cluster_type, created = ClusterType.objects.get_or_create(**params)
if created:
print("🧰 Created Cluster Type", cluster_type.name)

View File

@ -1,15 +0,0 @@
import sys
from startup_script_utils import load_yaml
from virtualization.models import ClusterGroup
cluster_groups = load_yaml("/opt/netbox/initializers/cluster_groups.yml")
if cluster_groups is None:
sys.exit()
for params in cluster_groups:
cluster_group, created = ClusterGroup.objects.get_or_create(**params)
if created:
print("🗄️ Created Cluster Group", cluster_group.name)

View File

@ -1,42 +0,0 @@
import sys
from dcim.models import Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
from virtualization.models import Cluster, ClusterGroup, ClusterType
clusters = load_yaml("/opt/netbox/initializers/clusters.yml")
if clusters is None:
sys.exit()
required_assocs = {"type": (ClusterType, "name")}
optional_assocs = {
"site": (Site, "name"),
"group": (ClusterGroup, "name"),
"tenant": (Tenant, "name"),
}
for params in clusters:
custom_field_data = pop_custom_fields(params)
for assoc, details in required_assocs.items():
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
cluster, created = Cluster.objects.get_or_create(**params)
if created:
set_custom_fields_values(cluster, custom_field_data)
print("🗄️ Created cluster", cluster.name)

View File

@ -1,52 +0,0 @@
import sys
from dcim.models import Device, DeviceRole, DeviceType, Location, Platform, Rack, Site
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
from virtualization.models import Cluster
devices = load_yaml("/opt/netbox/initializers/devices.yml")
if devices is None:
sys.exit()
required_assocs = {
"device_role": (DeviceRole, "name"),
"device_type": (DeviceType, "model"),
"site": (Site, "name"),
}
optional_assocs = {
"tenant": (Tenant, "name"),
"platform": (Platform, "name"),
"rack": (Rack, "name"),
"cluster": (Cluster, "name"),
"location": (Location, "name"),
}
for params in devices:
custom_field_data = pop_custom_fields(params)
# primary ips are handled later in `270_primary_ips.py`
params.pop("primary_ip4", None)
params.pop("primary_ip6", None)
for assoc, details in required_assocs.items():
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
device, created = Device.objects.get_or_create(**params)
if created:
set_custom_fields_values(device, custom_field_data)
print("🖥️ Created device", device.name)

View File

@ -1,15 +0,0 @@
import sys
from ipam.models import RIR
from startup_script_utils import load_yaml
rirs = load_yaml("/opt/netbox/initializers/rirs.yml")
if rirs is None:
sys.exit()
for params in rirs:
rir, created = RIR.objects.get_or_create(**params)
if created:
print("🗺️ Created RIR", rir.name)

View File

@ -1,42 +0,0 @@
import sys
from ipam.models import RIR, Aggregate
from netaddr import IPNetwork
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
aggregates = load_yaml("/opt/netbox/initializers/aggregates.yml")
if aggregates is None:
sys.exit()
required_assocs = {"rir": (RIR, "name")}
optional_assocs = {
"tenant": (Tenant, "name"),
}
for params in aggregates:
custom_field_data = pop_custom_fields(params)
params["prefix"] = IPNetwork(params["prefix"])
for assoc, details in required_assocs.items():
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
aggregate, created = Aggregate.objects.get_or_create(**params)
if created:
set_custom_fields_values(aggregate, custom_field_data)
print("🗞️ Created Aggregate", aggregate.prefix)

View File

@ -1,29 +0,0 @@
import sys
from ipam.models import RouteTarget
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
route_targets = load_yaml("/opt/netbox/initializers/route_targets.yml")
if route_targets is None:
sys.exit()
optional_assocs = {"tenant": (Tenant, "name")}
for params in route_targets:
custom_field_data = pop_custom_fields(params)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
route_target, created = RouteTarget.objects.get_or_create(**params)
if created:
set_custom_fields_values(route_target, custom_field_data)
print("🎯 Created Route Target", route_target.name)

View File

@ -1,29 +0,0 @@
import sys
from ipam.models import VRF
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant
vrfs = load_yaml("/opt/netbox/initializers/vrfs.yml")
if vrfs is None:
sys.exit()
optional_assocs = {"tenant": (Tenant, "name")}
for params in vrfs:
custom_field_data = pop_custom_fields(params)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
vrf, created = VRF.objects.get_or_create(**params)
if created:
set_custom_fields_values(vrf, custom_field_data)
print("📦 Created VRF", vrf.name)

View File

@ -1,15 +0,0 @@
import sys
from ipam.models import Role
from startup_script_utils import load_yaml
roles = load_yaml("/opt/netbox/initializers/prefix_vlan_roles.yml")
if roles is None:
sys.exit()
for params in roles:
role, created = Role.objects.get_or_create(**params)
if created:
print("⛹️‍ Created Prefix/VLAN Role", role.name)

View File

@ -1,40 +0,0 @@
import sys
from django.contrib.contenttypes.models import ContentType
from ipam.models import VLANGroup
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
vlan_groups = load_yaml("/opt/netbox/initializers/vlan_groups.yml")
if vlan_groups is None:
sys.exit()
optional_assocs = {"scope": (None, "name")}
for params in vlan_groups:
custom_field_data = pop_custom_fields(params)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
# Get model from Contenttype
scope_type = params.pop("scope_type", None)
if not scope_type:
print(f"VLAN Group '{params['name']}': scope_type is missing from VLAN Group")
continue
app_label, model = str(scope_type).split(".")
ct = ContentType.objects.filter(app_label=app_label, model=model).first()
if not ct:
print(
f"VLAN Group '{params['name']}': ContentType for "
+ f"app_label = '{app_label}' and model = '{model}' not found"
)
continue
params["scope_id"] = ct.model_class().objects.get(**query).id
vlan_group, created = VLANGroup.objects.get_or_create(**params)
if created:
set_custom_fields_values(vlan_group, custom_field_data)
print("🏘️ Created VLAN Group", vlan_group.name)

View File

@ -1,36 +0,0 @@
import sys
from dcim.models import Site
from ipam.models import VLAN, Role, VLANGroup
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant, TenantGroup
vlans = load_yaml("/opt/netbox/initializers/vlans.yml")
if vlans is None:
sys.exit()
optional_assocs = {
"site": (Site, "name"),
"tenant": (Tenant, "name"),
"tenant_group": (TenantGroup, "name"),
"group": (VLANGroup, "name"),
"role": (Role, "name"),
}
for params in vlans:
custom_field_data = pop_custom_fields(params)
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
vlan, created = VLAN.objects.get_or_create(**params)
if created:
set_custom_fields_values(vlan, custom_field_data)
print("🏠 Created VLAN", vlan.name)

View File

@ -1,39 +0,0 @@
import sys
from dcim.models import Site
from ipam.models import VLAN, VRF, Prefix, Role
from netaddr import IPNetwork
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from tenancy.models import Tenant, TenantGroup
prefixes = load_yaml("/opt/netbox/initializers/prefixes.yml")
if prefixes is None:
sys.exit()
optional_assocs = {
"site": (Site, "name"),
"tenant": (Tenant, "name"),
"tenant_group": (TenantGroup, "name"),
"vlan": (VLAN, "name"),
"role": (Role, "name"),
"vrf": (VRF, "name"),
}
for params in prefixes:
custom_field_data = pop_custom_fields(params)
params["prefix"] = IPNetwork(params["prefix"])
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
prefix, created = Prefix.objects.get_or_create(**params)
if created:
set_custom_fields_values(prefix, custom_field_data)
print("📌 Created Prefix", prefix.prefix)

Some files were not shown because too many files have changed in this diff.